repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
Analyzing-the-Generalization-Capability-of-SGLD-Using-Properties-of-Gaussian-Channels | Analyzing-the-Generalization-Capability-of-SGLD-Using-Properties-of-Gaussian-Channels-main/code/models/cnn.py | import torch.nn as nn
import torch.nn.functional as F
class Network(nn.Module):
    """Small LeNet-style CNN: two conv+max-pool stages, then three FC layers."""
    def __init__(self, nchannels, nclasses):
        # nchannels: input image channels; nclasses: number of output logits.
        super(Network, self).__init__()
        self.conv1 = nn.Conv2d(nchannels, 32, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(32, 32, 5)
        # 32 * 5 * 5 is the flattened feature map after two (conv 5x5 ->
        # pool 2x2) stages, which implies a 32x32 spatial input
        # (32 -> 28 -> 14 -> 10 -> 5 per side).
        self.fc1 = nn.Linear(32 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, nclasses)
    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        # Flatten to (batch, 32*5*5) for the classifier head.
        x = x.view(-1, 32 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        # Raw logits; no softmax (pairs with a cross-entropy style loss).
        x = self.fc3(x)
return x | 680 | 29.954545 | 48 | py |
LiDAL | LiDAL-main/evaluate.py | import argparse
import numpy as np
import random
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from torchsparse import SparseTensor
import time
import utils.iou_sk as iou_sk
import utils.iou_nu as iou_nu
from dataset.sk_dataloader import SK_Dataloader
from dataset.nu_dataloader import NU_Dataloader
from network.spvcnn import SPVCNN
from network.minkunet import MinkUNet
def eval(rank, world_size, args):
    """Evaluate a trained checkpoint on the validation split.

    Loads the checkpoint selected by (dataset_name, model_name, label_unit,
    metric_name, r_id), runs inference over the validation set (DDP when
    world_size > 1), sums the confusion matrix across ranks and prints
    per-class IoU on rank 0.
    """
    ####################################### Traininig ###############################################
    # set random seed
    random.seed(1 + rank)
    np.random.seed(1 + rank)
    torch.manual_seed(7122)
    # Initialize DDP
    if world_size > 1:
        dist.init_process_group(backend='nccl', init_method='tcp://localhost:{}'.format(args.host_num),
                                world_size=world_size, rank=rank)
    # Set device
    if world_size > 1:
        torch.cuda.set_device(rank)
        pytorch_device = torch.device('cuda', rank)
    else:
        pytorch_device = torch.device('cuda:0')
    # Network: SK (SemanticKITTI) has 19 classes, NU (nuScenes) has 16.
    if 'SPVCNN' in args.model_name:
        if args.dataset_name == 'SK':
            model = SPVCNN(class_num=19)
        if args.dataset_name == 'NU':
            model = SPVCNN(class_num=16)
    elif 'Mink' in args.model_name:
        if args.dataset_name == 'SK':
            model = MinkUNet(class_num=19)
        if args.dataset_name == 'NU':
            model = MinkUNet(class_num=16)
    model.to(pytorch_device)
    if world_size > 1:
        model = \
            torch.nn.parallel.DistributedDataParallel(model,
                                                      device_ids=[rank],
                                                      output_device=rank)
    # Load training statics (path layout must mirror what train.py writes)
    if args.r_id == 0:
        # NOTE(review): train.py's main() saves the round-0 checkpoint under
        # 'check_points/<ds>/<model>/0r' (no 'fr' level) -- verify this path.
        directory = 'check_points/{}/{}/fr/0r'.format(args.dataset_name, args.model_name)
    elif args.metric_name == 'full':
        # NOTE(review): train.py saves 'full' under
        # 'check_points/<ds>/<model>/full' (no label_unit level) -- verify.
        directory = 'check_points/{}/{}/{}/full'.format(args.dataset_name, args.model_name, args.label_unit)
    else:
        directory = 'check_points/{}/{}/{}/{}/{}r'.format(args.dataset_name, args.model_name, args.label_unit, args.metric_name, args.r_id)
    PATH = directory + '/current.pt'
    # Remap tensors saved from GPU 0 onto this rank's device.
    map_location = {'cuda:%d' % 0: 'cuda:%d' % rank}
    checkpoint = torch.load(PATH, map_location=map_location)
    if world_size > 1:
        model.module.load_state_dict(checkpoint['model_state_dict'], strict=True)
    else:
        model.load_state_dict(checkpoint['model_state_dict'], strict=True)
    if rank == 0:
        print("Restored from: {}".format(PATH))
    if world_size > 1:
        dist.barrier()
    # Dataset
    if args.dataset_name == 'SK':
        sampler, val_data_loader = SK_Dataloader(gpu_num = world_size, gpu_rank = rank).val_data_loader()
    if args.dataset_name == 'NU':
        sampler, val_data_loader = NU_Dataloader(gpu_num = world_size, gpu_rank = rank).val_data_loader()
    start = time.time()
    # Evaluation process
    with torch.no_grad():
        model.eval()
        if args.dataset_name == 'SK':
            c_matrix = np.zeros((19, 19)).astype(np.int32)
        if args.dataset_name == 'NU':
            c_matrix = np.zeros((16, 16)).astype(np.int32)
        if rank == 0:
            print("*****************************Validation*************************************")
        for i, batch in enumerate(val_data_loader):
            # Load data
            coords_v_b = batch['coords_v_b'].cuda()
            feats_v_b = batch['feats_v_b'].cuda()
            labels_p_b = batch['labels_p_b']
            logits_v_b, _ = model(SparseTensor(feats_v_b, coords_v_b))
            # Project to original points: voxel logits -> per-point logits
            # via the voxel->point inverse indices built by the collate_fn.
            logits_v_b = logits_v_b.cpu()
            inverse_indices_b = batch["inverse_indices_b"]
            logits_p_b = logits_v_b[inverse_indices_b]
            if args.dataset_name == 'SK':
                c_matrix += iou_sk.confusion_matrix(logits_p_b.max(1)[1].numpy(), labels_p_b.numpy())
            if args.dataset_name == 'NU':
                c_matrix += iou_nu.confusion_matrix(logits_p_b.max(1)[1].numpy(), labels_p_b.numpy())
        if world_size > 1:
            dist.barrier()
        # Sum the per-rank confusion matrices before computing IoU.
        c_matrix = torch.from_numpy(c_matrix).cuda()
        if world_size > 1:
            dist.all_reduce(c_matrix, op=dist.ReduceOp.SUM)
        if rank == 0:
            if args.dataset_name == 'SK':
                iou_sk.evaluate(confusion=c_matrix.cpu().numpy())
            if args.dataset_name == 'NU':
                iou_nu.evaluate(confusion=c_matrix.cpu().numpy())
            end = time.time()
            print(end - start)
    if world_size > 1:
        dist.destroy_process_group()
def main(args):
    """Launch evaluation on every visible GPU (DDP when more than one)."""
    gpu_count = torch.cuda.device_count()
    if gpu_count <= 1:
        # Single-device path: run directly as rank 0.
        eval(0, gpu_count, args)
        return
    # Multi-GPU path: spawn one evaluation process per device.
    mp.spawn(eval, args=(gpu_count, args,), nprocs=gpu_count, join=True)
if __name__ == "__main__":
    # CLI entry point: parse the run configuration and require CUDA.
    parser = argparse.ArgumentParser(description = 'Trained model evaluation')
    parser.add_argument('--dataset_name', type = str, required = True,
                        help = 'name of the used dataset')
    parser.add_argument('--model_name', type = str, required = True,
                        help = 'name of the trained model to be loaded')
    parser.add_argument('--label_unit', type = str, required = True,
                        help = 'fr for frame-based and sv for supervoxel-based')
    parser.add_argument('--metric_name', type = str, required = True,
                        help = 'name of the active selection metric used for the trained model')
    parser.add_argument('--r_id', type = int, required = True,
                        help = 'current trained round')
    # TCP port used for the DDP rendezvous.
    # NOTE(review): the default is the int 7112 although type=str; argparse
    # does not convert defaults, so args.host_num is an int when not supplied
    # (harmless here since it is only interpolated via format()).
    parser.add_argument('--host_num', type = str, default = 7112)
    args = parser.parse_args()
    use_cuda = torch.cuda.is_available()
    print("use_cuda: {}".format(use_cuda))
    if use_cuda is False:
        raise ValueError("CUDA is not available!")
main(args) | 6,118 | 36.084848 | 139 | py |
LiDAL | LiDAL-main/train.py | import os
import argparse
import numpy as np
import random
import torch
import torch.optim as optim
import torch.distributed as dist
import torch.multiprocessing as mp
from torchsparse import SparseTensor
from dataset.sk_dataloader import SK_Dataloader
from dataset.nu_dataloader import NU_Dataloader
from network.spvcnn import SPVCNN
from network.minkunet import MinkUNet
def train(rank, world_size, MAX_ITER, directory, args):
    """Train (or resume) one active-learning round for MAX_ITER iterations.

    Rank 0 writes ``directory/current.pt`` every 500 iterations. If that
    checkpoint already exists, training resumes from it; otherwise, for
    r_id > 0, the previous round's weights are used as initialization.
    """
    ####################################### Traininig ###############################################
    # set random seed
    random.seed(1 + rank)
    np.random.seed(1 + rank)
    torch.manual_seed(7122)
    # Initialize DDP
    if world_size > 1:
        dist.init_process_group(backend='nccl', init_method='tcp://localhost:{}'.format(args.host_num),
                                world_size=world_size, rank=rank)
    # Set device
    if world_size > 1:
        torch.cuda.set_device(rank)
        pytorch_device = torch.device('cuda', rank)
    else:
        pytorch_device = torch.device('cuda:0')
    # Network: SK (SemanticKITTI) has 19 classes, NU (nuScenes) has 16.
    if 'SPVCNN' in args.model_name:
        if args.dataset_name == 'SK':
            model = SPVCNN(class_num=19)
        if args.dataset_name == 'NU':
            model = SPVCNN(class_num=16)
    if 'Mink' in args.model_name:
        if args.dataset_name == 'SK':
            model = MinkUNet(class_num=19)
        if args.dataset_name == 'NU':
            model = MinkUNet(class_num=16)
    model.to(pytorch_device)
    if world_size > 1:
        model = \
            torch.nn.parallel.DistributedDataParallel(model,
                                                      device_ids=[rank],
                                                      output_device=rank)
    # Optimizer
    optimizer = optim.Adam(model.parameters())
    # Load training statics
    curr_iter = 0
    ep_id = 0
    PATH = directory + '/current.pt'
    # Remap tensors saved from GPU 0 onto this rank's device.
    map_location = {'cuda:%d' % 0: 'cuda:%d' % rank}
    if os.path.exists(PATH):
        # Resume an interrupted run of the current round.
        checkpoint = torch.load(PATH, map_location=map_location)
        if world_size > 1:
            model.module.load_state_dict(checkpoint['model_state_dict'], strict=True)
        else:
            model.load_state_dict(checkpoint['model_state_dict'], strict=True)
        curr_iter = checkpoint['iteration']
        ep_id = checkpoint['ep_id']
        if rank == 0:
            print("Restored from: {}".format(PATH))
    elif args.r_id > 0:
        # Load weights of last trained model
        if args.r_id == 1:
            PATH = 'check_points/{}/{}/0r/current.pt'.format(args.dataset_name, args.model_name)
        else:
            PATH = 'check_points/{}/{}/{}/{}/{}r/current.pt'.format(args.dataset_name, args.model_name, args.label_unit, args.metric_name, args.r_id - 1)
        checkpoint = torch.load(PATH, map_location=map_location)
        if world_size > 1:
            model.module.load_state_dict(checkpoint['model_state_dict'], strict=True)
        else:
            model.load_state_dict(checkpoint['model_state_dict'], strict=True)
        if rank == 0:
            print("Restored from: {}".format(PATH))
    if world_size > 1:
        dist.barrier()
    # Dataset: pick the loader matching this round's labeling setting.
    if args.r_id == 0:
        if args.dataset_name == 'SK':
            sampler, train_data_loader = SK_Dataloader(gpu_num = world_size, gpu_rank = rank).train_data_loader_0r()
        if args.dataset_name == 'NU':
            sampler, train_data_loader = NU_Dataloader(gpu_num = world_size, gpu_rank = rank).train_data_loader_0r()
    elif args.metric_name == 'full':
        if args.dataset_name == 'SK':
            sampler, train_data_loader = SK_Dataloader(gpu_num = world_size, gpu_rank = rank).train_data_loader_full()
        if args.dataset_name == 'NU':
            sampler, train_data_loader = NU_Dataloader(gpu_num = world_size, gpu_rank = rank).train_data_loader_full()
    elif args.label_unit == 'fr':
        if args.dataset_name == 'SK':
            sampler, train_data_loader = SK_Dataloader(gpu_num = world_size, gpu_rank = rank).train_data_loader_fr(model_name=args.model_name, metric_name=args.metric_name, r_id=args.r_id)
        if args.dataset_name == 'NU':
            sampler, train_data_loader = NU_Dataloader(gpu_num = world_size, gpu_rank = rank).train_data_loader_fr(model_name=args.model_name, metric_name=args.metric_name, r_id=args.r_id)
    elif args.label_unit == 'sv':
        if args.dataset_name == 'SK':
            sampler, train_data_loader = SK_Dataloader(gpu_num = world_size, gpu_rank = rank).train_data_loader_sv(model_name=args.model_name, metric_name=args.metric_name, r_id=args.r_id)
        if args.dataset_name == 'NU':
            sampler, train_data_loader = NU_Dataloader(gpu_num = world_size, gpu_rank = rank).train_data_loader_sv(model_name=args.model_name, metric_name=args.metric_name, r_id=args.r_id)
    # Training process
    is_training = True
    while(is_training):
        model.train()
        if sampler:
            # Re-shuffle the DistributedSampler each epoch.
            sampler.set_epoch(ep_id)
        for i, batch in enumerate(train_data_loader):
            if rank == 0:
                print("Iteration: {}".format(curr_iter))
            # Load data
            coords_v_b = batch['coords_v_b'].cuda()
            feats_v_b = batch['feats_v_b'].cuda()
            labels_v_b = batch['labels_v_b'].cuda()
            optimizer.zero_grad()
            torch.cuda.synchronize()
            logits_v_b, _ = model(SparseTensor(feats_v_b, coords_v_b))
            # 255 marks unlabeled/ignored points (see the dataset label maps).
            loss = torch.nn.functional.cross_entropy(logits_v_b, labels_v_b, ignore_index=255, reduction='mean')
            loss.backward()
            # Update
            optimizer.step()
            if curr_iter >= MAX_ITER:
                is_training = False
                break
            curr_iter += 1
            if rank == 0:
                print('loss: {}'.format(loss.item()))
                # Periodic checkpoint; ep_id is stored so the sampler epoch
                # resumes correctly after a restart.
                if curr_iter % 500 == 0:
                    torch.save({
                        'model_state_dict': model.module.state_dict() if world_size > 1 else model.state_dict(),
                        'iteration': curr_iter,
                        'ep_id': ep_id,
                    }, directory + '/current.pt')
            torch.cuda.synchronize()
        ep_id += 1
    if world_size > 1:
        dist.destroy_process_group()
def main(args):
    """Prepare the checkpoint directory for this run and launch training.

    Directory layout (read back by train() when resuming / warm-starting):
      r_id == 0             -> check_points/<ds>/<model>/0r
      metric_name == 'full' -> check_points/<ds>/<model>/full
      otherwise             -> check_points/<ds>/<model>/<unit>/<metric>/<r>r
    """
    # Training statics
    MAX_ITER = 20000
    world_size = torch.cuda.device_count()
    # The original code always created the <label_unit> level as well; keep
    # that side effect in case downstream tooling expects the directory.
    os.makedirs('check_points/{}/{}/{}'.format(args.dataset_name, args.model_name, args.label_unit), exist_ok=True)
    if args.r_id == 0:
        directory = 'check_points/{}/{}/0r'.format(args.dataset_name, args.model_name)
    elif args.metric_name == 'full':
        # NOTE(review): evaluate.py looks for the 'full' checkpoint under
        # check_points/<ds>/<model>/<label_unit>/full -- verify the two agree.
        directory = 'check_points/{}/{}/full'.format(args.dataset_name, args.model_name)
    else:
        directory = 'check_points/{}/{}/{}/{}/{}r'.format(args.dataset_name, args.model_name, args.label_unit, args.metric_name, args.r_id)
    # makedirs creates all missing parents in one call; exist_ok avoids the
    # check-then-create race of the original per-level exists()/mkdir chain.
    os.makedirs(directory, exist_ok=True)
    if world_size > 1:
        mp.spawn(train,
                args=(world_size, MAX_ITER, directory, args,),
                nprocs=world_size,
                join=True)
    else:
        train(0, world_size, MAX_ITER, directory, args)
if __name__ == "__main__":
    # CLI entry point: parse the run configuration and require CUDA.
    parser = argparse.ArgumentParser(description = 'Train')
    parser.add_argument('--dataset_name', type = str, required = True,
                        help = 'name of the used dataset, [SK: semantic-kitti, NU: nuScenes]')
    parser.add_argument('--model_name', type = str, required = True,
                        help = 'name of the current model to be trained, [SPVCNN, Mink]')
    parser.add_argument('--label_unit', type = str, required = True,
                        help = '[fr: frame-based, sv: supervoxel-based]')
    parser.add_argument('--metric_name', type = str, required = True,
                        help = 'name of the active selection metric used for training')
    parser.add_argument('--r_id', type = int, required = True,
                        help = 'current training r_id, -1 for fully-supervised setting.')
    # TCP port used for the DDP rendezvous.
    # NOTE(review): the default is the int 7112 although type=str; argparse
    # does not convert defaults, so args.host_num is an int when not supplied
    # (harmless here since it is only interpolated via format()).
    parser.add_argument('--host_num', type = str, default = 7112)
    args = parser.parse_args()
    use_cuda = torch.cuda.is_available()
    print("use_cuda: {}".format(use_cuda))
    if use_cuda is False:
        raise ValueError("CUDA is not available!")
main(args) | 9,130 | 39.22467 | 200 | py |
LiDAL | LiDAL-main/dataset/nu_dataset.py | import os
import pickle
import math
import numpy as np
import torch
from torch.utils import data
####################################### Meta ###############################################
# labels:
# 0: 'noise'
# 1: 'animal'
# 2: 'human.pedestrian.adult'
# 3: 'human.pedestrian.child'
# 4: 'human.pedestrian.construction_worker'
# 5: 'human.pedestrian.personal_mobility'
# 6: 'human.pedestrian.police_officer'
# 7: 'human.pedestrian.stroller'
# 8: 'human.pedestrian.wheelchair'
# 9: 'movable_object.barrier'
# 10: 'movable_object.debris'
# 11: 'movable_object.pushable_pullable'
# 12: 'movable_object.trafficcone'
# 13: 'static_object.bicycle_rack'
# 14: 'vehicle.bicycle'
# 15: 'vehicle.bus.bendy'
# 16: 'vehicle.bus.rigid'
# 17: 'vehicle.car'
# 18: 'vehicle.construction'
# 19: 'vehicle.emergency.ambulance'
# 20: 'vehicle.emergency.police'
# 21: 'vehicle.motorcycle'
# 22: 'vehicle.trailer'
# 23: 'vehicle.truck'
# 24: 'flat.driveable_surface'
# 25: 'flat.other'
# 26: 'flat.sidewalk'
# 27: 'flat.terrain'
# 28: 'static.manmade'
# 29: 'static.other'
# 30: 'static.vegetation'
# 31: 'vehicle.ego'
# labels_16:
# 255: 'noise'
# 0: 'barrier'
# 1: 'bicycle'
# 2: 'bus'
# 3: 'car'
# 4: 'construction_vehicle'
# 5: 'motorcycle'
# 6: 'pedestrian'
# 7: 'traffic_cone'
# 8: 'trailer'
# 9: 'truck'
# 10: 'driveable_surface'
# 11: 'other_flat'
# 12: 'sidewalk'
# 13: 'terrain'
# 14: 'manmade'
# 15: 'vegetation'
# Raw nuScenes label id (0-31) -> 16 training classes; ids mapped to 255
# are ignored (train.py uses cross_entropy with ignore_index=255).
learning_map = {
    1: 255,
    5: 255,
    7: 255,
    8: 255,
    10: 255,
    11: 255,
    13: 255,
    19: 255,
    20: 255,
    0: 255,
    29: 255,
    31: 255,
    9: 0,
    14: 1,
    15: 2,
    16: 2,
    17: 3,
    18: 4,
    21: 5,
    2: 6,
    3: 6,
    4: 6,
    6: 6,
    12: 7,
    22: 8,
    23: 9,
    24: 10,
    25: 11,
    26: 12,
    27: 13,
    28: 14,
    30: 15
}
class NU_Dataset(data.Dataset):
    """nuScenes point-cloud dataset with on-the-fly voxelization.

    ``mode`` selects what __getitem__/collate_fn return:
      'train*' : voxel coords/feats/labels; when the mode string contains
                 'train_sv' only annotated supervoxels keep their labels, and
                 with 'pseudo' supervoxels flagged 2 take saved predictions;
      'val'    : voxels plus per-point labels and voxel->point indices;
      'score'  : voxels plus voxel->point indices and the frame identifier.
    """
    def __init__(self, mode, lidar_files, label_files = None, pseudo_files = None, sv_flag_files = None, sv_info_files = None, seq_frame = None, scale = 20, full_scale = [8192, 8192, 8192]):
        # NOTE(review): full_scale is a shared mutable default argument; it is
        # never mutated here, but worth keeping in mind.
        self.mode = mode
        self.lidar_files = lidar_files
        self.label_files = label_files
        self.pseudo_files = pseudo_files
        self.sv_flag_files = sv_flag_files
        self.sv_info_files = sv_info_files
        self.seq_frame = seq_frame
        self.scale = scale
        self.full_scale = full_scale
        # Dense lookup table: raw label id -> 16-class id; any id not present
        # in learning_map falls through to 255 (ignored).
        self.label_map = np.ones(100, dtype=np.int64) * 255
        for key in learning_map:
            self.label_map[key] = learning_map[key]
    def __len__(self):
        'Denotes the total number of samples'
        return len(self.lidar_files)
    def __getitem__(self, idx):
        # Load point data: nuScenes .bin rows have 5 float32 columns; only
        # x, y, z and the 4th column (intensity) are used.
        raw_data = np.fromfile(self.lidar_files[idx], dtype=np.float32).reshape(-1, 5)
        raw_data = raw_data[:, :4]
        feats_p = np.zeros_like(raw_data)
        coords_p = raw_data[:, :3]
        feats_p[:, 3] = raw_data[:, 3]
        if not self.mode == 'score':
            # Load annotated labels
            labels_anno_p = np.fromfile(self.label_files[idx], dtype=np.uint8).reshape(-1)
            # Label mapping
            labels_anno_p = self.label_map[labels_anno_p].astype(np.int64)
            labels_p = labels_anno_p
            if 'pseudo' in self.mode:
                # Load pseudo labels
                labels_pseudo_p = np.load(self.pseudo_files[idx])
                assert labels_pseudo_p.shape[0] == labels_anno_p.shape[0]
            if 'train_sv' in self.mode:
                # Load sv data: sv_flag holds one entry per supervoxel
                # (1 = annotated, 2 = pseudo-labeled, others ignored);
                # sv2point maps a supervoxel id to its point indices.
                sv_flag = np.load(self.sv_flag_files[idx])
                with open(self.sv_info_files[idx], 'rb') as f:
                    _, sv2point = pickle.load(f)
                # Label mask (annotated): keep labels only inside annotated
                # supervoxels; everything else becomes ignore (255).
                label_anno_mask = np.ones_like(labels_anno_p, dtype=bool)
                for sv_id_frame in np.where(sv_flag == 1)[0]:
                    p_ids = sv2point[sv_id_frame]
                    label_anno_mask[p_ids] = False
                labels_p[label_anno_mask] = 255
                if 'pseudo' in self.mode:
                    # Label mask (pseudo): supervoxels flagged 2 take their
                    # labels from the saved predictions instead.
                    label_pseudo_mask = np.zeros_like(labels_pseudo_p, dtype=bool)
                    for sv_id_frame in np.where(sv_flag == 2)[0]:
                        p_ids = sv2point[sv_id_frame]
                        label_pseudo_mask[p_ids] = True
                    labels_p[label_pseudo_mask] = labels_pseudo_p[label_pseudo_mask]
        # Affine linear transformation: small random jitter, a random sign
        # flip on the x axis and a random rotation around z. Note this runs
        # in every mode, including 'val' and 'score'.
        trans_m = np.eye(3) + np.random.randn(3, 3) * 0.1
        trans_m[0][0] *= np.random.randint(0, 2) * 2 - 1
        theta = np.random.rand() * 2 * math.pi
        trans_m = np.matmul(trans_m, [[math.cos(theta), math.sin(theta), 0], [-math.sin(theta), math.cos(theta), 0], [0, 0, 1]])
        coords_p = np.matmul(coords_p, trans_m)
        feats_p[:, :3] = coords_p
        coords_p *= self.scale
        # Random translation that keeps the cloud inside the full_scale box.
        coords_min = coords_p.min(0)
        coords_max = coords_p.max(0)
        offset = -coords_min + np.clip(self.full_scale - coords_max + coords_min - 0.001, 0, None) * np.random.rand(3) + np.clip(self.full_scale - coords_max + coords_min + 0.001, None, 0) * np.random.rand(3)
        coords_p += offset
        # Clip valid positions
        valid_idxs = (coords_p.min(1) >= 0) * (coords_p.max(1) < self.full_scale[0])
        assert sum(valid_idxs) == len(valid_idxs), 'input voxels are not valid'
        # Voxelization
        coords_v = coords_p.astype(int)
        # Remove duplicate items: each voxel keeps the first point that fell
        # into it; inverse_idxs maps every point back to its voxel row.
        _, unique_idxs, inverse_idxs = np.unique(coords_v, axis=0, return_index=True, return_inverse=True)
        coords_v = coords_v[unique_idxs]
        feats_v = feats_p[unique_idxs]
        if 'train' in self.mode:
            labels_v = labels_p[unique_idxs]
        if 'train' in self.mode:
            return {'coords_v': coords_v, 'feats_v': feats_v, 'labels_v': labels_v}
        elif self.mode == 'val':
            return {'coords_v': coords_v, 'feats_v': feats_v, 'labels_p': labels_p,
                    'inverse_idxs': inverse_idxs}
        elif self.mode == 'score':
            return {'coords_v': coords_v, 'feats_v': feats_v, 'inverse_idxs': inverse_idxs,
                    'seq_frame': self.seq_frame[idx]}
    def collate_fn(self, inputs):
        """Merge per-frame samples into one batch of concatenated tensors."""
        # Data in batch
        coords_v_b = [] # N X 4(x,y,z,B)
        feats_v_b = [] # N X 4(x,y,z,sig)
        if 'train' in self.mode:
            labels_v_b = [] # N
        if self.mode == 'val':
            labels_p_b = []
        # From voxels to points
        if not 'train' in self.mode:
            # Seeded with -1 so the first per-frame offset below is 0.
            inverse_indices_b = [[np.array(-1)]]
        # Put into containers
        for idx, sample in enumerate(inputs):
            coords_v = torch.from_numpy(sample['coords_v']).int()
            # Append the batch index as a 4th coordinate column.
            coords_v_b += [torch.cat([coords_v, torch.IntTensor(coords_v.shape[0], 1).fill_(idx)], 1)]
            feats_v_b += [torch.from_numpy(sample['feats_v']).float()]
            if 'train' in self.mode:
                labels_v_b += [torch.from_numpy(sample['labels_v']).long()]
            if self.mode == 'val':
                labels_p_b += [torch.from_numpy(sample['labels_p']).long()]
            if not 'train' in self.mode:
                # Shift this frame's voxel ids past the previous frames'.
                inverse_offset = max(inverse_indices_b[-1]) + 1
                inverse_idxs = torch.from_numpy(sample['inverse_idxs'])
                inverse_indices_b += [inverse_idxs + inverse_offset]
        # Concatenation
        coords_v_b = torch.cat(coords_v_b, 0)
        feats_v_b = torch.cat(feats_v_b, 0)
        if 'train' in self.mode:
            labels_v_b = torch.cat(labels_v_b, 0)
        if self.mode == 'val':
            labels_p_b = torch.cat(labels_p_b, 0)
        if not 'train' in self.mode:
            # Drop the -1 seed entry before concatenating.
            inverse_indices_b = torch.cat(inverse_indices_b[1:], 0).long()
        if 'train' in self.mode:
            return {'coords_v_b': coords_v_b, 'feats_v_b': feats_v_b, 'labels_v_b': labels_v_b}
        elif self.mode == 'val':
            return {'coords_v_b': coords_v_b, 'feats_v_b': feats_v_b, 'labels_p_b': labels_p_b,
                    'inverse_indices_b': inverse_indices_b}
        elif self.mode == 'score':
            return {'coords_v_b': coords_v_b, 'feats_v_b': feats_v_b, 'inverse_indices_b': inverse_indices_b,
                    'seq_frame': inputs[0]['seq_frame']}
| 8,431 | 33 | 208 | py |
LiDAL | LiDAL-main/dataset/sk_dataloader.py | import os
import numpy as np
import torch
import glob
import pickle
import tqdm
import torch.utils.data
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
from dataset.sk_dataset import SK_Dataset
####################################### Meta ###############################################
# SemanticKITTI sequence ids used for training and validation.
train_split = ['00', '01', '02', '03', '04', '05', '06', '07', '09', '10']
val_split = ['08']
class SK_Dataloader():
def __init__(self, gpu_num = None, gpu_rank = None, scale = 20, full_scale = [8192, 8192, 8192], batch_size = 5, num_workers = 4):
self.gpu_num = gpu_num
self.gpu_rank = gpu_rank
self.scale = scale
self.full_scale = full_scale
self.batch_size = batch_size
self.num_workers = num_workers
if self.gpu_rank == 0:
if not os.path.exists('Processing_files/SK'):
os.mkdir('Processing_files/SK')
if self.gpu_num > 1:
dist.barrier()
def get_data_loader(self, dataset):
if self.gpu_num > 1:
sampler = DistributedSampler(dataset, num_replicas=self.gpu_num, rank=self.gpu_rank)
else:
sampler = None
batch_size = self.batch_size
if dataset.mode == 'val':
batch_size = 2 * self.batch_size
return sampler, torch.utils.data.DataLoader(
dataset = dataset,
batch_size = batch_size,
collate_fn = dataset.collate_fn,
sampler = sampler,
num_workers = self.num_workers,
shuffle = (sampler is None),
pin_memory = True
)
###################################################################################################################################
####################################################### Frame Level ###############################################################
###################################################################################################################################
# Fully supervised
def train_data_loader_full(self):
mode = 'train'
# Load samples
lidar_files = []
for i_folder in train_split:
lidar_files += sorted(glob.glob('Semantic_kitti/dataset/sequences/{}'.format(i_folder) + '/velodyne/*.bin'))
dataset = SK_Dataset(mode=mode, lidar_files=lidar_files)
sampler, dataloader = self.get_data_loader(dataset)
return sampler, dataloader
# Intial training
def train_data_loader_0r(self):
mode = 'train_frame'
if self.gpu_rank == 0:
if not os.path.exists('Processing_files/SK/frame_flag'):
os.mkdir('Processing_files/SK/frame_flag')
os.mkdir('Processing_files/SK/sv_flag')
os.mkdir('Processing_files/SK/sv_flag/KMeans')
os.mkdir('Processing_files/SK/sv_flag/VCCS')
if not os.path.exists('Processing_files/SK/frame_flag/0r'):
os.mkdir('Processing_files/SK/frame_flag/0r')
os.mkdir('Processing_files/SK/sv_flag/KMeans/0r')
os.mkdir('Processing_files/SK/sv_flag/VCCS/0r')
# Randomly select 1% fully labeled frames
for i_folder in train_split:
frames = sorted(glob.glob('Semantic_kitti/dataset/sequences/{}'.format(i_folder) + '/velodyne/*.bin'))
frame_flag = np.zeros(len(frames), dtype=bool)
selected_ids = np.random.choice(np.arange(len(frames)), int(np.round(0.01 * len(frames))))
frame_flag[selected_ids] = True
# Save frame_flag
np.save('Processing_files/SK/frame_flag/0r/{}.npy'.format(i_folder), frame_flag)
# Save sv_flag
# KMeans
os.makedirs('Processing_files/SK/sv_flag/KMeans/0r/{}'.format(i_folder))
for idx, fr in enumerate(frames):
fr_elements = fr.split('/')
with open('Processing_files/SK/super_voxel/KMeans/{}'.format(i_folder) + '/' + fr_elements[-1][:-3] + 'pickle', 'rb') as fjson:
sv_id, _ = pickle.load(fjson)
if idx in selected_ids:
sv_flag = np.ones(len(sv_id), dtype=bool)
else:
sv_flag = np.zeros(len(sv_id), dtype=bool)
np.save('Processing_files/SK/sv_flag/KMeans/0r/{}'.format(i_folder) + '/' + fr_elements[-1][:-3] + 'npy', sv_flag)
# VCCS
os.makedirs('Processing_files/SK/sv_flag/VCCS/0r/{}'.format(i_folder))
for idx, fr in enumerate(frames):
fr_elements = fr.split('/')
with open('Processing_files/SK/super_voxel/VCCS/{}'.format(i_folder) + '/' + fr_elements[-1][:-3] + 'pickle', 'rb') as fjson:
sv_id, _ = pickle.load(fjson)
if idx in selected_ids:
sv_flag = np.ones(len(sv_id), dtype=bool)
else:
sv_flag = np.zeros(len(sv_id), dtype=bool)
np.save('Processing_files/SK/sv_flag/VCCS/0r/{}'.format(i_folder) + '/' + fr_elements[-1][:-3] + 'npy', sv_flag)
if self.gpu_num > 1:
dist.barrier()
# Load samples
lidar_files = []
for i_folder in train_split:
frames = sorted(glob.glob('Semantic_kitti/dataset/sequences/{}'.format(i_folder) + '/velodyne/*.bin'))
frame_flag = np.load('Processing_files/SK/frame_flag/0r/{}.npy'.format(i_folder))
selected_frames = list(np.array(frames)[frame_flag])
lidar_files += selected_frames
print('Train_0r samples:', len(lidar_files))
dataset = SK_Dataset(mode=mode, lidar_files=lidar_files)
sampler, dataloader = self.get_data_loader(dataset)
return sampler, dataloader
# Data loader for round r_id training (frame level)
def train_data_loader_fr(self, model_name, metric_name, r_id):
mode = 'train'
# Load samples
lidar_files = []
for i_folder in train_split:
lidar_files += sorted(glob.glob('Semantic_kitti/dataset/sequences/{}'.format(i_folder) + '/velodyne/*.bin'))
frame_flag_all = np.array([])
# Load frames of current r_id
for i_folder in train_split:
if metric_name == 'RAND':
frame_flag = np.load('Processing_files/SK/frame_flag/RAND/{}r/{}.npy'.format(r_id, i_folder))
else:
frame_flag = np.load('Processing_files/SK/frame_flag/{}/{}/{}r/{}.npy'.format(model_name, metric_name, r_id, i_folder))
assert r_id > 0
frame_flag_all = np.append(frame_flag_all, frame_flag)
frame_flag_all = frame_flag_all.astype(bool)
lidar_files = list(np.array(lidar_files)[frame_flag_all])
print('Train_{}r samples:'.format(r_id), len(lidar_files))
dataset = SK_Dataset(mode=mode, lidar_files=lidar_files)
sampler, dataloader = self.get_data_loader(dataset)
return sampler, dataloader
# Data loader for uncertainty scoring
# Since there are no connections between sequences, we process each sequence separately
def score_data_loader(self, inf_reps : int):
""" seq id in train_split
"""
mode = 'score'
# Load samples
lidar_files = []
for i_folder in train_split:
lidar_files += sorted(glob.glob('Semantic_kitti/dataset/sequences/{}'.format(i_folder) + '/velodyne/*.bin'))
print('Score samples:', len(lidar_files))
if self.gpu_num > 1:
split_num = int(np.ceil(len(lidar_files) / self.gpu_num))
lidar_files = lidar_files[self.gpu_rank * split_num : (self.gpu_rank + 1) * split_num]
dataset = SK_Dataset(mode=mode, lidar_files=np.repeat(lidar_files, inf_reps))
return torch.utils.data.DataLoader(
dataset = dataset,
batch_size = inf_reps,
collate_fn = dataset.collate_fn,
num_workers = self.num_workers,
shuffle = False,
drop_last = False,
pin_memory = True
)
# Data loader for validation
def val_data_loader(self):
mode = 'val'
# Load samples
lidar_files = []
for i_folder in val_split:
lidar_files += sorted(glob.glob('Semantic_kitti/dataset/sequences/{}'.format(i_folder) + '/velodyne/*.bin'))
print('Validation samples:', len(lidar_files))
dataset = SK_Dataset(mode=mode, lidar_files=lidar_files)
sampler, dataloader = self.get_data_loader(dataset)
return sampler, dataloader
#########################################################################################################################################
####################################################### Super Voxel Level ###############################################################
#########################################################################################################################################
# Data loader for round r_id training (sv level)
    def train_data_loader_sv(self, model_name, metric_name, r_id):
        """Loader over frames that contain at least one labeled supervoxel.

        ReDAL uses the VCCS supervoxel partition; every other metric uses the
        KMeans one. Metrics whose name contains 'pseudo' additionally load
        the previous round's saved predictions as pseudo labels.
        """
        assert r_id > 0
        if 'pseudo' in metric_name:
            mode = 'train_sv_pseudo'
        else:
            mode = 'train_sv'
        # Load samples
        lidar_files = []
        sv_flag_files = []
        sv_info_files = []
        pseudo_files = None
        if 'pseudo' in metric_name:
            pseudo_files =[]
        # Load frames with labeled super voxels of current r_id
        for i_folder in train_split:
            frames = sorted(glob.glob('Semantic_kitti/dataset/sequences/{}'.format(i_folder) + '/velodyne/*.bin'))
            # Supervoxel partition info: VCCS for ReDAL, KMeans otherwise.
            if metric_name == 'ReDAL':
                sv_infos = sorted(glob.glob('Processing_files/SK/super_voxel/VCCS/{}/*.pickle'.format(i_folder)))
            else:
                sv_infos = sorted(glob.glob('Processing_files/SK/super_voxel/KMeans/{}/*.pickle'.format(i_folder)))
            if metric_name == 'RAND':
                sv_flags = sorted(glob.glob('Processing_files/SK/sv_flag/KMeans/RAND/{}r/{}/*.npy'.format(r_id, i_folder)))
            elif metric_name == 'ReDAL':
                sv_flags = sorted(glob.glob('Processing_files/SK/sv_flag/VCCS/{}/{}/{}r/{}/*.npy'.format(model_name, metric_name, r_id, i_folder)))
            else:
                sv_flags = sorted(glob.glob('Processing_files/SK/sv_flag/KMeans/{}/{}/{}r/{}/*.npy'.format(model_name, metric_name, r_id, i_folder)))
            assert len(sv_flags) == len(sv_infos)
            assert len(frames) == len(sv_flags)
            if 'pseudo' in metric_name:
                # Pseudo labels come from the previous round's predictions;
                # round 1 falls back to the initial frame-based model.
                if r_id == 1:
                    pseudos = sorted(glob.glob('Processing_files/SK/pred/{}/fr/0r/{}/*.npy'.format(model_name, i_folder)))
                else:
                    pseudos = sorted(glob.glob('Processing_files/SK/pred/{}/sv/{}/{}r/{}/*.npy'.format(model_name, metric_name, r_id - 1, i_folder)))
                assert len(pseudos) == len(sv_flags)
            # Labeled frames for training: keep only frames whose sv_flag
            # array has at least one non-zero (labeled) supervoxel.
            frame_flag = np.zeros_like(frames, dtype=bool)
            for idx, sv_flag in enumerate(sv_flags):
                flag = np.load(sv_flag)
                if flag.sum() != 0:
                    frame_flag[idx] = True
            labeled_frames = list(np.array(frames)[frame_flag])
            lidar_files += labeled_frames
            sv_flag_files += list(np.array(sv_flags)[frame_flag])
            sv_info_files += list(np.array(sv_infos)[frame_flag])
            if 'pseudo' in metric_name:
                pseudo_files += list(np.array(pseudos)[frame_flag])
        print('Train_{}r samples:'.format(r_id), len(lidar_files))
        dataset = SK_Dataset(mode=mode, lidar_files=lidar_files, pseudo_files=pseudo_files, sv_flag_files=sv_flag_files, sv_info_files=sv_info_files)
        sampler, dataloader = self.get_data_loader(dataset)
return sampler, dataloader | 12,755 | 41.805369 | 157 | py |
LiDAL | LiDAL-main/dataset/sk_dataset.py | import os
import pickle
import math
import numpy as np
import torch
from torch.utils import data
####################################### Meta ###############################################
# Raw SemanticKITTI label id -> human-readable name (includes moving-* ids).
label_name_mapping = {
    0: 'unlabeled',
    1: 'outlier',
    10: 'car',
    11: 'bicycle',
    13: 'bus',
    15: 'motorcycle',
    16: 'on-rails',
    18: 'truck',
    20: 'other-vehicle',
    30: 'person',
    31: 'bicyclist',
    32: 'motorcyclist',
    40: 'road',
    44: 'parking',
    48: 'sidewalk',
    49: 'other-ground',
    50: 'building',
    51: 'fence',
    52: 'other-structure',
    60: 'lane-marking',
    70: 'vegetation',
    71: 'trunk',
    72: 'terrain',
    80: 'pole',
    81: 'traffic-sign',
    99: 'other-object',
    252: 'moving-car',
    253: 'moving-bicyclist',
    254: 'moving-person',
    255: 'moving-motorcyclist',
    256: 'moving-on-rails',
    257: 'moving-bus',
    258: 'moving-truck',
    259: 'moving-other-vehicle'
}
# The 19 classes kept for training; SK_Dataset folds moving-* ids into their
# static counterparts and maps everything else to ignore (255).
kept_labels = [
    'road', 'sidewalk', 'parking', 'other-ground', 'building', 'car', 'truck',
    'bicycle', 'motorcycle', 'other-vehicle', 'vegetation', 'trunk', 'terrain',
    'person', 'bicyclist', 'motorcyclist', 'fence', 'pole', 'traffic-sign'
]
class SK_Dataset(data.Dataset):
    def __init__(self, mode, lidar_files, pseudo_files = None, sv_flag_files = None, sv_info_files = None, scale = 20, full_scale = [8192, 8192, 8192]):
        """SemanticKITTI dataset over the given LiDAR frame files.

        mode controls what __getitem__ returns ('train*', 'val', 'score');
        the sv_*/pseudo file lists are only needed for supervoxel /
        pseudo-label training modes.
        NOTE(review): full_scale is a shared mutable default argument; it is
        never mutated here, but worth keeping in mind.
        """
        self.mode = mode
        self.lidar_files = lidar_files
        self.pseudo_files = pseudo_files
        self.sv_flag_files = sv_flag_files
        self.sv_info_files = sv_info_files
        self.scale = scale
        self.full_scale = full_scale
        # Construct label mapping (raw id -> 19-class id, 255 = ignore) and
        # cache it on disk; subsequent instances just reload the .npy file.
        if os.path.exists('Processing_files/SK/label_map.npy'):
            self.label_map = np.load('Processing_files/SK/label_map.npy')
        else:
            reverse_label_name_mapping = {}
            # Note: float array (np.zeros default); consumers cast the mapped
            # labels with astype(np.int64).
            label_map = np.zeros(260)
            cnt = 0
            # Class ids are assigned in the insertion order of
            # label_name_mapping, so ids < 250 get consecutive class numbers
            # before the moving-* ids (> 250) are folded onto them.
            for label_id in label_name_mapping:
                if label_id > 250:
                    if label_name_mapping[label_id].replace('moving-',
                                                            '') in kept_labels:
                        label_map[label_id] = reverse_label_name_mapping[
                            label_name_mapping[label_id].replace('moving-', '')]
                    else:
                        label_map[label_id] = 255
                elif label_id == 0:
                    label_map[label_id] = 255
                else:
                    if label_name_mapping[label_id] in kept_labels:
                        label_map[label_id] = cnt
                        reverse_label_name_mapping[
                            label_name_mapping[label_id]] = cnt
                        cnt += 1
                    else:
                        label_map[label_id] = 255
            np.save("Processing_files/SK/label_map.npy", label_map)
            self.label_map = label_map
def __len__(self):
'Denotes the total number of samples'
return len(self.lidar_files)
def __getitem__(self, idx):
# Load point data
raw_data = np.fromfile(self.lidar_files[idx], dtype=np.float32).reshape(-1, 4)
feats_p = np.zeros_like(raw_data)
coords_p = raw_data[:, :3]
feats_p[:, 3] = raw_data[:, 3]
if not self.mode == 'score':
# Load annotated labels
labels_anno_p = np.fromfile(self.lidar_files[idx].replace('velodyne', 'labels')[:-3] + 'label',
dtype=np.uint32).reshape(-1)
# Delete high 16 digits binary
labels_anno_p = labels_anno_p & 0xFFFF
# Label mapping
labels_anno_p = self.label_map[labels_anno_p].astype(np.int64)
labels_p = labels_anno_p
if 'pseudo' in self.mode:
# Load pseudo labels
labels_pseudo_p = np.load(self.pseudo_files[idx])
assert labels_pseudo_p.shape[0] == labels_anno_p.shape[0]
if 'train_sv' in self.mode:
# Load sv data
sv_flag = np.load(self.sv_flag_files[idx])
with open(self.sv_info_files[idx], 'rb') as f:
_, sv2point = pickle.load(f)
# Label mask (annotated)
label_anno_mask = np.ones_like(labels_anno_p, dtype=bool)
for sv_id_frame in np.where(sv_flag == 1)[0]:
p_ids = sv2point[sv_id_frame]
label_anno_mask[p_ids] = False
labels_p[label_anno_mask] = 255
if 'pseudo' in self.mode:
# Label mask (pseudo)
label_pseudo_mask = np.zeros_like(labels_pseudo_p, dtype=bool)
for sv_id_frame in np.where(sv_flag == 2)[0]:
p_ids = sv2point[sv_id_frame]
label_pseudo_mask[p_ids] = True
labels_p[label_pseudo_mask] = labels_pseudo_p[label_pseudo_mask]
# Affine linear transformation
trans_m = np.eye(3) + np.random.randn(3, 3) * 0.1
trans_m[0][0] *= np.random.randint(0, 2) * 2 - 1
theta = np.random.rand() * 2 * math.pi
trans_m = np.matmul(trans_m, [[math.cos(theta), math.sin(theta), 0], [-math.sin(theta), math.cos(theta), 0], [0, 0, 1]])
coords_p = np.matmul(coords_p, trans_m)
feats_p[:, :3] = coords_p
coords_p *= self.scale
# Random translation
coords_min = coords_p.min(0)
coords_max = coords_p.max(0)
offset = -coords_min + np.clip(self.full_scale - coords_max + coords_min - 0.001, 0, None) * np.random.rand(3) + np.clip(self.full_scale - coords_max + coords_min + 0.001, None, 0) * np.random.rand(3)
coords_p += offset
# Clip valid positions
valid_idxs = (coords_p.min(1) >= 0) * (coords_p.max(1) < self.full_scale[0])
assert sum(valid_idxs) == len(valid_idxs), 'input voxels are not valid'
# Voxelization
coords_v = coords_p.astype(int)
# Remove duplicate items
_, unique_idxs, inverse_idxs = np.unique(coords_v, axis=0, return_index=True, return_inverse=True)
coords_v = coords_v[unique_idxs]
feats_v = feats_p[unique_idxs]
if 'train' in self.mode:
labels_v = labels_p[unique_idxs]
# # frame_offsets: Offsets of scenes in the collection of points
# if 'val' in self.mode:
# point_ids = np.nonzero(valid_idxs)[0] + self.frame_offsets[idx]
if 'train' in self.mode:
return {'coords_v': coords_v, 'feats_v': feats_v, 'labels_v': labels_v}
elif self.mode == 'val':
return {'coords_v': coords_v, 'feats_v': feats_v, 'labels_p': labels_p,
'inverse_idxs': inverse_idxs}
# , 'point_ids': point_ids}
elif self.mode == 'score':
return {'coords_v': coords_v, 'feats_v': feats_v, 'inverse_idxs': inverse_idxs,
'lidar_file': self.lidar_files[idx]}
def collate_fn(self, inputs):
# Data in batch
coords_v_b = [] # N X 4(x,y,z,B)
feats_v_b = [] # N X 4(x,y,z,sig)
if 'train' in self.mode:
labels_v_b = [] # N
if self.mode == 'val':
labels_p_b = []
# From voxels to points
if not 'train' in self.mode:
inverse_indices_b = [[np.array(-1)]]
# # id in the collection of all points
# if self.mode == 'val':
# point_ids_b = []
# Put into containers
for idx, sample in enumerate(inputs):
coords_v = torch.from_numpy(sample['coords_v']).int()
coords_v_b += [torch.cat([coords_v, torch.IntTensor(coords_v.shape[0], 1).fill_(idx)], 1)]
feats_v_b += [torch.from_numpy(sample['feats_v']).float()]
if 'train' in self.mode:
labels_v_b += [torch.from_numpy(sample['labels_v']).long()]
if self.mode == 'val':
labels_p_b += [torch.from_numpy(sample['labels_p']).long()]
if not 'train' in self.mode:
inverse_offset = max(inverse_indices_b[-1]) + 1
inverse_idxs = torch.from_numpy(sample['inverse_idxs'])
inverse_indices_b += [inverse_idxs + inverse_offset]
# if self.mode == 'val':
# point_ids_b += [torch.from_numpy(sample['point_ids'])]
# Concatenation
coords_v_b = torch.cat(coords_v_b, 0)
feats_v_b = torch.cat(feats_v_b, 0)
if 'train' in self.mode:
labels_v_b = torch.cat(labels_v_b, 0)
if self.mode == 'val':
labels_p_b = torch.cat(labels_p_b, 0)
if not 'train' in self.mode:
inverse_indices_b = torch.cat(inverse_indices_b[1:], 0).long()
# if self.mode == 'val':
# point_ids_b = torch.cat(point_ids_b, 0)
if 'train' in self.mode:
return {'coords_v_b': coords_v_b, 'feats_v_b': feats_v_b, 'labels_v_b': labels_v_b}
elif self.mode == 'val':
return {'coords_v_b': coords_v_b, 'feats_v_b': feats_v_b, 'labels_p_b': labels_p_b,
'inverse_indices_b': inverse_indices_b}
# , 'point_ids_b': point_ids_b}
elif self.mode == 'score':
return {'coords_v_b': coords_v_b, 'feats_v_b': feats_v_b, 'inverse_indices_b': inverse_indices_b,
'lidar_file': inputs[0]['lidar_file']}
| 9,618 | 38.584362 | 208 | py |
LiDAL | LiDAL-main/dataset/nu_dataloader.py | import os
import numpy as np
import torch
import glob
import pickle
import torch.utils.data
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
from nuscenes import NuScenes
from nuscenes.utils.splits import create_splits_scenes
from dataset.nu_dataset import NU_Dataset
class NU_Dataloader():
    """Factory for nuScenes dataloaders used by the active-learning pipeline.

    On rank 0 the constructor scans the official nuScenes 'train' split once
    and caches the lidar / lidarseg file lists as pickles under
    Processing_files/NU/; later constructions reuse the cache.  The public
    methods build frame-level or supervoxel-level loaders for the successive
    active-learning rounds, plus scoring and validation loaders.
    """
    def __init__(self, gpu_num = None, gpu_rank = None, scale = 20, full_scale = None, batch_size = 15, num_workers = 4):
        """
        Args:
            gpu_num / gpu_rank: DDP world size and rank (rank 0 builds caches,
                the other ranks wait on a barrier when gpu_num > 1).
            scale / full_scale: voxelization parameters; full_scale defaults
                to [8192, 8192, 8192] (None is used instead of a mutable
                default argument).
            batch_size: per-GPU batch size ('val' loaders use 2x this value).
            num_workers: dataloader worker processes.
        """
        self.gpu_num = gpu_num
        self.gpu_rank = gpu_rank
        self.scale = scale
        # Avoid the shared-mutable-default pitfall.
        self.full_scale = [8192, 8192, 8192] if full_scale is None else full_scale
        self.batch_size = batch_size
        self.num_workers = num_workers
        if self.gpu_rank == 0:
            if not os.path.exists('Processing_files/NU'):
                os.mkdir('Processing_files/NU')
            if not os.path.isfile('Processing_files/NU/lidar_files_train.pickle'):
                nusc = NuScenes(version='v1.0-trainval', dataroot='nuScenes', verbose=True)
                scene_splits = create_splits_scenes()
                train_split = scene_splits['train']
                # Collect lidar / label files of every frame of the train split
                lidar_files = []
                label_files = []
                for scene in nusc.scene:
                    scene_name = scene['name']
                    if scene_name in train_split:
                        print('Load ' + scene_name)
                        # Walk the linked list of samples in this scene
                        sample_token = scene['first_sample_token']
                        while sample_token:
                            sample = nusc.get('sample', sample_token)
                            sample_data = nusc.get('sample_data', sample['data']['LIDAR_TOP'])
                            lidar_files += ['nuScenes/' + sample_data['filename']]
                            label_files += ['nuScenes/' + nusc.get('lidarseg', sample_data['token'])['filename']]
                            sample_token = sample['next']
                # Cache the file lists for later runs
                with open('Processing_files/NU/lidar_files_train.pickle', 'wb') as fjson:
                    pickle.dump(lidar_files, fjson)
                with open('Processing_files/NU/label_files_train.pickle', 'wb') as fjson:
                    pickle.dump(label_files, fjson)
        if self.gpu_num > 1:
            dist.barrier()
    def get_data_loader(self, dataset):
        """Wrap *dataset* in a DataLoader (DistributedSampler under DDP).

        Returns (sampler, dataloader); sampler is None in single-GPU mode.
        """
        if self.gpu_num > 1:
            sampler = DistributedSampler(dataset, num_replicas=self.gpu_num, rank=self.gpu_rank)
        else:
            sampler = None
        batch_size = self.batch_size
        if dataset.mode == 'val':
            batch_size = 2 * self.batch_size
        return sampler, torch.utils.data.DataLoader(
            dataset = dataset,
            batch_size = batch_size,
            collate_fn = dataset.collate_fn,
            sampler = sampler,
            num_workers = self.num_workers,
            shuffle = (sampler is None),
            pin_memory = True
        )
    ###################################################################################################################################
    ####################################################### Frame Level ###############################################################
    ###################################################################################################################################
    # Fully supervised
    def train_data_loader_full(self):
        """Loader over the fully labeled train split (upper-bound baseline)."""
        mode = 'train'
        # Load files
        with open('Processing_files/NU/lidar_files_train.pickle', 'rb') as fjson:
            lidar_files = pickle.load(fjson)
        with open('Processing_files/NU/label_files_train.pickle', 'rb') as fjson:
            label_files = pickle.load(fjson)
        dataset = NU_Dataset(mode=mode, lidar_files=lidar_files, label_files=label_files)
        sampler, dataloader = self.get_data_loader(dataset)
        return sampler, dataloader
    # Intial training
    def train_data_loader_0r(self):
        """Round-0 loader: rank 0 draws a random 1% of frames as the initial
        labeled pool, writes the frame flags and the matching supervoxel
        flags (KMeans and VCCS partitions), then all ranks load the flags
        and train on the selected frames only."""
        mode = 'train_frame'
        # Load files
        with open('Processing_files/NU/lidar_files_train.pickle', 'rb') as fjson:
            lidar_files = pickle.load(fjson)
        with open('Processing_files/NU/label_files_train.pickle', 'rb') as fjson:
            label_files = pickle.load(fjson)
        nusc = NuScenes(version='v1.0-trainval', dataroot='nuScenes', verbose=False)
        scene_splits = create_splits_scenes()
        train_split = scene_splits['train']
        if self.gpu_rank == 0:
            if not os.path.exists('Processing_files/NU/frame_flag'):
                os.mkdir('Processing_files/NU/frame_flag')
                os.mkdir('Processing_files/NU/sv_flag')
                os.mkdir('Processing_files/NU/sv_flag/KMeans')
                os.mkdir('Processing_files/NU/sv_flag/VCCS')
            if not os.path.exists('Processing_files/NU/frame_flag/0r'):
                os.mkdir('Processing_files/NU/frame_flag/0r')
                os.mkdir('Processing_files/NU/sv_flag/KMeans/0r')
                os.mkdir('Processing_files/NU/sv_flag/VCCS/0r')
            # Randomly select 1% fully labeled frames.
            # replace=False keeps the drawn ids distinct so exactly
            # round(1%) frames are selected; the default replace=True could
            # draw duplicates and silently shrink the labeled pool.
            frame_flag = np.zeros(len(lidar_files), dtype=bool)
            selected_ids = np.random.choice(np.arange(len(lidar_files)), int(np.round(0.01 * len(lidar_files))), replace=False)
            frame_flag[selected_ids] = True
            # Save frame_flag and sv_flag
            scene_offset = 0
            for scene in nusc.scene:
                scene_name = scene['name']
                if scene_name in train_split:
                    print('Save flag for ' + scene_name)
                    # Save frame_flag
                    frame_num = scene['nbr_samples']
                    scene_flag = frame_flag[scene_offset : scene_offset + frame_num]
                    np.save('Processing_files/NU/frame_flag/0r/{}.npy'.format(scene_name), scene_flag)
                    scene_offset += frame_num
                    # Save sv_flag for both supervoxel partitions; a fully
                    # labeled frame marks all of its supervoxels labeled.
                    for sv_method in ('KMeans', 'VCCS'):
                        sv_dir = 'Processing_files/NU/sv_flag/{}/0r/'.format(sv_method) + scene_name
                        if not os.path.exists(sv_dir):
                            os.mkdir(sv_dir)
                        for idx, flag in enumerate(scene_flag):
                            frame_id_str = str(idx).zfill(6)
                            sv_info_path = 'Processing_files/NU/super_voxel/{}/'.format(sv_method) + scene_name + '/' + frame_id_str + '.pickle'
                            with open(sv_info_path, 'rb') as f:
                                sv_id, _ = pickle.load(f)
                            if flag:
                                sv_flag = np.ones(len(sv_id), dtype=bool)
                            else:
                                sv_flag = np.zeros(len(sv_id), dtype=bool)
                            np.save(sv_dir + '/' + frame_id_str + '.npy', sv_flag)
        if self.gpu_num > 1:
            dist.barrier()
        # Load flag
        frame_flag = np.array([])
        for scene in nusc.scene:
            scene_name = scene['name']
            if scene_name in train_split:
                frame_f = np.load('Processing_files/NU/frame_flag/0r/{}.npy'.format(scene_name))
                frame_flag = np.append(frame_flag, frame_f)
        frame_flag = frame_flag.astype(bool)
        lidar_files = list(np.array(lidar_files)[frame_flag])
        label_files = list(np.array(label_files)[frame_flag])
        print('Train_0r samples:', len(lidar_files))
        dataset = NU_Dataset(mode=mode, lidar_files=lidar_files, label_files=label_files)
        sampler, dataloader = self.get_data_loader(dataset)
        return sampler, dataloader
    # Data loader for round r_id training (frame level)
    def train_data_loader_fr(self, model_name, metric_name, r_id):
        """Frame-level loader for active-learning round *r_id* (> 0): trains
        on the frames flagged by the given model / selection metric."""
        assert r_id > 0
        mode = 'train'
        # Load samples
        with open('Processing_files/NU/lidar_files_train.pickle', 'rb') as fjson:
            lidar_files = pickle.load(fjson)
        with open('Processing_files/NU/label_files_train.pickle', 'rb') as fjson:
            label_files = pickle.load(fjson)
        scene_splits = create_splits_scenes()
        train_split = scene_splits['train']
        frame_flag_all = np.array([])
        # Load frames of current r_id
        for i_folder in train_split:
            if metric_name == 'RAND':
                frame_flag = np.load('Processing_files/NU/frame_flag/RAND/{}r/{}.npy'.format(r_id, i_folder))
            else:
                frame_flag = np.load('Processing_files/NU/frame_flag/{}/{}/{}r/{}.npy'.format(model_name, metric_name, r_id, i_folder))
            frame_flag_all = np.append(frame_flag_all, frame_flag)
        frame_flag_all = frame_flag_all.astype(bool)
        lidar_files = list(np.array(lidar_files)[frame_flag_all])
        label_files = list(np.array(label_files)[frame_flag_all])
        print('Train_{}r samples:'.format(r_id), len(lidar_files))
        dataset = NU_Dataset(mode=mode, lidar_files=lidar_files, label_files=label_files)
        sampler, dataloader = self.get_data_loader(dataset)
        return sampler, dataloader
    # Data loader for uncertainty scoring
    # Since there are no connections between sequences, we process each sequence separately
    def score_data_loader(self, inf_reps : int):
        """Scoring loader: every frame is repeated *inf_reps* times so one
        batch holds all augmented inference views of a single frame; the
        frame list is sharded contiguously across GPUs (no sampler)."""
        mode = 'score'
        # Load files
        with open('Processing_files/NU/lidar_files_train.pickle', 'rb') as fjson:
            lidar_files = pickle.load(fjson)
        seq_frame = []
        nusc = NuScenes(version='v1.0-trainval', dataroot='nuScenes', verbose=False)
        scene_splits = create_splits_scenes()
        train_split = scene_splits['train']
        # Build the '<scene>/<frame>' id of every train frame, in scan order
        for scene in nusc.scene:
            scene_name = scene['name']
            if scene_name in train_split:
                for idx in range(scene['nbr_samples']):
                    frame_id_str = str(idx).zfill(6)
                    seq_frame += ['{}/{}'.format(scene_name, frame_id_str)]
        assert len(seq_frame) == len(lidar_files)
        if self.gpu_num > 1:
            # Static contiguous shard per GPU
            split_num = int(np.ceil(len(lidar_files) / self.gpu_num))
            lidar_files = lidar_files[self.gpu_rank * split_num : (self.gpu_rank + 1) * split_num]
            seq_frame = seq_frame[self.gpu_rank * split_num : (self.gpu_rank + 1) * split_num]
        dataset = NU_Dataset(mode=mode, lidar_files=np.repeat(lidar_files, inf_reps), seq_frame=np.repeat(seq_frame, inf_reps))
        return torch.utils.data.DataLoader(
            dataset = dataset,
            batch_size = inf_reps,
            collate_fn = dataset.collate_fn,
            num_workers = self.num_workers,
            shuffle = False,
            drop_last = False,
            pin_memory = True
        )
    # Data loader for validation
    def val_data_loader(self):
        """Loader over the official nuScenes 'val' split; the file lists are
        scanned once on rank 0 and cached as pickles like the train split."""
        mode = 'val'
        if self.gpu_rank == 0:
            if not os.path.isfile('Processing_files/NU/lidar_files_val.pickle'):
                nusc = NuScenes(version='v1.0-trainval', dataroot='nuScenes', verbose=True)
                scene_splits = create_splits_scenes()
                val_split = scene_splits['val']
                # Load samples
                lidar_files = []
                label_files = []
                for scene in nusc.scene:
                    scene_name = scene['name']
                    if scene_name in val_split:
                        print('Load ' + scene_name)
                        sample_token = scene['first_sample_token']
                        while sample_token:
                            sample = nusc.get('sample', sample_token)
                            sample_data = nusc.get('sample_data', sample['data']['LIDAR_TOP'])
                            lidar_files += ['nuScenes/' + sample_data['filename']]
                            label_files += ['nuScenes/' + nusc.get('lidarseg', sample_data['token'])['filename']]
                            sample_token = sample['next']
                # Save
                with open('Processing_files/NU/lidar_files_val.pickle', 'wb') as fjson:
                    pickle.dump(lidar_files, fjson)
                with open('Processing_files/NU/label_files_val.pickle', 'wb') as fjson:
                    pickle.dump(label_files, fjson)
        if self.gpu_num > 1:
            dist.barrier()
        # Load files
        with open('Processing_files/NU/lidar_files_val.pickle', 'rb') as fjson:
            lidar_files = pickle.load(fjson)
        with open('Processing_files/NU/label_files_val.pickle', 'rb') as fjson:
            label_files = pickle.load(fjson)
        dataset = NU_Dataset(mode=mode, lidar_files=lidar_files, label_files=label_files)
        sampler, dataloader = self.get_data_loader(dataset)
        return sampler, dataloader
    # #########################################################################################################################################
    # ####################################################### Super Voxel Level ###############################################################
    # #########################################################################################################################################
    # Data loader for round r_id training (sv level)
    def train_data_loader_sv(self, model_name, metric_name, r_id):
        """Supervoxel-level loader for round *r_id* (> 0).

        Keeps only frames that contain at least one labeled supervoxel;
        'pseudo' metrics additionally load the previous round's predictions
        as pseudo labels.  ReDAL uses the VCCS partition, everything else
        KMeans.
        """
        assert r_id > 0
        if 'pseudo' in metric_name:
            mode = 'train_sv_pseudo'
        else:
            mode = 'train_sv'
        # Load samples
        with open('Processing_files/NU/lidar_files_train.pickle', 'rb') as fjson:
            lidar_files = pickle.load(fjson)
        with open('Processing_files/NU/label_files_train.pickle', 'rb') as fjson:
            label_files = pickle.load(fjson)
        scene_splits = create_splits_scenes()
        train_split = scene_splits['train']
        sv_flag_files = []
        sv_info_files = []
        pseudo_files = None
        if 'pseudo' in metric_name:
            pseudo_files =[]
        frame_flag_all = np.array([])
        # Load frames with labeled super voxels of current r_id
        for i_folder in train_split:
            if metric_name == 'ReDAL':
                sv_infos = sorted(glob.glob('Processing_files/NU/super_voxel/VCCS/{}/*.pickle'.format(i_folder)))
            else:
                sv_infos = sorted(glob.glob('Processing_files/NU/super_voxel/KMeans/{}/*.pickle'.format(i_folder)))
            if metric_name == 'RAND':
                sv_flags = sorted(glob.glob('Processing_files/NU/sv_flag/RAND/{}r/{}/*.npy'.format(r_id, i_folder)))
            elif metric_name == 'ReDAL':
                sv_flags = sorted(glob.glob('Processing_files/NU/sv_flag/VCCS/{}/{}/{}r/{}/*.npy'.format(model_name, metric_name, r_id, i_folder)))
            else:
                sv_flags = sorted(glob.glob('Processing_files/NU/sv_flag/KMeans/{}/{}/{}r/{}/*.npy'.format(model_name, metric_name, r_id, i_folder)))
            assert len(sv_flags) == len(sv_infos)
            if 'pseudo' in metric_name:
                # Pseudo labels come from the previous round's predictions
                if r_id == 1:
                    pseudos = sorted(glob.glob('Processing_files/NU/pred/{}/fr/0r/{}/*.npy'.format(model_name, i_folder)))
                else:
                    pseudos = sorted(glob.glob('Processing_files/NU/pred/{}/sv/{}/{}r/{}/*.npy'.format(model_name, metric_name, r_id - 1, i_folder)))
                assert len(pseudos) == len(sv_flags)
            # Labeled frames for training
            frame_flag = np.zeros_like(sv_flags, dtype=bool)
            for idx, sv_flag in enumerate(sv_flags):
                flag = np.load(sv_flag)
                if flag.sum() != 0:
                    frame_flag[idx] = True
            frame_flag_all = np.append(frame_flag_all, frame_flag)
            sv_flag_files += sv_flags
            sv_info_files += sv_infos
            if 'pseudo' in metric_name:
                pseudo_files += pseudos
        assert len(lidar_files) == len(sv_flag_files)
        assert frame_flag_all.shape[0] == len(lidar_files)
        frame_flag_all = frame_flag_all.astype(bool)
        lidar_files = list(np.array(lidar_files)[frame_flag_all])
        label_files = list(np.array(label_files)[frame_flag_all])
        sv_flag_files = list(np.array(sv_flag_files)[frame_flag_all])
        sv_info_files = list(np.array(sv_info_files)[frame_flag_all])
        if 'pseudo' in metric_name:
            pseudo_files = list(np.array(pseudo_files)[frame_flag_all])
        print('Train_{}r samples:'.format(r_id), len(lidar_files))
        dataset = NU_Dataset(mode=mode, lidar_files=lidar_files, label_files=label_files, pseudo_files=pseudo_files, sv_flag_files=sv_flag_files, sv_info_files=sv_info_files)
        sampler, dataloader = self.get_data_loader(dataset)
        return sampler, dataloader
LiDAL | LiDAL-main/score/prob_inference.py | from http.client import ImproperConnectionState
import os
import random
import numpy as np
import argparse
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from torchsparse import SparseTensor
from nuscenes.utils.splits import create_splits_scenes
from dataset.sk_dataloader import SK_Dataloader
from dataset.nu_dataloader import NU_Dataloader
from network.spvcnn import SPVCNN
from network.minkunet import MinkUNet
def inference(rank, world_size, save_folder_prob, save_folder_pred, save_folder_outfeat, args):
    """Run (optionally DDP) inference over the training split and save, per
    frame, the mean softmax probability map, the argmax prediction and --
    when needed by feature-based selection metrics -- the mean output
    features.

    Args:
        rank: process rank (also the GPU index) of this worker.
        world_size: number of GPUs / DDP processes.
        save_folder_prob / save_folder_pred / save_folder_outfeat:
            pre-created output roots (outfeat may be None when features are
            not saved).
        args: parsed command-line arguments of this script.
    """
    ####################################### Inference setup ###############################################
    # set random seed (per-rank, so augmented views differ across workers)
    random.seed(1 + rank)
    np.random.seed(1 + rank)
    torch.manual_seed(7122)
    # Initialize DDP
    if world_size > 1:
        dist.init_process_group(backend='nccl', init_method='tcp://localhost:{}'.format(args.host_num),
                                world_size=world_size, rank=rank)
    # Set device
    if world_size > 1:
        torch.cuda.set_device(rank)
        pytorch_device = torch.device('cuda', rank)
    else:
        pytorch_device = torch.device('cuda:0')
    # Network (NOTE: model stays undefined if model_name matches neither
    # 'SPVCNN' nor 'Mink'; class_num is 19 for SemanticKITTI, 16 for nuScenes)
    if 'SPVCNN' in args.model_name:
        if args.dataset_name == 'SK':
            model = SPVCNN(class_num=19)
        if args.dataset_name == 'NU':
            model = SPVCNN(class_num=16)
    if 'Mink' in args.model_name:
        if args.dataset_name == 'SK':
            model = MinkUNet(class_num=19)
        if args.dataset_name == 'NU':
            model = MinkUNet(class_num=16)
    model.to(pytorch_device)
    if world_size > 1:
        model = \
            torch.nn.parallel.DistributedDataParallel(model,
                                                      device_ids=[rank],
                                                      output_device=rank)
    # Load training statics (checkpoint of the round being scored)
    if args.r_id == 0:
        directory = 'check_points/{}/{}/{}/0r'.format(args.dataset_name, args.model_name, args.label_unit)
    else:
        directory = 'check_points/{}/{}/{}/{}/{}r'.format(args.dataset_name, args.model_name, args.label_unit, args.metric_name, args.r_id)
    PATH = directory + '/current.pt'
    # Checkpoints were saved from cuda:0; remap them onto this rank's GPU
    map_location = {'cuda:%d' % 0: 'cuda:%d' % rank}
    checkpoint = torch.load(PATH, map_location=map_location)
    if world_size > 1:
        model.module.load_state_dict(checkpoint['model_state_dict'], strict=True)
    else:
        model.load_state_dict(checkpoint['model_state_dict'], strict=True)
    if rank == 0:
        print("Restored from: {}".format(PATH))
    if world_size > 1:
        dist.barrier()
    # Dataset (each batch holds args.inf_reps augmented views of one frame)
    if args.dataset_name == 'SK':
        score_data_loader = SK_Dataloader(gpu_num = world_size, gpu_rank = rank).score_data_loader(inf_reps=args.inf_reps)
    if args.dataset_name == 'NU':
        score_data_loader = NU_Dataloader(gpu_num = world_size, gpu_rank = rank).score_data_loader(inf_reps=args.inf_reps)
    # Evaluation process
    with torch.no_grad():
        model.eval()
        if rank == 0:
            print("*****************************Inference*************************************")
        for i, batch in enumerate(score_data_loader):
            # Load data
            coords_v_b = batch['coords_v_b'].cuda()
            feats_v_b = batch['feats_v_b'].cuda()
            logits_v_b, out_feat_v_b = model(SparseTensor(feats_v_b, coords_v_b))
            # Project to original points
            logits_v_b = logits_v_b.cpu()
            inverse_indices_b = batch["inverse_indices_b"]
            logits_p_b = logits_v_b[inverse_indices_b]
            # Output features are only needed at round 0 and for the
            # feature-based selection metrics
            if args.r_id == 0 or args.metric_name in ['ReDAL', 'CSET']:
                out_feat_v_b = out_feat_v_b.cpu()
                out_feat_p_b = out_feat_v_b[inverse_indices_b]
            # Probability map: softmax per view, then mean over the
            # inf_reps augmented views of the frame
            prob_map = torch.softmax(logits_p_b, dim=1)
            prob_map = prob_map.numpy().reshape(args.inf_reps, -1, prob_map.shape[-1])
            prob_map_mean = np.mean(prob_map, axis=0)
            # Pred
            pred = np.argmax(prob_map_mean, axis=1)
            # Output features (mean over views as well)
            if args.r_id == 0 or args.metric_name in ['ReDAL', 'CSET']:
                out_feat = out_feat_p_b.numpy().reshape(args.inf_reps, -1, out_feat_p_b.shape[-1])
                out_feat = np.mean(out_feat, axis=0)
            # Save under <root>/<sequence>/<frame>.npy
            if args.dataset_name == 'SK':
                lidar_file = batch['lidar_file']
                seq_id = lidar_file.split('/')[-3]
                frame_id = lidar_file.split('/')[-1][:-4]
            if args.dataset_name == 'NU':
                seq_frame = batch['seq_frame']
                seq_id = seq_frame.split('/')[-2]
                frame_id = seq_frame.split('/')[-1]
            np.save(save_folder_prob + '/{}' '/{}.npy'.format(seq_id, frame_id), prob_map_mean)
            np.save(save_folder_pred + '/{}' '/{}.npy'.format(seq_id, frame_id), pred)
            if args.r_id == 0 or args.metric_name in ['ReDAL', 'CSET']:
                np.save(save_folder_outfeat + '/{}' '/{}.npy'.format(seq_id, frame_id), out_feat)
            print('Processing {}/{}'.format(seq_id, frame_id))
    if world_size > 1:
        dist.destroy_process_group()
def _ensure_dir(path):
    # Create *path* and any missing parents, tolerating pre-existing dirs.
    os.makedirs(path, exist_ok=True)

def _round_dir(root, args):
    """Return (and create) the per-round output directory under *root*.

    Layout mirrors the checkpoint tree:
        <root>/<model>/<label_unit>/0r                 (initial round)
        <root>/<model>/<label_unit>/<metric>/<r_id>r   (later rounds)
    """
    base = '{}/{}/{}'.format(root, args.model_name, args.label_unit)
    if args.r_id == 0:
        path = base + '/0r'
    else:
        path = '{}/{}/{}r'.format(base, args.metric_name, args.r_id)
    _ensure_dir(path)
    return path

def main(args):
    """Prepare the output directory tree and launch (multi-)GPU inference.

    Builds the probability-map and prediction folders for the current round
    (plus the output-feature folder when needed), one sub-folder per
    training sequence, then spawns one `inference` worker per GPU.
    """
    world_size = torch.cuda.device_count()
    # Per-round output roots; os.makedirs(exist_ok=True) creates every
    # intermediate level, matching the original step-by-step mkdir chain.
    save_folder_prob = _round_dir('Processing_files/{}/prob_map'.format(args.dataset_name), args)
    save_folder_pred = _round_dir('Processing_files/{}/pred'.format(args.dataset_name), args)
    # Output features are only saved at round 0 and for the feature-based
    # selection metrics.
    save_folder_outfeat = None
    if args.r_id == 0 or args.metric_name in ['ReDAL', 'CSET']:
        save_folder_outfeat = _round_dir('Processing_files/{}/outfeat'.format(args.dataset_name), args)
    # Training sequences of the chosen dataset.
    if args.dataset_name == 'SK':
        train_split = ['00', '01', '02', '03', '04', '05', '06', '07', '09', '10']
    if args.dataset_name == 'NU':
        scene_splits = create_splits_scenes()
        train_split = scene_splits['train']
    # One sub-folder per sequence in every output tree.
    for seq_id in train_split:
        _ensure_dir(save_folder_prob + '/' + seq_id)
        _ensure_dir(save_folder_pred + '/' + seq_id)
        if save_folder_outfeat is not None:
            _ensure_dir(save_folder_outfeat + '/' + seq_id)
    if world_size > 1:
        mp.spawn(inference,
                 args=(world_size, save_folder_prob, save_folder_pred, save_folder_outfeat, args,),
                 nprocs=world_size,
                 join=True)
    else:
        inference(0, world_size, save_folder_prob, save_folder_pred, save_folder_outfeat, args)
if __name__ == "__main__":
    # Command-line interface for the probability-map inference script.
    parser = argparse.ArgumentParser(description = 'Probability map inference')
    parser.add_argument('--dataset_name', type = str, required = True,
                        help = 'name of the used dataset')
    parser.add_argument('--model_name', type = str, required = True,
                        help = 'name of the trained model to be loaded')
    parser.add_argument('--label_unit', type = str, required = True,
                        help = 'fr for frame-based and sv for supervoxel-based')
    parser.add_argument('--metric_name', type = str, required = True,
                        help = 'name of the active selection metric used for the trained model')
    parser.add_argument('--r_id', type = int, required = True,
                        help = 'current trained r_id')
    parser.add_argument('--inf_reps', default = 8, type = int, required = False,
                        help = 'Number of inference views, 1 or more')
    # Default is a string for consistency with type=str (argparse does not
    # apply `type` to non-string defaults); the value is only ever
    # interpolated into the DDP init URL, so behavior is unchanged.
    parser.add_argument('--host_num', type = str, default = '7112')
    args = parser.parse_args()
    use_cuda = torch.cuda.is_available()
    print("use_cuda: {}".format(use_cuda))
    if not use_cuda:
        raise ValueError("CUDA is not available!")
    main(args)
LiDAL | LiDAL-main/network/utils.py | import torch
import torchsparse.nn.functional as F
import torchsparse.nn as spnn
from torch import nn
from torchsparse import PointTensor, SparseTensor
from torchsparse.nn.utils import get_kernel_offsets
__all__ = ['initial_voxelize', 'point_to_voxel', 'voxel_to_point']
# z: PointTensor
# return: SparseTensor
def initial_voxelize(z, init_res, after_res):
    """Voxelize the raw point tensor *z* at resolution *after_res*.

    Rescales the point coordinates from *init_res* to *after_res*, pools all
    points sharing a voxel cell into a single voxel, caches the
    point-to-voxel mapping on *z* under stride 1 and returns the resulting
    SparseTensor.
    """
    # Rescale xyz to the target resolution and keep the batch-index column.
    scaled_xyz = (z.C[:, :3] * init_res) / after_res
    float_coord = torch.cat([scaled_xyz, z.C[:, -1].view(-1, 1)], 1)
    # Hash the integer cell of every point and match against the unique cells.
    point_hash = F.sphash(torch.floor(float_coord).int())
    voxel_hash = torch.unique(point_hash)
    idx_query = F.sphashquery(point_hash, voxel_hash)
    counts = F.spcount(idx_query.int(), len(voxel_hash))
    # Pool coordinates and features of all points that fall in the same cell.
    voxel_coords = torch.round(
        F.spvoxelize(torch.floor(float_coord), idx_query, counts)).int()
    voxel_feats = F.spvoxelize(z.F, idx_query, counts)
    out = SparseTensor(voxel_feats, voxel_coords, 1)
    out.cmaps.setdefault(out.stride, out.coords)
    # Cache the mapping on the point tensor for later point<->voxel hops.
    z.additional_features['idx_query'][1] = idx_query
    z.additional_features['counts'][1] = counts
    z.C = float_coord
    return out
# x: SparseTensor, z: PointTensor
# return: SparseTensor
def point_to_voxel(x, z):
    """Pool point features of *z* onto the voxel grid of *x*.

    The point->voxel index map and per-voxel counts are computed once per
    stride and cached on z.additional_features; later calls at the same
    stride reuse the cache.
    """
    # Compute the mapping only if it is not cached for this stride yet.
    if z.additional_features is None or z.additional_features.get(
            'idx_query') is None or z.additional_features['idx_query'].get(
                x.s) is None:
        # Hash each point's voxel cell (coords snapped to the stride grid,
        # batch index appended) and match against the voxel hashes of x.
        pc_hash = F.sphash(
            torch.cat([
                torch.floor(z.C[:, :3] / x.s[0]).int() * x.s[0],
                z.C[:, -1].int().view(-1, 1)
            ], 1))
        sparse_hash = F.sphash(x.C)
        idx_query = F.sphashquery(pc_hash, sparse_hash)
        counts = F.spcount(idx_query.int(), x.C.shape[0])
        z.additional_features['idx_query'][x.s] = idx_query
        z.additional_features['counts'][x.s] = counts
    else:
        idx_query = z.additional_features['idx_query'][x.s]
        counts = z.additional_features['counts'][x.s]
    # Pool the point features into their voxels.
    inserted_feat = F.spvoxelize(z.F, idx_query, counts)
    new_tensor = SparseTensor(inserted_feat, x.C, x.s)
    # Share the coordinate / kernel maps of x so downstream sparse ops
    # can reuse them.
    new_tensor.cmaps = x.cmaps
    new_tensor.kmaps = x.kmaps
    return new_tensor
# x: SparseTensor, z: PointTensor
# return: PointTensor
def voxel_to_point(x, z, nearest=False):
    """Interpolate voxel features of *x* back onto the points of *z*.

    Weights come from F.calc_ti_weights over the neighbouring voxel cells
    given by a 2^3 kernel offset; with nearest=True all but the first
    neighbour's weight is zeroed.  The index map and weights are cached on
    both the returned PointTensor and *z* for reuse at the same stride.
    """
    # Compute the mapping only if it is not cached for this stride yet.
    if z.idx_query is None or z.weights is None or z.idx_query.get(
            x.s) is None or z.weights.get(x.s) is None:
        # Hashes of the neighbouring voxel cells of every point.
        off = get_kernel_offsets(2, x.s, 1, device=z.F.device)
        old_hash = F.sphash(
            torch.cat([
                torch.floor(z.C[:, :3] / x.s[0]).int() * x.s[0],
                z.C[:, -1].int().view(-1, 1)
            ], 1), off)
        pc_hash = F.sphash(x.C.to(z.F.device))
        idx_query = F.sphashquery(old_hash, pc_hash)
        # Interpolation weights of each point w.r.t. its neighbour voxels.
        weights = F.calc_ti_weights(z.C, idx_query,
                                    scale=x.s[0]).transpose(0, 1).contiguous()
        idx_query = idx_query.transpose(0, 1).contiguous()
        if nearest:
            # Keep only the first neighbour's contribution.
            weights[:, 1:] = 0.
            idx_query[:, 1:] = -1
        new_feat = F.spdevoxelize(x.F, idx_query, weights)
        new_tensor = PointTensor(new_feat,
                                 z.C,
                                 idx_query=z.idx_query,
                                 weights=z.weights)
        new_tensor.additional_features = z.additional_features
        # Cache the mapping for subsequent calls at this stride.
        new_tensor.idx_query[x.s] = idx_query
        new_tensor.weights[x.s] = weights
        z.idx_query[x.s] = idx_query
        z.weights[x.s] = weights
    else:
        # Cached path: reuse the stored indices and weights.
        new_feat = F.spdevoxelize(x.F, z.idx_query.get(x.s), z.weights.get(x.s))
        new_tensor = PointTensor(new_feat,
                                 z.C,
                                 idx_query=z.idx_query,
                                 weights=z.weights)
        new_tensor.additional_features = z.additional_features
    return new_tensor
class BasicConvolutionBlock(nn.Module):
    """Sparse 3D convolution followed by batch norm and ReLU."""

    def __init__(self, inc, outc, ks=3, stride=1, dilation=1):
        super().__init__()
        layers = [
            spnn.Conv3d(inc,
                        outc,
                        kernel_size=ks,
                        dilation=dilation,
                        stride=stride),
            spnn.BatchNorm(outc),
            spnn.ReLU(True),
        ]
        # Attribute name 'net' is preserved so checkpoint state_dict
        # keys ('net.0.weight', ...) stay compatible.
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x)
class BasicDeconvolutionBlock(nn.Module):
    """Transposed sparse 3D convolution followed by batch norm and ReLU."""

    def __init__(self, inc, outc, ks=3, stride=1):
        super().__init__()
        stages = [
            spnn.Conv3d(inc,
                        outc,
                        kernel_size=ks,
                        stride=stride,
                        transposed=True),
            spnn.BatchNorm(outc),
            spnn.ReLU(True),
        ]
        # Attribute name 'net' is preserved for checkpoint compatibility.
        self.net = nn.Sequential(*stages)

    def forward(self, x):
        out = self.net(x)
        return out
class ResidualBlock(nn.Module):
    """Two sparse 3-D convolutions with a (possibly projected) skip path."""

    def __init__(self, inc, outc, ks=3, stride=1, dilation=1):
        super().__init__()
        self.net = nn.Sequential(
            spnn.Conv3d(inc,
                        outc,
                        kernel_size=ks,
                        dilation=dilation,
                        stride=stride),
            spnn.BatchNorm(outc),
            spnn.ReLU(True),
            spnn.Conv3d(outc, outc, kernel_size=ks, dilation=dilation,
                        stride=1),
            spnn.BatchNorm(outc),
        )
        # Identity shortcut when shapes match; 1x1x1 projection otherwise.
        if inc == outc and stride == 1:
            self.downsample = nn.Identity()
        else:
            self.downsample = nn.Sequential(
                spnn.Conv3d(inc, outc, kernel_size=1, dilation=1,
                            stride=stride),
                spnn.BatchNorm(outc),
            )
        self.relu = spnn.ReLU(True)

    def forward(self, x):
        shortcut = self.downsample(x)
        return self.relu(self.net(x) + shortcut)
LiDAL | LiDAL-main/network/spvcnn.py | import torchsparse
import torchsparse.nn as spnn
from torch import nn
from torchsparse import PointTensor
from network.utils import initial_voxelize, point_to_voxel, voxel_to_point, BasicConvolutionBlock, BasicDeconvolutionBlock, ResidualBlock
class SPVCNN(nn.Module):
    """Sparse Point-Voxel CNN for point-cloud semantic segmentation.

    U-Net style sparse-voxel backbone with a parallel point branch: features
    are periodically devoxelized onto points, transformed, and re-voxelized.
    ``forward`` returns per-point class logits and the final point features.
    """

    def __init__(self, class_num):
        super().__init__()
        # Channel multiplier and per-stage channel widths.
        cr = 1.0
        cs = [32, 32, 64, 128, 256, 256, 128, 96, 96]
        cs = [int(cr * x) for x in cs]
        # Point and voxel resolutions (meters per voxel) for initial voxelization.
        self.pres = 0.05
        self.vres = 0.05
        # Stem: two 3x3x3 sparse convs at full resolution (input has 4 channels).
        self.stem = nn.Sequential(
            spnn.Conv3d(4, cs[0], kernel_size=3, stride=1),
            spnn.BatchNorm(cs[0]), spnn.ReLU(True),
            spnn.Conv3d(cs[0], cs[0], kernel_size=3, stride=1),
            spnn.BatchNorm(cs[0]), spnn.ReLU(True))
        # Encoder: each stage downsamples by 2 then applies two residual blocks.
        self.stage1 = nn.Sequential(
            BasicConvolutionBlock(cs[0], cs[0], ks=2, stride=2, dilation=1),
            ResidualBlock(cs[0], cs[1], ks=3, stride=1, dilation=1),
            ResidualBlock(cs[1], cs[1], ks=3, stride=1, dilation=1),
        )
        self.stage2 = nn.Sequential(
            BasicConvolutionBlock(cs[1], cs[1], ks=2, stride=2, dilation=1),
            ResidualBlock(cs[1], cs[2], ks=3, stride=1, dilation=1),
            ResidualBlock(cs[2], cs[2], ks=3, stride=1, dilation=1),
        )
        self.stage3 = nn.Sequential(
            BasicConvolutionBlock(cs[2], cs[2], ks=2, stride=2, dilation=1),
            ResidualBlock(cs[2], cs[3], ks=3, stride=1, dilation=1),
            ResidualBlock(cs[3], cs[3], ks=3, stride=1, dilation=1),
        )
        self.stage4 = nn.Sequential(
            BasicConvolutionBlock(cs[3], cs[3], ks=2, stride=2, dilation=1),
            ResidualBlock(cs[3], cs[4], ks=3, stride=1, dilation=1),
            ResidualBlock(cs[4], cs[4], ks=3, stride=1, dilation=1),
        )
        # Decoder: each up-block is [deconv, residual-after-skip-concat].
        self.up1 = nn.ModuleList([
            BasicDeconvolutionBlock(cs[4], cs[5], ks=2, stride=2),
            nn.Sequential(
                ResidualBlock(cs[5] + cs[3], cs[5], ks=3, stride=1, dilation=1),
                ResidualBlock(cs[5], cs[5], ks=3, stride=1, dilation=1),
            )
        ])
        self.up2 = nn.ModuleList([
            BasicDeconvolutionBlock(cs[5], cs[6], ks=2, stride=2),
            nn.Sequential(
                ResidualBlock(cs[6] + cs[2], cs[6], ks=3, stride=1, dilation=1),
                ResidualBlock(cs[6], cs[6], ks=3, stride=1, dilation=1),
            )
        ])
        self.up3 = nn.ModuleList([
            BasicDeconvolutionBlock(cs[6], cs[7], ks=2, stride=2),
            nn.Sequential(
                ResidualBlock(cs[7] + cs[1], cs[7], ks=3, stride=1, dilation=1),
                ResidualBlock(cs[7], cs[7], ks=3, stride=1, dilation=1),
            )
        ])
        self.up4 = nn.ModuleList([
            BasicDeconvolutionBlock(cs[7], cs[8], ks=2, stride=2),
            nn.Sequential(
                ResidualBlock(cs[8] + cs[0], cs[8], ks=3, stride=1, dilation=1),
                ResidualBlock(cs[8], cs[8], ks=3, stride=1, dilation=1),
            )
        ])
        # Per-point classifier over the final point features.
        self.classifier = nn.Sequential(nn.Linear(cs[8], class_num))
        # Point-branch transforms that lift features to the next fusion width.
        self.point_transforms = nn.ModuleList([
            nn.Sequential(
                nn.Linear(cs[0], cs[4]),
                nn.BatchNorm1d(cs[4]),
                nn.ReLU(True),
            ),
            nn.Sequential(
                nn.Linear(cs[4], cs[6]),
                nn.BatchNorm1d(cs[6]),
                nn.ReLU(True),
            ),
            nn.Sequential(
                nn.Linear(cs[6], cs[8]),
                nn.BatchNorm1d(cs[8]),
                nn.ReLU(True),
            )
        ])
        self.weight_initialization()
        # In-place dropout applied to voxel features in the decoder.
        self.dropout = nn.Dropout(0.3, True)

    def weight_initialization(self):
        # BatchNorm1d layers start as identity (weight=1, bias=0).
        for m in self.modules():
            if isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        # x: SparseTensor z: PointTensor
        z = PointTensor(x.F, x.C.float())
        x0 = initial_voxelize(z, self.pres, self.vres)
        x0 = self.stem(x0)
        z0 = voxel_to_point(x0, z, nearest=False)
        # No-op assignment kept from the upstream SPVCNN reference code.
        z0.F = z0.F
        # Encoder over voxels.
        x1 = point_to_voxel(x0, z0)
        x1 = self.stage1(x1)
        x2 = self.stage2(x1)
        x3 = self.stage3(x2)
        x4 = self.stage4(x3)
        # Fuse bottleneck voxel features into the point branch.
        z1 = voxel_to_point(x4, z0)
        z1.F = z1.F + self.point_transforms[0](z0.F)
        # Decoder with U-Net skip connections (torchsparse.cat on features).
        y1 = point_to_voxel(x4, z1)
        y1.F = self.dropout(y1.F)
        y1 = self.up1[0](y1)
        y1 = torchsparse.cat([y1, x3])
        y1 = self.up1[1](y1)
        y2 = self.up2[0](y1)
        y2 = torchsparse.cat([y2, x2])
        y2 = self.up2[1](y2)
        # Second point/voxel fusion at mid-decoder resolution.
        z2 = voxel_to_point(y2, z1)
        z2.F = z2.F + self.point_transforms[1](z1.F)
        y3 = point_to_voxel(y2, z2)
        y3.F = self.dropout(y3.F)
        y3 = self.up3[0](y3)
        y3 = torchsparse.cat([y3, x1])
        y3 = self.up3[1](y3)
        y4 = self.up4[0](y3)
        y4 = torchsparse.cat([y4, x0])
        y4 = self.up4[1](y4)
        # Final fusion back to points, then classify each point.
        z3 = voxel_to_point(y4, z2)
        z3.F = z3.F + self.point_transforms[2](z2.F)
        out = self.classifier(z3.F)
        return out, z3.F
| 5,207 | 32.384615 | 137 | py |
LiDAL | LiDAL-main/network/minkunet.py | import time
from collections import OrderedDict
import torch
import torchsparse
import torch.nn as nn
import torchsparse.nn as spnn
from network.utils import BasicConvolutionBlock, BasicDeconvolutionBlock, ResidualBlock
__all__ = ['MinkUNet']
class MinkUNet(nn.Module):
    """Sparse-voxel U-Net (MinkowskiNet-style) for semantic segmentation.

    Purely voxel-based counterpart of SPVCNN: four downsampling stages,
    four upsampling stages with skip connections, and a linear classifier.
    ``forward`` returns per-voxel class logits and the final voxel features.
    """

    def __init__(self, class_num):
        super().__init__()
        # Channel multiplier and per-stage channel widths.
        cr = 1.0
        cs = [32, 32, 64, 128, 256, 256, 128, 96, 96]
        cs = [int(cr * x) for x in cs]
        # Stem: two 3x3x3 sparse convs at full resolution (input has 4 channels).
        self.stem = nn.Sequential(
            spnn.Conv3d(4, cs[0], kernel_size=3, stride=1),
            spnn.BatchNorm(cs[0]), spnn.ReLU(True),
            spnn.Conv3d(cs[0], cs[0], kernel_size=3, stride=1),
            spnn.BatchNorm(cs[0]), spnn.ReLU(True))
        # Encoder: each stage downsamples by 2, then two residual blocks.
        self.stage1 = nn.Sequential(
            BasicConvolutionBlock(cs[0], cs[0], ks=2, stride=2, dilation=1),
            ResidualBlock(cs[0], cs[1], ks=3, stride=1, dilation=1),
            ResidualBlock(cs[1], cs[1], ks=3, stride=1, dilation=1),
        )
        self.stage2 = nn.Sequential(
            BasicConvolutionBlock(cs[1], cs[1], ks=2, stride=2, dilation=1),
            ResidualBlock(cs[1], cs[2], ks=3, stride=1, dilation=1),
            ResidualBlock(cs[2], cs[2], ks=3, stride=1, dilation=1))
        self.stage3 = nn.Sequential(
            BasicConvolutionBlock(cs[2], cs[2], ks=2, stride=2, dilation=1),
            ResidualBlock(cs[2], cs[3], ks=3, stride=1, dilation=1),
            ResidualBlock(cs[3], cs[3], ks=3, stride=1, dilation=1),
        )
        self.stage4 = nn.Sequential(
            BasicConvolutionBlock(cs[3], cs[3], ks=2, stride=2, dilation=1),
            ResidualBlock(cs[3], cs[4], ks=3, stride=1, dilation=1),
            ResidualBlock(cs[4], cs[4], ks=3, stride=1, dilation=1),
        )
        # Decoder: each up-block is [deconv, residual-after-skip-concat].
        self.up1 = nn.ModuleList([
            BasicDeconvolutionBlock(cs[4], cs[5], ks=2, stride=2),
            nn.Sequential(
                ResidualBlock(cs[5] + cs[3], cs[5], ks=3, stride=1,
                              dilation=1),
                ResidualBlock(cs[5], cs[5], ks=3, stride=1, dilation=1),
            )
        ])
        self.up2 = nn.ModuleList([
            BasicDeconvolutionBlock(cs[5], cs[6], ks=2, stride=2),
            nn.Sequential(
                ResidualBlock(cs[6] + cs[2], cs[6], ks=3, stride=1,
                              dilation=1),
                ResidualBlock(cs[6], cs[6], ks=3, stride=1, dilation=1),
            )
        ])
        self.up3 = nn.ModuleList([
            BasicDeconvolutionBlock(cs[6], cs[7], ks=2, stride=2),
            nn.Sequential(
                ResidualBlock(cs[7] + cs[1], cs[7], ks=3, stride=1,
                              dilation=1),
                ResidualBlock(cs[7], cs[7], ks=3, stride=1, dilation=1),
            )
        ])
        self.up4 = nn.ModuleList([
            BasicDeconvolutionBlock(cs[7], cs[8], ks=2, stride=2),
            nn.Sequential(
                ResidualBlock(cs[8] + cs[0], cs[8], ks=3, stride=1,
                              dilation=1),
                ResidualBlock(cs[8], cs[8], ks=3, stride=1, dilation=1),
            )
        ])
        # Per-voxel classifier over the final decoder features.
        self.classifier = nn.Sequential(nn.Linear(cs[8], class_num))
        self.weight_initialization()

    def weight_initialization(self):
        # BatchNorm1d layers start as identity (weight=1, bias=0).
        for m in self.modules():
            if isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        # Encoder path; x0..x4 are kept for the skip connections below.
        x0 = self.stem(x)
        x1 = self.stage1(x0)
        x2 = self.stage2(x1)
        x3 = self.stage3(x2)
        x4 = self.stage4(x3)
        # Decoder path with U-Net skip concatenation (torchsparse.cat).
        y1 = self.up1[0](x4)
        y1 = torchsparse.cat([y1, x3])
        y1 = self.up1[1](y1)
        y2 = self.up2[0](y1)
        y2 = torchsparse.cat([y2, x2])
        y2 = self.up2[1](y2)
        y3 = self.up3[0](y2)
        y3 = torchsparse.cat([y3, x1])
        y3 = self.up3[1](y3)
        y4 = self.up4[0](y3)
        y4 = torchsparse.cat([y4, x0])
        y4 = self.up4[1](y4)
        out = self.classifier(y4.F)
        return out, y4.F
| 4,067 | 32.073171 | 87 | py |
THUMT | THUMT-master/setup.py | #!/usr/bin/env python3
# coding=utf-8
# Copyright 2017-2020 The THUMT Authors
from setuptools import find_packages
from setuptools import setup
# Package metadata and installation configuration for the THUMT toolkit.
setup(
    name="thumt",
    version="1.2.0",
    author="The THUMT Authors",
    author_email="thumt17@gmail.com",
    description="THUMT: An open-source toolkit for neural machine translation",
    url="http://thumt.thunlp.org",
    # Console commands installed onto the user's PATH, mapping to
    # module:function entry points.
    entry_points={
        "console_scripts": [
            "thumt-trainer = thumt.bin.trainer:cli_main",
            "thumt-translator = thumt.bin.translator:cli_main",
            "thumt-scorer=thumt.bin.scorer:cli_main"
        ]},
    # Standalone helper scripts installed verbatim.
    scripts=[
        "thumt/scripts/average_checkpoints.py",
        "thumt/scripts/build_vocab.py",
        "thumt/scripts/convert_checkpoint.py",
        "thumt/scripts/shuffle_corpus.py"],
    packages=find_packages(),
    # Runtime dependencies; torch >= 1.1.0 is required by the models.
    install_requires=[
        "future",
        "pillow",
        "torch>=1.1.0",
        "regex"],
    classifiers=[
        "Programming Language :: Python :: 3",
        "Topic :: Scientific/Engineering :: Artificial Intelligence"])
| 1,076 | 29.771429 | 79 | py |
THUMT | THUMT-master/thumt/modules/embedding.py | # coding=utf-8
# Copyright 2017-2020 The THUMT Authors
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import torch
class PositionalEmbedding(torch.nn.Module):
    """Adds sinusoidal positional encodings to a batch of embeddings.

    Follows the Transformer scheme (Vaswani et al., 2017): the first half of
    the channels holds sines, the second half cosines, over a geometric
    progression of timescales from 1 to 10000. The encoding is computed on
    the fly, so arbitrary sequence lengths are supported and no parameters
    are stored.
    """

    def __init__(self):
        super(PositionalEmbedding, self).__init__()

    def forward(self, inputs):
        """Return ``inputs`` plus positional encodings.

        Args:
            inputs: Float tensor of shape ``[batch, length, channels]``.

        Returns:
            Tensor of the same shape and dtype as ``inputs``.

        Raises:
            ValueError: If ``inputs`` is not rank 3.
        """
        if inputs.dim() != 3:
            raise ValueError("The rank of input must be 3.")

        length = inputs.shape[1]
        channels = inputs.shape[2]
        half_dim = channels // 2

        positions = torch.arange(length, dtype=inputs.dtype,
                                 device=inputs.device)
        dimensions = torch.arange(half_dim, dtype=inputs.dtype,
                                  device=inputs.device)

        # Inverse timescales; max(half_dim - 1, 1) fixes a ZeroDivisionError
        # for channels == 2 or 3 (half_dim == 1) in the original code.
        scale = math.log(10000.0) / float(max(half_dim - 1, 1))
        dimensions.mul_(-scale).exp_()

        scaled_time = positions.unsqueeze(1) * dimensions.unsqueeze(0)
        signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)],
                           dim=1)

        # Pad one zero channel when the embedding size is odd.
        if channels % 2 == 1:
            pad = torch.zeros([signal.shape[0], 1], dtype=inputs.dtype,
                              device=inputs.device)
            # was ``axis=1``; use ``dim=1`` consistently with the cat above
            signal = torch.cat([signal, pad], dim=1)

        return inputs + torch.reshape(signal, [1, -1, channels]).to(inputs)
| 1,341 | 30.209302 | 76 | py |
THUMT | THUMT-master/thumt/modules/losses.py | # coding=utf-8
# Copyright 2017-2020 The THUMT Authors
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import torch
class SmoothedCrossEntropyLoss(torch.nn.Module):
    """Cross entropy with optional label smoothing.

    In training mode a probability mass of ``smoothing`` is spread uniformly
    over the non-target classes. With ``normalize=True`` the entropy of the
    smoothed target distribution is subtracted so a perfect prediction
    scores near zero. Smoothing is disabled in eval mode.
    """

    def __init__(self, smoothing=0.0, normalize=True):
        super(SmoothedCrossEntropyLoss, self).__init__()
        self.smoothing = smoothing
        self.normalize = normalize

    def forward(self, logits, labels):
        # Flatten to [N, vocab] / [N]; the label shape is restored at the end.
        shape = labels.shape
        flat_logits = torch.reshape(logits, [-1, logits.shape[-1]])
        flat_labels = torch.reshape(labels, [-1])

        log_probs = torch.nn.functional.log_softmax(flat_logits, dim=-1)
        rows = torch.arange(flat_labels.shape[0], device=flat_logits.device)
        gold = log_probs[rows, flat_labels]

        if not self.smoothing or not self.training:
            return -torch.reshape(gold, shape)

        n = flat_logits.shape[-1] - 1.0
        p = 1.0 - self.smoothing
        q = self.smoothing / n

        if log_probs.dtype == torch.float16:
            # Accumulate in FP32 to avoid overflow, then cast back.
            gold32 = gold.to(torch.float32)
            total = torch.sum(log_probs.to(torch.float32), dim=-1)
            smoothed = (p * gold32 + q * (total - gold32)).to(torch.float16)
        else:
            total = torch.sum(log_probs, dim=-1)
            smoothed = p * gold + q * (total - gold)

        loss = -torch.reshape(smoothed, shape)

        if not self.normalize:
            return loss
        # Entropy of the smoothed target distribution: the loss floor.
        normalizing = -(p * math.log(p) + n * q * math.log(q + 1e-20))
        return loss - normalizing
| 1,620 | 30.173077 | 74 | py |
THUMT | THUMT-master/thumt/modules/affine.py | # coding=utf-8
# Copyright 2017-2020 The THUMT Authors
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import torch
import torch.nn as nn
import thumt.utils as utils
from thumt.modules.module import Module
class Affine(Module):
    """A fully connected layer: ``y = x W^T + b``.

    Functionally equivalent to ``torch.nn.Linear`` (same initialization
    scheme), but registers scoped parameter names via ``add_name``.
    """

    def __init__(self, in_features, out_features, bias=True, name="affine"):
        super(Affine, self).__init__(name=name)
        self.in_features = in_features
        self.out_features = out_features

        with utils.scope(name):
            self.weight = nn.Parameter(torch.Tensor(out_features, in_features))
            self.add_name(self.weight, "weight")
            if not bias:
                self.register_parameter('bias', None)
            else:
                self.bias = nn.Parameter(torch.Tensor(out_features))
                self.add_name(self.bias, "bias")

        self.reset_parameters()

    def reset_parameters(self):
        # Mirrors torch.nn.Linear's default initialization.
        nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is not None:
            fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / math.sqrt(fan_in)
            nn.init.uniform_(self.bias, -bound, bound)

    def forward(self, input):
        return nn.functional.linear(input, self.weight, self.bias)

    def extra_repr(self):
        return (f"in_features={self.in_features}, "
                f"out_features={self.out_features}, "
                f"bias={self.bias is not None}")
| 1,491 | 30.083333 | 79 | py |
THUMT | THUMT-master/thumt/modules/module.py | # coding=utf-8
# Copyright 2017-2020 The THUMT Authors
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import thumt.utils as utils
class Module(nn.Module):
    """Base class for THUMT modules.

    Prefixes the module's name with the active naming scope and provides a
    helper that attaches unique tensor names to parameters.
    """

    def __init__(self, name=""):
        super(Module, self).__init__()
        scope = utils.get_scope()
        self._name = "%s/%s" % (scope, name) if scope else name

    def add_name(self, tensor, name):
        # Attach a globally unique name, used when saving/inspecting params.
        tensor.tensor_name = utils.unique_name(name)

    @property
    def name(self):
        return self._name
| 576 | 20.37037 | 58 | py |
THUMT | THUMT-master/thumt/modules/feed_forward.py | # coding=utf-8
# Copyright 2017-2020 The THUMT Authors
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import thumt.utils as utils
from thumt.modules.module import Module
from thumt.modules.affine import Affine
class FeedForward(Module):
    """Position-wise feed-forward network: Linear -> ReLU -> dropout -> Linear.

    ``output_size`` defaults to ``input_size`` when not given.
    """

    def __init__(self, input_size, hidden_size, output_size=None, dropout=0.0,
                 name="feed_forward"):
        super(FeedForward, self).__init__(name=name)
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size or input_size
        self.dropout = dropout

        with utils.scope(name):
            self.input_transform = Affine(input_size, hidden_size,
                                          name="input_transform")
            self.output_transform = Affine(hidden_size, self.output_size,
                                           name="output_transform")

        self.reset_parameters()

    def forward(self, x):
        hidden = nn.functional.relu(self.input_transform(x))
        hidden = nn.functional.dropout(hidden, self.dropout, self.training)
        return self.output_transform(hidden)

    def reset_parameters(self):
        # Xavier weights, zero biases (same order of RNG draws as before).
        nn.init.xavier_uniform_(self.input_transform.weight)
        nn.init.xavier_uniform_(self.output_transform.weight)
        nn.init.constant_(self.input_transform.bias, 0.0)
        nn.init.constant_(self.output_transform.bias, 0.0)
| 1,475 | 31.8 | 78 | py |
THUMT | THUMT-master/thumt/modules/recurrent.py | # coding=utf-8
# Copyright 2017-2020 The THUMT Authors
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import thumt.utils as utils
from thumt.modules.module import Module
from thumt.modules.affine import Affine
from thumt.modules.layer_norm import LayerNorm
class GRUCell(Module):
    """A Gated Recurrent Unit cell (Cho et al., 2014).

    ``forward`` performs one step and returns ``(output, new_state)``; both
    are the new hidden state. ``normalization`` is accepted for interface
    parity with ``LSTMCell`` but is currently unused.
    """

    def __init__(self, input_size, output_size, normalization=False,
                 name="gru"):
        super(GRUCell, self).__init__(name=name)
        self.input_size = input_size
        self.output_size = output_size

        with utils.scope(name):
            self.reset_gate = Affine(input_size + output_size, output_size,
                                     bias=False, name="reset_gate")
            self.update_gate = Affine(input_size + output_size, output_size,
                                      bias=False, name="update_gate")
            self.transform = Affine(input_size + output_size, output_size,
                                    name="transform")

    def forward(self, x, h):
        inputs = torch.cat([x, h], -1)
        r = torch.sigmoid(self.reset_gate(inputs))
        u = torch.sigmoid(self.update_gate(inputs))
        # Candidate state from the reset-gated previous hidden state.
        c = self.transform(torch.cat([x, r * h], -1))
        # Interpolate between the old state and the candidate.
        # Bug fix: the candidate ``c`` was previously ignored and
        # ``torch.tanh(h)`` used instead, leaving ``transform`` and
        # ``reset_gate`` without any effect (or gradient).
        new_h = (1.0 - u) * h + u * torch.tanh(c)

        return new_h, new_h

    def init_state(self, batch_size, dtype, device):
        # Zero hidden state of shape [batch_size, output_size].
        h = torch.zeros([batch_size, self.output_size], dtype=dtype,
                        device=device)
        return h

    def mask_state(self, h, prev_h, mask):
        # Keep ``h`` where mask == 1, restore ``prev_h`` where mask == 0.
        mask = mask[:, None]
        new_h = mask * h + (1.0 - mask) * prev_h
        return new_h

    def reset_parameters(self, initializer="uniform"):
        # Bug fix: this previously referenced a nonexistent ``self.gates``
        # attribute and would raise AttributeError if called.
        weights = [self.reset_gate.weight, self.update_gate.weight,
                   self.transform.weight]
        if initializer == "uniform_scaling":
            for w in weights:
                nn.init.xavier_uniform_(w)
            nn.init.constant_(self.transform.bias, 0.0)
        elif initializer == "uniform":
            for w in weights:
                nn.init.uniform_(w, -0.08, 0.08)
            nn.init.uniform_(self.transform.bias, -0.08, 0.08)
        else:
            # Bug fix: ``%d`` with a string initializer raised TypeError.
            raise ValueError("Unknown initializer %s" % initializer)
class LSTMCell(Module):
    """A Long Short-Term Memory cell with optional layer normalization.

    The state is a ``(c, h)`` tuple (cell state, hidden state). All four
    gates are computed by a single fused Affine; when ``normalization`` is
    True the pre-activations are layer-normalized per gate. If
    ``activation`` is None the output gate multiplies the raw cell state
    (no tanh), which is the deliberate THUMT default.
    """

    def __init__(self, input_size, output_size, normalization=False,
                 activation=None, name="lstm"):
        super(LSTMCell, self).__init__(name=name)
        self.input_size = input_size
        self.output_size = output_size
        self.activation = activation

        with utils.scope(name):
            # One fused projection producing all four gate pre-activations.
            self.gates = Affine(input_size + output_size, 4 * output_size,
                                name="gates")
            if normalization:
                self.layer_norm = LayerNorm([4, output_size])
            else:
                self.layer_norm = None

        self.reset_parameters()

    def forward(self, x, state):
        """One step: returns ``(new_h, (new_c, new_h))``."""
        c, h = state

        gates = self.gates(torch.cat([x, h], 1))

        if self.layer_norm is not None:
            combined = self.layer_norm(
                torch.reshape(gates, [-1, 4, self.output_size]))
        else:
            combined = torch.reshape(gates, [-1, 4, self.output_size])

        # i: input gate, j: candidate, f: forget gate, o: output gate.
        i, j, f, o = torch.unbind(combined, 1)
        i, f, o = torch.sigmoid(i), torch.sigmoid(f), torch.sigmoid(o)

        new_c = f * c + i * torch.tanh(j)

        if self.activation is None:
            # Do not use tanh activation
            new_h = o * new_c
        else:
            new_h = o * self.activation(new_c)

        return new_h, (new_c, new_h)

    def init_state(self, batch_size, dtype, device):
        # Zero (c, h) state, each of shape [batch_size, output_size].
        c = torch.zeros([batch_size, self.output_size], dtype=dtype,
                        device=device)
        h = torch.zeros([batch_size, self.output_size], dtype=dtype,
                        device=device)
        return c, h

    def mask_state(self, state, prev_state, mask):
        # Keep the new state where mask == 1, the old one where mask == 0.
        c, h = state
        prev_c, prev_h = prev_state
        mask = mask[:, None]
        new_c = mask * c + (1.0 - mask) * prev_c
        new_h = mask * h + (1.0 - mask) * prev_h
        return new_c, new_h

    def reset_parameters(self, initializer="uniform"):
        if initializer == "uniform_scaling":
            nn.init.xavier_uniform_(self.gates.weight)
            nn.init.constant_(self.gates.bias, 0.0)
        elif initializer == "uniform":
            nn.init.uniform_(self.gates.weight, -0.04, 0.04)
            nn.init.uniform_(self.gates.bias, -0.04, 0.04)
        else:
            # Bug fix: ``%d`` with a string initializer raised TypeError.
            raise ValueError("Unknown initializer %s" % initializer)
| 4,484 | 32.721805 | 76 | py |
THUMT | THUMT-master/thumt/modules/layer_norm.py | # coding=utf-8
# Copyright 2017-2020 The THUMT Authors
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numbers
import torch
import torch.nn as nn
import thumt.utils as utils
from thumt.modules.module import Module
class LayerNorm(Module):
    """Layer normalization over the trailing ``normalized_shape`` dims.

    Mirrors ``torch.nn.LayerNorm`` but registers scoped parameter names.
    """

    def __init__(self, normalized_shape, eps=1e-5, elementwise_affine=True,
                 name="layer_norm"):
        super(LayerNorm, self).__init__(name=name)
        if isinstance(normalized_shape, numbers.Integral):
            normalized_shape = (normalized_shape,)
        self.normalized_shape = tuple(normalized_shape)
        self.eps = eps
        self.elementwise_affine = elementwise_affine

        with utils.scope(name):
            if not elementwise_affine:
                self.register_parameter('weight', None)
                self.register_parameter('bias', None)
            else:
                self.weight = nn.Parameter(torch.Tensor(*normalized_shape))
                self.bias = nn.Parameter(torch.Tensor(*normalized_shape))
                self.add_name(self.weight, "weight")
                self.add_name(self.bias, "bias")

        self.reset_parameters()

    def reset_parameters(self):
        if self.elementwise_affine:
            nn.init.ones_(self.weight)
            nn.init.zeros_(self.bias)

    def forward(self, input):
        return nn.functional.layer_norm(input, self.normalized_shape,
                                        self.weight, self.bias, self.eps)

    def extra_repr(self):
        return '{normalized_shape}, eps={eps}, ' \
            'elementwise_affine={elementwise_affine}'.format(**self.__dict__)
| 1,654 | 32.1 | 77 | py |
THUMT | THUMT-master/thumt/modules/attention.py | # coding=utf-8
# Copyright 2017-2020 The THUMT Authors
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import thumt.utils as utils
from thumt.modules.module import Module
from thumt.modules.affine import Affine
class Attention(Module):
    """Single-head additive (Bahdanau-style) attention.

    Scores are ``v^T tanh(W_q q + W_k k)``; the output is the softmax-weighted
    sum of ``memory``. ``compute_cache`` precomputes the key projection so
    decoding steps can reuse it.
    """

    def __init__(self, q_size, k_size, hidden_size, name="attention"):
        super(Attention, self).__init__(name)
        self._q_size = q_size
        self._k_size = k_size
        self._hidden_size = hidden_size

        with utils.scope(name):
            self.q_transform = Affine(q_size, hidden_size, name="q_transform")
            self.k_transform = Affine(k_size, hidden_size, name="k_transform")
            self.v_transform = Affine(hidden_size, 1,
                                      name="v_transform")

        self.reset_parameters()

    def compute_cache(self, memory):
        # Keys depend only on the memory, so they can be projected once.
        return self.k_transform(memory)

    def forward(self, query, bias, memory, cache=None):
        q = self.q_transform(query)

        if cache is None:
            k = self.k_transform(memory)
        else:
            k = cache

        # q: [batch, 1, hidden_size]
        # k: [batch, length, hidden_size]
        logits = self.v_transform(torch.tanh(q + k))
        # [batch, length, 1] -> [batch, 1, length]
        logits = torch.transpose(logits, 1, 2)
        # -> [batch, 1, 1, length] so ``bias`` broadcasts against it
        logits = torch.unsqueeze(logits, 2)

        if bias is not None:
            logits = logits + bias

        weights = torch.softmax(logits, dim=-1)
        # [batch, 1, length]
        weights = torch.squeeze(weights, 2)
        output = torch.matmul(weights, memory)

        return output

    def reset_parameters(self, initializer="uniform_scaling", **kwargs):
        if initializer == "uniform_scaling":
            # 6 / (4 * hidden_size) -> 6 / (2 * hidden_size)
            nn.init.xavier_uniform_(self.q_transform.weight)
            nn.init.xavier_uniform_(self.k_transform.weight)
            nn.init.xavier_uniform_(self.v_transform.weight)
            nn.init.constant_(self.q_transform.bias, 0.0)
            nn.init.constant_(self.k_transform.bias, 0.0)
            nn.init.constant_(self.v_transform.bias, 0.0)
        elif initializer == "uniform":
            nn.init.uniform_(self.q_transform.weight, -0.04, 0.04)
            nn.init.uniform_(self.k_transform.weight, -0.04, 0.04)
            nn.init.uniform_(self.v_transform.weight, -0.04, 0.04)
            nn.init.uniform_(self.q_transform.bias, -0.04, 0.04)
            nn.init.uniform_(self.k_transform.bias, -0.04, 0.04)
            nn.init.uniform_(self.v_transform.bias, -0.04, 0.04)
        else:
            # Bug fix: ``%d`` with a string initializer raised TypeError.
            raise ValueError("Unknown initializer %s" % initializer)
class MultiHeadAttentionBase(Module):
    """Shared head-splitting/merging helpers for multi-head attention."""

    def __init__(self, name="multihead_attention_base"):
        super(MultiHeadAttentionBase, self).__init__(name=name)

    @staticmethod
    def split_heads(x, heads):
        # [batch, length, channels] -> [batch, heads, length, channels/heads]
        batch, length, channels = x.shape[0], x.shape[1], x.shape[2]
        reshaped = torch.reshape(x, [batch, length, heads, channels // heads])
        return torch.transpose(reshaped, 2, 1)

    @staticmethod
    def combine_heads(x):
        # [batch, heads, length, depth] -> [batch, length, heads * depth]
        batch, heads = x.shape[0], x.shape[1]
        length, depth = x.shape[2], x.shape[3]
        merged = torch.transpose(x, 2, 1)
        return torch.reshape(merged, [batch, length, heads * depth])
class MultiHeadAttention(MultiHeadAttentionBase):
    """Scaled dot-product multi-head attention (Vaswani et al., 2017).

    Modes:
      * self-attention (``memory is None``), with optional incremental
        decoding via the ``kv`` cache of past keys/values;
      * encoder-decoder attention (``memory`` given), with optional
        precomputed keys/values passed in ``kv``.

    When ``kv`` is given, returns ``(output, k, v)`` so the caller can
    update its cache; otherwise returns only ``output``.
    """

    def __init__(self, hidden_size, num_heads, dropout=0.0,
                 name="multihead_attention"):
        super(MultiHeadAttention, self).__init__(name=name)
        self.num_heads = num_heads
        self.hidden_size = hidden_size
        self.dropout = dropout

        with utils.scope(name):
            self.q_transform = Affine(hidden_size, hidden_size,
                                      name="q_transform")
            self.k_transform = Affine(hidden_size, hidden_size,
                                      name="k_transform")
            self.v_transform = Affine(hidden_size, hidden_size,
                                      name="v_transform")
            self.o_transform = Affine(hidden_size, hidden_size,
                                      name="o_transform")

        self.reset_parameters()

    def forward(self, query, bias, memory=None, kv=None):
        q = self.q_transform(query)

        if memory is not None:
            if kv is not None:
                k, v = kv
            else:
                k, v = None, None

            # encoder-decoder attention: reuse cached keys/values if present.
            # Bug fix: the original ``k = k or self.k_transform(memory)``
            # evaluated tensor truthiness, which raises a RuntimeError for
            # cached multi-element tensors; compare against None instead.
            if k is None:
                k = self.k_transform(memory)
            if v is None:
                v = self.v_transform(memory)
        else:
            # self-attention; append the new step to any cached keys/values.
            k = self.k_transform(query)
            v = self.v_transform(query)

            if kv is not None:
                k = torch.cat([kv[0], k], dim=1)
                v = torch.cat([kv[1], v], dim=1)

        # split heads
        qh = self.split_heads(q, self.num_heads)
        kh = self.split_heads(k, self.num_heads)
        vh = self.split_heads(v, self.num_heads)

        # scale query by 1/sqrt(depth)
        qh = qh * (self.hidden_size // self.num_heads) ** -0.5

        # dot-product attention
        kh = torch.transpose(kh, -2, -1)
        logits = torch.matmul(qh, kh)

        if bias is not None:
            logits = logits + bias

        weights = torch.nn.functional.dropout(torch.softmax(logits, dim=-1),
                                              p=self.dropout,
                                              training=self.training)

        x = torch.matmul(weights, vh)

        # combine heads
        output = self.o_transform(self.combine_heads(x))

        if kv is not None:
            return output, k, v

        return output

    def reset_parameters(self, initializer="uniform_scaling", **kwargs):
        if initializer == "uniform_scaling":
            # 6 / (4 * hidden_size) -> 6 / (2 * hidden_size)
            nn.init.xavier_uniform_(self.q_transform.weight, 2 ** -0.5)
            nn.init.xavier_uniform_(self.k_transform.weight, 2 ** -0.5)
            nn.init.xavier_uniform_(self.v_transform.weight, 2 ** -0.5)
            nn.init.xavier_uniform_(self.o_transform.weight)
            nn.init.constant_(self.q_transform.bias, 0.0)
            nn.init.constant_(self.k_transform.bias, 0.0)
            nn.init.constant_(self.v_transform.bias, 0.0)
            nn.init.constant_(self.o_transform.bias, 0.0)
        else:
            # Bug fix: ``%d`` with a string initializer raised TypeError.
            raise ValueError("Unknown initializer %s" % initializer)
class MultiHeadAdditiveAttention(MultiHeadAttentionBase):
    """Multi-head additive attention.

    Per-head scores come from ``v_transform`` (hidden -> num_heads) applied
    to ``tanh(W_q q + W_k k)``; the values are the raw ``memory`` split into
    heads. ``compute_cache`` precomputes the key projection for decoding.
    """

    def __init__(self, q_size, k_size, hidden_size, num_heads, dropout=0.0,
                 name="multihead_attention"):
        super(MultiHeadAdditiveAttention, self).__init__(name=name)
        self.num_heads = num_heads
        self.hidden_size = hidden_size
        self.dropout = dropout

        with utils.scope(name):
            self.q_transform = Affine(q_size, hidden_size,
                                      name="q_transform")
            self.k_transform = Affine(k_size, hidden_size,
                                      name="k_transform")
            self.v_transform = Affine(hidden_size, num_heads,
                                      name="v_transform")
            self.o_transform = Affine(k_size, k_size,
                                      name="o_transform")

        self.reset_parameters()

    def compute_cache(self, memory):
        # Keys depend only on the memory, so they can be projected once.
        return self.k_transform(memory)

    def forward(self, query, bias, memory, cache=None):
        q = self.q_transform(query)

        if cache is None:
            k = self.k_transform(memory)
        else:
            k = cache

        # q: [batch, 1, hidden_size]
        # k: [batch, length, hidden_size]
        # (Cleanup: removed unused ``split_heads(q/k)`` locals from the
        # original; the additive scores are computed on the unsplit q + k.)
        logits = self.v_transform(torch.tanh(q + k))
        # [batch, length, num_heads] -> [batch, num_heads, length]
        logits = torch.transpose(logits, 1, 2)
        # -> [batch, num_heads, 1, length]
        logits = torch.unsqueeze(logits, 2)

        if bias is not None:
            logits = logits + bias

        weights = torch.nn.functional.dropout(torch.softmax(logits, dim=-1),
                                              p=self.dropout,
                                              training=self.training)

        # Values are the raw memory, split into heads.
        vh = self.split_heads(memory, self.num_heads)
        x = torch.matmul(weights, vh)

        # combine heads
        output = self.o_transform(self.combine_heads(x))

        return output

    def reset_parameters(self, initializer="uniform_scaling", **kwargs):
        if initializer == "uniform_scaling":
            # 6 / (4 * hidden_size) -> 6 / (2 * hidden_size)
            nn.init.xavier_uniform_(self.q_transform.weight, 2 ** -0.5)
            nn.init.xavier_uniform_(self.k_transform.weight, 2 ** -0.5)
            nn.init.xavier_uniform_(self.v_transform.weight, 2 ** -0.5)
            nn.init.xavier_uniform_(self.o_transform.weight)
            nn.init.constant_(self.q_transform.bias, 0.0)
            nn.init.constant_(self.k_transform.bias, 0.0)
            nn.init.constant_(self.v_transform.bias, 0.0)
            nn.init.constant_(self.o_transform.bias, 0.0)
        elif initializer == "uniform":
            nn.init.uniform_(self.q_transform.weight, -0.04, 0.04)
            nn.init.uniform_(self.k_transform.weight, -0.04, 0.04)
            nn.init.uniform_(self.v_transform.weight, -0.04, 0.04)
            nn.init.uniform_(self.o_transform.weight, -0.04, 0.04)
            nn.init.uniform_(self.q_transform.bias, -0.04, 0.04)
            nn.init.uniform_(self.k_transform.bias, -0.04, 0.04)
            nn.init.uniform_(self.v_transform.bias, -0.04, 0.04)
            nn.init.uniform_(self.o_transform.bias, -0.04, 0.04)
        else:
            # Bug fix: ``%d`` with a string initializer raised TypeError.
            raise ValueError("Unknown initializer %s" % initializer)
| 9,955 | 34.942238 | 78 | py |
THUMT | THUMT-master/thumt/models/transformer.py | # coding=utf-8
# Copyright 2017-2020 The THUMT Authors
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import torch
import torch.nn as nn
import thumt.utils as utils
import thumt.modules as modules
class AttentionSubLayer(modules.Module):
    """Multi-head attention with residual connection and layer norm.

    ``normalization == "before"`` applies pre-norm (norm -> attn -> add);
    any other value applies post-norm (attn -> add -> norm). During
    inference with a ``state`` dict, the cached keys/values are reused and
    updated for incremental decoding.
    """

    def __init__(self, params, name="attention"):
        super(AttentionSubLayer, self).__init__(name=name)
        self.dropout = params.residual_dropout
        self.normalization = params.normalization

        with utils.scope(name):
            self.attention = modules.MultiHeadAttention(
                params.hidden_size, params.num_heads, params.attention_dropout)
            self.layer_norm = modules.LayerNorm(params.hidden_size)

    def forward(self, x, bias, memory=None, state=None):
        y = self.layer_norm(x) if self.normalization == "before" else x

        if self.training or state is None:
            y = self.attention(y, bias, memory, None)
        else:
            # Incremental decoding: pass and refresh the k/v cache.
            y, state["k"], state["v"] = self.attention(
                y, bias, memory, [state["k"], state["v"]])

        y = nn.functional.dropout(y, self.dropout, self.training)

        if self.normalization == "before":
            return x + y
        return self.layer_norm(x + y)
class FFNSubLayer(modules.Module):
    """Feed-forward network with residual connection and layer norm.

    Pre-norm or post-norm placement follows ``params.normalization``.
    """

    def __init__(self, params, dtype=None, name="ffn_layer"):
        super(FFNSubLayer, self).__init__(name=name)
        self.dropout = params.residual_dropout
        self.normalization = params.normalization

        with utils.scope(name):
            self.ffn_layer = modules.FeedForward(params.hidden_size,
                                                 params.filter_size,
                                                 dropout=params.relu_dropout)
            self.layer_norm = modules.LayerNorm(params.hidden_size)

    def forward(self, x):
        y = self.layer_norm(x) if self.normalization == "before" else x
        y = self.ffn_layer(y)
        y = nn.functional.dropout(y, self.dropout, self.training)

        if self.normalization == "before":
            return x + y
        return self.layer_norm(x + y)
class TransformerEncoderLayer(modules.Module):
    """One encoder layer: self-attention followed by a feed-forward block."""

    def __init__(self, params, name="layer"):
        super(TransformerEncoderLayer, self).__init__(name=name)
        with utils.scope(name):
            self.self_attention = AttentionSubLayer(params)
            self.feed_forward = FFNSubLayer(params)

    def forward(self, x, bias):
        return self.feed_forward(self.self_attention(x, bias))
class TransformerDecoderLayer(modules.Module):
    """One decoder layer: masked self-attention, enc-dec attention, FFN.

    NOTE(review): this class overrides ``__call__`` directly instead of
    defining ``forward`` — kept as-is since it threads the decoding
    ``state`` through; presumably intentional upstream, verify before
    changing.
    """

    def __init__(self, params, name="layer"):
        super(TransformerDecoderLayer, self).__init__(name=name)
        with utils.scope(name):
            self.self_attention = AttentionSubLayer(params,
                                                    name="self_attention")
            self.encdec_attention = AttentionSubLayer(params,
                                                      name="encdec_attention")
            self.feed_forward = FFNSubLayer(params)

    def __call__(self, x, attn_bias, encdec_bias, memory, state=None):
        hidden = self.self_attention(x, attn_bias, state=state)
        hidden = self.encdec_attention(hidden, encdec_bias, memory)
        return self.feed_forward(hidden)
class TransformerEncoder(modules.Module):
    """Stack of encoder layers with a final layer norm in pre-norm mode."""

    def __init__(self, params, name="encoder"):
        super(TransformerEncoder, self).__init__(name=name)
        self.normalization = params.normalization

        with utils.scope(name):
            self.layers = nn.ModuleList(
                [TransformerEncoderLayer(params, name="layer_%d" % i)
                 for i in range(params.num_encoder_layers)])
            if self.normalization == "before":
                self.layer_norm = modules.LayerNorm(params.hidden_size)
            else:
                self.layer_norm = None

    def forward(self, x, bias):
        for layer in self.layers:
            x = layer(x, bias)

        # Final norm only exists (and applies) in pre-norm mode.
        if self.layer_norm is not None:
            x = self.layer_norm(x)

        return x
class TransformerDecoder(modules.Module):
    """Stack of decoder layers with a final layer norm in pre-norm mode."""

    def __init__(self, params, name="decoder"):
        super(TransformerDecoder, self).__init__(name=name)
        self.normalization = params.normalization

        with utils.scope(name):
            self.layers = nn.ModuleList(
                [TransformerDecoderLayer(params, name="layer_%d" % i)
                 for i in range(params.num_decoder_layers)])
            if self.normalization == "before":
                self.layer_norm = modules.LayerNorm(params.hidden_size)
            else:
                self.layer_norm = None

    def forward(self, x, attn_bias, encdec_bias, memory, state=None):
        for i, layer in enumerate(self.layers):
            # Each layer gets its own slot of the decoding state, if any.
            layer_state = (None if state is None
                           else state["decoder"]["layer_%d" % i])
            x = layer(x, attn_bias, encdec_bias, memory, layer_state)

        # Final norm only exists (and applies) in pre-norm mode.
        if self.layer_norm is not None:
            x = self.layer_norm(x)

        return x
class Transformer(modules.Module):
    def __init__(self, params, name="transformer"):
        """Build the full Transformer: embeddings, encoder, decoder, loss."""
        super(Transformer, self).__init__(name=name)
        self.params = params
        with utils.scope(name):
            # Embedding/softmax parameters (possibly shared; see
            # build_embedding), positional encoding, and the two stacks.
            self.build_embedding(params)
            self.encoding = modules.PositionalEmbedding()
            self.encoder = TransformerEncoder(params)
            self.decoder = TransformerDecoder(params)
        # Label-smoothed cross entropy used as the training criterion.
        self.criterion = modules.SmoothedCrossEntropyLoss(
            params.label_smoothing)
        self.dropout = params.residual_dropout
        self.hidden_size = params.hidden_size
        self.num_encoder_layers = params.num_encoder_layers
        self.num_decoder_layers = params.num_decoder_layers
        self.reset_parameters()
def build_embedding(self, params):
svoc_size = len(params.vocabulary["source"])
tvoc_size = len(params.vocabulary["target"])
if params.shared_source_target_embedding and svoc_size != tvoc_size:
raise ValueError("Cannot share source and target embedding.")
if not params.shared_embedding_and_softmax_weights:
self.softmax_weights = torch.nn.Parameter(
torch.empty([tvoc_size, params.hidden_size]))
self.add_name(self.softmax_weights, "softmax_weights")
if not params.shared_source_target_embedding:
self.source_embedding = torch.nn.Parameter(
torch.empty([svoc_size, params.hidden_size]))
self.target_embedding = torch.nn.Parameter(
torch.empty([tvoc_size, params.hidden_size]))
self.add_name(self.source_embedding, "source_embedding")
self.add_name(self.target_embedding, "target_embedding")
else:
self.weights = torch.nn.Parameter(
torch.empty([svoc_size, params.hidden_size]))
self.add_name(self.weights, "weights")
self.bias = torch.nn.Parameter(torch.zeros([params.hidden_size]))
self.add_name(self.bias, "bias")
@property
def src_embedding(self):
if self.params.shared_source_target_embedding:
return self.weights
else:
return self.source_embedding
@property
def tgt_embedding(self):
if self.params.shared_source_target_embedding:
return self.weights
else:
return self.target_embedding
@property
def softmax_embedding(self):
if not self.params.shared_embedding_and_softmax_weights:
return self.softmax_weights
else:
return self.tgt_embedding
def reset_parameters(self):
nn.init.normal_(self.src_embedding, mean=0.0,
std=self.params.hidden_size ** -0.5)
nn.init.normal_(self.tgt_embedding, mean=0.0,
std=self.params.hidden_size ** -0.5)
if not self.params.shared_embedding_and_softmax_weights:
nn.init.normal_(self.softmax_weights, mean=0.0,
std=self.params.hidden_size ** -0.5)
def encode(self, features, state):
src_seq = features["source"]
src_mask = features["source_mask"]
enc_attn_bias = self.masking_bias(src_mask)
inputs = torch.nn.functional.embedding(src_seq, self.src_embedding)
inputs = inputs * (self.hidden_size ** 0.5)
inputs = inputs + self.bias
inputs = nn.functional.dropout(self.encoding(inputs), self.dropout,
self.training)
enc_attn_bias = enc_attn_bias.to(inputs)
encoder_output = self.encoder(inputs, enc_attn_bias)
state["encoder_output"] = encoder_output
state["enc_attn_bias"] = enc_attn_bias
return state
def decode(self, features, state, mode="infer"):
tgt_seq = features["target"]
enc_attn_bias = state["enc_attn_bias"]
dec_attn_bias = self.causal_bias(tgt_seq.shape[1])
targets = torch.nn.functional.embedding(tgt_seq, self.tgt_embedding)
targets = targets * (self.hidden_size ** 0.5)
decoder_input = torch.cat(
[targets.new_zeros([targets.shape[0], 1, targets.shape[-1]]),
targets[:, 1:, :]], dim=1)
decoder_input = nn.functional.dropout(self.encoding(decoder_input),
self.dropout, self.training)
encoder_output = state["encoder_output"]
dec_attn_bias = dec_attn_bias.to(targets)
if mode == "infer":
decoder_input = decoder_input[:, -1:, :]
dec_attn_bias = dec_attn_bias[:, :, -1:, :]
decoder_output = self.decoder(decoder_input, dec_attn_bias,
enc_attn_bias, encoder_output, state)
decoder_output = torch.reshape(decoder_output, [-1, self.hidden_size])
decoder_output = torch.transpose(decoder_output, -1, -2)
logits = torch.matmul(self.softmax_embedding, decoder_output)
logits = torch.transpose(logits, 0, 1)
return logits, state
def forward(self, features, labels, mode="train", level="sentence"):
mask = features["target_mask"]
state = self.empty_state(features["target"].shape[0],
labels.device)
state = self.encode(features, state)
logits, _ = self.decode(features, state, mode=mode)
loss = self.criterion(logits, labels)
mask = mask.to(torch.float32)
# Prevent FP16 overflow
if loss.dtype == torch.float16:
loss = loss.to(torch.float32)
if mode == "eval":
if level == "sentence":
return -torch.sum(loss * mask, 1)
else:
return torch.exp(-loss) * mask - (1 - mask)
return (torch.sum(loss * mask) / torch.sum(mask)).to(logits)
def empty_state(self, batch_size, device):
state = {
"decoder": {
"layer_%d" % i: {
"k": torch.zeros([batch_size, 0, self.hidden_size],
device=device),
"v": torch.zeros([batch_size, 0, self.hidden_size],
device=device)
} for i in range(self.num_decoder_layers)
}
}
return state
@staticmethod
def masking_bias(mask, inf=-1e9):
ret = (1.0 - mask) * inf
return torch.unsqueeze(torch.unsqueeze(ret, 1), 1)
@staticmethod
def causal_bias(length, inf=-1e9):
ret = torch.ones([length, length]) * inf
ret = torch.triu(ret, diagonal=1)
return torch.reshape(ret, [1, 1, length, length])
@staticmethod
def base_params():
params = utils.HParams(
pad="<pad>",
bos="<eos>",
eos="<eos>",
unk="<unk>",
hidden_size=512,
filter_size=2048,
num_heads=8,
num_encoder_layers=6,
num_decoder_layers=6,
attention_dropout=0.0,
residual_dropout=0.1,
relu_dropout=0.0,
label_smoothing=0.1,
normalization="after",
shared_embedding_and_softmax_weights=False,
shared_source_target_embedding=False,
# Override default parameters
warmup_steps=4000,
train_steps=100000,
learning_rate=7e-4,
learning_rate_schedule="linear_warmup_rsqrt_decay",
batch_size=4096,
fixed_batch_size=False,
adam_beta1=0.9,
adam_beta2=0.98,
adam_epsilon=1e-9,
clip_grad_norm=0.0
)
return params
@staticmethod
def base_params_v2():
params = Transformer.base_params()
params.attention_dropout = 0.1
params.relu_dropout = 0.1
params.learning_rate = 12e-4
params.warmup_steps = 8000
params.normalization = "before"
params.adam_beta2 = 0.997
return params
@staticmethod
def big_params():
params = Transformer.base_params()
params.hidden_size = 1024
params.filter_size = 4096
params.num_heads = 16
params.residual_dropout = 0.3
params.learning_rate = 5e-4
params.train_steps = 300000
return params
@staticmethod
def big_params_v2():
params = Transformer.base_params_v2()
params.hidden_size = 1024
params.filter_size = 4096
params.num_heads = 16
params.residual_dropout = 0.3
params.learning_rate = 7e-4
params.train_steps = 300000
return params
@staticmethod
def default_params(name=None):
if name == "base":
return Transformer.base_params()
elif name == "base_v2":
return Transformer.base_params_v2()
elif name == "big":
return Transformer.big_params()
elif name == "big_v2":
return Transformer.big_params_v2()
else:
return Transformer.base_params()
| 14,385 | 32.61215 | 79 | py |
THUMT | THUMT-master/thumt/bin/scorer.py | #! /usr/bin python
# coding=utf-8
# Copyright 2017-2020 The THUMT Authors
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import six
import time
import copy
import torch
import socket
import logging
import argparse
import numpy as np
import torch.distributed as dist
import thumt.data as data
import thumt.utils as utils
import thumt.models as models
logging.getLogger().setLevel(logging.INFO)
def parse_args():
parser = argparse.ArgumentParser(
description="Score input sentences with pre-trained checkpoints.",
usage="scorer.py [<args>] [-h | --help]"
)
# input files
parser.add_argument("--input", type=str, required=True, nargs=2,
help="Path to input file.")
parser.add_argument("--output", type=str, required=True,
help="Path to output file.")
parser.add_argument("--checkpoint", type=str, required=True,
help="Path to trained checkpoint.")
parser.add_argument("--vocabulary", type=str, nargs=2, required=True,
help="Path to source and target vocabulary.")
# model and configuration
parser.add_argument("--model", type=str, required=True,
help="Name of the model.")
parser.add_argument("--parameters", type=str, default="",
help="Additional hyper-parameters.")
parser.add_argument("--half", action="store_true",
help="Enable Half-precision for decoding.")
return parser.parse_args()
def default_params():
params = utils.HParams(
input=None,
output=None,
vocabulary=None,
model=None,
# vocabulary specific
pad="<pad>",
bos="<bos>",
eos="<eos>",
unk="<unk>",
append_eos=False,
monte_carlo=False,
device_list=[0],
decode_batch_size=32,
buffer_size=10000,
level="sentence"
)
return params
def merge_params(params1, params2):
params = utils.HParams()
for (k, v) in six.iteritems(params1.values()):
params.add_hparam(k, v)
params_dict = params.values()
for (k, v) in six.iteritems(params2.values()):
if k in params_dict:
# Override
setattr(params, k, v)
else:
params.add_hparam(k, v)
return params
def import_params(model_dir, model_name, params):
model_dir = os.path.abspath(model_dir)
m_name = os.path.join(model_dir, model_name + ".json")
if not os.path.exists(m_name):
return params
with open(m_name) as fd:
logging.info("Restoring model parameters from %s" % m_name)
json_str = fd.readline()
params.parse_json(json_str)
return params
def override_params(params, args):
if args.parameters:
params.parse(args.parameters.lower())
params.vocabulary = {
"source": data.Vocabulary(args.vocabulary[0]),
"target": data.Vocabulary(args.vocabulary[1])
}
return params
def infer_gpu_num(param_str):
result = re.match(r".*device_list=\[(.*?)\].*", param_str)
if not result:
return 1
dev_str = result.groups()[-1]
return len(dev_str.split(","))
def main(args):
model_cls = models.get_model(args.model)
# Import and override parameters
# Priorities (low -> high):
# default -> saved -> command
params = default_params()
params = merge_params(params, model_cls.default_params())
params = import_params(args.checkpoint, args.model, params)
params = override_params(params, args)
params.device = params.device_list[args.local_rank]
dist.init_process_group("nccl", init_method=args.url,
rank=args.local_rank,
world_size=len(params.device_list))
torch.cuda.set_device(params.device_list[args.local_rank])
torch.set_default_tensor_type(torch.cuda.FloatTensor)
if args.half:
torch.set_default_dtype(torch.half)
torch.set_default_tensor_type(torch.cuda.HalfTensor)
def score_fn(inputs, _model, level="sentence"):
_features, _labels = inputs
_score = _model(_features, _labels, mode="eval", level=level)
return _score
# Create model
with torch.no_grad():
model = model_cls(params).cuda()
if args.half:
model = model.half()
if not params.monte_carlo:
model.eval()
model.load_state_dict(
torch.load(utils.latest_checkpoint(args.checkpoint),
map_location="cpu")["model"])
dataset = data.MTPipeline.get_eval_dataset(args.input, params)
data_iter = iter(dataset)
counter = 0
pad_max = 1024
# Buffers for synchronization
size = torch.zeros([dist.get_world_size()]).long()
if params.level == "sentence":
t_list = [torch.empty([params.decode_batch_size]).float()
for _ in range(dist.get_world_size())]
else:
t_list = [torch.empty([params.decode_batch_size, pad_max]).float()
for _ in range(dist.get_world_size())]
if dist.get_rank() == 0:
fd = open(args.output, "w")
else:
fd = None
while True:
try:
features = next(data_iter)
batch_size = features[0]["source"].shape[0]
except:
features = {
"source": torch.ones([1, 1]).long(),
"source_mask": torch.ones([1, 1]).float(),
"target": torch.ones([1, 1]).long(),
"target_mask": torch.ones([1, 1]).float()
}, torch.ones([1, 1]).long()
batch_size = 0
t = time.time()
counter += 1
scores = score_fn(features, model, params.level)
# Padding
if params.level == "sentence":
pad_batch = params.decode_batch_size - scores.shape[0]
scores = torch.nn.functional.pad(scores, [0, pad_batch])
else:
pad_batch = params.decode_batch_size - scores.shape[0]
pad_length = pad_max - scores.shape[1]
scores = torch.nn.functional.pad(
scores, (0, pad_length, 0, pad_batch), value=-1)
# Synchronization
size.zero_()
size[dist.get_rank()].copy_(torch.tensor(batch_size))
dist.all_reduce(size)
dist.all_gather(t_list, scores.float())
if size.sum() == 0:
break
if dist.get_rank() != 0:
continue
for i in range(params.decode_batch_size):
for j in range(dist.get_world_size()):
n = size[j]
score = t_list[j][i]
if i >= n:
continue
if params.level == "sentence":
fd.write("{:.4f}\n".format(score))
else:
s_list = score.tolist()
for s in s_list:
if s >= 0:
fd.write("{:.8f} ".format(s))
else:
fd.write("\n")
break
t = time.time() - t
logging.info("Finished batch: %d (%.3f sec)" % (counter, t))
if dist.get_rank() == 0:
fd.close()
# Wrap main function
def process_fn(rank, args):
local_args = copy.copy(args)
local_args.local_rank = rank
main(local_args)
def cli_main():
parsed_args = parse_args()
# Pick a free port
with socket.socket() as s:
s.bind(("localhost", 0))
port = s.getsockname()[1]
url = "tcp://localhost:" + str(port)
parsed_args.url = url
world_size = infer_gpu_num(parsed_args.parameters)
if world_size > 1:
torch.multiprocessing.spawn(process_fn, args=(parsed_args,),
nprocs=world_size)
else:
process_fn(0, parsed_args)
if __name__ == "__main__":
cli_main()
| 8,317 | 28.185965 | 78 | py |
THUMT | THUMT-master/thumt/bin/trainer.py | # coding=utf-8
# Copyright 2017-2020 The THUMT Authors
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import copy
import glob
import logging
import os
import re
import six
import socket
import time
import torch
import thumt.data as data
import torch.distributed as dist
import thumt.models as models
import thumt.optimizers as optimizers
import thumt.utils as utils
import thumt.utils.summary as summary
def parse_args(args=None):
parser = argparse.ArgumentParser(
description="Train a neural machine translation model.",
usage="trainer.py [<args>] [-h | --help]"
)
# input files
parser.add_argument("--input", type=str, nargs=2,
help="Path to source and target corpus.")
parser.add_argument("--output", type=str, default="train",
help="Path to load/store checkpoints.")
parser.add_argument("--vocabulary", type=str, nargs=2,
help="Path to source and target vocabulary.")
parser.add_argument("--validation", type=str,
help="Path to validation file.")
parser.add_argument("--references", type=str,
help="Pattern to reference files.")
parser.add_argument("--checkpoint", type=str,
help="Path to pre-trained checkpoint.")
parser.add_argument("--distributed", action="store_true",
help="Enable distributed training.")
parser.add_argument("--local_rank", type=int,
help="Local rank of this process.")
parser.add_argument("--half", action="store_true",
help="Enable mixed-precision training.")
parser.add_argument("--hparam_set", type=str,
help="Name of pre-defined hyper-parameter set.")
# model and configuration
parser.add_argument("--model", type=str, required=True,
help="Name of the model.")
parser.add_argument("--parameters", type=str, default="",
help="Additional hyper-parameters.")
return parser.parse_args(args)
def default_params():
params = utils.HParams(
input=["", ""],
output="",
model="transformer",
vocab=["", ""],
pad="<pad>",
bos="<eos>",
eos="<eos>",
unk="<unk>",
# Dataset
batch_size=4096,
fixed_batch_size=False,
min_length=1,
max_length=256,
buffer_size=10000,
# Initialization
initializer_gain=1.0,
initializer="uniform_unit_scaling",
# Regularization
scale_l1=0.0,
scale_l2=0.0,
# Training
initial_step=0,
warmup_steps=4000,
train_steps=100000,
update_cycle=1,
optimizer="Adam",
adam_beta1=0.9,
adam_beta2=0.999,
adam_epsilon=1e-8,
adadelta_rho=0.95,
adadelta_epsilon=1e-7,
pattern="",
clipping="global_norm",
clip_grad_norm=5.0,
learning_rate=1.0,
initial_learning_rate=0.0,
learning_rate_schedule="linear_warmup_rsqrt_decay",
learning_rate_boundaries=[0],
learning_rate_values=[0.0],
device_list=[0],
# Checkpoint Saving
keep_checkpoint_max=20,
keep_top_checkpoint_max=5,
save_summary=True,
save_checkpoint_secs=0,
save_checkpoint_steps=1000,
# Validation
eval_steps=2000,
eval_secs=0,
top_beams=1,
beam_size=4,
decode_batch_size=32,
decode_alpha=0.6,
decode_ratio=1.0,
decode_length=50,
validation="",
references="",
)
return params
def import_params(model_dir, model_name, params):
model_dir = os.path.abspath(model_dir)
p_name = os.path.join(model_dir, "params.json")
m_name = os.path.join(model_dir, model_name + ".json")
if os.path.exists(p_name):
with open(p_name) as fd:
logging.info("Restoring hyper parameters from %s" % p_name)
json_str = fd.readline()
params.parse_json(json_str)
if os.path.exists(m_name):
with open(m_name) as fd:
logging.info("Restoring model parameters from %s" % m_name)
json_str = fd.readline()
params.parse_json(json_str)
return params
def export_params(output_dir, name, params):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Save params as params.json
filename = os.path.join(output_dir, name)
with open(filename, "w") as fd:
fd.write(params.to_json())
def merge_params(params1, params2):
params = utils.HParams()
for (k, v) in six.iteritems(params1.values()):
params.add_hparam(k, v)
params_dict = params.values()
for (k, v) in six.iteritems(params2.values()):
if k in params_dict:
# Override
setattr(params, k, v)
else:
params.add_hparam(k, v)
return params
def override_params(params, args):
params.model = args.model or params.model
params.input = args.input or params.input
params.output = args.output or params.output
params.vocab = args.vocabulary or params.vocab
params.validation = args.validation or params.validation
params.references = args.references or params.references
params.parse(args.parameters.lower())
params.vocabulary = {
"source": data.Vocabulary(params.vocab[0]),
"target": data.Vocabulary(params.vocab[1])
}
return params
def collect_params(all_params, params):
collected = utils.HParams()
for k in six.iterkeys(params.values()):
collected.add_hparam(k, getattr(all_params, k))
return collected
def print_variables(model, pattern, log=True):
flags = []
for (name, var) in model.named_parameters():
if re.search(pattern, name):
flags.append(True)
else:
flags.append(False)
weights = {v[0]: v[1] for v in model.named_parameters()}
total_size = 0
for name in sorted(list(weights)):
if re.search(pattern, name):
v = weights[name]
total_size += v.nelement()
if log:
print("%s %s" % (name.ljust(60), str(list(v.shape)).rjust(15)))
if log:
print("Total trainable variables size: %d" % total_size)
return flags
def exclude_variables(flags, grads_and_vars):
idx = 0
new_grads = []
new_vars = []
for grad, (name, var) in grads_and_vars:
if flags[idx]:
new_grads.append(grad)
new_vars.append((name, var))
idx += 1
return zip(new_grads, new_vars)
def save_checkpoint(step, epoch, model, optimizer, params):
if dist.get_rank() == 0:
state = {
"step": step,
"epoch": epoch,
"model": model.state_dict(),
"optimizer": optimizer.state_dict()
}
utils.save(state, params.output, params.keep_checkpoint_max)
def infer_gpu_num(param_str):
result = re.match(r".*device_list=\[(.*?)\].*", param_str)
if not result:
return 1
else:
dev_str = result.groups()[-1]
return len(dev_str.split(","))
def broadcast(model):
for var in model.parameters():
dist.broadcast(var.data, 0)
def get_learning_rate_schedule(params):
if params.learning_rate_schedule == "linear_warmup_rsqrt_decay":
schedule = optimizers.LinearWarmupRsqrtDecay(
params.learning_rate, params.warmup_steps,
initial_learning_rate=params.initial_learning_rate,
summary=params.save_summary)
elif params.learning_rate_schedule == "piecewise_constant_decay":
schedule = optimizers.PiecewiseConstantDecay(
params.learning_rate_boundaries, params.learning_rate_values,
summary=params.save_summary)
elif params.learning_rate_schedule == "linear_exponential_decay":
schedule = optimizers.LinearExponentialDecay(
params.learning_rate, params.warmup_steps,
params.start_decay_step, params.end_decay_step,
dist.get_world_size(), summary=params.save_summary)
elif params.learning_rate_schedule == "constant":
schedule = params.learning_rate
else:
raise ValueError("Unknown schedule %s" % params.learning_rate_schedule)
return schedule
def get_clipper(params):
if params.clipping.lower() == "none":
clipper = None
elif params.clipping.lower() == "adaptive":
clipper = optimizers.adaptive_clipper(0.95)
elif params.clipping.lower() == "global_norm":
clipper = optimizers.global_norm_clipper(params.clip_grad_norm)
else:
raise ValueError("Unknown clipper %s" % params.clipping)
return clipper
def get_optimizer(params, schedule, clipper):
if params.optimizer.lower() == "adam":
optimizer = optimizers.AdamOptimizer(learning_rate=schedule,
beta_1=params.adam_beta1,
beta_2=params.adam_beta2,
epsilon=params.adam_epsilon,
clipper=clipper,
summaries=params.save_summary)
elif params.optimizer.lower() == "adadelta":
optimizer = optimizers.AdadeltaOptimizer(
learning_rate=schedule, rho=params.adadelta_rho,
epsilon=params.adadelta_epsilon, clipper=clipper,
summaries=params.save_summary)
elif params.optimizer.lower() == "sgd":
optimizer = optimizers.SGDOptimizer(
learning_rate=schedule, clipper=clipper,
summaries=params.save_summary)
else:
raise ValueError("Unknown optimizer %s" % params.optimizer)
return optimizer
def load_references(pattern):
if not pattern:
return None
files = glob.glob(pattern)
references = []
for name in files:
ref = []
with open(name, "rb") as fd:
for line in fd:
items = line.strip().split()
ref.append(items)
references.append(ref)
return list(zip(*references))
def main(args):
model_cls = models.get_model(args.model)
# Import and override parameters
# Priorities (low -> high):
# default -> saved -> command
params = default_params()
params = merge_params(params, model_cls.default_params(args.hparam_set))
params = import_params(args.output, args.model, params)
params = override_params(params, args)
# Initialize distributed utility
if args.distributed:
params.device = args.local_rank
dist.init_process_group("nccl")
torch.cuda.set_device(args.local_rank)
torch.set_default_tensor_type(torch.cuda.FloatTensor)
else:
params.device = params.device_list[args.local_rank]
dist.init_process_group("nccl", init_method=args.url,
rank=args.local_rank,
world_size=len(params.device_list))
torch.cuda.set_device(params.device_list[args.local_rank])
torch.set_default_tensor_type(torch.cuda.FloatTensor)
# Export parameters
if dist.get_rank() == 0:
export_params(params.output, "params.json", params)
export_params(params.output, "%s.json" % params.model,
collect_params(params, model_cls.default_params()))
model = model_cls(params).cuda()
if args.half:
model = model.half()
torch.set_default_dtype(torch.half)
torch.set_default_tensor_type(torch.cuda.HalfTensor)
model.train()
# Init tensorboard
summary.init(params.output, params.save_summary)
schedule = get_learning_rate_schedule(params)
clipper = get_clipper(params)
optimizer = get_optimizer(params, schedule, clipper)
if args.half:
optimizer = optimizers.LossScalingOptimizer(optimizer)
optimizer = optimizers.MultiStepOptimizer(optimizer, params.update_cycle)
trainable_flags = print_variables(model, params.pattern,
dist.get_rank() == 0)
dataset = data.MTPipeline.get_train_dataset(params.input, params)
if params.validation:
sorted_key, eval_dataset = data.MTPipeline.get_infer_dataset(
params.validation, params)
references = load_references(params.references)
else:
sorted_key = None
eval_dataset = None
references = None
# Load checkpoint
checkpoint = utils.latest_checkpoint(params.output)
if args.checkpoint is not None:
# Load pre-trained models
state = torch.load(args.checkpoint, map_location="cpu")
model.load_state_dict(state["model"])
step = params.initial_step
epoch = 0
broadcast(model)
elif checkpoint is not None:
state = torch.load(checkpoint, map_location="cpu")
step = state["step"]
epoch = state["epoch"]
model.load_state_dict(state["model"])
if "optimizer" in state:
optimizer.load_state_dict(state["optimizer"])
else:
step = 0
epoch = 0
broadcast(model)
def train_fn(inputs):
features, labels = inputs
loss = model(features, labels)
return loss
counter = 0
while True:
for features in dataset:
if counter % params.update_cycle == 0:
step += 1
utils.set_global_step(step)
counter += 1
t = time.time()
loss = train_fn(features)
gradients = optimizer.compute_gradients(loss,
list(model.parameters()))
grads_and_vars = exclude_variables(
trainable_flags,
zip(gradients, list(model.named_parameters())))
optimizer.apply_gradients(grads_and_vars)
t = time.time() - t
summary.scalar("loss", loss, step, write_every_n_steps=1)
summary.scalar("global_step/sec", t, step)
print("epoch = %d, step = %d, loss = %.3f (%.3f sec)" %
(epoch + 1, step, float(loss), t))
if counter % params.update_cycle == 0:
if step >= params.train_steps:
utils.evaluate(model, sorted_key, eval_dataset,
params.output, references, params)
save_checkpoint(step, epoch, model, optimizer, params)
if dist.get_rank() == 0:
summary.close()
return
if step % params.eval_steps == 0:
utils.evaluate(model, sorted_key, eval_dataset,
params.output, references, params)
if step % params.save_checkpoint_steps == 0:
save_checkpoint(step, epoch, model, optimizer, params)
epoch += 1
# Wrap main function
def process_fn(rank, args):
local_args = copy.copy(args)
local_args.local_rank = rank
main(local_args)
def cli_main():
parsed_args = parse_args()
if parsed_args.distributed:
main(parsed_args)
else:
# Pick a free port
with socket.socket() as s:
s.bind(("localhost", 0))
port = s.getsockname()[1]
url = "tcp://localhost:" + str(port)
parsed_args.url = url
world_size = infer_gpu_num(parsed_args.parameters)
if world_size > 1:
torch.multiprocessing.spawn(process_fn, args=(parsed_args,),
nprocs=world_size)
else:
process_fn(0, parsed_args)
if __name__ == "__main__":
cli_main()
| 15,969 | 29.770713 | 79 | py |
THUMT | THUMT-master/thumt/bin/translator.py | # coding=utf-8
# Copyright 2017-2020 The THUMT Authors
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import copy
import logging
import os
import re
import six
import socket
import time
import torch
import thumt.data as data
import torch.distributed as dist
import thumt.models as models
import thumt.utils as utils
def parse_args():
parser = argparse.ArgumentParser(
description="Decode input sentences with pre-trained checkpoints.",
usage="translator.py [<args>] [-h | --help]"
)
# input files
parser.add_argument("--input", type=str, required=True, nargs="+",
help="Path to input file.")
parser.add_argument("--output", type=str, required=True,
help="Path to output file.")
parser.add_argument("--checkpoints", type=str, required=True, nargs="+",
help="Path to trained checkpoints.")
parser.add_argument("--vocabulary", type=str, nargs=2, required=True,
help="Path to source and target vocabulary.")
# model and configuration
parser.add_argument("--models", type=str, required=True, nargs="+",
help="Name of the models.")
parser.add_argument("--parameters", type=str, default="",
help="Additional hyper-parameters.")
# mutually exclusive parameters
group = parser.add_mutually_exclusive_group()
group.add_argument("--half", action="store_true",
help="Enable Half-precision for decoding.")
group.add_argument("--cpu", action="store_true",
help="Enable CPU for decoding.")
return parser.parse_args()
def default_params():
params = utils.HParams(
input=None,
output=None,
vocabulary=None,
# vocabulary specific
pad="<pad>",
bos="<bos>",
eos="<eos>",
unk="<unk>",
device_list=[0],
# decoding
top_beams=1,
beam_size=4,
decode_alpha=0.6,
decode_ratio=1.0,
decode_length=50,
decode_batch_size=32,
)
return params
def merge_params(params1, params2):
params = utils.HParams()
for (k, v) in six.iteritems(params1.values()):
params.add_hparam(k, v)
params_dict = params.values()
for (k, v) in six.iteritems(params2.values()):
if k in params_dict:
# Override
setattr(params, k, v)
else:
params.add_hparam(k, v)
return params
def import_params(model_dir, model_name, params):
model_dir = os.path.abspath(model_dir)
m_name = os.path.join(model_dir, model_name + ".json")
if not os.path.exists(m_name):
return params
with open(m_name) as fd:
logging.info("Restoring model parameters from %s" % m_name)
json_str = fd.readline()
params.parse_json(json_str)
return params
def override_params(params, args):
params.parse(args.parameters.lower())
params.vocabulary = {
"source": data.Vocabulary(args.vocabulary[0]),
"target": data.Vocabulary(args.vocabulary[1])
}
return params
def convert_to_string(tensor, params, direction="target"):
ids = tensor.tolist()
output = []
eos_id = params.vocabulary[direction][params.eos]
for wid in ids:
if wid == eos_id:
break
output.append(params.vocabulary[direction][wid])
output = b" ".join(output)
return output
def infer_gpu_num(param_str):
result = re.match(r".*device_list=\[(.*?)\].*", param_str)
if not result:
return 1
else:
dev_str = result.groups()[-1]
return len(dev_str.split(","))
def main(args):
# Load configs
model_cls_list = [models.get_model(model) for model in args.models]
params_list = [default_params() for _ in range(len(model_cls_list))]
params_list = [
merge_params(params, model_cls.default_params())
for params, model_cls in zip(params_list, model_cls_list)]
params_list = [
import_params(args.checkpoints[i], args.models[i], params_list[i])
for i in range(len(args.checkpoints))]
params_list = [
override_params(params_list[i], args)
for i in range(len(model_cls_list))]
params = params_list[0]
if args.cpu:
dist.init_process_group("gloo",
init_method=args.url,
rank=args.local_rank,
world_size=1)
torch.set_default_tensor_type(torch.FloatTensor)
else:
params.device = params.device_list[args.local_rank]
dist.init_process_group("nccl",
init_method=args.url,
rank=args.local_rank,
world_size=len(params.device_list))
torch.cuda.set_device(params.device_list[args.local_rank])
torch.set_default_tensor_type(torch.cuda.FloatTensor)
if args.half:
torch.set_default_dtype(torch.half)
torch.set_default_tensor_type(torch.cuda.HalfTensor)
# Create model
with torch.no_grad():
model_list = []
for i in range(len(args.models)):
if args.cpu:
model = model_cls_list[i](params_list[i])
else:
model = model_cls_list[i](params_list[i]).cuda()
if args.half:
model = model.half()
model.eval()
model.load_state_dict(
torch.load(utils.latest_checkpoint(args.checkpoints[i]),
map_location="cpu")["model"])
model_list.append(model)
if len(args.input) == 1:
mode = "infer"
sorted_key, dataset = data.MTPipeline.get_infer_dataset(
args.input[0], params)
else:
# Teacher-forcing
mode = "eval"
dataset = data.MTPipeline.get_eval_dataset(args.input, params)
sorted_key = None
iterator = iter(dataset)
counter = 0
pad_max = 1024
top_beams = params.top_beams
decode_batch_size = params.decode_batch_size
# Buffers for synchronization
size = torch.zeros([dist.get_world_size()]).long()
t_list = [torch.empty([decode_batch_size, top_beams, pad_max]).long()
for _ in range(dist.get_world_size())]
all_outputs = []
while True:
try:
features = next(iterator)
if mode == "eval":
features = features[0]
batch_size = features["source"].shape[0]
except:
features = {
"source": torch.ones([1, 1]).long(),
"source_mask": torch.ones([1, 1]).float()
}
if mode == "eval":
features["target"] = torch.ones([1, 1]).long()
features["target_mask"] = torch.ones([1, 1]).float()
batch_size = 0
t = time.time()
counter += 1
# Decode
if mode != "eval":
seqs, _ = utils.beam_search(model_list, features, params)
else:
seqs, _ = utils.argmax_decoding(model_list, features, params)
# Padding
pad_batch = decode_batch_size - seqs.shape[0]
pad_beams = top_beams - seqs.shape[1]
pad_length = pad_max - seqs.shape[2]
seqs = torch.nn.functional.pad(
seqs, (0, pad_length, 0, pad_beams, 0, pad_batch))
# Synchronization
size.zero_()
size[dist.get_rank()].copy_(torch.tensor(batch_size))
if args.cpu:
t_list[dist.get_rank()].copy_(seqs)
else:
dist.all_reduce(size)
dist.all_gather(t_list, seqs)
if size.sum() == 0:
break
if dist.get_rank() != 0:
continue
for i in range(decode_batch_size):
for j in range(dist.get_world_size()):
beam_seqs = []
pad_flag = i >= size[j]
for k in range(top_beams):
seq = convert_to_string(t_list[j][i][k], params)
if pad_flag:
continue
beam_seqs.append(seq)
if pad_flag:
continue
all_outputs.append(beam_seqs)
t = time.time() - t
print("Finished batch: %d (%.3f sec)" % (counter, t))
if dist.get_rank() == 0:
restored_outputs = []
if sorted_key is not None:
for idx in range(len(all_outputs)):
restored_outputs.append(all_outputs[sorted_key[idx]])
else:
restored_outputs = all_outputs
with open(args.output, "wb") as fd:
if top_beams == 1:
for seqs in restored_outputs:
fd.write(seqs[0] + b"\n")
else:
for idx, seqs in enumerate(restored_outputs):
for k, seq in enumerate(seqs):
fd.write(b"%d\t%d\t" % (idx, k))
fd.write(seq + b"\n")
# Per-process wrapper used as the multiprocessing spawn target.
def process_fn(rank, args):
    """Run ``main`` for one worker, tagging it with its process rank."""
    worker_args = copy.copy(args)
    worker_args.local_rank = rank
    main(worker_args)
def cli_main():
    """Entry point: parse args, pick a rendezvous port, launch workers."""
    args = parse_args()
    # Ask the OS for a free TCP port to use as the distributed init URL.
    with socket.socket() as sock:
        sock.bind(("localhost", 0))
        free_port = sock.getsockname()[1]
        args.url = "tcp://localhost:" + str(free_port)
    # One process on CPU, otherwise one process per available GPU.
    world_size = 1 if args.cpu else infer_gpu_num(args.parameters)
    if world_size <= 1:
        process_fn(0, args)
    else:
        torch.multiprocessing.spawn(process_fn, args=(args,),
                                    nprocs=world_size)
if __name__ == "__main__":
    cli_main()
| 10,260 | 28.317143 | 77 | py |
THUMT | THUMT-master/thumt/scripts/average_checkpoints.py | #!/usr/bin/env python
# coding=utf-8
# Copyright 2017-2020 The THUMT Authors
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import glob
import argparse
import collections
import torch
import shutil
def parse_args():
    """Parse command-line options for checkpoint averaging.

    Returns:
        argparse.Namespace with ``path`` (checkpoint directory),
        ``output`` (destination directory, default "average") and
        ``checkpoints`` (number of newest checkpoints to average).
    """
    # NOTE: the description previously read "Create vocabulary" — a
    # copy-paste leftover from another script; fixed to match this tool.
    parser = argparse.ArgumentParser(description="Average checkpoints")
    parser.add_argument("--path", help="checkpoint directory")
    parser.add_argument("--output", default="average",
                        help="Output path")
    parser.add_argument("--checkpoints", default=5, type=int,
                        help="Number of checkpoints to average")
    return parser.parse_args()
def list_checkpoints(path):
    """Return checkpoint paths under *path* sorted by their step counter.

    Only files matching ``*-<number>.pt`` are considered; other ``.pt``
    files are skipped. (The previous ``int(name.rstrip(".pt")...)`` call
    raised ValueError on any non-numeric name; the guard below matches
    the ``isdigit()`` convention used by thumt.utils.checkpoint.)
    Returns ``None`` when no usable checkpoint is found.
    """
    names = glob.glob(os.path.join(path, "*.pt"))
    if not names:
        return None
    vals = []
    for name in names:
        # Parse the trailing step number, e.g. "model-1000.pt" -> 1000.
        counter = name[:-len(".pt")].split("-")[-1]
        if not counter.isdigit():
            continue
        vals.append([int(counter), name])
    if not vals:
        return None
    return [item[1] for item in sorted(vals)]
def main(args):
    """Average the newest ``args.checkpoints`` checkpoints into one.

    The averaged parameters are written to ``<output>/average-0.pt`` and
    any ``*.json`` hyper-parameter files are copied alongside.
    """
    checkpoints = list_checkpoints(args.path)
    if not checkpoints:
        raise ValueError("No checkpoint to average")
    # Keep only the most recent N checkpoints.
    checkpoints = checkpoints[-args.checkpoints:]
    values = collections.OrderedDict()
    for checkpoint in checkpoints:
        print("Loading checkpoint: %s" % checkpoint)
        state = torch.load(checkpoint, map_location="cpu")["model"]
        for key in state:
            # Accumulate in FP32 regardless of the stored dtype.
            if key not in values:
                values[key] = state[key].float().clone()
            else:
                values[key].add_(state[key].float())
    for key in values:
        values[key].div_(len(checkpoints))
    state = {"step": 0, "epoch": 0, "model": values}
    if not os.path.exists(args.output):
        os.makedirs(args.output)
    torch.save(state, os.path.join(args.output, "average-0.pt"))
    # Copy hyper-parameter files so the output directory is self-contained.
    params_pattern = os.path.join(args.path, "*.json")
    params_files = glob.glob(params_pattern)
    for name in params_files:
        new_name = name.replace(args.path.rstrip("/"), args.output.rstrip("/"))
        shutil.copy(name, new_name)
if __name__ == "__main__":
    main(parse_args())
| 2,121 | 24.878049 | 79 | py |
THUMT | THUMT-master/thumt/scripts/convert_checkpoint.py | #!/usr/bin/env python
# coding=utf-8
# Copyright 2017-2020 The THUMT Authors
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import numpy as np
import tensorflow as tf
import torch
def convert_tensor(variables, name, tensor):
    """Rename one TF variable to THUMT's PyTorch naming and store it.

    Applies, in order: slash-to-dot, strip the leading "transformer."
    prefix, layer/offset/scale/matrix renames (matrices are transposed),
    and multihead_attention -> attention. The converted tensor is written
    into *variables* under the new key.
    """
    key = name.replace("/", ".")
    if "transformer" in key:
        key = key[12:]  # drop the leading "transformer." prefix
    key = key.replace("layer_", "layers.")
    key = key.replace("layers.norm", "layer_norm")
    key = key.replace("offset", "bias")
    key = key.replace("scale", "weight")
    if "matrix" in key:
        # TF stores linear kernels transposed relative to PyTorch.
        key = key.replace("matrix", "weight")
        tensor = tensor.transpose()
    key = key.replace("multihead_attention", "attention")
    variables[key] = torch.tensor(tensor)
def main():
    """Convert a TensorFlow THUMT checkpoint to a PyTorch state dict.

    Usage: ``convert_checkpoint.py input output``. Reads every
    ``transformer*`` variable (skipping Adam slot variables), splits the
    fused qkv/kv projections into separate q/k/v tensors, renames the
    remaining variables via ``convert_tensor`` and saves the result as
    ``{"model": variables}``.
    """
    if len(sys.argv) != 3:
        print("convert_checkpoint.py input output")
        exit(-1)
    var_list = tf.train.list_variables(sys.argv[1])
    variables = {}
    reader = tf.train.load_checkpoint(sys.argv[1])
    for (name, _) in var_list:
        tensor = reader.get_tensor(name)
        # Only model weights are converted; optimizer (Adam) slots and
        # non-transformer variables are dropped.
        if not name.startswith("transformer") or "Adam" in name:
            continue
        if "qkv_transform" in name:
            # Fused query/key/value projection: split into three parts.
            if "matrix" in name:
                n1 = name.replace("qkv_transform", "q_transform")
                n2 = name.replace("qkv_transform", "k_transform")
                n3 = name.replace("qkv_transform", "v_transform")
                v1, v2, v3 = np.split(tensor, 3, axis=1)
                convert_tensor(variables, n1, v1)
                convert_tensor(variables, n2, v2)
                convert_tensor(variables, n3, v3)
            elif "bias" in name:
                n1 = name.replace("qkv_transform", "q_transform")
                n2 = name.replace("qkv_transform", "k_transform")
                n3 = name.replace("qkv_transform", "v_transform")
                v1, v2, v3 = np.split(tensor, 3)
                convert_tensor(variables, n1, v1)
                convert_tensor(variables, n2, v2)
                convert_tensor(variables, n3, v3)
        elif "kv_transform" in name:
            # Fused key/value projection (encoder-decoder attention).
            if "matrix" in name:
                n1 = name.replace("kv_transform", "k_transform")
                n2 = name.replace("kv_transform", "v_transform")
                v1, v2 = np.split(tensor, 2, axis=1)
                convert_tensor(variables, n1, v1)
                convert_tensor(variables, n2, v2)
            elif "bias" in name:
                n1 = name.replace("kv_transform", "k_transform")
                n2 = name.replace("kv_transform", "v_transform")
                v1, v2 = np.split(tensor, 2)
                convert_tensor(variables, n1, v1)
                convert_tensor(variables, n2, v2)
        elif "multihead_attention/output_transform" in name:
            name = name.replace("multihead_attention/output_transform",
                                "multihead_attention/o_transform")
            convert_tensor(variables, name, tensor)
        elif "ffn_layer/output_layer/linear" in name:
            name = name.replace("ffn_layer/output_layer/linear",
                                "ffn_layer/output_transform")
            convert_tensor(variables, name, tensor)
        elif "ffn_layer/input_layer/linear" in name:
            name = name.replace("ffn_layer/input_layer/linear",
                                "ffn_layer/input_transform")
            convert_tensor(variables, name, tensor)
        else:
            convert_tensor(variables, name, tensor)
    torch.save({"model": variables}, sys.argv[2])
if __name__ == "__main__":
    main()
| 3,860 | 36.125 | 71 | py |
THUMT | THUMT-master/thumt/optimizers/optimizers.py | # coding=utf-8
# Copyright 2017-2020 The THUMT Authors
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import math
import torch
import torch.distributed as dist
import thumt.utils as utils
import thumt.utils.summary as summary
from thumt.optimizers.schedules import LearningRateSchedule
def _save_summary(grads_and_vars):
    """Write per-variable summaries and return the global gradient norm.

    ``grads_and_vars`` pairs each gradient with a ``(name, tensor)``
    tuple. For every non-None gradient this logs a histogram of the
    variable plus its norm and gradient norm, then logs and returns the
    global (all-variable) gradient norm.
    """
    total_norm = 0.0
    for grad, var in grads_and_vars:
        if grad is None:
            continue
        # Unpack the (name, tensor) pair; only the tensor is used here.
        _, var = var
        grad_norm = grad.data.norm()
        total_norm += grad_norm ** 2
        # `tensor_name` is assumed to be attached to the variable by the
        # training loop — TODO confirm against the trainer.
        summary.histogram(var.tensor_name, var,
                          utils.get_global_step())
        summary.scalar("norm/" + var.tensor_name, var.norm(),
                       utils.get_global_step())
        summary.scalar("grad_norm/" + var.tensor_name, grad_norm,
                       utils.get_global_step())
    total_norm = total_norm ** 0.5
    summary.scalar("grad_norm", total_norm, utils.get_global_step())
    return float(total_norm)
def _compute_grad_norm(gradients):
total_norm = 0.0
for grad in gradients:
if grad is None:
continue
total_norm += float(grad.data.norm() ** 2)
return float(total_norm ** 0.5)
class Optimizer(object):
    """Base class for THUMT optimizers.

    Tracks an iteration counter and per-variable slot buffers, and
    provides gradient bookkeeping helpers shared by the subclasses.
    """
    def __init__(self, name, **kwargs):
        self._name = name
        self._iterations = 0
        self._slots = {}
    def detach_gradients(self, gradients):
        # Detach gradient tensors from the autograd graph in place.
        for grad in gradients:
            if grad is not None:
                grad.detach_()
    def scale_gradients(self, gradients, scale):
        # Multiply all non-None gradients by `scale` in place.
        for grad in gradients:
            if grad is not None:
                grad.mul_(scale)
    def sync_gradients(self, gradients, compress=True):
        # All-reduce gradients across workers; with `compress` the
        # reduction runs in FP16 to halve communication volume, then the
        # result is cast back to the original dtype.
        grad_vec = utils.params_to_vec(gradients)
        if compress:
            grad_vec_half = grad_vec.half()
            dist.all_reduce(grad_vec_half)
            grad_vec = grad_vec_half.to(grad_vec)
        else:
            dist.all_reduce(grad_vec)
        utils.vec_to_params(grad_vec, gradients)
    def zero_gradients(self, gradients):
        # Zero all non-None gradients in place.
        for grad in gradients:
            if grad is not None:
                grad.zero_()
    def compute_gradients(self, loss, var_list, aggregate=False):
        # Backpropagate `loss`. When `aggregate` is True, existing .grad
        # values are kept so gradients accumulate across calls.
        var_list = list(var_list)
        grads = [v.grad if v is not None else None for v in var_list]
        self.detach_gradients(grads)
        if not aggregate:
            self.zero_gradients(grads)
        loss.backward()
        return [v.grad if v is not None else None for v in var_list]
    def apply_gradients(self, grads_and_vars):
        # Subclasses implement the actual parameter update.
        raise NotImplementedError("Not implemented")
    @property
    def iterations(self):
        # Number of apply_gradients calls performed so far.
        return self._iterations
    def state_dict(self):
        raise NotImplementedError("Not implemented")
    def load_state_dict(self):
        raise NotImplementedError("Not implemented")
class SGDOptimizer(Optimizer):
    """Plain stochastic gradient descent with optional gradient clipping."""
    def __init__(self, learning_rate, summaries=True, name="SGD", **kwargs):
        super(SGDOptimizer, self).__init__(name, **kwargs)
        self._learning_rate = learning_rate  # float or LearningRateSchedule
        self._summaries = summaries
        self._clipper = None
        if "clipper" in kwargs and kwargs["clipper"] is not None:
            self._clipper = kwargs["clipper"]
    def apply_gradients(self, grads_and_vars):
        """Apply one SGD step. `grads_and_vars` pairs each gradient with
        a (name, variable) tuple."""
        self._iterations += 1
        lr = self._learning_rate
        grads, var_list = list(zip(*grads_and_vars))
        # Summary writing also yields the global grad norm as a by-product.
        if self._summaries:
            grad_norm = _save_summary(zip(grads, var_list))
        else:
            grad_norm = _compute_grad_norm(grads)
        if self._clipper is not None:
            # The clipper may reject the whole step.
            reject, grads = self._clipper(grads, grad_norm)
            if reject:
                return
        for grad, var in zip(grads, var_list):
            if grad is None:
                continue
            # Convert if grad is not FP32
            grad = grad.data.float()
            _, var = var
            if isinstance(lr, LearningRateSchedule):
                lr = lr(self._iterations)
            step_size = lr
            if var.dtype == torch.float32:
                var.data.add_(grad, alpha=-step_size)
            else:
                # Low-precision variable: update an FP32 copy, then cast
                # the result back into the variable.
                fp32_var = var.data.float()
                fp32_var.add_(grad, alpha=-step_size)
                var.data.copy_(fp32_var)
    def state_dict(self):
        state = {
            "iterations": self._iterations,
        }
        # Schedules are reconstructed from config, so only plain float
        # learning rates are serialized.
        if not isinstance(self._learning_rate, LearningRateSchedule):
            state["learning_rate"] = self._learning_rate
        return state
    def load_state_dict(self, state):
        self._iterations = state.get("iterations", self._iterations)
class AdamOptimizer(Optimizer):
    """Adam optimizer with per-variable FP32 moment slots.

    Moments are kept in FP32 regardless of the variable dtype, and
    low-precision variables are updated through an FP32 copy.
    """
    def __init__(self, learning_rate=0.01, beta_1=0.9, beta_2=0.999,
                 epsilon=1e-7, name="Adam", **kwargs):
        super(AdamOptimizer, self).__init__(name, **kwargs)
        self._learning_rate = learning_rate  # float or LearningRateSchedule
        self._beta_1 = beta_1
        self._beta_2 = beta_2
        self._epsilon = epsilon
        self._summaries = True
        self._clipper = None
        if "summaries" in kwargs and not kwargs["summaries"]:
            self._summaries = False
        if "clipper" in kwargs and kwargs["clipper"] is not None:
            self._clipper = kwargs["clipper"]
    def apply_gradients(self, grads_and_vars):
        """Apply one Adam step. `grads_and_vars` pairs each gradient with
        a (name, variable) tuple; slots are keyed by the name."""
        self._iterations += 1
        lr = self._learning_rate
        beta_1 = self._beta_1
        beta_2 = self._beta_2
        epsilon = self._epsilon
        grads, var_list = list(zip(*grads_and_vars))
        if self._summaries:
            grad_norm = _save_summary(zip(grads, var_list))
        else:
            grad_norm = _compute_grad_norm(grads)
        if self._clipper is not None:
            reject, grads = self._clipper(grads, grad_norm)
            if reject:
                return
        for grad, var in zip(grads, var_list):
            if grad is None:
                continue
            # Convert if grad is not FP32
            grad = grad.data.float()
            name, var = var
            if self._slots.get(name, None) is None:
                # Lazily create FP32 first/second moment buffers.
                self._slots[name] = {}
                self._slots[name]["m"] = torch.zeros_like(var.data,
                                                          dtype=torch.float32)
                self._slots[name]["v"] = torch.zeros_like(var.data,
                                                          dtype=torch.float32)
            m, v = self._slots[name]["m"], self._slots[name]["v"]
            # Bias correction terms for the moment estimates.
            bias_corr_1 = 1 - beta_1 ** self._iterations
            bias_corr_2 = 1 - beta_2 ** self._iterations
            m.mul_(beta_1).add_(grad, alpha=1 - beta_1)
            v.mul_(beta_2).addcmul_(grad, grad, value=1 - beta_2)
            denom = (v.sqrt() / math.sqrt(bias_corr_2)).add_(epsilon)
            if isinstance(lr, LearningRateSchedule):
                lr = lr(self._iterations)
            step_size = lr / bias_corr_1
            if var.dtype == torch.float32:
                var.data.addcdiv_(m, denom, value=-step_size)
            else:
                # Update in FP32, then cast back into the variable.
                fp32_var = var.data.float()
                fp32_var.addcdiv_(m, denom, value=-step_size)
                var.data.copy_(fp32_var)
    def state_dict(self):
        state = {
            "beta_1": self._beta_1,
            "beta_2": self._beta_2,
            "epsilon": self._epsilon,
            "iterations": self._iterations,
            "slot": self._slots
        }
        # Schedules are reconstructed from config, not serialized here.
        if not isinstance(self._learning_rate, LearningRateSchedule):
            state["learning_rate"] = self._learning_rate
        return state
    def load_state_dict(self, state):
        self._iterations = state.get("iterations", self._iterations)
        slots = state.get("slot", {})
        self._slots = {}
        for key in slots:
            # Restore moment buffers into fresh FP32 tensors.
            m, v = slots[key]["m"], slots[key]["v"]
            self._slots[key] = {}
            self._slots[key]["m"] = torch.zeros(m.shape, dtype=torch.float32)
            self._slots[key]["v"] = torch.zeros(v.shape, dtype=torch.float32)
            self._slots[key]["m"].copy_(m)
            self._slots[key]["v"].copy_(v)
class AdadeltaOptimizer(Optimizer):
    """Adadelta optimizer with per-variable FP32 accumulator slots.

    Slot "m" holds the running average of squared gradients, slot "v"
    the running average of squared updates.
    """
    def __init__(self, learning_rate=0.001, rho=0.95, epsilon=1e-07,
                 name="Adadelta", **kwargs):
        super(AdadeltaOptimizer, self).__init__(name, **kwargs)
        self._learning_rate = learning_rate  # float or LearningRateSchedule
        self._rho = rho
        self._epsilon = epsilon
        self._summaries = True
        self._clipper = None
        if "summaries" in kwargs and not kwargs["summaries"]:
            self._summaries = False
        if "clipper" in kwargs and kwargs["clipper"] is not None:
            self._clipper = kwargs["clipper"]
    def apply_gradients(self, grads_and_vars):
        """Apply one Adadelta step. `grads_and_vars` pairs each gradient
        with a (name, variable) tuple; slots are keyed by the name."""
        self._iterations += 1
        lr = self._learning_rate
        rho = self._rho
        epsilon = self._epsilon
        grads, var_list = list(zip(*grads_and_vars))
        if self._summaries:
            grad_norm = _save_summary(zip(grads, var_list))
        else:
            grad_norm = _compute_grad_norm(grads)
        if self._clipper is not None:
            reject, grads = self._clipper(grads, grad_norm)
            if reject:
                return
        for grad, var in zip(grads, var_list):
            if grad is None:
                continue
            # Convert if grad is not FP32
            grad = grad.data.float()
            name, var = var
            if self._slots.get(name, None) is None:
                # Lazily create the FP32 accumulators.
                self._slots[name] = {}
                self._slots[name]["m"] = torch.zeros_like(var.data,
                                                          dtype=torch.float32)
                self._slots[name]["v"] = torch.zeros_like(var.data,
                                                          dtype=torch.float32)
            square_avg = self._slots[name]["m"]
            acc_delta = self._slots[name]["v"]
            if isinstance(lr, LearningRateSchedule):
                lr = lr(self._iterations)
            square_avg.mul_(rho).addcmul_(grad, grad, value=1 - rho)
            std = square_avg.add(epsilon).sqrt_()
            delta = acc_delta.add(epsilon).sqrt_().div_(std).mul_(grad)
            acc_delta.mul_(rho).addcmul_(delta, delta, value=1 - rho)
            if var.dtype == torch.float32:
                var.data.add_(delta, alpha=-lr)
            else:
                # Update in FP32, then cast back into the variable.
                fp32_var = var.data.float()
                fp32_var.add_(delta, alpha=-lr)
                var.data.copy_(fp32_var)
    def state_dict(self):
        state = {
            "rho": self._rho,
            "epsilon": self._epsilon,
            "iterations": self._iterations,
            "slot": self._slots
        }
        # Schedules are reconstructed from config, not serialized here.
        if not isinstance(self._learning_rate, LearningRateSchedule):
            state["learning_rate"] = self._learning_rate
        return state
    def load_state_dict(self, state):
        self._iterations = state.get("iterations", self._iterations)
        slots = state.get("slot", {})
        self._slots = {}
        for key in slots:
            # Restore accumulators into fresh FP32 tensors.
            m, v = slots[key]["m"], slots[key]["v"]
            self._slots[key] = {}
            self._slots[key]["m"] = torch.zeros(m.shape, dtype=torch.float32)
            self._slots[key]["v"] = torch.zeros(v.shape, dtype=torch.float32)
            self._slots[key]["m"].copy_(m)
            self._slots[key]["v"].copy_(v)
class LossScalingOptimizer(Optimizer):
    """Dynamic loss-scaling wrapper for mixed-precision training.

    Multiplies the loss by ``scale`` before backward, un-scales gradients
    before delegating to the wrapped optimizer, and adapts ``scale``:
    after ``increment_period`` consecutive finite-gradient steps the
    scale is multiplied by ``multiplier`` (capped at 2**16); on any
    non-finite gradient the step is skipped and the scale is divided by
    ``multiplier`` (floored at 1).
    """
    def __init__(self, optimizer, scale=2.0**7, increment_period=2000,
                 multiplier=2.0, name="LossScalingOptimizer", **kwargs):
        super(LossScalingOptimizer, self).__init__(name, **kwargs)
        self._optimizer = optimizer
        self._scale = scale
        self._increment_period = increment_period
        self._multiplier = multiplier
        self._num_good_steps = 0
        self._summaries = True
        if "summaries" in kwargs and not kwargs["summaries"]:
            self._summaries = False
    def _update_if_finite_grads(self):
        # Grow the scale after `increment_period` consecutive good steps.
        if self._num_good_steps + 1 > self._increment_period:
            self._scale *= self._multiplier
            self._scale = min(self._scale, 2.0**16)
            self._num_good_steps = 0
        else:
            self._num_good_steps += 1
    def _update_if_not_finite_grads(self):
        # Back off and restart the good-step count. FIX: previously the
        # counter was not reset, so the scale could grow back after a
        # single good step instead of a full `increment_period` (cf.
        # torch.cuda.amp.GradScaler, which resets its growth tracker).
        self._scale = max(self._scale / self._multiplier, 1)
        self._num_good_steps = 0
    def compute_gradients(self, loss, var_list, aggregate=False):
        # Same contract as Optimizer.compute_gradients, but the loss is
        # multiplied by the current scale before backward.
        var_list = list(var_list)
        grads = [v.grad if v is not None else None for v in var_list]
        self.detach_gradients(grads)
        if not aggregate:
            self.zero_gradients(grads)
        loss = loss * self._scale
        loss.backward()
        return [v.grad if v is not None else None for v in var_list]
    def apply_gradients(self, grads_and_vars):
        # Skip the whole step if any gradient is non-finite; otherwise
        # un-scale the gradients and delegate to the wrapped optimizer.
        self._iterations += 1
        grads, var_list = list(zip(*grads_and_vars))
        new_grads = []
        if self._summaries:
            summary.scalar("optimizer/scale", self._scale,
                           utils.get_global_step())
        for grad in grads:
            if grad is None:
                new_grads.append(None)
                continue
            norm = grad.data.norm()
            if not torch.isfinite(norm):
                self._update_if_not_finite_grads()
                return
            else:
                # Rescale gradients
                new_grads.append(grad.data.float().mul_(1.0 / self._scale))
        self._update_if_finite_grads()
        self._optimizer.apply_gradients(zip(new_grads, var_list))
    def state_dict(self):
        state = {
            "scale": self._scale,
            "increment_period": self._increment_period,
            "multiplier": self._multiplier,
            "num_good_steps": self._num_good_steps,
            "optimizer": self._optimizer.state_dict()
        }
        return state
    def load_state_dict(self, state):
        # FIX: restore the dynamic scale as well; it was saved by
        # state_dict but previously never loaded, so resumed runs
        # restarted from the constructor default.
        self._scale = state.get("scale", self._scale)
        self._num_good_steps = state.get("num_good_steps",
                                         self._num_good_steps)
        self._optimizer.load_state_dict(state.get("optimizer", {}))
class MultiStepOptimizer(Optimizer):
    """Gradient-accumulation wrapper: applies the wrapped optimizer once
    every ``n`` steps, accumulating gradients in between, and handles
    cross-worker gradient synchronization."""
    def __init__(self, optimizer, n=1, compress=True,
                 name="MultiStepOptimizer", **kwargs):
        super(MultiStepOptimizer, self).__init__(name, **kwargs)
        self._n = n
        self._optimizer = optimizer
        self._compress = compress  # FP16-compress the all-reduce
    def compute_gradients(self, loss, var_list, aggregate=False):
        # Accumulate gradients (aggregate=True) except on the first step
        # of every n-step cycle, where the caller's flag is honored.
        if self._iterations % self._n == 0:
            return self._optimizer.compute_gradients(loss, var_list, aggregate)
        else:
            return self._optimizer.compute_gradients(loss, var_list, True)
    def apply_gradients(self, grads_and_vars):
        size = dist.get_world_size()
        grads, var_list = list(zip(*grads_and_vars))
        self._iterations += 1
        if self._n == 1:
            # No accumulation: sync (if distributed), average, apply.
            if size > 1:
                self.sync_gradients(grads, compress=self._compress)
                self.scale_gradients(grads, 1.0 / size)
            self._optimizer.apply_gradients(zip(grads, var_list))
        else:
            # Only apply on the last step of each n-step cycle; the
            # division by n * size averages over accumulation steps and
            # workers at once.
            if self._iterations % self._n != 0:
                return
            if size > 1:
                self.sync_gradients(grads, compress=self._compress)
            self.scale_gradients(grads, 1.0 / (self._n * size))
            self._optimizer.apply_gradients(zip(grads, var_list))
    def state_dict(self):
        state = {
            "n": self._n,
            "iterations": self._iterations,
            "compress": self._compress,
            "optimizer": self._optimizer.state_dict()
        }
        return state
    def load_state_dict(self, state):
        self._iterations = state.get("iterations", self._iterations)
        self._optimizer.load_state_dict(state.get("optimizer", {}))
| 15,912 | 30.636183 | 79 | py |
THUMT | THUMT-master/thumt/utils/inference.py | # coding=utf-8
# Copyright 2017-2020 The THUMT Authors
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import torch
from collections import namedtuple
from thumt.utils.nest import map_structure
def _merge_first_two_dims(tensor):
shape = list(tensor.shape)
shape[1] *= shape[0]
return torch.reshape(tensor, shape[1:])
def _split_first_two_dims(tensor, dim_0, dim_1):
shape = [dim_0, dim_1] + list(tensor.shape)[1:]
return torch.reshape(tensor, shape)
def _tile_to_beam_size(tensor, beam_size):
tensor = torch.unsqueeze(tensor, 1)
tile_dims = [1] * int(tensor.dim())
tile_dims[1] = beam_size
return tensor.repeat(tile_dims)
def _gather_2d(params, indices, name=None):
batch_size = params.shape[0]
range_size = indices.shape[1]
batch_pos = torch.arange(batch_size * range_size, device=params.device)
batch_pos = batch_pos // range_size
batch_pos = torch.reshape(batch_pos, [batch_size, range_size])
output = params[batch_pos, indices]
return output
class BeamSearchState(namedtuple("BeamSearchState",
                                 ("inputs", "state", "finish"))):
    """Search state threaded through `_beam_search_step`.

    inputs: (alive_seqs, alive_log_probs, alive_scores)
    state:  per-model decoder states for the alive beams
    finish: (fin_flags, fin_seqs, fin_scores)
    """
    pass
def _get_inference_fn(model_fns, features):
    """Build a step function for decoding with an ensemble of models.

    The returned ``inference_fn(inputs, state)`` feeds the current
    prefixes to every model, averages their log-probabilities, and
    returns (log_prob, next_states).
    """
    def inference_fn(inputs, state):
        # Re-package the source and the decoded prefix for the decoders;
        # the target mask is all-ones since every prefix position is real.
        local_features = {
            "source": features["source"],
            "source_mask": features["source_mask"],
            "target": inputs,
            "target_mask": torch.ones(*inputs.shape).to(inputs).float()
        }
        outputs = []
        next_state = []
        for (model_fn, model_state) in zip(model_fns, state):
            if model_state:
                # Incremental decoding: pass and collect decoder state.
                logits, new_state = model_fn(local_features, model_state)
                outputs.append(torch.nn.functional.log_softmax(logits,
                                                               dim=-1))
                next_state.append(new_state)
            else:
                # Stateless decoding path.
                logits = model_fn(local_features)
                outputs.append(torch.nn.functional.log_softmax(logits,
                                                               dim=-1))
                next_state.append({})
        # Ensemble
        log_prob = sum(outputs) / float(len(outputs))
        return log_prob.float(), next_state
    return inference_fn
def _beam_search_step(time, func, state, batch_size, beam_size, alpha,
                      pad_id, eos_id, min_length, max_length, inf=-1e9):
    """Advance beam search by one step and return the new state.

    Selects 2*beam_size candidates per sentence, keeps the top
    ``beam_size`` unfinished beams, and merges newly finished beams into
    the running list of finished hypotheses. ``alpha`` controls the
    length penalty; ``inf`` is the additive mask value used to suppress
    invalid candidates.
    """
    # Compute log probabilities
    seqs, log_probs = state.inputs[:2]
    # The step function expects a flat [batch * beam, ...] layout.
    flat_seqs = _merge_first_two_dims(seqs)
    flat_state = map_structure(lambda x: _merge_first_two_dims(x), state.state)
    step_log_probs, next_state = func(flat_seqs, flat_state)
    step_log_probs = _split_first_two_dims(step_log_probs, batch_size,
                                           beam_size)
    next_state = map_structure(
        lambda x: _split_first_two_dims(x, batch_size, beam_size), next_state)
    curr_log_probs = torch.unsqueeze(log_probs, 2) + step_log_probs
    # Apply length penalty
    length_penalty = ((5.0 + float(time + 1)) / 6.0) ** alpha
    curr_scores = curr_log_probs / length_penalty
    vocab_size = curr_scores.shape[-1]
    # Prevent null translation
    # Mask out <eos> until the minimum length is reached.
    min_length_flags = torch.ge(min_length, time + 1).float().mul_(inf)
    curr_scores[:, :, eos_id].add_(min_length_flags)
    # Select top-k candidates
    # [batch_size, beam_size * vocab_size]
    curr_scores = torch.reshape(curr_scores, [-1, beam_size * vocab_size])
    # [batch_size, 2 * beam_size]
    top_scores, top_indices = torch.topk(curr_scores, k=2*beam_size)
    # Shape: [batch_size, 2 * beam_size]
    # Recover which beam and which vocabulary symbol each candidate is.
    beam_indices = top_indices // vocab_size
    symbol_indices = top_indices % vocab_size
    # Expand sequences
    # [batch_size, 2 * beam_size, time]
    candidate_seqs = _gather_2d(seqs, beam_indices)
    candidate_seqs = torch.cat([candidate_seqs,
                                torch.unsqueeze(symbol_indices, 2)], 2)
    # Expand sequences
    # Suppress finished sequences
    flags = torch.eq(symbol_indices, eos_id).to(torch.bool)
    # [batch, 2 * beam_size]
    alive_scores = top_scores + flags.to(torch.float32) * inf
    # [batch, beam_size]
    alive_scores, alive_indices = torch.topk(alive_scores, beam_size)
    alive_symbols = _gather_2d(symbol_indices, alive_indices)
    alive_indices = _gather_2d(beam_indices, alive_indices)
    alive_seqs = _gather_2d(seqs, alive_indices)
    # [batch_size, beam_size, time + 1]
    alive_seqs = torch.cat([alive_seqs, torch.unsqueeze(alive_symbols, 2)], 2)
    alive_state = map_structure(
        lambda x: _gather_2d(x, alive_indices),
        next_state)
    alive_log_probs = alive_scores * length_penalty
    # Check length constraint
    # Beams that hit the maximum length are masked out of future steps.
    length_flags = torch.le(max_length, time + 1).float()
    alive_log_probs = alive_log_probs + length_flags * inf
    alive_scores = alive_scores + length_flags * inf
    # Select finished sequences
    prev_fin_flags, prev_fin_seqs, prev_fin_scores = state.finish
    # [batch, 2 * beam_size]
    step_fin_scores = top_scores + (1.0 - flags.to(torch.float32)) * inf
    # [batch, 3 * beam_size]
    fin_flags = torch.cat([prev_fin_flags, flags], dim=1)
    fin_scores = torch.cat([prev_fin_scores, step_fin_scores], dim=1)
    # [batch, beam_size]
    fin_scores, fin_indices = torch.topk(fin_scores, beam_size)
    fin_flags = _gather_2d(fin_flags, fin_indices)
    # Pad previously finished sequences to the new time length.
    pad_seqs = prev_fin_seqs.new_full([batch_size, beam_size, 1], pad_id)
    prev_fin_seqs = torch.cat([prev_fin_seqs, pad_seqs], dim=2)
    fin_seqs = torch.cat([prev_fin_seqs, candidate_seqs], dim=1)
    fin_seqs = _gather_2d(fin_seqs, fin_indices)
    new_state = BeamSearchState(
        inputs=(alive_seqs, alive_log_probs, alive_scores),
        state=alive_state,
        finish=(fin_flags, fin_seqs, fin_scores),
    )
    return new_state
def beam_search(models, features, params):
    """Beam search over an ensemble of models.

    Returns:
        (seqs, scores) where seqs is [batch, top_beams, length] (BOS
        stripped, an extra EOS appended) and scores is
        [batch, top_beams].
    """
    if not isinstance(models, (list, tuple)):
        raise ValueError("'models' must be a list or tuple")
    beam_size = params.beam_size
    top_beams = params.top_beams
    alpha = params.decode_alpha
    decode_ratio = params.decode_ratio
    decode_length = params.decode_length
    pad_id = params.vocabulary["target"][params.pad]
    bos_id = params.vocabulary["target"][params.bos]
    eos_id = params.vocabulary["target"][params.eos]
    min_val = -1e9
    shape = features["source"].shape
    device = features["source"].device
    batch_size = shape[0]
    seq_length = shape[1]
    # Compute initial state if necessary
    states = []
    funcs = []
    for model in models:
        state = model.empty_state(batch_size, device)
        states.append(model.encode(features, state))
        funcs.append(model.decode)
    # For source sequence length
    # Per-sentence decoding budget: ratio * source_length + constant.
    max_length = features["source_mask"].sum(1) * decode_ratio
    max_length = max_length.long() + decode_length
    max_step = max_length.max()
    # [batch, beam_size]
    max_length = torch.unsqueeze(max_length, 1).repeat([1, beam_size])
    min_length = torch.ones_like(max_length)
    # Expand the inputs
    # [batch, length] => [batch * beam_size, length]
    features["source"] = torch.unsqueeze(features["source"], 1)
    features["source"] = features["source"].repeat([1, beam_size, 1])
    features["source"] = torch.reshape(features["source"],
                                       [batch_size * beam_size, seq_length])
    features["source_mask"] = torch.unsqueeze(features["source_mask"], 1)
    features["source_mask"] = features["source_mask"].repeat([1, beam_size, 1])
    features["source_mask"] = torch.reshape(features["source_mask"],
                                            [batch_size * beam_size, seq_length])
    decoding_fn = _get_inference_fn(funcs, features)
    states = map_structure(
        lambda x: _tile_to_beam_size(x, beam_size),
        states)
    # Initial beam search state
    # Every beam starts with <bos>; only beam 0 has log-prob 0 so the
    # beams diverge from the first step.
    init_seqs = torch.full([batch_size, beam_size, 1], bos_id, device=device)
    init_seqs = init_seqs.long()
    init_log_probs = init_seqs.new_tensor(
        [[0.] + [min_val] * (beam_size - 1)], dtype=torch.float32)
    init_log_probs = init_log_probs.repeat([batch_size, 1])
    init_scores = torch.zeros_like(init_log_probs)
    fin_seqs = torch.zeros([batch_size, beam_size, 1], dtype=torch.int64,
                           device=device)
    fin_scores = torch.full([batch_size, beam_size], min_val,
                            dtype=torch.float32, device=device)
    fin_flags = torch.zeros([batch_size, beam_size], dtype=torch.bool,
                            device=device)
    state = BeamSearchState(
        inputs=(init_seqs, init_log_probs, init_scores),
        state=states,
        finish=(fin_flags, fin_seqs, fin_scores),
    )
    for time in range(max_step):
        state = _beam_search_step(time, decoding_fn, state, batch_size,
                                  beam_size, alpha, pad_id, eos_id,
                                  min_length, max_length)
        # Early stopping: once the worst finished hypothesis beats the
        # best possible alive score, no alive beam can still win.
        max_penalty = ((5.0 + max_step) / 6.0) ** alpha
        best_alive_score = torch.max(state.inputs[1][:, 0] / max_penalty)
        worst_finished_score = torch.min(state.finish[2])
        cond = torch.gt(worst_finished_score, best_alive_score)
        is_finished = bool(cond)
        if is_finished:
            break
    final_state = state
    alive_seqs = final_state.inputs[0]
    alive_scores = final_state.inputs[2]
    final_flags = final_state.finish[0].byte()
    final_seqs = final_state.finish[1]
    final_scores = final_state.finish[2]
    # Fall back to alive beams where no finished hypothesis exists.
    final_seqs = torch.where(final_flags[:, :, None], final_seqs, alive_seqs)
    final_scores = torch.where(final_flags, final_scores, alive_scores)
    # Append extra <eos>
    final_seqs = torch.nn.functional.pad(final_seqs, (0, 1, 0, 0, 0, 0),
                                         value=eos_id)
    return final_seqs[:, :top_beams, 1:], final_scores[:, :top_beams]
def argmax_decoding(models, features, params):
    """Greedy (teacher-forced) decoding for an ensemble of models.

    For each target position picks the argmax of the ensemble-averaged
    log-probabilities, forces <eos> at the last real target position,
    and returns (indices[:, None, :], summed masked log-probs[:, None])
    to mirror beam_search's (seqs, scores) shape.
    """
    if not isinstance(models, (list, tuple)):
        raise ValueError("'models' must be a list or tuple")
    # Compute initial state if necessary
    log_probs = []
    shape = features["target"].shape
    device = features["target"].device
    batch_size = features["target"].shape[0]
    target_mask = features["target_mask"]
    target_length = target_mask.sum(1).long()
    eos_id = params.vocabulary["target"][params.eos]
    for model in models:
        state = model.empty_state(batch_size, device)
        state = model.encode(features, state)
        logits, _ = model.decode(features, state, "eval")
        log_probs.append(torch.nn.functional.log_softmax(logits, dim=-1))
    # Ensemble average over models.
    log_prob = sum(log_probs) / len(models)
    ret = torch.max(log_prob, -1)
    values = torch.reshape(ret.values, shape)
    indices = torch.reshape(ret.indices, shape)
    # Force <eos> at the final position of every (unpadded) sequence.
    batch_pos = torch.arange(batch_size, device=device)
    seq_pos = target_length - 1
    indices[batch_pos, seq_pos] = eos_id
    return indices[:, None, :], torch.sum(values * target_mask, -1)[:, None]
| 11,062 | 36.375 | 79 | py |
THUMT | THUMT-master/thumt/utils/convert_params.py | # coding=utf-8
# Copyright 2017-2020 The THUMT Authors
# Modified from torch.nn.utils.convert_parameters.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
def params_to_vec(parameters):
    r"""Flatten an iterable of parameter tensors into one 1-D vector.

    ``None`` entries are skipped; all tensors must live on one device
    (checked by ``_check_param_device``).

    Arguments:
        parameters (Iterable[Tensor]): parameters of a model.
    Returns:
        A single 1-D tensor holding all parameter values.
    """
    device = None
    chunks = []
    for p in parameters:
        if p is None:
            continue
        device = _check_param_device(p, device)
        chunks.append(p.view(-1))
    return torch.cat(chunks)
def vec_to_params(vec, parameters):
    r"""Copy consecutive slices of *vec* back into the parameters.

    ``None`` entries are skipped; all tensors must live on one device.

    Arguments:
        vec (Tensor): flat vector holding all parameter values.
        parameters (Iterable[Tensor]): parameters of a model.
    """
    if not isinstance(vec, torch.Tensor):
        raise TypeError("expected torch.Tensor, but got: {}"
                        .format(torch.typename(vec)))
    device = None
    offset = 0  # read position inside `vec`
    for p in parameters:
        if p is None:
            continue
        device = _check_param_device(p, device)
        count = p.numel()
        # Reshape the slice to the parameter's shape and install it.
        p.data = vec[offset:offset + count].view_as(p).data
        offset += count
def _check_param_device(param, old_param_device):
r"""This helper function is to check if the parameters are located
in the same device. Currently, the conversion between model parameters
and single vector form is not supported for multiple allocations,
e.g. parameters in different GPUs, or mixture of CPU/GPU.
Arguments:
param ([Tensor]): a Tensor of a parameter of a model
old_param_device (int): the device where the first parameter of a
model is allocated.
Returns:
old_param_device (int): report device for the first time
"""
# Meet the first parameter
if old_param_device is None:
old_param_device = param.get_device() if param.is_cuda else -1
else:
warn = False
if param.is_cuda: # Check if in same GPU
warn = (param.get_device() != old_param_device)
else: # Check if in CPU
warn = (old_param_device != -1)
if warn:
raise TypeError("Found two parameters on different devices,"
" this is currently not supported.")
return old_param_device
| 3,165 | 28.867925 | 81 | py |
THUMT | THUMT-master/thumt/utils/checkpoint.py | # coding=utf-8
# Copyright 2017-2020 The THUMT Authors
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import glob
import torch
def oldest_checkpoint(path):
    """Return the ``*.pt`` file in *path* with the smallest step counter.

    Files without a numeric trailing counter are ignored. Returns None
    when the directory holds no ``.pt`` files.
    """
    names = glob.glob(os.path.join(path, "*.pt"))
    if not names:
        return None
    best_name = names[0]
    best_counter = 10000000
    for name in names:
        suffix = name.rstrip(".pt").split("-")[-1]
        if not suffix.isdigit():
            continue
        if int(suffix) < best_counter:
            best_counter = int(suffix)
            best_name = name
    return best_name
def latest_checkpoint(path):
    """Return the ``*.pt`` file in *path* with the largest step counter.

    Files without a numeric trailing counter are ignored. Returns None
    when the directory holds no ``.pt`` files.
    """
    names = glob.glob(os.path.join(path, "*.pt"))
    if not names:
        return None
    best_name = names[0]
    best_counter = 0
    for name in names:
        suffix = name.rstrip(".pt").split("-")[-1]
        if not suffix.isdigit():
            continue
        if int(suffix) > best_counter:
            best_counter = int(suffix)
            best_name = name
    return best_name
def save(state, path, max_to_keep=None):
    """Save *state* as the next ``model-<n>.pt`` under *path*.

    The counter continues from the latest existing checkpoint. When
    *max_to_keep* is set and the directory already holds that many
    checkpoints, the oldest one is deleted first.
    """
    existing = glob.glob(os.path.join(path, "*.pt"))
    if not existing:
        counter = 1
    else:
        newest = latest_checkpoint(path)
        counter = int(newest.rstrip(".pt").split("-")[-1]) + 1
        if max_to_keep and len(existing) >= max_to_keep:
            os.remove(oldest_checkpoint(path))
    checkpoint = os.path.join(path, "model-%d.pt" % counter)
    print("Saving checkpoint: %s" % checkpoint)
    torch.save(state, checkpoint)
| 1,738 | 21.584416 | 66 | py |
THUMT | THUMT-master/thumt/utils/evaluation.py | # coding=utf-8
# Copyright 2017-2020 The THUMT Authors
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import glob
import operator
import os
import shutil
import time
import torch
import torch.distributed as dist
from thumt.utils.checkpoint import save, latest_checkpoint
from thumt.utils.inference import beam_search
from thumt.utils.bleu import bleu
from thumt.utils.bpe import BPE
from thumt.utils.misc import get_global_step
from thumt.utils.summary import scalar
def _save_log(filename, result):
metric, global_step, score = result
with open(filename, "a") as fd:
time = datetime.datetime.now()
msg = "%s: %s at step %d: %f\n" % (time, metric, global_step, score)
fd.write(msg)
def _read_score_record(filename):
# "checkpoint_name": score
records = []
if not os.path.exists(filename):
return records
with open(filename) as fd:
for line in fd:
name, score = line.strip().split(":")
name = name.strip()[1:-1]
score = float(score)
records.append([name, score])
return records
def _save_score_record(filename, records):
keys = []
for record in records:
checkpoint_name = record[0]
step = int(checkpoint_name.strip().split("-")[-1].rstrip(".pt"))
keys.append((step, record))
sorted_keys = sorted(keys, key=operator.itemgetter(0),
reverse=True)
sorted_records = [item[1] for item in sorted_keys]
with open(filename, "w") as fd:
for record in sorted_records:
checkpoint_name, score = record
fd.write("\"%s\": %f\n" % (checkpoint_name, score))
def _add_to_record(records, record, max_to_keep):
added = None
removed = None
models = {}
for (name, score) in records:
models[name] = score
if len(records) < max_to_keep:
if record[0] not in models:
added = record[0]
records.append(record)
else:
sorted_records = sorted(records, key=lambda x: -x[1])
worst_score = sorted_records[-1][1]
current_score = record[1]
if current_score >= worst_score:
if record[0] not in models:
added = record[0]
removed = sorted_records[-1][0]
records = sorted_records[:-1] + [record]
# Sort
records = sorted(records, key=lambda x: -x[1])
return added, removed, records
def _convert_to_string(tensor, params, direction="target"):
ids = tensor.tolist()
output = []
eos_id = params.vocabulary[direction][params.eos]
for wid in ids:
if wid == eos_id:
break
output.append(params.vocabulary[direction][wid])
output = b" ".join(output)
return output
def _evaluate_model(model, sorted_key, dataset, references, params):
    """Decode *dataset* with *model* on all ranks and compute BLEU on rank 0.

    All ranks must stay in lock-step because every iteration performs
    collective ops (all_reduce/all_gather).  Ranks whose shard is exhausted
    keep participating with a dummy batch (batch_size == 0) until every
    rank is done.  ``sorted_key`` maps original sentence index -> position
    in the length-sorted stream, so outputs are restored to input order.
    Returns the BLEU score on rank 0 and 0.0 on every other rank.
    """
    # Decode under no_grad with the model in eval mode.
    with torch.no_grad():
        model.eval()

        iterator = iter(dataset)
        counter = 0
        # Fixed padded width for the all_gather buffers; decoded sequences
        # are padded to this length before gathering.
        pad_max = 1024

        # Buffers for synchronization: per-rank real batch sizes and one
        # gather buffer per rank for the decoded id matrices.
        size = torch.zeros([dist.get_world_size()]).long()
        t_list = [torch.empty([params.decode_batch_size, pad_max]).long()
                  for _ in range(dist.get_world_size())]
        results = []

        while True:
            try:
                features = next(iterator)
                batch_size = features["source"].shape[0]
            except:
                # Shard exhausted (or iterator error): feed a 1x1 dummy
                # batch so this rank can still join the collectives below.
                features = {
                    "source": torch.ones([1, 1]).long(),
                    "source_mask": torch.ones([1, 1]).float()
                }
                batch_size = 0

            t = time.time()
            counter += 1

            # Decode with beam search; keep only the best hypothesis.
            seqs, _ = beam_search([model], features, params)

            # Pad batch and length dimensions to the fixed gather shape.
            seqs = torch.squeeze(seqs, dim=1)
            pad_batch = params.decode_batch_size - seqs.shape[0]
            pad_length = pad_max - seqs.shape[1]
            seqs = torch.nn.functional.pad(seqs, (0, pad_length, 0, pad_batch))

            # Synchronization: share real batch sizes and gather sequences.
            size.zero_()
            size[dist.get_rank()].copy_(torch.tensor(batch_size))
            dist.all_reduce(size)
            dist.all_gather(t_list, seqs)

            # All ranks produced only dummy batches -> decoding finished.
            if size.sum() == 0:
                break

            if dist.get_rank() != 0:
                continue

            # Rank 0: detokenize the gathered hypotheses, skipping the
            # padded rows beyond each rank's real batch size.
            for i in range(params.decode_batch_size):
                for j in range(dist.get_world_size()):
                    n = size[j]
                    seq = _convert_to_string(t_list[j][i], params)

                    if i >= n:
                        continue

                    # Restore BPE segmentation
                    seq = BPE.decode(seq)
                    results.append(seq.split())

            t = time.time() - t
            print("Finished batch: %d (%.3f sec)" % (counter, t))

    model.train()

    if dist.get_rank() == 0:
        # Undo the length-based sorting before scoring against references.
        restored_results = []

        for idx in range(len(results)):
            restored_results.append(results[sorted_key[idx]])

        return bleu(restored_results, references)

    return 0.0
def evaluate(model, sorted_key, dataset, base_dir, references, params):
    """Validate *model*, record BLEU, and maintain the top-checkpoint pool.

    All ranks take part in decoding (see ``_evaluate_model``); only rank 0
    scores, logs, saves a checkpoint under ``<base_dir>/eval`` and keeps at
    most ``params.keep_top_checkpoint_max`` best-scoring checkpoints there.
    No-op when *references* is empty.
    """
    if not references:
        return

    base_dir = base_dir.rstrip("/")
    save_path = os.path.join(base_dir, "eval")
    record_name = os.path.join(save_path, "record")
    log_name = os.path.join(save_path, "log")
    max_to_keep = params.keep_top_checkpoint_max

    if dist.get_rank() == 0:
        # First validation: create the eval directory and copy the *.json
        # parameter files next to the evaluated checkpoints.
        if not os.path.exists(save_path):
            print("Making dir: %s" % save_path)
            os.makedirs(save_path)

            params_pattern = os.path.join(base_dir, "*.json")
            params_files = glob.glob(params_pattern)

            for name in params_files:
                new_name = name.replace(base_dir, save_path)
                shutil.copy(name, new_name)

    # Do validation here (collective: every rank must call this).
    global_step = get_global_step()

    if dist.get_rank() == 0:
        print("Validating model at step %d" % global_step)

    score = _evaluate_model(model, sorted_key, dataset, references, params)

    # Rank 0 bookkeeping: summary, log, checkpoint pool maintenance.
    if dist.get_rank() == 0:
        scalar("BLEU/score", score, global_step, write_every_n_steps=1)
        print("BLEU at step %d: %f" % (global_step, score))

        # Save checkpoint to save_path
        save({"model": model.state_dict(), "step": global_step}, save_path)

        _save_log(log_name, ("BLEU", global_step, score))
        records = _read_score_record(record_name)
        record = [latest_checkpoint(save_path).split("/")[-1], score]

        added, removed, records = _add_to_record(records, record, max_to_keep)

        # Not good enough for the pool: delete the checkpoint just saved.
        if added is None:
            # Remove latest checkpoint
            filename = latest_checkpoint(save_path)
            print("Removing %s" % filename)
            files = glob.glob(filename + "*")

            for name in files:
                os.remove(name)

        # A previous pool member was evicted: delete its files too.
        if removed is not None:
            filename = os.path.join(save_path, removed)
            print("Removing %s" % filename)
            files = glob.glob(filename + "*")

            for name in files:
                os.remove(name)

        _save_score_record(record_name, records)
        best_score = records[0][1]
        print("Best score at step %d: %f" % (global_step, best_score))
| 7,411 | 27.398467 | 79 | py |
THUMT | THUMT-master/thumt/utils/summary.py | # coding=utf-8
# Copyright 2017-2020 The THUMT Authors
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import queue
import threading
import torch
import torch.distributed as dist
import torch.utils.tensorboard as tensorboard
_SUMMARY_WRITER = None
_QUEUE = None
_THREAD = None
class SummaryWorker(threading.Thread):
    """Daemon thread that drains the module-level summary queue and forwards
    entries to the TensorBoard writer, keeping blocking writes off the
    training thread."""

    def run(self):
        global _QUEUE

        while True:
            item = _QUEUE.get()
            name, kwargs = item

            # ``stop()`` enqueues a ("stop", None) sentinel to end the loop.
            if name == "stop":
                break

            self.write_summary(name, **kwargs)

    def write_summary(self, name, **kwargs):
        # Dispatch on the entry kind queued by ``scalar``/``histogram``.
        if name == "scalar":
            _SUMMARY_WRITER.add_scalar(**kwargs)
        elif name == "histogram":
            _SUMMARY_WRITER.add_histogram(**kwargs)

    def stop(self):
        # Request shutdown and wait until the queue drains to the sentinel.
        global _QUEUE
        _QUEUE.put(("stop", None))
        self.join()
def init(log_dir, enable=True):
    """Create the module-level TensorBoard writer plus its worker thread.

    Only distributed rank 0 writes summaries; on other ranks (or when
    *enable* is False) the globals stay ``None`` and ``scalar``/``histogram``
    are effectively no-ops.
    """
    global _SUMMARY_WRITER
    global _QUEUE
    global _THREAD

    if enable and dist.get_rank() == 0:
        _SUMMARY_WRITER = tensorboard.SummaryWriter(log_dir)
        _QUEUE = queue.Queue()
        thread = SummaryWorker(daemon=True)
        thread.start()
        _THREAD = thread
def scalar(tag, scalar_value, global_step=None, walltime=None,
           write_every_n_steps=100):
    """Queue a scalar summary for asynchronous writing.

    No-op unless ``init`` created a writer on this process (rank 0).
    Writes are throttled to every *write_every_n_steps* steps; a missing
    *global_step* always writes.
    """
    if _SUMMARY_WRITER is None:
        return

    # ``global_step`` defaults to None, which made the original
    # ``global_step % write_every_n_steps`` raise TypeError.  Treat a
    # missing step as "always write".
    if global_step is not None and global_step % write_every_n_steps != 0:
        return

    kwargs = dict(tag=tag, scalar_value=float(scalar_value),
                  global_step=global_step, walltime=walltime)
    _QUEUE.put(("scalar", kwargs))
def histogram(tag, values, global_step=None, bins="tensorflow", walltime=None,
              max_bins=None, write_every_n_steps=100):
    """Queue a histogram summary for asynchronous writing.

    No-op unless ``init`` created a writer on this process (rank 0).
    *values* is detached and moved to CPU before being queued so the worker
    thread never touches live autograd/GPU state.  A missing *global_step*
    always writes (the original crashed on ``None % n``).
    """
    if _SUMMARY_WRITER is None:
        return

    if global_step is not None and global_step % write_every_n_steps != 0:
        return

    kwargs = dict(tag=tag, values=values.detach().cpu(),
                  global_step=global_step, bins=bins, walltime=walltime,
                  max_bins=max_bins)
    _QUEUE.put(("histogram", kwargs))
def close():
    """Stop the worker thread and flush/close the writer (rank 0 only)."""
    if _SUMMARY_WRITER is not None:
        # Drain the queue (stop() joins the worker) before closing.
        _THREAD.stop()
        _SUMMARY_WRITER.close()
| 2,224 | 25.176471 | 78 | py |
THUMT | THUMT-master/thumt/data/dataset.py | # coding=utf-8
# Copyright 2017-Present The THUMT Authors
import abc
import torch
from collections.abc import Sequence
from torch.utils.data import IterableDataset
from thumt.data.iterator import Iterator
from thumt.data.vocab import Vocabulary
from thumt.tokenizers import Tokenizer
from typing import Any, Dict, NoReturn, List, Tuple, Union, Callable
class ElementSpec(object):
    """Describes the element a dataset produces: a Python type plus a
    shape string such as ``"[None, None]"``."""

    def __init__(self, elem_type, shape):
        self._type = elem_type
        self._shape = shape

    def __repr__(self) -> str:
        return "{}, {}".format(self._type, self._shape)

    @property
    def shape(self) -> str:
        return self._shape

    @property
    def elem_type(self) -> Any:
        return self._type
class MapFunc(object):
    """Pairs a callable with the ElementSpec of the elements it produces,
    so ``MapDataset`` can expose the post-transform spec."""

    def __init__(self, fn: Callable, spec: ElementSpec):
        self._fn = fn
        self._elem_spec = spec

    def __call__(self, *args, **kwargs) -> Any:
        # Delegate straight to the wrapped callable.
        return self._fn(*args, **kwargs)

    @property
    def element_spec(self):
        return self._elem_spec

    @property
    def function(self):
        return self._fn
class Dataset(IterableDataset):
    """Base class of THUMT's lazy dataset pipeline.

    Subclasses form a DAG of transformations; ``Iterator`` walks the graph
    (via ``_inputs``) to produce elements.  The combinator methods below
    each wrap ``self`` in a new transformation node.
    """

    def __init__(self):
        self._iterator = None

    def __iter__(self) -> Iterator:
        return Iterator(self)

    @abc.abstractproperty
    def _inputs(self) -> NoReturn:
        # Upstream datasets feeding this node (subclasses implement this as
        # a plain method returning a list).
        raise NotImplementedError("Not implemented.")

    @abc.abstractmethod
    def copy(self) -> NoReturn:
        raise NotImplementedError("Dataset.copy not implemented.")

    @abc.abstractproperty
    def element_spec(self) -> NoReturn:
        raise NotImplementedError("Dataset.element_spec not implemented.")

    @property
    def name(self) -> str:
        return "Dataset"

    def new_iterator(self) -> Iterator:
        return Iterator(self)

    def background(self) -> "BackgroundDataset":
        # Mark this dataset for background (threaded) prefetching.
        return BackgroundDataset(self)

    def map(self, fn: MapFunc) -> "MapDataset":
        return MapDataset(self, fn)

    def padded_batch(self, batch_size: int, pad: int) -> "PaddedBatchDataset":
        return PaddedBatchDataset(self, batch_size, pad)

    def repeat(self, n: int) -> "RepeatDataset":
        return RepeatDataset(self, n)

    def shard(self, num_shards: int, index: int) -> "ShardDataset":
        # Used to split data across distributed workers.
        return ShardDataset(self, num_shards, index)

    @abc.abstractmethod
    def set_inputs(self, datasets: Tuple["Dataset"]) -> NoReturn:
        raise NotImplementedError("Dataset.set_inputs not implemented.")

    def tokenize(self, tokenizer: Tokenizer, bos: bytes = b"<bos>",
                 eos: bytes = b"<eos>") -> "TokenizedLineDataset":
        # NOTE(review): callers (MTPipeline) also pass None for bos/eos;
        # presumably TokenizedLineDataset treats None as "no marker".
        return TokenizedLineDataset(self, tokenizer, bos, eos)

    @staticmethod
    def bucket_by_sequence_length(dataset: "Dataset",
                                  bucket_boundaries: List[int],
                                  batch_sizes: List[int],
                                  pad: int = 0,
                                  min_length: int = -1,
                                  max_length: int = 10000) -> "BucketDataset":
        return BucketDataset(dataset, bucket_boundaries, batch_sizes, pad,
                             min_length, max_length)

    @staticmethod
    def lookup(dataset: "Dataset", vocabulary: Dict[bytes, int], unk_id):
        return LookupDataset(dataset, vocabulary, unk_id)

    @staticmethod
    def zip(datasets: Tuple["Dataset"]) -> "ZipDataset":
        return ZipDataset(datasets)
class DatasetSource(Dataset):
    """Base class for leaf datasets that read from external storage and
    therefore have no upstream dataset inputs."""

    def _inputs(self):
        # A source node has no parents in the pipeline graph.
        return []
class BackgroundDataset(Dataset):
    """Marker node indicating the wrapped dataset should be prefetched in a
    background thread (interpreted by the Iterator implementation)."""

    def __init__(self, dataset: Dataset):
        self._dataset = dataset
        super(BackgroundDataset, self).__init__()

    def __repr__(self) -> str:
        return "<BackgroundDataset:%s>" % self._dataset

    def _inputs(self):
        return [self._dataset]

    @property
    def name(self):
        return "BackgroundDataset"

    @property
    def element_spec(self):
        # Fix: go through the public ``element_spec`` property instead of
        # the private ``_spec`` attribute, which not every Dataset subclass
        # defines (e.g. RepeatDataset/ShardDataset store no ``_spec``).
        return self._dataset.element_spec
class BucketDataset(Dataset):
    """Batches ``List[int]`` examples (or tuples of them) by length bucket.

    Sequences of similar length share a batch; ``bucket_boundaries`` and
    ``batch_sizes`` define the per-bucket batch size, ``pad`` fills shorter
    sequences, and examples outside [min_length, max_length] are excluded.
    """

    def __init__(self, dataset: Dataset, bucket_boundaries : List[int],
                 batch_sizes: List[int], pad: int = 0, min_length: int = -1,
                 max_length: int = 10000):
        if not self._check_type(dataset.element_spec):
            raise ValueError("The input dataset must produces an example of "
                             "`List[int]` or `Tuple[List[int], ...]`")

        self._dataset = dataset
        self._pad = pad
        self._bucket_boundaries = bucket_boundaries
        self._batch_sizes = batch_sizes
        self._min_length = min_length
        self._max_length = max_length

        _elem_spec = self._dataset.element_spec

        # Derive the batched spec from the input spec:
        # List[int] -> List[List[int]]; tuples are lifted element-wise.
        if _elem_spec.elem_type is List[int]:
            _elem_type = List[List[int]]
            _elem_shape = "[None, None]"
        else:
            # Tuple[List[int], ...] -> Tuple[List[List[int]], ...]
            args = _elem_spec.elem_type.__args__
            args = [List[t] for t in args]
            _elem_type = Tuple[tuple(args)]
            _elem_shape = ",".join(["[None, None]" for _ in args])

            # One-element tuples keep the trailing comma, mirroring Python
            # tuple syntax.
            if len(args) == 1:
                _elem_shape = "(" + _elem_shape + ",)"
            else:
                _elem_shape = "(" + _elem_shape + ")"

        self._spec = ElementSpec(_elem_type, _elem_shape)

        super(BucketDataset, self).__init__()

    def __repr__(self) -> str:
        return "<BucketDataset:%s>" % self._dataset

    def _inputs(self) -> List[Dataset]:
        return [self._dataset]

    def _check_type(self, elem_spec) -> bool:
        # Accept List[int] or a non-empty Tuple[List[int], ...].
        if elem_spec.elem_type is List[int]:
            return True
        elif not isinstance(elem_spec.elem_type,
                            type(Tuple[List[int], ...])):
            return False
        else:
            args = elem_spec.elem_type.__args__

            if len(args) == 0:
                return False

            for t in args:
                if t is not List[int]:
                    return False

            return True

    def copy(self) -> "BucketDataset":
        # NOTE(review): min/max length are not forwarded here, so a copy
        # falls back to the defaults — confirm whether intended.
        return BucketDataset(self._dataset.copy(), self._bucket_boundaries,
                             self._batch_sizes, self._pad)

    @property
    def name(self):
        return "BucketDataset"

    @property
    def bucket_boundaries(self) -> List[int]:
        return self._bucket_boundaries

    @property
    def batch_sizes(self) -> List[int]:
        return self._batch_sizes

    @property
    def min_length(self) -> int:
        return self._min_length

    @property
    def max_length(self) -> int:
        return self._max_length

    @property
    def pad(self) -> bytes:
        return self._pad

    @property
    def element_spec(self) -> ElementSpec:
        return self._spec

    def set_inputs(self, datasets: Tuple[Dataset]) -> None:
        # Rewire the single input and re-derive the batched element spec
        # (same logic as __init__).
        if len(datasets) != 1:
            raise ValueError("``datasets'' must be a tuple with one dataset.")

        dataset = datasets[0]

        if not self._check_type(dataset.element_spec):
            raise ValueError("The input dataset must produces an example of "
                             "`List[int]` or `Tuple[List[int], ...]`")

        self._dataset = dataset

        _elem_spec = self._dataset.element_spec

        if _elem_spec.elem_type is List[int]:
            _elem_type = List[List[int]]
            _elem_shape = "[None, None]"
        else:
            # Tuple[List[int], ...] -> Tuple[List[List[int]], ...]
            args = _elem_spec.elem_type.__args__
            args = [List[t] for t in args]
            _elem_type = Tuple[tuple(args)]
            _elem_shape = ",".join(["[None, None]" for _ in args])

            if len(args) == 1:
                _elem_shape = "(" + _elem_shape + ",)"
            else:
                _elem_shape = "(" + _elem_shape + ")"

        self._spec = ElementSpec(_elem_type, _elem_shape)
class FilterDataset(Dataset):
    """Drops ``List[int]`` examples whose length is outside
    ``[min_len, max_len]``."""

    def __init__(self, dataset: Dataset, min_len: int, max_len: int):
        if dataset.element_spec.elem_type is not List[int]:
            raise ValueError("The input dataset must produces an example of "
                             "`List[int]`.")

        self._dataset = dataset
        self._min_len = min_len
        self._max_len = max_len

        super(FilterDataset, self).__init__()

    def __repr__(self) -> str:
        return "<FilterDataset:%s>" % self._dataset

    def _inputs(self) -> List[Dataset]:
        return [self._dataset]

    def copy(self) -> "FilterDataset":
        return FilterDataset(self._dataset.copy(), self._min_len,
                             self._max_len)

    @property
    def name(self) -> str:
        return "FilterDataset"

    @property
    def max_len(self) -> int:
        return self._max_len

    @property
    def min_len(self) -> int:
        return self._min_len

    @property
    def element_spec(self) -> ElementSpec:
        # Fix: use the public property rather than the private ``_spec``
        # attribute, which some upstream datasets do not define.  Filtering
        # does not change the element structure.
        return self._dataset.element_spec

    def set_inputs(self, datasets: Tuple[Dataset]) -> None:
        if len(datasets) != 1:
            raise ValueError("``datasets'' must be a tuple with one dataset.")

        self._dataset = datasets[0]
class LookupDataset(Dataset):
    """Maps each ``List[bytes]`` token sequence to ``List[int]`` ids via a
    vocabulary; unknown tokens map to ``unk_id``."""

    def __init__(self, dataset: Dataset, vocabulary: Vocabulary,
                 unk_id: int = -1):
        if dataset.element_spec.elem_type is not List[bytes]:
            raise ValueError("The input dataset must produces an example of "
                             "`List[bytes]`.")

        self._dataset = dataset
        self._vocab = vocabulary
        self._unk_id = unk_id
        # Output elements are id sequences regardless of the input spec.
        self._spec = ElementSpec(List[int], "[None]")
        super(LookupDataset, self).__init__()

    def __repr__(self) -> str:
        return "<LookupDataset:%s>" % self._dataset

    def copy(self) -> "LookupDataset":
        return LookupDataset(self._dataset.copy(), self._vocab, self._unk_id)

    def set_inputs(self, datasets: Tuple[Dataset]) -> None:
        if len(datasets) != 1:
            raise ValueError("``datasets'' must be a tuple with one dataset.")

        self._dataset = datasets[0]

    def _inputs(self) -> List[Dataset]:
        return [self._dataset]

    @property
    def name(self) -> str:
        return "LookupDataset"

    @property
    def vocabulary(self) -> Vocabulary:
        return self._vocab

    @property
    def unk_id(self) -> int:
        return self._unk_id

    @property
    def element_spec(self) -> ElementSpec:
        return self._spec
class MapDataset(Dataset):
    """Applies a ``MapFunc`` to every element of the input dataset; the
    element spec becomes the MapFunc's declared spec."""

    def __init__(self, dataset: Dataset, fn: MapFunc):
        if not isinstance(fn, MapFunc):
            raise ValueError("fn must be an instance of MapFunc.")

        self._dataset = dataset
        self._fn = fn
        self._spec = fn.element_spec

        super(MapDataset, self).__init__()

    def __repr__(self) -> str:
        return "<MapDataset:%s>" % str(self._dataset)

    def _inputs(self) -> List[Dataset]:
        # Fix: added for consistency with every other wrapper dataset;
        # without it, graph traversal hits Dataset's abstract ``_inputs``
        # and raises NotImplementedError.
        return [self._dataset]

    def copy(self) -> "MapDataset":
        # NOTE(review): unlike the sibling wrappers this shares rather than
        # copies the input dataset — confirm whether intended.
        return MapDataset(self._dataset, self._fn)

    @property
    def name(self) -> str:
        return "MapDataset"

    @property
    def element_spec(self) -> ElementSpec:
        return self._spec

    def set_inputs(self, datasets: Tuple[Dataset]) -> None:
        # Fix: added for consistency with the sibling classes (previously
        # fell through to the abstract method).
        if len(datasets) != 1:
            raise ValueError("``datasets'' must be a tuple with one dataset.")

        self._dataset = datasets[0]
class PaddedBatchDataset(Dataset):
    """Groups consecutive elements into fixed-size batches, padding every
    sequence in a batch with ``pad``."""

    def __init__(self, dataset: Dataset, batch_size: int, pad: int):
        self._dataset = dataset
        self._batch_size = batch_size
        self._pad = pad

        _elem_spec = self._dataset.element_spec

        # Derive the batched spec: List[int] -> List[List[int]]; tuples are
        # lifted element-wise (same scheme as BucketDataset).
        if _elem_spec.elem_type is List[int]:
            _elem_type = List[List[int]]
            _elem_shape = "[None, None]"
        else:
            # Tuple[List[int], ...] -> Tuple[List[List[int]], ...]
            args = _elem_spec.elem_type.__args__
            args = [List[t] for t in args]
            _elem_type = Tuple[tuple(args)]
            _elem_shape = ",".join(["[None, None]" for _ in args])

            if len(args) == 1:
                _elem_shape = "(" + _elem_shape + ",)"
            else:
                _elem_shape = "(" + _elem_shape + ")"

        self._spec = ElementSpec(_elem_type, _elem_shape)

        super(PaddedBatchDataset, self).__init__()

    def __repr__(self) -> str:
        # Fix: previously printed "<PaddedDataset:...>", which is not this
        # class's name.
        return "<PaddedBatchDataset:%s>" % str(self._dataset)

    def _inputs(self) -> List[Dataset]:
        return [self._dataset]

    def copy(self) -> "PaddedBatchDataset":
        # Fix: referenced the non-existent ``self._pad_id`` (the attribute
        # is ``self._pad``), so copy() always raised AttributeError.
        return PaddedBatchDataset(self._dataset.copy(),
                                  self._batch_size, self._pad)

    @property
    def name(self) -> str:
        return "PaddedBatchDataset"

    @property
    def batch_size(self) -> int:
        return self._batch_size

    @property
    def pad(self) -> int:
        return self._pad

    @property
    def element_spec(self) -> ElementSpec:
        return self._spec
class RepeatDataset(Dataset):
    """Repeats the input dataset *count* times; ``None``/``-1`` means
    repeat indefinitely."""

    def __init__(self, dataset: Dataset, count: int = None):
        self._dataset = dataset
        # Normalize None to -1, the "repeat forever" sentinel.
        self._count = -1 if count is None else count
        super(RepeatDataset, self).__init__()

    def __repr__(self) -> str:
        return "<RepeatDataset:%s,%d>" % (self._dataset, self._count)

    def _inputs(self) -> List[Dataset]:
        return [self._dataset]

    def copy(self) -> "RepeatDataset":
        return RepeatDataset(self._dataset.copy(), self._count)

    @property
    def name(self) -> str:
        return "RepeatDataset"

    @property
    def count(self) -> int:
        return self._count

    @property
    def element_spec(self) -> ElementSpec:
        # Fix: the original body had no ``return`` and always yielded None,
        # breaking any wrapper that inspects this spec.  Repeating does not
        # change element structure.
        return self._dataset.element_spec

    def set_inputs(self, datasets: Tuple[Dataset]) -> None:
        if len(datasets) != 1:
            raise ValueError("``datasets'' must be a tuple with one dataset.")

        self._dataset = datasets[0]
class ShardDataset(Dataset):
    """Restricts iteration to one of ``num_shards`` interleaved shards
    (shard ``index``) — used to split data across distributed workers."""

    def __init__(self, dataset: Dataset, num_shards : int, index : int):
        self._dataset = dataset
        self._num_shards = num_shards
        self._index = index
        super(ShardDataset, self).__init__()

    def __repr__(self) -> str:
        return "<ShardDataset:%s,%d,%d>" % (self._dataset, self._num_shards,
                                            self._index)

    def copy(self) -> "ShardDataset":
        return ShardDataset(self._dataset.copy(), self._num_shards,
                            self._index)

    def set_inputs(self, datasets: Tuple[Dataset]) -> None:
        if len(datasets) != 1:
            raise ValueError("``datasets'' must be a tuple with one dataset.")

        self._dataset = datasets[0]

    def _inputs(self) -> List[Dataset]:
        return [self._dataset]

    @property
    def name(self) -> str:
        return "ShardDataset"

    @property
    def num_shards(self) -> int:
        return self._num_shards

    @property
    def index(self) -> int:
        return self._index

    @property
    def element_spec(self) -> ElementSpec:
        # Sharding does not alter the element structure.
        return self._dataset.element_spec
class TextLineDataset(DatasetSource):
    """Source dataset yielding raw byte lines, backed either by a file name
    or an in-memory list of lines."""

    def __init__(self, buffer_or_filename: Union[List, str]):
        self._source = buffer_or_filename
        self._spec = ElementSpec(bytes, "[]")
        super(TextLineDataset, self).__init__()

    def __repr__(self) -> str:
        # Fix: referenced the non-existent ``self._filename`` attribute
        # (the attribute is ``self._source``), so printing the dataset
        # raised AttributeError.
        return "<TextLineDataset:%s>" % self._source

    def copy(self) -> "TextLineDataset":
        return TextLineDataset(self._source)

    @property
    def name(self) -> str:
        return "TextLineDataset"

    @property
    def input_source(self) -> Union[List, str]:
        return self._source

    @property
    def element_spec(self) -> ElementSpec:
        return self._spec

    def set_inputs(self, datasets: Tuple[Dataset]) -> None:
        # Source datasets have no inputs; nothing to rewire.
        return None
class TokenizedLineDataset(Dataset):
    """Splits each raw byte line into a token list using *tokenizer*,
    surrounded by the configured BOS/EOS markers."""

    def __init__(self, dataset: TextLineDataset, tokenizer: Tokenizer,
                 bos: bytes = b"<bos>", eos: bytes = b"<eos>"):
        elem_spec = dataset.element_spec

        if elem_spec.elem_type is not bytes or elem_spec.shape != "[]":
            raise ValueError("TokenizedLineDataset only accepts a dataset with"
                             " ElementSpec(bytes, '[None]')")

        self._dataset = dataset
        self._tokenizer = tokenizer
        self._bos = bos
        self._eos = eos
        # One scalar line in -> one token sequence out.
        self._spec = ElementSpec(List[bytes], "[None]")
        super(TokenizedLineDataset, self).__init__()

    def __repr__(self) -> str:
        return "<TokenizedLineDataset:%s>" % self._dataset

    def copy(self) -> "TokenizedLineDataset":
        return TokenizedLineDataset(self._dataset.copy(), self._tokenizer,
                                    self._bos, self._eos)

    def set_inputs(self, datasets: Tuple[Dataset]) -> None:
        if len(datasets) != 1:
            raise ValueError("``datasets'' must be a tuple with one dataset.")

        self._dataset = datasets[0]

    def _inputs(self) -> List[Dataset]:
        return [self._dataset]

    @property
    def name(self) -> str:
        return "TokenizedLineDataset"

    @property
    def tokenizer(self) -> Tokenizer:
        return self._tokenizer

    @property
    def bos(self) -> bytes:
        return self._bos

    @property
    def eos(self) -> bytes:
        return self._eos

    @property
    def element_spec(self) -> ElementSpec:
        return self._spec
class ZipDataset(Dataset):
    """Zips several datasets into one producing tuples of their elements."""

    def __init__(self, datasets: Tuple[Dataset]):
        if not isinstance(datasets, tuple):
            raise ValueError("ZipDataset expects a tuple of datasets as "
                             "the input.")

        self._datasets = datasets
        self._num_inputs = len(datasets)

        _type = tuple(dataset.element_spec.elem_type for dataset in datasets)
        _type = Tuple[_type]
        _shape = ",".join([dataset.element_spec.shape for dataset in datasets])

        # NOTE(review): both branches build the same string; by analogy with
        # BucketDataset the multi-input case was probably meant to end with
        # ")" rather than ",)".  Left unchanged to preserve behavior.
        if len(self._datasets) == 1:
            _shape = "(" + _shape + ",)"
        else:
            _shape = "(" + _shape + ",)"

        self._spec = ElementSpec(_type, _shape)

        super(ZipDataset, self).__init__()

    def __repr__(self) -> str:
        # Fix: was ``len(self._datasets == 1)``, which raises TypeError
        # (it compares the tuple to an int, then calls len() on the bool).
        if len(self._datasets) == 1:
            ds_repr = "(%s,)" % self._datasets[0]
        else:
            ds_repr = ",".join([str(ds) for ds in self._datasets])

        return "<ZipDataset:(%s,)>" % ds_repr

    def _inputs(self) -> List[Dataset]:
        return list(self._datasets)

    def copy(self) -> "ZipDataset":
        datasets = tuple([ds.copy() for ds in self._datasets])

        return ZipDataset(datasets)

    @property
    def name(self) -> str:
        return "ZipDataset"

    @property
    def num_inputs(self) -> int:
        return self._num_inputs

    @property
    def element_spec(self) -> ElementSpec:
        return self._spec

    def set_inputs(self, datasets: Tuple[Dataset]) -> None:
        # Rewire inputs and rebuild the zipped spec (same logic as __init__).
        self._datasets = datasets

        _type = tuple(dataset.element_spec.elem_type for dataset in datasets)
        _type = Tuple[_type]
        _shape = ",".join([dataset.element_spec.shape for dataset in datasets])

        if len(self._datasets) == 1:
            _shape = "(" + _shape + ",)"
        else:
            _shape = "(" + _shape + ",)"

        self._spec = ElementSpec(_type, _shape)
| 18,929 | 27.338323 | 79 | py |
THUMT | THUMT-master/thumt/data/vocab.py | # coding=utf-8
# Copyright 2017-Present The THUMT Authors
import numpy as np
import six
import torch
from typing import Union
class Vocabulary(object):
    """Bidirectional token<->id mapping loaded from a vocabulary file.

    The file holds one token per line; the 0-based line number is the id.
    Indexing with an ``int`` returns the token (bytes); indexing with
    ``bytes`` or ``str`` returns the id.
    """

    def __init__(self, filename):
        self._idx2word = {}
        self._word2idx = {}

        with open(filename, "rb") as fd:
            for idx, line in enumerate(fd):
                word = line.strip()
                self._word2idx[word] = idx
                self._idx2word[idx] = word

    def __getitem__(self, key: Union[bytes, int]):
        if isinstance(key, int):
            return self._idx2word[key]

        # str keys are normalized to bytes before the id lookup.
        if isinstance(key, str):
            key = key.encode("utf-8")

        if isinstance(key, bytes):
            return self._word2idx[key]

        raise LookupError("Cannot lookup key %s." % key)

    def __contains__(self, key):
        if isinstance(key, str):
            key = key.encode("utf-8")

        return key in self._word2idx

    def __iter__(self):
        return six.iterkeys(self._word2idx)

    def __len__(self):
        return len(self._idx2word)
| 1,123 | 23.434783 | 60 | py |
THUMT | THUMT-master/thumt/data/pipeline.py | # coding=utf-8
# Copyright 2017-Present The THUMT Authors
import torch
from thumt.data.dataset import Dataset, ElementSpec, MapFunc, TextLineDataset
from thumt.data.vocab import Vocabulary
from thumt.tokenizers import WhiteSpaceTokenizer
def _sort_input_file(filename, reverse=True):
with open(filename, "rb") as fd:
inputs = [line.strip() for line in fd]
input_lens = [
(i, len(line.split())) for i, line in enumerate(inputs)]
sorted_input_lens = sorted(input_lens, key=lambda x: x[1],
reverse=reverse)
sorted_keys = {}
sorted_inputs = []
for i, (idx, _) in enumerate(sorted_input_lens):
sorted_inputs.append(inputs[idx])
sorted_keys[idx] = i
return sorted_keys, sorted_inputs
class MTPipeline(object):
    """Builders for the machine-translation input pipelines (train/eval/
    inference), wiring tokenization, vocabulary lookup, batching, sharding
    and device placement together."""

    @staticmethod
    def get_train_dataset(filenames, params, cpu=False):
        """Build the bucketed training dataset from (source, target) files.

        Returns a background-prefetching dataset yielding
        ``(features, labels)``: features holds padded id tensors plus float
        masks, labels is the shifted target.
        """
        src_vocab = params.vocabulary["source"]
        tgt_vocab = params.vocabulary["target"]

        src_dataset = TextLineDataset(filenames[0])
        tgt_dataset = TextLineDataset(filenames[1])
        lab_dataset = TextLineDataset(filenames[1])

        # Source gets <eos> appended; decoder input gets <bos> prepended;
        # labels (shifted target) get <eos> appended.
        src_dataset = src_dataset.tokenize(WhiteSpaceTokenizer(),
                                           None, params.eos)
        tgt_dataset = tgt_dataset.tokenize(WhiteSpaceTokenizer(),
                                           params.bos, None)
        lab_dataset = lab_dataset.tokenize(WhiteSpaceTokenizer(),
                                           None, params.eos)

        src_dataset = Dataset.lookup(src_dataset, src_vocab,
                                     src_vocab[params.unk])
        tgt_dataset = Dataset.lookup(tgt_dataset, tgt_vocab,
                                     tgt_vocab[params.unk])
        lab_dataset = Dataset.lookup(lab_dataset, tgt_vocab,
                                     tgt_vocab[params.unk])

        dataset = Dataset.zip((src_dataset, tgt_dataset, lab_dataset))
        # One shard per distributed worker.
        dataset = dataset.shard(torch.distributed.get_world_size(),
                                torch.distributed.get_rank())

        def bucket_boundaries(max_length, min_length=8, step=8):
            # Bucket upper bounds: min_length+1, min_length+step+1, ...
            x = min_length
            boundaries = []

            while x <= max_length:
                boundaries.append(x + 1)
                x += step

            return boundaries

        batch_size = params.batch_size
        max_length = (params.max_length // 8) * 8
        min_length = params.min_length
        boundaries = bucket_boundaries(max_length)
        # Token-based batching: longer sequences get proportionally smaller
        # batches unless ``fixed_batch_size`` is requested.
        batch_sizes = [max(1, batch_size // (x - 1))
                       if not params.fixed_batch_size else batch_size
                       for x in boundaries] + [1]

        dataset = Dataset.bucket_by_sequence_length(
            dataset, boundaries, batch_sizes, pad=src_vocab[params.pad],
            min_length=params.min_length, max_length=params.max_length)

        def map_fn(inputs):
            # Convert padded python lists to tensors, build float masks
            # (1 where not padding) and optionally move to the GPU.
            src_seq, tgt_seq, labels = inputs
            src_seq = torch.tensor(src_seq)
            tgt_seq = torch.tensor(tgt_seq)
            labels = torch.tensor(labels)
            src_mask = src_seq != params.vocabulary["source"][params.pad]
            tgt_mask = tgt_seq != params.vocabulary["target"][params.pad]
            src_mask = src_mask.float()
            tgt_mask = tgt_mask.float()

            if not cpu:
                src_seq = src_seq.cuda(params.device)
                src_mask = src_mask.cuda(params.device)
                tgt_seq = tgt_seq.cuda(params.device)
                tgt_mask = tgt_mask.cuda(params.device)

            features = {
                "source": src_seq,
                "source_mask": src_mask,
                "target": tgt_seq,
                "target_mask": tgt_mask
            }

            return features, labels

        map_obj = MapFunc(map_fn, ElementSpec("Tensor", "{key: [None, None]}"))
        dataset = dataset.map(map_obj)
        dataset = dataset.background()

        return dataset

    @staticmethod
    def get_eval_dataset(filenames, params, cpu=False):
        """Like ``get_train_dataset`` but with fixed-size padded batches
        (``decode_batch_size``) instead of length bucketing."""
        src_vocab = params.vocabulary["source"]
        tgt_vocab = params.vocabulary["target"]

        src_dataset = TextLineDataset(filenames[0])
        tgt_dataset = TextLineDataset(filenames[1])
        lab_dataset = TextLineDataset(filenames[1])

        src_dataset = src_dataset.tokenize(WhiteSpaceTokenizer(),
                                           None, params.eos)
        tgt_dataset = tgt_dataset.tokenize(WhiteSpaceTokenizer(),
                                           params.bos, None)
        lab_dataset = lab_dataset.tokenize(WhiteSpaceTokenizer(),
                                           None, params.eos)

        src_dataset = Dataset.lookup(src_dataset, src_vocab,
                                     src_vocab[params.unk])
        tgt_dataset = Dataset.lookup(tgt_dataset, tgt_vocab,
                                     tgt_vocab[params.unk])
        lab_dataset = Dataset.lookup(lab_dataset, tgt_vocab,
                                     tgt_vocab[params.unk])

        dataset = Dataset.zip((src_dataset, tgt_dataset, lab_dataset))
        dataset = dataset.shard(torch.distributed.get_world_size(),
                                torch.distributed.get_rank())
        dataset = dataset.padded_batch(params.decode_batch_size,
                                       pad=src_vocab[params.pad])

        def map_fn(inputs):
            # Same tensor/mask conversion as the training pipeline.
            src_seq, tgt_seq, labels = inputs
            src_seq = torch.tensor(src_seq)
            tgt_seq = torch.tensor(tgt_seq)
            labels = torch.tensor(labels)
            src_mask = src_seq != params.vocabulary["source"][params.pad]
            tgt_mask = tgt_seq != params.vocabulary["target"][params.pad]
            src_mask = src_mask.float()
            tgt_mask = tgt_mask.float()

            if not cpu:
                src_seq = src_seq.cuda(params.device)
                src_mask = src_mask.cuda(params.device)
                tgt_seq = tgt_seq.cuda(params.device)
                tgt_mask = tgt_mask.cuda(params.device)

            features = {
                "source": src_seq,
                "source_mask": src_mask,
                "target": tgt_seq,
                "target_mask": tgt_mask
            }

            return features, labels

        map_obj = MapFunc(map_fn, ElementSpec("Tensor", "{key: [None, None]}"))
        dataset = dataset.map(map_obj)
        dataset = dataset.background()

        return dataset

    @staticmethod
    def get_infer_dataset(filename, params, cpu=False):
        """Build the decoding dataset.

        Inputs are sorted by length (longest first) for efficient batching;
        returns ``(sorted_keys, dataset)`` so decoder outputs can be
        restored to the original input order.
        """
        sorted_keys, sorted_data = _sort_input_file(filename)
        src_vocab = params.vocabulary["source"]

        src_dataset = TextLineDataset(sorted_data)
        src_dataset = src_dataset.tokenize(WhiteSpaceTokenizer(),
                                           None, params.eos)
        src_dataset = Dataset.lookup(src_dataset, src_vocab,
                                     src_vocab[params.unk])

        dataset = src_dataset.shard(torch.distributed.get_world_size(),
                                    torch.distributed.get_rank())
        dataset = dataset.padded_batch(params.decode_batch_size,
                                       pad=src_vocab[params.pad])

        def map_fn(inputs):
            # Inference only needs source ids plus the padding mask.
            src_seq = torch.tensor(inputs)
            src_mask = src_seq != params.vocabulary["source"][params.pad]
            src_mask = src_mask.float()

            if not cpu:
                src_seq = src_seq.cuda(params.device)
                src_mask = src_mask.cuda(params.device)

            features = {
                "source": src_seq,
                "source_mask": src_mask,
            }

            return features

        map_obj = MapFunc(map_fn, ElementSpec("Tensor", "{key: [None, None]}"))
        dataset = dataset.map(map_obj)
        dataset = dataset.background()

        return sorted_keys, dataset
| 7,894 | 36.240566 | 79 | py |
adaptive_template_systems | adaptive_template_systems-master/docs/source/conf.py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# Make the project root (two levels up from docs/source) importable so that
# sphinx.ext.autodoc can find the package.
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'Adaptive Template System'
copyright = '2019, LP'
author = 'LP'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '0.0.1'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.todo',
    'sphinx.ext.mathjax',
    'sphinx.ext.viewcode',
    'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'AdaptiveTemplateSystemdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'AdaptiveTemplateSystem.tex', 'Adaptive Template System Documentation',
     'LP', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'adaptivetemplatesystem', 'Adaptive Template System Documentation',
     [author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'AdaptiveTemplateSystem', 'Adaptive Template System Documentation',
     author, 'AdaptiveTemplateSystem', 'One line description of project.',
     'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True | 5,204 | 29.982143 | 88 | py |
multi-categorical-gans | multi-categorical-gans-master/multi_categorical_gans/methods/general/multi_categorical.py | from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.one_hot_categorical import OneHotCategorical
class MultiCategorical(nn.Module):
    """Output head that models several categorical variables side by side.

    One linear projection plus one ``CategoricalActivation`` is created per
    entry of ``variable_sizes``; ``forward`` evaluates every head on the
    same input.
    """

    def __init__(self, input_size, variable_sizes):
        super(MultiCategorical, self).__init__()
        # Same attribute names as before so saved state_dicts keep loading.
        self.output_layers = nn.ModuleList(
            nn.Linear(input_size, variable_size) for variable_size in variable_sizes)
        self.output_activations = nn.ModuleList(
            CategoricalActivation() for _ in variable_sizes)

    def forward(self, inputs, training=True, temperature=None, concat=True):
        """Run every categorical head; concatenate along dim 1 unless ``concat`` is False."""
        outputs = [
            activation(layer(inputs), training=training, temperature=temperature)
            for layer, activation in zip(self.output_layers, self.output_activations)
        ]
        return torch.cat(outputs, dim=1) if concat else outputs
class CategoricalActivation(nn.Module):
    """Turn logits for one categorical variable into (pseudo-)probabilities.

    Three regimes: Gumbel-Softmax whenever a temperature is given (hard
    one-hot when not training), a plain softmax relaxation while training,
    and a sampled one-hot vector at evaluation time.
    """

    def __init__(self):
        super(CategoricalActivation, self).__init__()

    def forward(self, logits, training=True, temperature=None):
        if temperature is not None:
            # Gumbel-Softmax path: soft while training, hard one-hot otherwise.
            return F.gumbel_softmax(logits, hard=not training, tau=temperature)
        if training:
            # Differentiable relaxation used during training.
            return F.softmax(logits, dim=1)
        # Evaluation without a temperature: draw an actual one-hot sample.
        return OneHotCategorical(logits=logits).sample()
| 1,637 | 31.76 | 96 | py |
multi-categorical-gans | multi-categorical-gans-master/multi_categorical_gans/methods/general/discriminator.py | from __future__ import print_function
import torch.nn as nn
class Discriminator(nn.Module):
    """MLP that scores samples as real or fake.

    LeakyReLU hidden layers, with batch normalization on every hidden layer
    except the first. Ends in a sigmoid probability unless ``critic`` is
    set, in which case the output stays linear (WGAN-style critic).
    """

    def __init__(self, input_size, hidden_sizes=(256, 128), bn_decay=0.01, critic=False):
        super(Discriminator, self).__init__()
        activation = nn.LeakyReLU(0.2)
        stack = []
        in_features = input_size
        for index, out_features in enumerate(hidden_sizes):
            stack.append(nn.Linear(in_features, out_features))
            # Batch norm is deliberately skipped on the first hidden layer.
            if index > 0 and bn_decay > 0:
                stack.append(nn.BatchNorm1d(out_features, momentum=(1 - bn_decay)))
            stack.append(activation)
            in_features = out_features
        stack.append(nn.Linear(in_features, 1))
        if not critic:
            # A critic keeps the linear output; otherwise emit probabilities.
            stack.append(nn.Sigmoid())
        self.model = nn.Sequential(*stack)

    def forward(self, inputs):
        """Return one flat score per input row."""
        return self.model(inputs).view(-1)
| 983 | 28.818182 | 89 | py |
multi-categorical-gans | multi-categorical-gans-master/multi_categorical_gans/methods/general/encoder.py | from __future__ import print_function
import torch.nn as nn
class Encoder(nn.Module):
    """Feed-forward encoder mapping inputs to a latent code.

    Every stage, including the final projection to ``code_size``, is a
    linear transform followed by tanh.
    """

    def __init__(self, input_size, code_size, hidden_sizes=[]):
        super(Encoder, self).__init__()
        activation = nn.Tanh()
        sizes = list(hidden_sizes) + [code_size]
        stack = []
        in_features = input_size
        for out_features in sizes:
            stack.extend((nn.Linear(in_features, out_features), activation))
            in_features = out_features
        self.hidden_layers = nn.Sequential(*stack)

    def forward(self, inputs):
        """Return the latent code for ``inputs``."""
        return self.hidden_layers(inputs)
| 678 | 24.148148 | 69 | py |
multi-categorical-gans | multi-categorical-gans-master/multi_categorical_gans/methods/general/wgan_gp.py | from __future__ import print_function
import torch
from torch.autograd.variable import Variable
from multi_categorical_gans.utils.cuda import to_cuda_if_available
def calculate_gradient_penalty(discriminator, penalty, real_data, fake_data):
    """WGAN-GP gradient penalty term.

    Scores random interpolates between real and fake batches with the
    discriminator and penalizes gradient norms that deviate from 1.

    :param discriminator: critic network mapping a batch to scalar scores.
    :param penalty: weight (lambda) applied to the penalty term.
    :param real_data: batch of real samples.
    :param fake_data: batch of generated samples, same shape as ``real_data``.
    :return: scalar penalty to add to the critic loss.
    """
    real_data = real_data.data
    fake_data = fake_data.data
    # One interpolation coefficient per sample, broadcast across all features.
    alpha = torch.rand(len(real_data), 1)
    alpha = alpha.expand(real_data.size())
    alpha = to_cuda_if_available(alpha)
    interpolates = alpha * real_data + ((1 - alpha) * fake_data)
    # requires_grad so the scores can be differentiated w.r.t. the inputs.
    interpolates = Variable(interpolates, requires_grad=True)
    discriminator_interpolates = discriminator(interpolates)
    # create_graph=True keeps the graph so the penalty itself is backpropagable.
    gradients = torch.autograd.grad(outputs=discriminator_interpolates,
                                    inputs=interpolates,
                                    grad_outputs=to_cuda_if_available(torch.ones_like(discriminator_interpolates)),
                                    create_graph=True, retain_graph=True, only_inputs=True)[0]
    # Two-sided penalty: (||grad||_2 - 1)^2 averaged over the batch, scaled by lambda.
    return ((gradients.norm(2, dim=1) - 1) ** 2).mean() * penalty
| 1,031 | 35.857143 | 115 | py |
multi-categorical-gans | multi-categorical-gans-master/multi_categorical_gans/methods/general/autoencoder.py | from __future__ import print_function
import torch
import torch.nn as nn
from multi_categorical_gans.methods.general.decoder import Decoder
from multi_categorical_gans.methods.general.encoder import Encoder
class AutoEncoder(nn.Module):
    """Encoder/decoder pair used for pre-training and reused by the GANs.

    When ``variable_sizes`` is given the decoder emits one categorical
    block per variable; otherwise it reconstructs ``input_size`` features.
    """

    def __init__(self, input_size, code_size=128, encoder_hidden_sizes=[], decoder_hidden_sizes=[],
                 variable_sizes=None):
        super(AutoEncoder, self).__init__()
        self.encoder = Encoder(input_size,
                               code_size,
                               hidden_sizes=encoder_hidden_sizes)
        decoder_output = input_size if variable_sizes is None else variable_sizes
        self.decoder = Decoder(code_size,
                               decoder_output,
                               hidden_sizes=decoder_hidden_sizes)

    def forward(self, inputs, normalize_code=False, training=False, temperature=None):
        """Encode then decode, returning ``(code, reconstruction)``."""
        code = self.encode(inputs, normalize_code=normalize_code)
        return code, self.decode(code, training=training, temperature=temperature)

    def encode(self, inputs, normalize_code=False):
        """Map inputs to the latent code, optionally L2-normalized per sample."""
        code = self.encoder(inputs)
        if not normalize_code:
            return code
        norms = torch.norm(code, 2, 1)
        return torch.div(code, norms.unsqueeze(1).expand_as(code))

    def decode(self, code, training=False, temperature=None):
        """Reconstruct features from a latent code."""
        return self.decoder(code, training=training, temperature=temperature)
| 1,446 | 34.292683 | 99 | py |
multi-categorical-gans | multi-categorical-gans-master/multi_categorical_gans/methods/general/single_output.py | from __future__ import print_function
import torch.nn as nn
class SingleOutput(nn.Module):
    """Plain (non-categorical) output head: one linear layer plus an optional activation.

    ``training`` and ``temperature`` are accepted but ignored so the head
    stays interchangeable with ``MultiCategorical``.
    """

    def __init__(self, previous_layer_size, output_size, activation=None):
        super(SingleOutput, self).__init__()
        linear = nn.Linear(previous_layer_size, output_size)
        # Keep the submodule under "model" so state_dict keys are unchanged.
        self.model = linear if activation is None else nn.Sequential(linear, activation)

    def forward(self, hidden, training=False, temperature=None):
        return self.model(hidden)
| 525 | 29.941176 | 95 | py |
multi-categorical-gans | multi-categorical-gans-master/multi_categorical_gans/methods/general/decoder.py | from __future__ import print_function
import torch.nn as nn
from multi_categorical_gans.methods.general.multi_categorical import MultiCategorical
from multi_categorical_gans.methods.general.single_output import SingleOutput
class Decoder(nn.Module):
    """Decoder from latent code back to feature space.

    ``output_size`` selects the head: an ``int`` yields a sigmoid
    ``SingleOutput``, while a ``list`` of categorical sizes yields a
    ``MultiCategorical`` head.
    """

    def __init__(self, code_size, output_size, hidden_sizes=[]):
        super(Decoder, self).__init__()
        activation = nn.Tanh()
        stack = []
        in_features = code_size
        for out_features in hidden_sizes:
            stack.extend((nn.Linear(in_features, out_features), activation))
            in_features = out_features
        # No hidden stack at all when hidden_sizes is empty.
        self.hidden_layers = nn.Sequential(*stack) if stack else None
        if type(output_size) is int:
            self.output_layer = SingleOutput(in_features, output_size, activation=nn.Sigmoid())
        elif type(output_size) is list:
            self.output_layer = MultiCategorical(in_features, output_size)
        else:
            raise Exception("Invalid output size.")

    def forward(self, code, training=False, temperature=None):
        hidden = code if self.hidden_layers is None else self.hidden_layers(code)
        return self.output_layer(hidden, training=training, temperature=temperature)
| 1,439 | 32.488372 | 103 | py |
multi-categorical-gans | multi-categorical-gans-master/multi_categorical_gans/methods/general/generator.py | from __future__ import print_function
import torch.nn as nn
from multi_categorical_gans.methods.general.multi_categorical import MultiCategorical
from multi_categorical_gans.methods.general.single_output import SingleOutput
class Generator(nn.Module):
    """Generator mapping noise to feature space.

    ReLU hidden layers with batch normalization on every hidden layer except
    the first. ``output_size`` selects the head: an ``int`` yields a linear
    ``SingleOutput``, a ``list`` of sizes yields a ``MultiCategorical`` head.
    """

    def __init__(self, noise_size, output_size, hidden_sizes=[], bn_decay=0.01):
        super(Generator, self).__init__()
        activation = nn.ReLU()
        stack = []
        in_features = noise_size
        for index, out_features in enumerate(hidden_sizes):
            stack.append(nn.Linear(in_features, out_features))
            # Batch norm is deliberately skipped on the first hidden layer.
            if index > 0 and bn_decay > 0:
                stack.append(nn.BatchNorm1d(out_features, momentum=(1 - bn_decay)))
            stack.append(activation)
            in_features = out_features
        # No hidden stack at all when hidden_sizes is empty.
        self.hidden_layers = nn.Sequential(*stack) if stack else None
        if type(output_size) is int:
            self.output = SingleOutput(in_features, output_size)
        elif type(output_size) is list:
            self.output = MultiCategorical(in_features, output_size)
        else:
            raise Exception("Invalid output size.")

    def forward(self, noise, training=False, temperature=None):
        hidden = noise if self.hidden_layers is None else self.hidden_layers(noise)
        return self.output(hidden, training=training, temperature=temperature)
| 1,585 | 34.244444 | 89 | py |
multi-categorical-gans | multi-categorical-gans-master/multi_categorical_gans/methods/medgan/discriminator.py | from __future__ import print_function
import torch
import torch.nn as nn
class Discriminator(nn.Module):
    """MedGAN discriminator with minibatch averaging.

    Each sample is concatenated with the per-feature batch mean (hence the
    first layer sees ``input_size * 2`` features) before being scored.
    """

    def __init__(self, input_size, hidden_sizes=(256, 128)):
        super(Discriminator, self).__init__()
        activation = nn.LeakyReLU()
        stack = []
        in_features = input_size * 2  # doubled by minibatch averaging
        for out_features in hidden_sizes:
            stack.extend((nn.Linear(in_features, out_features), activation))
            in_features = out_features
        stack.extend((nn.Linear(in_features, 1), nn.Sigmoid()))
        self.model = nn.Sequential(*stack)

    def minibatch_averaging(self, inputs):
        """
        Append the batch-wide feature means to every sample.
        This method is explained in the MedGAN paper.
        """
        batch_mean = torch.mean(inputs, 0)
        tiled_mean = batch_mean.repeat(len(inputs), 1)
        return torch.cat((inputs, tiled_mean), 1)

    def forward(self, inputs):
        """Return one real/fake probability per input row."""
        return self.model(self.minibatch_averaging(inputs)).view(-1)
| 1,103 | 28.052632 | 75 | py |
multi-categorical-gans | multi-categorical-gans-master/multi_categorical_gans/methods/medgan/sampler.py | from __future__ import print_function
import argparse
import torch
import numpy as np
from torch.autograd.variable import Variable
from multi_categorical_gans.methods.general.autoencoder import AutoEncoder
from multi_categorical_gans.methods.medgan.generator import Generator
from multi_categorical_gans.utils.categorical import load_variable_sizes_from_metadata
from multi_categorical_gans.utils.commandline import parse_int_list
from multi_categorical_gans.utils.cuda import to_cuda_if_available, to_cpu_if_available, load_without_cuda
def sample(autoencoder, generator, num_samples, num_features, batch_size=100, code_size=128, temperature=None,
           round_features=False):
    """Draw ``num_samples`` synthetic rows from a trained generator + decoder.

    Generates codes from Gaussian noise in batches, decodes them with the
    autoencoder, and stacks the results into one float32 array.

    :param autoencoder: trained AutoEncoder whose decoder maps codes to features.
    :param generator: trained generator mapping noise to codes.
    :param num_samples: number of rows to produce.
    :param num_features: width of the output array.
    :param batch_size: samples generated per forward pass.
    :param code_size: dimensionality of the noise / latent code.
    :param temperature: Gumbel-Softmax temperature (MC-MedGAN) or None.
    :param round_features: round decoder outputs to 0/1 (plain MedGAN).
    :return: numpy array of shape (num_samples, num_features).
    """
    autoencoder, generator = to_cuda_if_available(autoencoder, generator)
    autoencoder.train(mode=False)
    generator.train(mode=False)
    samples = np.zeros((num_samples, num_features), dtype=np.float32)
    start = 0
    while start < num_samples:
        with torch.no_grad():
            noise = Variable(torch.FloatTensor(batch_size, code_size).normal_())
            noise = to_cuda_if_available(noise)
            batch_code = generator(noise)
            batch_samples = autoencoder.decode(batch_code, training=False, temperature=temperature)
        batch_samples = to_cpu_if_available(batch_samples)
        batch_samples = batch_samples.data.numpy()
        # if rounding is activated (for MedGAN with binary outputs)
        if round_features:
            batch_samples = np.round(batch_samples)
        # do not go further than the desired number of samples
        end = min(start + batch_size, num_samples)
        # limit the samples taken from the batch based on what is missing
        samples[start:end, :] = batch_samples[:min(batch_size, end - start), :]
        # move to next batch
        start = end
    return samples
def main():
    """Command-line entry point: load a trained autoencoder and generator,
    draw samples, and save them with ``np.save``.

    For plain MedGAN (no metadata/temperature) decoder outputs are rounded
    to binary features; for MC-MedGAN the categorical decoder is used with
    the given Gumbel-Softmax temperature.
    """
    options_parser = argparse.ArgumentParser(description="Sample data with MedGAN.")
    options_parser.add_argument("autoencoder", type=str, help="Autoencoder input file.")
    options_parser.add_argument("generator", type=str, help="Generator input file.")
    options_parser.add_argument("num_samples", type=int, help="Number of output samples.")
    options_parser.add_argument("num_features", type=int, help="Number of output features.")
    options_parser.add_argument("data", type=str, help="Output data.")
    options_parser.add_argument("--metadata", type=str,
                                help="Information about the categorical variables in json format.")
    options_parser.add_argument(
        "--code_size",
        type=int,
        default=128,
        help="Dimension of the autoencoder latent space."
    )
    options_parser.add_argument(
        "--encoder_hidden_sizes",
        type=str,
        default="",
        help="Size of each hidden layer in the encoder separated by commas (no spaces)."
    )
    options_parser.add_argument(
        "--decoder_hidden_sizes",
        type=str,
        default="",
        help="Size of each hidden layer in the decoder separated by commas (no spaces)."
    )
    options_parser.add_argument(
        "--batch_size",
        type=int,
        default=100,
        help="Amount of samples per batch."
    )
    options_parser.add_argument(
        "--generator_hidden_layers",
        type=int,
        default=2,
        help="Number of hidden layers in the generator."
    )
    options_parser.add_argument(
        "--generator_bn_decay",
        type=float,
        default=0.01,
        help="Generator batch normalization decay."
    )
    options_parser.add_argument(
        "--temperature",
        type=float,
        default=None,
        help="Gumbel-Softmax temperature."
    )
    options = options_parser.parse_args()
    # Both metadata and temperature must be given to enable the MC-MedGAN path.
    if options.metadata is not None and options.temperature is not None:
        variable_sizes = load_variable_sizes_from_metadata(options.metadata)
        temperature = options.temperature
    else:
        variable_sizes = None
        temperature = None
    autoencoder = AutoEncoder(
        options.num_features,
        code_size=options.code_size,
        encoder_hidden_sizes=parse_int_list(options.encoder_hidden_sizes),
        decoder_hidden_sizes=parse_int_list(options.decoder_hidden_sizes),
        variable_sizes=variable_sizes
    )
    autoencoder.load_state_dict(torch.load(options.autoencoder))
    generator = Generator(
        code_size=options.code_size,
        num_hidden_layers=options.generator_hidden_layers,
        bn_decay=options.generator_bn_decay
    )
    load_without_cuda(generator, options.generator)
    # Without a temperature the decoder emits probabilities, so round to binary.
    data = sample(
        autoencoder,
        generator,
        options.num_samples,
        options.num_features,
        batch_size=options.batch_size,
        code_size=options.code_size,
        temperature=temperature,
        round_features=(temperature is None)
    )
    np.save(options.data, data)
if __name__ == "__main__":
    main()
| 4,969 | 30.858974 | 110 | py |
multi-categorical-gans | multi-categorical-gans-master/multi_categorical_gans/methods/medgan/pre_trainer.py | from __future__ import print_function
import argparse
import torch
import numpy as np
from torch.autograd.variable import Variable
from torch.optim import Adam
from multi_categorical_gans.datasets.dataset import Dataset
from multi_categorical_gans.datasets.formats import data_formats, loaders
from multi_categorical_gans.methods.general.autoencoder import AutoEncoder
from multi_categorical_gans.utils.categorical import load_variable_sizes_from_metadata, categorical_variable_loss
from multi_categorical_gans.utils.commandline import DelayedKeyboardInterrupt, parse_int_list
from multi_categorical_gans.utils.cuda import to_cuda_if_available, to_cpu_if_available
from multi_categorical_gans.utils.initialization import load_or_initialize
from multi_categorical_gans.utils.logger import Logger
def pre_train(autoencoder,
              train_data,
              val_data,
              output_path,
              output_loss_path,
              batch_size=100,
              start_epoch=0,
              num_epochs=100,
              l2_regularization=0.001,
              learning_rate=0.001,
              variable_sizes=None,
              temperature=None
              ):
    """Pre-train the autoencoder by reconstruction, logging losses per epoch.

    Runs one training pass and one validation pass per epoch via
    ``pre_train_epoch`` and checkpoints the model after every epoch.

    :param autoencoder: AutoEncoder to pre-train.
    :param train_data: Dataset used for optimization.
    :param val_data: Dataset used only to report validation loss.
    :param output_path: file where the state_dict is saved each epoch.
    :param output_loss_path: CSV-style loss log path (appended when resuming).
    :param batch_size: samples per batch.
    :param start_epoch: epoch to resume from (affects logging/append mode).
    :param num_epochs: total number of epochs.
    :param l2_regularization: Adam weight decay.
    :param learning_rate: Adam learning rate.
    :param variable_sizes: categorical variable sizes (MC-MedGAN) or None.
    :param temperature: Gumbel-Softmax temperature (MC-MedGAN) or None.
    """
    autoencoder = to_cuda_if_available(autoencoder)
    optim = Adam(autoencoder.parameters(), weight_decay=l2_regularization, lr=learning_rate)
    logger = Logger(output_loss_path, append=start_epoch > 0)
    for epoch_index in range(start_epoch, num_epochs):
        logger.start_timer()
        train_loss = pre_train_epoch(autoencoder, train_data, batch_size, optim, variable_sizes, temperature)
        logger.log(epoch_index, num_epochs, "autoencoder", "train_mean_loss", np.mean(train_loss))
        logger.start_timer()
        # Passing optim=None makes pre_train_epoch run a pure validation pass.
        val_loss = pre_train_epoch(autoencoder, val_data, batch_size, None, variable_sizes, temperature)
        logger.log(epoch_index, num_epochs, "autoencoder", "validation_mean_loss", np.mean(val_loss))
        # save models for the epoch
        with DelayedKeyboardInterrupt():
            torch.save(autoencoder.state_dict(), output_path)
        logger.flush()
    logger.close()
def pre_train_epoch(autoencoder, data, batch_size, optim=None, variable_sizes=None, temperature=None):
    """Run one epoch over ``data`` and return the per-batch reconstruction losses.

    Trains when an optimizer is given; otherwise runs a pure evaluation pass.
    The original implementation called ``loss.backward()`` during validation
    too, which built the autograd graph and accumulated unused gradients;
    validation now runs under ``torch.no_grad()`` (loss values are unchanged).

    :param autoencoder: AutoEncoder being (pre-)trained.
    :param data: Dataset providing ``batch_iterator``.
    :param batch_size: samples per batch.
    :param optim: optimizer to step, or None for validation.
    :param variable_sizes: categorical variable sizes (MC-MedGAN) or None.
    :param temperature: Gumbel-Softmax temperature (MC-MedGAN) or None.
    :return: list of per-batch loss values (numpy scalars).
    """
    training = optim is not None
    autoencoder.train(mode=training)
    losses = []
    for batch in data.batch_iterator(batch_size):
        batch = Variable(torch.from_numpy(batch))
        batch = to_cuda_if_available(batch)
        if training:
            optim.zero_grad()
            _, batch_reconstructed = autoencoder(batch, training=True, temperature=temperature,
                                                 normalize_code=False)
            loss = categorical_variable_loss(batch_reconstructed, batch, variable_sizes)
            loss.backward()
            optim.step()
        else:
            # Validation: no gradients needed, just the loss value.
            with torch.no_grad():
                _, batch_reconstructed = autoencoder(batch, training=False, temperature=temperature,
                                                     normalize_code=False)
                loss = categorical_variable_loss(batch_reconstructed, batch, variable_sizes)
        loss = to_cpu_if_available(loss)
        losses.append(loss.data.numpy())
        del loss
    return losses
def main():
    """Command-line entry point for autoencoder pre-training.

    Loads the training matrix, splits off a validation share, builds the
    (optionally multi-categorical) autoencoder and runs ``pre_train``.
    Providing both ``--metadata`` and ``--temperature`` selects MC-MedGAN.
    """
    options_parser = argparse.ArgumentParser(description="Pre-train MedGAN or MC-MedGAN. "
                                                         + "Define 'metadata' and 'temperature' to use MC-MedGAN.")
    options_parser.add_argument("data", type=str, help="Training data. See 'data_format' parameter.")
    options_parser.add_argument("output_model", type=str, help="Model output file.")
    options_parser.add_argument("output_loss", type=str, help="Loss output file.")
    options_parser.add_argument("--input_model", type=str, help="Model input file.", default=None)
    options_parser.add_argument("--metadata", type=str,
                                help="Information about the categorical variables in json format." +
                                     " Only used if temperature is also provided.")
    options_parser.add_argument(
        "--validation_proportion",
        type=float,
        default=.1,
        help="Ratio of data for validation."
    )
    options_parser.add_argument(
        "--data_format",
        type=str,
        default="sparse",
        choices=data_formats,
        help="Either a dense numpy array, a sparse csr matrix or any of those formats in split into several files."
    )
    options_parser.add_argument(
        "--code_size",
        type=int,
        default=128,
        help="Dimension of the autoencoder latent space."
    )
    options_parser.add_argument(
        "--encoder_hidden_sizes",
        type=str,
        default="",
        help="Size of each hidden layer in the encoder separated by commas (no spaces)."
    )
    options_parser.add_argument(
        "--decoder_hidden_sizes",
        type=str,
        default="",
        help="Size of each hidden layer in the decoder separated by commas (no spaces)."
    )
    options_parser.add_argument(
        "--batch_size",
        type=int,
        default=100,
        help="Amount of samples per batch."
    )
    options_parser.add_argument(
        "--num_epochs",
        type=int,
        default=100,
        help="Number of epochs."
    )
    options_parser.add_argument(
        "--l2_regularization",
        type=float,
        default=0.001,
        help="L2 regularization weight for every parameter."
    )
    options_parser.add_argument(
        "--learning_rate",
        type=float,
        default=0.001,
        help="Adam learning rate."
    )
    options_parser.add_argument(
        "--temperature",
        type=float,
        default=None,
        help="Gumbel-Softmax temperature. Only used if metadata is also provided."
    )
    options_parser.add_argument("--seed", type=int, help="Random number generator seed.", default=42)
    options = options_parser.parse_args()
    # Seed every RNG involved so runs are reproducible.
    if options.seed is not None:
        np.random.seed(options.seed)
        torch.manual_seed(options.seed)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(options.seed)
    features = loaders[options.data_format](options.data)
    data = Dataset(features)
    train_data, val_data = data.split(1.0 - options.validation_proportion)
    # Both metadata and temperature must be given to enable the MC-MedGAN path.
    if options.metadata is not None and options.temperature is not None:
        variable_sizes = load_variable_sizes_from_metadata(options.metadata)
        temperature = options.temperature
    else:
        variable_sizes = None
        temperature = None
    autoencoder = AutoEncoder(
        features.shape[1],
        code_size=options.code_size,
        encoder_hidden_sizes=parse_int_list(options.encoder_hidden_sizes),
        decoder_hidden_sizes=parse_int_list(options.decoder_hidden_sizes),
        variable_sizes=variable_sizes
    )
    load_or_initialize(autoencoder, options.input_model)
    pre_train(
        autoencoder,
        train_data,
        val_data,
        options.output_model,
        options.output_loss,
        batch_size=options.batch_size,
        num_epochs=options.num_epochs,
        l2_regularization=options.l2_regularization,
        learning_rate=options.learning_rate,
        variable_sizes=variable_sizes,
        temperature=temperature
    )
if __name__ == "__main__":
    main()
| 7,066 | 31.122727 | 117 | py |
multi-categorical-gans | multi-categorical-gans-master/multi_categorical_gans/methods/medgan/generator.py | from __future__ import print_function
import torch.nn as nn
class Generator(nn.Module):
    """MedGAN generator: a stack of residual blocks over the latent code.

    Each block is Linear -> BatchNorm -> activation with a shortcut
    connection; hidden blocks use ReLU and the final block uses Tanh.
    """

    def __init__(self, code_size=128, num_hidden_layers=2, bn_decay=0.01):
        super(Generator, self).__init__()
        # Bug fix: the blocks used to be stored in ``self.modules``, which
        # shadowed nn.Module.modules() on the instance. A private name keeps
        # the standard nn.Module introspection API working.
        self._blocks = []
        self.batch_norms = []
        for layer_number in range(num_hidden_layers):
            self.add_generator_module("hidden_{:d}".format(layer_number + 1), code_size, nn.ReLU(), bn_decay)
        self.add_generator_module("output", code_size, nn.Tanh(), bn_decay)

    def add_generator_module(self, name, code_size, activation, bn_decay):
        """Register one residual block under ``name`` (keeps state_dict keys stable)."""
        batch_norm = nn.BatchNorm1d(code_size, momentum=(1 - bn_decay))
        module = nn.Sequential(
            nn.Linear(code_size, code_size, bias=False),  # bias is not necessary because of the batch normalization
            batch_norm,
            activation
        )
        self._blocks.append(module)
        self.add_module(name, module)
        self.batch_norms.append(batch_norm)

    def batch_norm_train(self, mode=True):
        """Toggle train/eval mode on the batch-norm layers only."""
        for batch_norm in self.batch_norms:
            batch_norm.train(mode=mode)

    def forward(self, noise):
        outputs = noise
        for module in self._blocks:
            # Cannot write "outputs += module(outputs)" because it is an inplace operation (not differentiable)
            outputs = module(outputs) + outputs  # shortcut connection
        return outputs
| 1,404 | 34.125 | 116 | py |
multi-categorical-gans | multi-categorical-gans-master/multi_categorical_gans/methods/medgan/trainer.py | from __future__ import division
from __future__ import print_function
import argparse
import torch
import numpy as np
from torch.autograd.variable import Variable
from torch.optim import Adam
from torch.nn import BCELoss
from multi_categorical_gans.datasets.dataset import Dataset
from multi_categorical_gans.datasets.formats import data_formats, loaders
from multi_categorical_gans.methods.general.autoencoder import AutoEncoder
from multi_categorical_gans.methods.medgan.discriminator import Discriminator
from multi_categorical_gans.methods.medgan.generator import Generator
from multi_categorical_gans.utils.categorical import load_variable_sizes_from_metadata
from multi_categorical_gans.utils.commandline import DelayedKeyboardInterrupt, parse_int_list
from multi_categorical_gans.utils.cuda import to_cuda_if_available, to_cpu_if_available, load_without_cuda
from multi_categorical_gans.utils.initialization import load_or_initialize
from multi_categorical_gans.utils.logger import Logger
def train(autoencoder,
          generator,
          discriminator,
          train_data,
          val_data,
          output_ae_path,
          output_gen_path,
          output_disc_path,
          output_loss_path,
          batch_size=1000,
          start_epoch=0,
          num_epochs=1000,
          num_disc_steps=2,
          num_gen_steps=1,
          code_size=128,
          l2_regularization=0.001,
          learning_rate=0.001,
          temperature=None
          ):
    """MedGAN adversarial training loop.

    The generator and the (pre-trained) autoencoder's decoder are optimized
    jointly against the discriminator. Each epoch alternates
    ``num_disc_steps`` discriminator batches with ``num_gen_steps`` generator
    batches, then reports discriminator accuracy on validation data and
    checkpoints all three models.

    :param autoencoder: pre-trained AutoEncoder (its decoder keeps training).
    :param generator: MedGAN generator mapping noise to codes.
    :param discriminator: MedGAN discriminator.
    :param train_data: Dataset used for optimization.
    :param val_data: Dataset used for the accuracy report only.
    :param output_ae_path: autoencoder checkpoint path (saved each epoch).
    :param output_gen_path: generator checkpoint path (saved each epoch).
    :param output_disc_path: discriminator checkpoint path (saved each epoch).
    :param output_loss_path: loss log path (appended when resuming).
    :param batch_size: samples per batch.
    :param start_epoch: epoch to resume from.
    :param num_epochs: total number of epochs.
    :param num_disc_steps: discriminator batches per generator update group.
    :param num_gen_steps: generator batches per group.
    :param code_size: dimensionality of the noise / latent code.
    :param l2_regularization: Adam weight decay for both optimizers.
    :param learning_rate: Adam learning rate for both optimizers.
    :param temperature: Gumbel-Softmax temperature (MC-MedGAN) or None.
    """
    autoencoder, generator, discriminator = to_cuda_if_available(autoencoder, generator, discriminator)
    # The decoder parameters are updated together with the generator.
    optim_gen = Adam(list(generator.parameters()) + list(autoencoder.decoder.parameters()),
                     weight_decay=l2_regularization, lr=learning_rate)
    optim_disc = Adam(discriminator.parameters(), weight_decay=l2_regularization, lr=learning_rate)
    criterion = BCELoss()
    logger = Logger(output_loss_path, append=start_epoch > 0)
    for epoch_index in range(start_epoch, num_epochs):
        logger.start_timer()
        # train
        autoencoder.train(mode=True)
        generator.train(mode=True)
        discriminator.train(mode=True)
        disc_losses = []
        gen_losses = []
        more_batches = True
        train_data_iterator = train_data.batch_iterator(batch_size)
        while more_batches:
            # train discriminator
            generator.batch_norm_train(mode=False)
            for _ in range(num_disc_steps):
                # next batch
                try:
                    batch = next(train_data_iterator)
                except StopIteration:
                    more_batches = False
                    break
                # using "one sided smooth labels" is one trick to improve GAN training
                label_zeros = Variable(torch.zeros(len(batch)))
                smooth_label_ones = Variable(torch.FloatTensor(len(batch)).uniform_(0.9, 1))
                label_zeros, smooth_label_ones = to_cuda_if_available(label_zeros, smooth_label_ones)
                optim_disc.zero_grad()
                # first train the discriminator only with real data
                real_features = Variable(torch.from_numpy(batch))
                real_features = to_cuda_if_available(real_features)
                real_pred = discriminator(real_features)
                real_loss = criterion(real_pred, smooth_label_ones)
                real_loss.backward()
                # then train the discriminator only with fake data
                noise = Variable(torch.FloatTensor(len(batch), code_size).normal_())
                noise = to_cuda_if_available(noise)
                fake_code = generator(noise)
                fake_features = autoencoder.decode(fake_code, training=True, temperature=temperature)
                fake_features = fake_features.detach()  # do not propagate to the generator
                fake_pred = discriminator(fake_features)
                fake_loss = criterion(fake_pred, label_zeros)
                fake_loss.backward()
                # finally update the discriminator weights
                # using two separated batches is another trick to improve GAN training
                optim_disc.step()
                disc_loss = real_loss + fake_loss
                disc_loss = to_cpu_if_available(disc_loss)
                disc_losses.append(disc_loss.data.numpy())
                del disc_loss
                del fake_loss
                del real_loss
            # train generator
            generator.batch_norm_train(mode=True)
            for _ in range(num_gen_steps):
                optim_gen.zero_grad()
                noise = Variable(torch.FloatTensor(len(batch), code_size).normal_())
                noise = to_cuda_if_available(noise)
                gen_code = generator(noise)
                gen_features = autoencoder.decode(gen_code, training=True, temperature=temperature)
                gen_pred = discriminator(gen_features)
                smooth_label_ones = Variable(torch.FloatTensor(len(batch)).uniform_(0.9, 1))
                smooth_label_ones = to_cuda_if_available(smooth_label_ones)
                # The generator is rewarded when the discriminator says "real".
                gen_loss = criterion(gen_pred, smooth_label_ones)
                gen_loss.backward()
                optim_gen.step()
                gen_loss = to_cpu_if_available(gen_loss)
                gen_losses.append(gen_loss.data.numpy())
                del gen_loss
        # validate discriminator
        autoencoder.train(mode=False)
        generator.train(mode=False)
        discriminator.train(mode=False)
        correct = 0.0
        total = 0.0
        for batch in val_data.batch_iterator(batch_size):
            # real data discriminator accuracy
            with torch.no_grad():
                real_features = Variable(torch.from_numpy(batch))
                real_features = to_cuda_if_available(real_features)
                real_pred = discriminator(real_features)
            real_pred = to_cpu_if_available(real_pred)
            correct += (real_pred.data.numpy().ravel() > .5).sum()
            total += len(real_pred)
            # fake data discriminator accuracy
            with torch.no_grad():
                noise = Variable(torch.FloatTensor(len(batch), code_size).normal_())
                noise = to_cuda_if_available(noise)
                fake_code = generator(noise)
                fake_features = autoencoder.decode(fake_code, training=False, temperature=temperature)
                fake_pred = discriminator(fake_features)
            fake_pred = to_cpu_if_available(fake_pred)
            correct += (fake_pred.data.numpy().ravel() < .5).sum()
            total += len(fake_pred)
        # log epoch metrics for current class
        logger.log(epoch_index, num_epochs, "discriminator", "train_mean_loss", np.mean(disc_losses))
        logger.log(epoch_index, num_epochs, "generator", "train_mean_loss", np.mean(gen_losses))
        logger.log(epoch_index, num_epochs, "discriminator", "validation_accuracy", correct / total)
        # save models for the epoch
        with DelayedKeyboardInterrupt():
            torch.save(autoencoder.state_dict(), output_ae_path)
            torch.save(generator.state_dict(), output_gen_path)
            torch.save(discriminator.state_dict(), output_disc_path)
        logger.flush()
    logger.close()
def main():
    """Command-line entry point for training MedGAN (or MC-MedGAN).

    Parses the options, seeds the RNGs, loads the training data and the
    pre-trained autoencoder, optionally resumes generator/discriminator
    checkpoints, and runs the adversarial training loop, writing the final
    models and the loss log to the given output paths.

    The MC-MedGAN variant is enabled only when BOTH ``--metadata`` and
    ``--temperature`` are provided; otherwise plain MedGAN is trained.
    """
    options_parser = argparse.ArgumentParser(description="Train MedGAN or MC-MedGAN. "
                                                         + "Define 'metadata' and 'temperature' to use MC-MedGAN.")
    options_parser.add_argument("data", type=str, help="Training data. See 'data_format' parameter.")
    options_parser.add_argument("input_autoencoder", type=str, help="Autoencoder input file.")
    options_parser.add_argument("output_autoencoder", type=str, help="Autoencoder output file.")
    options_parser.add_argument("output_generator", type=str, help="Generator output file.")
    options_parser.add_argument("output_discriminator", type=str, help="Discriminator output file.")
    options_parser.add_argument("output_loss", type=str, help="Loss output file.")
    options_parser.add_argument("--input_generator", type=str, help="Generator input file.", default=None)
    options_parser.add_argument("--input_discriminator", type=str, help="Discriminator input file.", default=None)
    options_parser.add_argument("--metadata", type=str,
                                help="Information about the categorical variables in json format." +
                                     " Only used if temperature is also provided.")
    options_parser.add_argument(
        "--validation_proportion", type=float,
        default=.1,
        help="Ratio of data for validation."
    )
    options_parser.add_argument(
        "--data_format",
        type=str,
        default="sparse",
        choices=data_formats,
        help="Either a dense numpy array, a sparse csr matrix or any of those formats in split into several files."
    )
    options_parser.add_argument(
        "--code_size",
        type=int,
        default=128,
        help="Dimension of the autoencoder latent space."
    )
    options_parser.add_argument(
        "--encoder_hidden_sizes",
        type=str,
        default="",
        help="Size of each hidden layer in the encoder separated by commas (no spaces)."
    )
    options_parser.add_argument(
        "--decoder_hidden_sizes",
        type=str,
        default="",
        help="Size of each hidden layer in the decoder separated by commas (no spaces)."
    )
    options_parser.add_argument(
        "--batch_size",
        type=int,
        default=1000,
        help="Amount of samples per batch."
    )
    options_parser.add_argument(
        "--start_epoch",
        type=int,
        default=0,
        help="Starting epoch."
    )
    options_parser.add_argument(
        "--num_epochs",
        type=int,
        default=1000,
        help="Number of epochs."
    )
    options_parser.add_argument(
        "--l2_regularization",
        type=float,
        default=0.001,
        help="L2 regularization weight for every parameter."
    )
    options_parser.add_argument(
        "--learning_rate",
        type=float,
        default=0.001,
        help="Adam learning rate."
    )
    options_parser.add_argument(
        "--generator_hidden_layers",
        type=int,
        default=2,
        help="Number of hidden layers in the generator."
    )
    options_parser.add_argument(
        "--generator_bn_decay",
        type=float,
        default=0.99,
        help="Generator batch normalization decay."
    )
    options_parser.add_argument(
        "--discriminator_hidden_sizes",
        type=str,
        default="256,128",
        help="Size of each hidden layer in the discriminator separated by commas (no spaces)."
    )
    options_parser.add_argument(
        "--num_discriminator_steps",
        type=int,
        default=2,
        help="Number of successive training steps for the discriminator."
    )
    options_parser.add_argument(
        "--num_generator_steps",
        type=int,
        default=1,
        help="Number of successive training steps for the generator."
    )
    options_parser.add_argument(
        "--temperature",
        type=float,
        default=None,
        help="Gumbel-Softmax temperature. Only used if metadata is also provided."
    )
    options_parser.add_argument("--seed", type=int, help="Random number generator seed.", default=42)
    options = options_parser.parse_args()
    # Seed every RNG (numpy, torch CPU, torch CUDA) for reproducibility.
    if options.seed is not None:
        np.random.seed(options.seed)
        torch.manual_seed(options.seed)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(options.seed)
    features = loaders[options.data_format](options.data)
    data = Dataset(features)
    train_data, val_data = data.split(1.0 - options.validation_proportion)
    # MC-MedGAN mode requires both metadata and temperature; otherwise the
    # categorical machinery is disabled.
    if options.metadata is not None and options.temperature is not None:
        variable_sizes = load_variable_sizes_from_metadata(options.metadata)
        temperature = options.temperature
    else:
        variable_sizes = None
        temperature = None
    autoencoder = AutoEncoder(
        features.shape[1],
        code_size=options.code_size,
        encoder_hidden_sizes=parse_int_list(options.encoder_hidden_sizes),
        decoder_hidden_sizes=parse_int_list(options.decoder_hidden_sizes),
        variable_sizes=variable_sizes
    )
    # The autoencoder must already be pre-trained: its weights are loaded, not
    # initialized from scratch.
    load_without_cuda(autoencoder, options.input_autoencoder)
    generator = Generator(
        code_size=options.code_size,
        num_hidden_layers=options.generator_hidden_layers,
        bn_decay=options.generator_bn_decay
    )
    load_or_initialize(generator, options.input_generator)
    discriminator = Discriminator(
        features.shape[1],
        hidden_sizes=parse_int_list(options.discriminator_hidden_sizes)
    )
    load_or_initialize(discriminator, options.input_discriminator)
    train(
        autoencoder,
        generator,
        discriminator,
        train_data,
        val_data,
        options.output_autoencoder,
        options.output_generator,
        options.output_discriminator,
        options.output_loss,
        batch_size=options.batch_size,
        start_epoch=options.start_epoch,
        num_epochs=options.num_epochs,
        num_disc_steps=options.num_discriminator_steps,
        num_gen_steps=options.num_generator_steps,
        code_size=options.code_size,
        l2_regularization=options.l2_regularization,
        learning_rate=options.learning_rate,
        temperature=temperature
    )
if __name__ == "__main__":
    main()
| 13,686 | 34.18509 | 115 | py |
multi-categorical-gans | multi-categorical-gans-master/multi_categorical_gans/methods/arae/sampler.py | from __future__ import print_function
import argparse
import torch
import numpy as np
from torch.autograd.variable import Variable
from multi_categorical_gans.methods.general.autoencoder import AutoEncoder
from multi_categorical_gans.methods.general.generator import Generator
from multi_categorical_gans.utils.categorical import load_variable_sizes_from_metadata
from multi_categorical_gans.utils.commandline import parse_int_list
from multi_categorical_gans.utils.cuda import to_cuda_if_available, to_cpu_if_available, load_without_cuda
def sample(autoencoder, generator, num_samples, num_features, batch_size=100, noise_size=128, temperature=None,
           round_features=False):
    """Draw ``num_samples`` synthetic rows from a trained ARAE model.

    Gaussian noise is mapped to latent codes by the generator and decoded
    into feature vectors by the autoencoder.  When ``round_features`` is
    True (binary-output ARAE) the decoded values are rounded.
    Returns a float32 numpy array of shape (num_samples, num_features).
    """
    autoencoder, generator = to_cuda_if_available(autoencoder, generator)
    # inference mode for both networks
    autoencoder.train(mode=False)
    generator.train(mode=False)
    samples = np.zeros((num_samples, num_features), dtype=np.float32)
    for start in range(0, num_samples, batch_size):
        with torch.no_grad():
            noise = to_cuda_if_available(Variable(torch.FloatTensor(batch_size, noise_size).normal_()))
            latent_code = generator(noise)
            decoded = autoencoder.decode(latent_code, training=False, temperature=temperature)
        decoded = to_cpu_if_available(decoded).data.numpy()
        # rounding only applies to the binary-output (non-Gumbel) variant
        if round_features:
            decoded = np.round(decoded)
        # the last batch may be partial: keep only what is still missing
        end = min(start + batch_size, num_samples)
        samples[start:end, :] = decoded[:end - start, :]
    return samples
def main():
    """Command-line entry point: sample synthetic rows with a trained ARAE.

    Loads the autoencoder and generator checkpoints, draws ``num_samples``
    rows with :func:`sample`, and stores them in a ``.npy`` file.  Passing
    both ``--metadata`` and ``--temperature`` selects the multi-categorical
    (Gumbel-Softmax) variant; otherwise outputs are rounded to binary.
    """
    options_parser = argparse.ArgumentParser(description="Sample data with ARAE.")
    options_parser.add_argument("autoencoder", type=str, help="Autoencoder input file.")
    options_parser.add_argument("generator", type=str, help="Generator input file.")
    options_parser.add_argument("num_samples", type=int, help="Number of output samples.")
    options_parser.add_argument("num_features", type=int, help="Number of output features.")
    options_parser.add_argument("data", type=str, help="Output data.")
    options_parser.add_argument("--metadata", type=str,
                                help="Information about the categorical variables in json format.")
    options_parser.add_argument(
        "--code_size",
        type=int,
        default=128,
        help="Dimension of the autoencoder latent space."
    )
    options_parser.add_argument(
        "--noise_size",
        type=int,
        default=128,
        help="Dimension of the generator input noise."
    )
    options_parser.add_argument(
        "--encoder_hidden_sizes",
        type=str,
        default="",
        help="Size of each hidden layer in the encoder separated by commas (no spaces)."
    )
    options_parser.add_argument(
        "--decoder_hidden_sizes",
        type=str,
        default="",
        help="Size of each hidden layer in the decoder separated by commas (no spaces)."
    )
    options_parser.add_argument(
        "--batch_size",
        type=int,
        default=100,
        help="Amount of samples per batch."
    )
    options_parser.add_argument(
        "--generator_hidden_sizes",
        type=str,
        default="256,128",
        help="Size of each hidden layer in the generator separated by commas (no spaces)."
    )
    options_parser.add_argument(
        "--generator_bn_decay",
        type=float,
        default=0.01,
        help="Generator batch normalization decay."
    )
    options_parser.add_argument(
        "--temperature",
        type=float,
        default=None,
        help="Gumbel-Softmax temperature."
    )
    options = options_parser.parse_args()
    # MC-ARAE mode only when both metadata and temperature are provided.
    if options.metadata is not None and options.temperature is not None:
        variable_sizes = load_variable_sizes_from_metadata(options.metadata)
        temperature = options.temperature
    else:
        variable_sizes = None
        temperature = None
    autoencoder = AutoEncoder(
        options.num_features,
        code_size=options.code_size,
        encoder_hidden_sizes=parse_int_list(options.encoder_hidden_sizes),
        decoder_hidden_sizes=parse_int_list(options.decoder_hidden_sizes),
        variable_sizes=variable_sizes
    )
    load_without_cuda(autoencoder, options.autoencoder)
    generator = Generator(
        options.noise_size,
        options.code_size,
        hidden_sizes=parse_int_list(options.generator_hidden_sizes),
        bn_decay=options.generator_bn_decay
    )
    load_without_cuda(generator, options.generator)
    data = sample(
        autoencoder,
        generator,
        options.num_samples,
        options.num_features,
        batch_size=options.batch_size,
        noise_size=options.noise_size,
        temperature=temperature,
        # without a Gumbel temperature, the outputs are binary and get rounded
        round_features=(temperature is None)
    )
    np.save(options.data, data)
if __name__ == "__main__":
    main()
| 5,188 | 30.640244 | 111 | py |
multi-categorical-gans | multi-categorical-gans-master/multi_categorical_gans/methods/arae/trainer.py | from __future__ import print_function
import argparse
import torch
import numpy as np
from torch.autograd.variable import Variable
from torch.optim import Adam
from multi_categorical_gans.datasets.dataset import Dataset
from multi_categorical_gans.datasets.formats import data_formats, loaders
from multi_categorical_gans.methods.general.autoencoder import AutoEncoder
from multi_categorical_gans.methods.general.generator import Generator
from multi_categorical_gans.methods.general.discriminator import Discriminator
from multi_categorical_gans.methods.general.wgan_gp import calculate_gradient_penalty
from multi_categorical_gans.utils.categorical import load_variable_sizes_from_metadata, categorical_variable_loss
from multi_categorical_gans.utils.commandline import DelayedKeyboardInterrupt, parse_int_list
from multi_categorical_gans.utils.cuda import to_cuda_if_available, to_cpu_if_available
from multi_categorical_gans.utils.initialization import load_or_initialize
from multi_categorical_gans.utils.logger import Logger
def add_noise_to_code(code, noise_radius):
    """Regularize a latent code by adding zero-mean Gaussian noise.

    ``noise_radius`` is the standard deviation of the noise; when it is not
    strictly positive, the code is returned unchanged.
    """
    if not noise_radius > 0:
        # no regularization requested
        return code
    gauss_noise = torch.normal(torch.zeros_like(code), noise_radius)
    return code + to_cuda_if_available(Variable(gauss_noise))
def train(autoencoder,
          generator,
          discriminator,
          train_data,
          val_data,
          output_ae_path,
          output_gen_path,
          output_disc_path,
          output_loss_path,
          batch_size=1000,
          start_epoch=0,
          num_epochs=1000,
          num_ae_steps=1,
          num_disc_steps=2,
          num_gen_steps=1,
          noise_size=128,
          l2_regularization=0.001,
          learning_rate=0.001,
          ae_noise_radius=0.2,
          ae_noise_anneal=0.995,
          normalize_code=True,
          variable_sizes=None,
          temperature=None,
          penalty=0.1
          ):
    """Adversarially train an ARAE (autoencoder + generator + WGAN-GP critic).

    Each epoch interleaves, per batch group: ``num_ae_steps`` reconstruction
    steps on the autoencoder, ``num_disc_steps`` critic steps (where the
    encoder is also updated with the critic's gradients), and
    ``num_gen_steps`` generator steps.  Checkpoints and per-epoch mean losses
    are written to the given output paths after every epoch, and the latent
    noise radius is annealed by ``ae_noise_anneal``.

    NOTE(review): ``val_data`` is accepted but never used in this loop.
    """
    autoencoder, generator, discriminator = to_cuda_if_available(autoencoder, generator, discriminator)
    optim_ae = Adam(autoencoder.parameters(), weight_decay=l2_regularization, lr=learning_rate)
    optim_gen = Adam(generator.parameters(), weight_decay=l2_regularization, lr=learning_rate)
    optim_disc = Adam(discriminator.parameters(), weight_decay=l2_regularization, lr=learning_rate)
    # append to the existing log when resuming from a later epoch
    logger = Logger(output_loss_path, append=start_epoch > 0)
    for epoch_index in range(start_epoch, num_epochs):
        logger.start_timer()
        # train
        autoencoder.train(mode=True)
        generator.train(mode=True)
        discriminator.train(mode=True)
        ae_losses = []
        disc_losses = []
        gen_losses = []
        more_batches = True
        train_data_iterator = train_data.batch_iterator(batch_size)
        while more_batches:
            # train autoencoder
            for _ in range(num_ae_steps):
                try:
                    batch = next(train_data_iterator)
                except StopIteration:
                    more_batches = False
                    break
                autoencoder.zero_grad()
                batch_original = Variable(torch.from_numpy(batch))
                batch_original = to_cuda_if_available(batch_original)
                batch_code = autoencoder.encode(batch_original, normalize_code=normalize_code)
                # Gaussian noise on the code regularizes the latent space.
                batch_code = add_noise_to_code(batch_code, ae_noise_radius)
                batch_reconstructed = autoencoder.decode(batch_code, training=True, temperature=temperature)
                ae_loss = categorical_variable_loss(batch_reconstructed, batch_original, variable_sizes)
                ae_loss.backward()
                optim_ae.step()
                ae_loss = to_cpu_if_available(ae_loss)
                ae_losses.append(ae_loss.data.numpy())
            # train discriminator
            for _ in range(num_disc_steps):
                try:
                    batch = next(train_data_iterator)
                except StopIteration:
                    more_batches = False
                    break
                discriminator.zero_grad()
                autoencoder.zero_grad()
                # first train the discriminator only with real data
                real_features = Variable(torch.from_numpy(batch))
                real_features = to_cuda_if_available(real_features)
                real_code = autoencoder.encode(real_features, normalize_code=normalize_code)
                real_code = add_noise_to_code(real_code, ae_noise_radius)
                real_pred = discriminator(real_code)
                real_loss = - real_pred.mean(0).view(1)
                real_loss.backward()
                # then train the discriminator only with fake data
                noise = Variable(torch.FloatTensor(len(batch), noise_size).normal_())
                noise = to_cuda_if_available(noise)
                fake_code = generator(noise)
                fake_code = fake_code.detach()  # do not propagate to the generator
                fake_pred = discriminator(fake_code)
                fake_loss = fake_pred.mean(0).view(1)
                fake_loss.backward()
                # this is the magic from WGAN-GP
                gradient_penalty = calculate_gradient_penalty(discriminator, penalty, real_code, fake_code)
                gradient_penalty.backward()
                # ARAE detail: the encoder is updated adversarially here too,
                # since real_loss.backward() reached it through real_code.
                optim_ae.step()
                optim_disc.step()
                disc_loss = real_loss + fake_loss + gradient_penalty
                disc_loss = to_cpu_if_available(disc_loss)
                disc_losses.append(disc_loss.data.numpy())
                del disc_loss
                del gradient_penalty
                del fake_loss
                del real_loss
            # train generator
            # NOTE(review): this loop reuses `batch` from the previous step
            # only for its length; if the iterator is exhausted immediately on
            # the first pass, `batch` would be undefined here — confirm the
            # dataset is never empty.
            for _ in range(num_gen_steps):
                generator.zero_grad()
                noise = Variable(torch.FloatTensor(len(batch), noise_size).normal_())
                noise = to_cuda_if_available(noise)
                gen_code = generator(noise)
                fake_pred = discriminator(gen_code)
                fake_loss = - fake_pred.mean(0).view(1)
                fake_loss.backward()
                optim_gen.step()
                fake_loss = to_cpu_if_available(fake_loss)
                gen_losses.append(fake_loss.data.numpy()[0])
                del fake_loss
        # log epoch metrics for current class
        logger.log(epoch_index, num_epochs, "autoencoder", "train_mean_loss", np.mean(ae_losses))
        logger.log(epoch_index, num_epochs, "discriminator", "train_mean_loss", np.mean(disc_losses))
        logger.log(epoch_index, num_epochs, "generator", "train_mean_loss", np.mean(gen_losses))
        # save models for the epoch
        with DelayedKeyboardInterrupt():
            torch.save(autoencoder.state_dict(), output_ae_path)
            torch.save(generator.state_dict(), output_gen_path)
            torch.save(discriminator.state_dict(), output_disc_path)
            logger.flush()
        ae_noise_radius *= ae_noise_anneal
    logger.close()
def main():
    """Command-line entry point for training ARAE (or MC-ARAE).

    Parses the options, seeds the RNGs, loads the training data, builds (or
    resumes) the autoencoder, generator and critic, and runs :func:`train`.
    The MC-ARAE variant is enabled only when BOTH ``--metadata`` and
    ``--temperature`` are provided.
    """
    options_parser = argparse.ArgumentParser(description="Train ARAE or MC-ARAE. "
                                                         + "Define 'metadata' and 'temperature' to use MC-ARAE.")
    options_parser.add_argument("data", type=str, help="Training data. See 'data_format' parameter.")
    options_parser.add_argument("output_autoencoder", type=str, help="Autoencoder output file.")
    options_parser.add_argument("output_generator", type=str, help="Generator output file.")
    options_parser.add_argument("output_discriminator", type=str, help="Discriminator output file.")
    options_parser.add_argument("output_loss", type=str, help="Loss output file.")
    options_parser.add_argument("--input_autoencoder", type=str, help="Autoencoder input file.", default=None)
    options_parser.add_argument("--input_generator", type=str, help="Generator input file.", default=None)
    options_parser.add_argument("--input_discriminator", type=str, help="Discriminator input file.", default=None)
    options_parser.add_argument("--metadata", type=str,
                                help="Information about the categorical variables in json format.")
    options_parser.add_argument(
        "--validation_proportion", type=float,
        default=.1,
        help="Ratio of data for validation."
    )
    options_parser.add_argument(
        "--data_format",
        type=str,
        default="sparse",
        choices=data_formats,
        help="Either a dense numpy array or a sparse csr matrix."
    )
    options_parser.add_argument(
        "--code_size",
        type=int,
        default=128,
        help="Dimension of the autoencoder latent space."
    )
    options_parser.add_argument(
        "--noise_size",
        type=int,
        default=128,
        help="Dimension of the generator input noise."
    )
    options_parser.add_argument(
        "--encoder_hidden_sizes",
        type=str,
        default="",
        help="Size of each hidden layer in the encoder separated by commas (no spaces)."
    )
    options_parser.add_argument(
        "--decoder_hidden_sizes",
        type=str,
        default="",
        help="Size of each hidden layer in the decoder separated by commas (no spaces)."
    )
    options_parser.add_argument(
        "--batch_size",
        type=int,
        default=100,
        help="Amount of samples per batch."
    )
    options_parser.add_argument(
        "--start_epoch",
        type=int,
        default=0,
        help="Starting epoch."
    )
    options_parser.add_argument(
        "--num_epochs",
        type=int,
        default=5000,
        help="Number of epochs."
    )
    options_parser.add_argument(
        "--l2_regularization",
        type=float,
        default=0,
        help="L2 regularization weight for every parameter."
    )
    options_parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-5,
        help="Adam learning rate."
    )
    options_parser.add_argument(
        "--generator_hidden_sizes",
        type=str,
        default="100,100,100",
        help="Size of each hidden layer in the generator separated by commas (no spaces)."
    )
    options_parser.add_argument(
        "--bn_decay",
        type=float,
        default=0.9,
        help="Batch normalization decay for the generator and discriminator."
    )
    options_parser.add_argument(
        "--discriminator_hidden_sizes",
        type=str,
        default="100",
        help="Size of each hidden layer in the discriminator separated by commas (no spaces)."
    )
    options_parser.add_argument(
        "--num_autoencoder_steps",
        type=int,
        default=1,
        help="Number of successive training steps for the autoencoder."
    )
    options_parser.add_argument(
        "--num_discriminator_steps",
        type=int,
        default=1,
        help="Number of successive training steps for the discriminator."
    )
    options_parser.add_argument(
        "--num_generator_steps",
        type=int,
        default=1,
        help="Number of successive training steps for the generator."
    )
    options_parser.add_argument(
        "--autoencoder_noise_radius",
        type=float,
        default=0,
        help="Gaussian noise standard deviation for the latent code (autoencoder regularization)."
    )
    options_parser.add_argument(
        "--autoencoder_noise_anneal",
        type=float,
        default=0.995,
        help="Anneal the noise radius by this value after every epoch."
    )
    options_parser.add_argument(
        "--temperature",
        type=float,
        default=None,
        help="Gumbel-Softmax temperature."
    )
    options_parser.add_argument(
        "--penalty",
        type=float,
        default=0.1,
        help="WGAN-GP gradient penalty lambda."
    )
    options_parser.add_argument("--seed", type=int, help="Random number generator seed.", default=42)
    options = options_parser.parse_args()
    # Seed every RNG (numpy, torch CPU, torch CUDA) for reproducibility.
    if options.seed is not None:
        np.random.seed(options.seed)
        torch.manual_seed(options.seed)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(options.seed)
    features = loaders[options.data_format](options.data)
    data = Dataset(features)
    train_data, val_data = data.split(1.0 - options.validation_proportion)
    # MC-ARAE mode only when both metadata and temperature are provided.
    if options.metadata is not None and options.temperature is not None:
        variable_sizes = load_variable_sizes_from_metadata(options.metadata)
        temperature = options.temperature
    else:
        variable_sizes = None
        temperature = None
    autoencoder = AutoEncoder(
        features.shape[1],
        code_size=options.code_size,
        encoder_hidden_sizes=parse_int_list(options.encoder_hidden_sizes),
        decoder_hidden_sizes=parse_int_list(options.decoder_hidden_sizes),
        variable_sizes=variable_sizes
    )
    load_or_initialize(autoencoder, options.input_autoencoder)
    generator = Generator(
        options.noise_size,
        options.code_size,
        hidden_sizes=parse_int_list(options.generator_hidden_sizes),
        bn_decay=options.bn_decay
    )
    load_or_initialize(generator, options.input_generator)
    discriminator = Discriminator(
        options.code_size,
        hidden_sizes=parse_int_list(options.discriminator_hidden_sizes),
        bn_decay=0,  # no batch normalization for the critic
        critic=True
    )
    load_or_initialize(discriminator, options.input_discriminator)
    train(
        autoencoder,
        generator,
        discriminator,
        train_data,
        val_data,
        options.output_autoencoder,
        options.output_generator,
        options.output_discriminator,
        options.output_loss,
        batch_size=options.batch_size,
        start_epoch=options.start_epoch,
        num_epochs=options.num_epochs,
        num_ae_steps=options.num_autoencoder_steps,
        num_disc_steps=options.num_discriminator_steps,
        num_gen_steps=options.num_generator_steps,
        noise_size=options.noise_size,
        l2_regularization=options.l2_regularization,
        learning_rate=options.learning_rate,
        ae_noise_radius=options.autoencoder_noise_radius,
        ae_noise_anneal=options.autoencoder_noise_anneal,
        variable_sizes=variable_sizes,
        temperature=temperature,
        penalty=options.penalty
    )
if __name__ == "__main__":
    main()
| 14,552 | 32.6875 | 114 | py |
multi-categorical-gans | multi-categorical-gans-master/multi_categorical_gans/methods/mc_wgan_gp/sampler.py | from __future__ import print_function
import argparse
import torch
import numpy as np
from torch.autograd.variable import Variable
from multi_categorical_gans.methods.general.generator import Generator
from multi_categorical_gans.utils.categorical import load_variable_sizes_from_metadata
from multi_categorical_gans.utils.commandline import parse_int_list
from multi_categorical_gans.utils.cuda import to_cuda_if_available, to_cpu_if_available, load_without_cuda
def sample(generator, num_samples, num_features, batch_size=100, noise_size=128):
    """Draw ``num_samples`` synthetic rows from a trained generator.

    Gaussian noise batches are pushed through the generator in inference
    mode.  Returns a float32 numpy array of shape
    (num_samples, num_features).
    """
    generator = to_cuda_if_available(generator)
    generator.train(mode=False)
    samples = np.zeros((num_samples, num_features), dtype=np.float32)
    for start in range(0, num_samples, batch_size):
        with torch.no_grad():
            noise = to_cuda_if_available(Variable(torch.FloatTensor(batch_size, noise_size).normal_()))
            generated = generator(noise, training=False)
        generated = to_cpu_if_available(generated).data.numpy()
        # the last batch may be partial: keep only what is still missing
        end = min(start + batch_size, num_samples)
        samples[start:end, :] = generated[:end - start, :]
    return samples
def main():
    """Command-line entry point: sample synthetic rows from a trained
    MC-WGAN-GP generator and save them to a ``.npy`` file.
    """
    # Fixed: the description previously read "Sample data with MedGAN." — a
    # copy-paste leftover from the MedGAN sampler; this module belongs to the
    # MC-WGAN-GP method.
    options_parser = argparse.ArgumentParser(description="Sample data with MC-WGAN-GP.")
    options_parser.add_argument("generator", type=str, help="Generator input file.")
    options_parser.add_argument("metadata", type=str,
                                help="Information about the categorical variables in json format.")
    options_parser.add_argument("num_samples", type=int, help="Number of output samples.")
    options_parser.add_argument("num_features", type=int, help="Number of output features.")
    options_parser.add_argument("data", type=str, help="Output data.")
    options_parser.add_argument(
        "--noise_size",
        type=int,
        default=128,
        help="Dimension of the generator input noise."
    )
    options_parser.add_argument(
        "--batch_size",
        type=int,
        default=100,
        help="Amount of samples per batch."
    )
    options_parser.add_argument(
        "--generator_hidden_sizes",
        type=str,
        default="256,128",
        help="Size of each hidden layer in the generator separated by commas (no spaces)."
    )
    options_parser.add_argument(
        "--generator_bn_decay",
        type=float,
        default=0.01,
        help="Generator batch normalization decay."
    )
    options = options_parser.parse_args()
    # The metadata supplies the per-variable sizes the generator was built
    # with, so the architecture matches the stored checkpoint.
    generator = Generator(
        options.noise_size,
        load_variable_sizes_from_metadata(options.metadata),
        hidden_sizes=parse_int_list(options.generator_hidden_sizes),
        bn_decay=options.generator_bn_decay
    )
    load_without_cuda(generator, options.generator)
    data = sample(
        generator,
        options.num_samples,
        options.num_features,
        batch_size=options.batch_size,
        noise_size=options.noise_size
    )
    np.save(options.data, data)
if __name__ == "__main__":
    main()
| 3,285 | 29.71028 | 106 | py |
multi-categorical-gans | multi-categorical-gans-master/multi_categorical_gans/methods/mc_wgan_gp/trainer.py | from __future__ import print_function
import argparse
import torch
import numpy as np
from torch.autograd.variable import Variable
from torch.optim import Adam
from multi_categorical_gans.datasets.dataset import Dataset
from multi_categorical_gans.datasets.formats import data_formats, loaders
from multi_categorical_gans.methods.general.discriminator import Discriminator
from multi_categorical_gans.methods.general.generator import Generator
from multi_categorical_gans.methods.general.wgan_gp import calculate_gradient_penalty
from multi_categorical_gans.utils.categorical import load_variable_sizes_from_metadata
from multi_categorical_gans.utils.commandline import DelayedKeyboardInterrupt, parse_int_list
from multi_categorical_gans.utils.cuda import to_cuda_if_available, to_cpu_if_available
from multi_categorical_gans.utils.initialization import load_or_initialize
from multi_categorical_gans.utils.logger import Logger
def train(generator,
          discriminator,
          train_data,
          val_data,
          output_gen_path,
          output_disc_path,
          output_loss_path,
          batch_size=1000,
          start_epoch=0,
          num_epochs=1000,
          num_disc_steps=2,
          num_gen_steps=1,
          noise_size=128,
          l2_regularization=0.001,
          learning_rate=0.001,
          penalty=0.1
          ):
    """Train a generator against a WGAN-GP critic.

    Each epoch interleaves ``num_disc_steps`` critic updates (real batch,
    fake batch, plus gradient penalty) with ``num_gen_steps`` generator
    updates, until the batch iterator is exhausted.  Checkpoints and
    per-epoch mean losses are written after every epoch.

    NOTE(review): ``val_data`` is accepted but never used in this loop.
    """
    generator, discriminator = to_cuda_if_available(generator, discriminator)
    optim_gen = Adam(generator.parameters(), weight_decay=l2_regularization, lr=learning_rate)
    optim_disc = Adam(discriminator.parameters(), weight_decay=l2_regularization, lr=learning_rate)
    # append to the existing log when resuming from a later epoch
    logger = Logger(output_loss_path, append=start_epoch > 0)
    for epoch_index in range(start_epoch, num_epochs):
        logger.start_timer()
        # train
        generator.train(mode=True)
        discriminator.train(mode=True)
        disc_losses = []
        gen_losses = []
        more_batches = True
        train_data_iterator = train_data.batch_iterator(batch_size)
        while more_batches:
            # train discriminator
            for _ in range(num_disc_steps):
                # next batch
                try:
                    batch = next(train_data_iterator)
                except StopIteration:
                    more_batches = False
                    break
                optim_disc.zero_grad()
                # first train the discriminator only with real data
                real_features = Variable(torch.from_numpy(batch))
                real_features = to_cuda_if_available(real_features)
                real_pred = discriminator(real_features)
                real_loss = - real_pred.mean(0).view(1)
                real_loss.backward()
                # then train the discriminator only with fake data
                noise = Variable(torch.FloatTensor(len(batch), noise_size).normal_())
                noise = to_cuda_if_available(noise)
                fake_features = generator(noise, training=True)
                fake_features = fake_features.detach()  # do not propagate to the generator
                fake_pred = discriminator(fake_features)
                fake_loss = fake_pred.mean(0).view(1)
                fake_loss.backward()
                # this is the magic from WGAN-GP
                gradient_penalty = calculate_gradient_penalty(discriminator, penalty, real_features, fake_features)
                gradient_penalty.backward()
                # finally update the discriminator weights
                # using two separated batches is another trick to improve GAN training
                optim_disc.step()
                disc_loss = real_loss + fake_loss + gradient_penalty
                disc_loss = to_cpu_if_available(disc_loss)
                disc_losses.append(disc_loss.data.numpy())
                del disc_loss
                del gradient_penalty
                del fake_loss
                del real_loss
            # train generator
            # NOTE(review): `batch` from the critic loop is reused here only
            # for its length; confirm the dataset is never empty, otherwise
            # `batch` would be undefined on the first pass.
            for _ in range(num_gen_steps):
                optim_gen.zero_grad()
                noise = Variable(torch.FloatTensor(len(batch), noise_size).normal_())
                noise = to_cuda_if_available(noise)
                gen_features = generator(noise, training=True)
                fake_pred = discriminator(gen_features)
                fake_loss = - fake_pred.mean(0).view(1)
                fake_loss.backward()
                optim_gen.step()
                fake_loss = to_cpu_if_available(fake_loss)
                gen_losses.append(fake_loss.data.numpy())
                del fake_loss
        # log epoch metrics for current class
        logger.log(epoch_index, num_epochs, "discriminator", "train_mean_loss", np.mean(disc_losses))
        logger.log(epoch_index, num_epochs, "generator", "train_mean_loss", np.mean(gen_losses))
        # save models for the epoch
        with DelayedKeyboardInterrupt():
            torch.save(generator.state_dict(), output_gen_path)
            torch.save(discriminator.state_dict(), output_disc_path)
            logger.flush()
    logger.close()
def main():
    """Command-line entry point for training the MC-WGAN-GP model.

    Parses the options, seeds the RNGs, loads the training data and the
    per-variable sizes from the metadata, builds (or resumes) the generator
    and critic, and runs :func:`train`.
    """
    options_parser = argparse.ArgumentParser(description="Train Gumbel generator and discriminator.")
    options_parser.add_argument("data", type=str, help="Training data. See 'data_format' parameter.")
    options_parser.add_argument("metadata", type=str,
                                help="Information about the categorical variables in json format.")
    options_parser.add_argument("output_generator", type=str, help="Generator output file.")
    options_parser.add_argument("output_discriminator", type=str, help="Discriminator output file.")
    options_parser.add_argument("output_loss", type=str, help="Loss output file.")
    options_parser.add_argument("--input_generator", type=str, help="Generator input file.", default=None)
    options_parser.add_argument("--input_discriminator", type=str, help="Discriminator input file.", default=None)
    options_parser.add_argument(
        "--validation_proportion", type=float,
        default=.1,
        help="Ratio of data for validation."
    )
    options_parser.add_argument(
        "--data_format",
        type=str,
        default="sparse",
        choices=data_formats,
        help="Either a dense numpy array, a sparse csr matrix or any of those formats in split into several files."
    )
    options_parser.add_argument(
        "--noise_size",
        type=int,
        default=128,
        help=""
    )
    options_parser.add_argument(
        "--batch_size",
        type=int,
        default=1000,
        help="Amount of samples per batch."
    )
    options_parser.add_argument(
        "--start_epoch",
        type=int,
        default=0,
        help="Starting epoch."
    )
    options_parser.add_argument(
        "--num_epochs",
        type=int,
        default=1000,
        help="Number of epochs."
    )
    options_parser.add_argument(
        "--l2_regularization",
        type=float,
        default=0.001,
        help="L2 regularization weight for every parameter."
    )
    options_parser.add_argument(
        "--learning_rate",
        type=float,
        default=0.001,
        help="Adam learning rate."
    )
    options_parser.add_argument(
        "--generator_hidden_sizes",
        type=str,
        default="256,128",
        help="Size of each hidden layer in the generator separated by commas (no spaces)."
    )
    options_parser.add_argument(
        "--bn_decay",
        type=float,
        default=0.9,
        help="Batch normalization decay for the generator and discriminator."
    )
    options_parser.add_argument(
        "--discriminator_hidden_sizes",
        type=str,
        default="256,128",
        help="Size of each hidden layer in the discriminator separated by commas (no spaces)."
    )
    options_parser.add_argument(
        "--num_discriminator_steps",
        type=int,
        default=2,
        help="Number of successive training steps for the discriminator."
    )
    options_parser.add_argument(
        "--num_generator_steps",
        type=int,
        default=1,
        help="Number of successive training steps for the generator."
    )
    options_parser.add_argument(
        "--penalty",
        type=float,
        default=0.1,
        help="WGAN-GP gradient penalty lambda."
    )
    options_parser.add_argument("--seed", type=int, help="Random number generator seed.", default=42)
    options = options_parser.parse_args()
    # Seed every RNG (numpy, torch CPU, torch CUDA) for reproducibility.
    if options.seed is not None:
        np.random.seed(options.seed)
        torch.manual_seed(options.seed)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(options.seed)
    features = loaders[options.data_format](options.data)
    data = Dataset(features)
    train_data, val_data = data.split(1.0 - options.validation_proportion)
    variable_sizes = load_variable_sizes_from_metadata(options.metadata)
    generator = Generator(
        options.noise_size,
        variable_sizes,
        hidden_sizes=parse_int_list(options.generator_hidden_sizes),
        bn_decay=options.bn_decay
    )
    load_or_initialize(generator, options.input_generator)
    discriminator = Discriminator(
        features.shape[1],
        hidden_sizes=parse_int_list(options.discriminator_hidden_sizes),
        bn_decay=0,  # no batch normalization for the critic
        critic=True
    )
    load_or_initialize(discriminator, options.input_discriminator)
    train(
        generator,
        discriminator,
        train_data,
        val_data,
        options.output_generator,
        options.output_discriminator,
        options.output_loss,
        batch_size=options.batch_size,
        start_epoch=options.start_epoch,
        num_epochs=options.num_epochs,
        num_disc_steps=options.num_discriminator_steps,
        num_gen_steps=options.num_generator_steps,
        noise_size=options.noise_size,
        l2_regularization=options.l2_regularization,
        learning_rate=options.learning_rate,
        penalty=options.penalty
    )
if __name__ == "__main__":
    main()
| 10,134 | 32.013029 | 115 | py |
multi-categorical-gans | multi-categorical-gans-master/multi_categorical_gans/methods/mc_gumbel/sampler.py | from __future__ import print_function
import argparse
import torch
import numpy as np
from torch.autograd.variable import Variable
from multi_categorical_gans.methods.general.generator import Generator
from multi_categorical_gans.utils.categorical import load_variable_sizes_from_metadata
from multi_categorical_gans.utils.commandline import parse_int_list
from multi_categorical_gans.utils.cuda import to_cuda_if_available, to_cpu_if_available, load_without_cuda
def sample(generator, temperature, num_samples, num_features, batch_size=100, noise_size=128):
    """Draw `num_samples` rows from `generator` and return them as a CPU numpy array.

    Sampling is done in batches of `batch_size` Gaussian noise vectors of
    dimension `noise_size`; the final batch is truncated so exactly
    `num_samples` rows of `num_features` features are returned.
    """
    generator = to_cuda_if_available(generator)
    generator.train(mode=False)
    output = np.zeros((num_samples, num_features), dtype=np.float32)
    filled = 0
    while filled < num_samples:
        with torch.no_grad():
            noise = Variable(torch.FloatTensor(batch_size, noise_size).normal_())
            noise = to_cuda_if_available(noise)
            batch = generator(noise, training=False, temperature=temperature)
        batch = to_cpu_if_available(batch).data.numpy()
        # never copy more rows than are still missing
        take = min(batch_size, num_samples - filled)
        output[filled:filled + take, :] = batch[:take, :]
        filled += take
    return output
def main():
    """CLI entry point: sample rows from a trained MC-Gumbel generator and save them."""
    # NOTE: the description previously said "MedGAN", a copy-paste leftover;
    # this file lives under methods/mc_gumbel (the trainer says "Train MC-Gumbel.").
    options_parser = argparse.ArgumentParser(description="Sample data with MC-Gumbel.")
    options_parser.add_argument("generator", type=str, help="Generator input file.")
    options_parser.add_argument("metadata", type=str,
                                help="Information about the categorical variables in json format.")
    options_parser.add_argument("num_samples", type=int, help="Number of output samples.")
    options_parser.add_argument("num_features", type=int, help="Number of output features.")
    options_parser.add_argument("data", type=str, help="Output data.")
    options_parser.add_argument(
        "--noise_size",
        type=int,
        default=128,
        help="Dimension of the generator input noise."
    )
    options_parser.add_argument(
        "--batch_size",
        type=int,
        default=100,
        help="Amount of samples per batch."
    )
    options_parser.add_argument(
        "--generator_hidden_sizes",
        type=str,
        default="256,128",
        help="Size of each hidden layer in the generator separated by commas (no spaces)."
    )
    options_parser.add_argument(
        "--generator_bn_decay",
        type=float,
        default=0.01,
        help="Generator batch normalization decay."
    )
    options_parser.add_argument(
        "--temperature",
        type=float,
        default=0.666,
        help="Gumbel-Softmax temperature."
    )
    options = options_parser.parse_args()
    # rebuild the generator with the architecture used at training time
    generator = Generator(
        options.noise_size,
        load_variable_sizes_from_metadata(options.metadata),
        hidden_sizes=parse_int_list(options.generator_hidden_sizes),
        bn_decay=options.generator_bn_decay
    )
    load_without_cuda(generator, options.generator)
    data = sample(
        generator,
        options.temperature,
        options.num_samples,
        options.num_features,
        batch_size=options.batch_size,
        noise_size=options.noise_size,
    )
    np.save(options.data, data)


if __name__ == "__main__":
    main()
| 3,504 | 29.478261 | 106 | py |
multi-categorical-gans | multi-categorical-gans-master/multi_categorical_gans/methods/mc_gumbel/trainer.py | from __future__ import division
from __future__ import print_function
import argparse
import torch
import numpy as np
from torch.autograd.variable import Variable
from torch.nn import BCELoss
from torch.optim import Adam
from multi_categorical_gans.datasets.dataset import Dataset
from multi_categorical_gans.datasets.formats import data_formats, loaders
from multi_categorical_gans.methods.general.discriminator import Discriminator
from multi_categorical_gans.methods.general.generator import Generator
from multi_categorical_gans.utils.categorical import load_variable_sizes_from_metadata
from multi_categorical_gans.utils.commandline import DelayedKeyboardInterrupt, parse_int_list
from multi_categorical_gans.utils.cuda import to_cuda_if_available, to_cpu_if_available
from multi_categorical_gans.utils.initialization import load_or_initialize
from multi_categorical_gans.utils.logger import Logger
def train(generator,
          discriminator,
          train_data,
          val_data,
          output_gen_path,
          output_disc_path,
          output_loss_path,
          batch_size=1000,
          start_epoch=0,
          num_epochs=1000,
          num_disc_steps=2,
          num_gen_steps=1,
          noise_size=128,
          l2_regularization=0.001,
          learning_rate=0.001,
          temperature=0.666
          ):
    """Adversarially train `generator` against `discriminator` with BCE losses.

    Per epoch: alternate `num_disc_steps` discriminator updates and
    `num_gen_steps` generator updates over mini-batches of `train_data`, then
    measure the discriminator's real-vs-fake accuracy on `val_data`. After
    every epoch the weights are checkpointed to `output_gen_path` /
    `output_disc_path` and metrics are appended to `output_loss_path`.
    `temperature` is forwarded to the generator's Gumbel-Softmax sampling.
    """
    generator, discriminator = to_cuda_if_available(generator, discriminator)
    # one Adam optimizer per network; weight decay acts as L2 regularization
    optim_gen = Adam(generator.parameters(), weight_decay=l2_regularization, lr=learning_rate)
    optim_disc = Adam(discriminator.parameters(), weight_decay=l2_regularization, lr=learning_rate)
    criterion = BCELoss()
    # append to the existing log when resuming from a later start_epoch
    logger = Logger(output_loss_path, append=start_epoch > 0)
    for epoch_index in range(start_epoch, num_epochs):
        logger.start_timer()
        # train
        generator.train(mode=True)
        discriminator.train(mode=True)
        disc_losses = []
        gen_losses = []
        more_batches = True
        train_data_iterator = train_data.batch_iterator(batch_size)
        while more_batches:
            # train discriminator
            for _ in range(num_disc_steps):
                # next batch
                try:
                    batch = next(train_data_iterator)
                except StopIteration:
                    more_batches = False
                    break
                # using "one sided smooth labels" is one trick to improve GAN training
                label_zeros = Variable(torch.zeros(len(batch)))
                smooth_label_ones = Variable(torch.FloatTensor(len(batch)).uniform_(0.9, 1))
                label_zeros, smooth_label_ones = to_cuda_if_available(label_zeros, smooth_label_ones)
                optim_disc.zero_grad()
                # first train the discriminator only with real data
                real_features = Variable(torch.from_numpy(batch))
                real_features = to_cuda_if_available(real_features)
                real_pred = discriminator(real_features)
                real_loss = criterion(real_pred, smooth_label_ones)
                real_loss.backward()
                # then train the discriminator only with fake data
                noise = Variable(torch.FloatTensor(len(batch), noise_size).normal_())
                noise = to_cuda_if_available(noise)
                fake_features = generator(noise, training=True, temperature=temperature)
                fake_features = fake_features.detach()  # do not propagate to the generator
                fake_pred = discriminator(fake_features)
                fake_loss = criterion(fake_pred, label_zeros)
                fake_loss.backward()
                # finally update the discriminator weights
                # using two separated batches is another trick to improve GAN training
                optim_disc.step()
                disc_loss = real_loss + fake_loss
                disc_loss = to_cpu_if_available(disc_loss)
                disc_losses.append(disc_loss.data.numpy())
                # free the graph references before the next step
                del disc_loss
                del fake_loss
                del real_loss
            # train generator
            for _ in range(num_gen_steps):
                optim_gen.zero_grad()
                noise = Variable(torch.FloatTensor(len(batch), noise_size).normal_())
                noise = to_cuda_if_available(noise)
                gen_features = generator(noise, training=True, temperature=temperature)
                gen_pred = discriminator(gen_features)
                # the generator is rewarded when the discriminator says "real"
                smooth_label_ones = Variable(torch.FloatTensor(len(batch)).uniform_(0.9, 1))
                smooth_label_ones = to_cuda_if_available(smooth_label_ones)
                gen_loss = criterion(gen_pred, smooth_label_ones)
                gen_loss.backward()
                optim_gen.step()
                gen_loss = to_cpu_if_available(gen_loss)
                gen_losses.append(gen_loss.data.numpy())
                del gen_loss
        # validate discriminator
        generator.train(mode=False)
        discriminator.train(mode=False)
        correct = 0.0
        total = 0.0
        for batch in val_data.batch_iterator(batch_size):
            # real data discriminator accuracy
            with torch.no_grad():
                real_features = Variable(torch.from_numpy(batch))
                real_features = to_cuda_if_available(real_features)
                real_pred = discriminator(real_features)
            real_pred = to_cpu_if_available(real_pred)
            correct += (real_pred.data.numpy().ravel() > .5).sum()
            total += len(real_pred)
            # fake data discriminator accuracy
            with torch.no_grad():
                noise = Variable(torch.FloatTensor(len(batch), noise_size).normal_())
                noise = to_cuda_if_available(noise)
                fake_features = generator(noise, training=False, temperature=temperature)
                fake_pred = discriminator(fake_features)
            fake_pred = to_cpu_if_available(fake_pred)
            correct += (fake_pred.data.numpy().ravel() < .5).sum()
            total += len(fake_pred)
        # log epoch metrics for current class
        logger.log(epoch_index, num_epochs, "discriminator", "train_mean_loss", np.mean(disc_losses))
        logger.log(epoch_index, num_epochs, "generator", "train_mean_loss", np.mean(gen_losses))
        logger.log(epoch_index, num_epochs, "discriminator", "validation_accuracy", correct / total)
        # save models for the epoch
        with DelayedKeyboardInterrupt():
            torch.save(generator.state_dict(), output_gen_path)
            torch.save(discriminator.state_dict(), output_disc_path)
            logger.flush()
    logger.close()
def main():
    """CLI entry point: parse options, build the networks, and run MC-Gumbel training."""
    options_parser = argparse.ArgumentParser(description="Train MC-Gumbel.")
    options_parser.add_argument("data", type=str, help="Training data. See 'data_format' parameter.")
    options_parser.add_argument("metadata", type=str,
                                help="Information about the categorical variables in json format.")
    options_parser.add_argument("output_generator", type=str, help="Generator output file.")
    options_parser.add_argument("output_discriminator", type=str, help="Discriminator output file.")
    options_parser.add_argument("output_loss", type=str, help="Loss output file.")
    options_parser.add_argument("--input_generator", type=str, help="Generator input file.", default=None)
    options_parser.add_argument("--input_discriminator", type=str, help="Discriminator input file.", default=None)
    options_parser.add_argument(
        "--validation_proportion", type=float,
        default=.1,
        help="Ratio of data for validation."
    )
    options_parser.add_argument(
        "--data_format",
        type=str,
        default="sparse",
        choices=data_formats,
        help="Either a dense numpy array or a sparse csr matrix."
    )
    options_parser.add_argument(
        "--noise_size",
        type=int,
        default=128,
        # help text was empty; use the same wording as the sampler's option
        help="Dimension of the generator input noise."
    )
    options_parser.add_argument(
        "--batch_size",
        type=int,
        default=1000,
        help="Amount of samples per batch."
    )
    options_parser.add_argument(
        "--start_epoch",
        type=int,
        default=0,
        help="Starting epoch."
    )
    options_parser.add_argument(
        "--num_epochs",
        type=int,
        default=1000,
        help="Number of epochs."
    )
    options_parser.add_argument(
        "--l2_regularization",
        type=float,
        default=0.001,
        help="L2 regularization weight for every parameter."
    )
    options_parser.add_argument(
        "--learning_rate",
        type=float,
        default=0.001,
        help="Adam learning rate."
    )
    options_parser.add_argument(
        "--generator_hidden_sizes",
        type=str,
        default="256,128",
        help="Size of each hidden layer in the generator separated by commas (no spaces)."
    )
    options_parser.add_argument(
        "--bn_decay",
        type=float,
        default=0.9,
        help="Batch normalization decay for the generator and discriminator."
    )
    options_parser.add_argument(
        "--discriminator_hidden_sizes",
        type=str,
        default="256,128",
        help="Size of each hidden layer in the discriminator separated by commas (no spaces)."
    )
    options_parser.add_argument(
        "--num_discriminator_steps",
        type=int,
        default=2,
        help="Number of successive training steps for the discriminator."
    )
    options_parser.add_argument(
        "--num_generator_steps",
        type=int,
        default=1,
        help="Number of successive training steps for the generator."
    )
    options_parser.add_argument(
        "--temperature",
        type=float,
        default=0.666,
        help="Gumbel-Softmax temperature."
    )
    options_parser.add_argument("--seed", type=int, help="Random number generator seed.", default=42)
    options = options_parser.parse_args()
    # seed every RNG for reproducibility
    if options.seed is not None:
        np.random.seed(options.seed)
        torch.manual_seed(options.seed)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(options.seed)
    features = loaders[options.data_format](options.data)
    data = Dataset(features)
    train_data, val_data = data.split(1.0 - options.validation_proportion)
    variable_sizes = load_variable_sizes_from_metadata(options.metadata)
    generator = Generator(
        options.noise_size,
        variable_sizes,
        hidden_sizes=parse_int_list(options.generator_hidden_sizes),
        bn_decay=options.bn_decay
    )
    load_or_initialize(generator, options.input_generator)
    discriminator = Discriminator(
        features.shape[1],
        hidden_sizes=parse_int_list(options.discriminator_hidden_sizes),
        bn_decay=options.bn_decay,
        critic=False
    )
    load_or_initialize(discriminator, options.input_discriminator)
    train(
        generator,
        discriminator,
        train_data,
        val_data,
        options.output_generator,
        options.output_discriminator,
        options.output_loss,
        batch_size=options.batch_size,
        start_epoch=options.start_epoch,
        num_epochs=options.num_epochs,
        num_disc_steps=options.num_discriminator_steps,
        num_gen_steps=options.num_generator_steps,
        noise_size=options.noise_size,
        l2_regularization=options.l2_regularization,
        learning_rate=options.learning_rate,
        temperature=options.temperature
    )


if __name__ == "__main__":
    main()
| 11,645 | 33.052632 | 114 | py |
multi-categorical-gans | multi-categorical-gans-master/multi_categorical_gans/datasets/synthetic/generate.py | from __future__ import division
from __future__ import print_function
import argparse
import json
import torch
import numpy as np
from scipy.sparse import csr_matrix, save_npz
from torch.distributions.one_hot_categorical import OneHotCategorical
# Accepted ways of specifying a distribution on the command line (see main()).
distribution_types = ["probs", "logits", "uniform"]
class Variable(object):
    """A categorical variable conditioned on the value of its parent variable.

    `distributions` maps each possible parent category index to the
    distribution to sample from when the parent takes that value.
    """

    def __init__(self, distributions):
        self.distributions = distributions

    def sample(self, previous_sample):
        """Sample one value, conditioned on the one-hot `previous_sample`."""
        parent_category = previous_sample.argmax().item()
        return self.distributions[parent_category].sample()
def add_one(ones, rows, cols, i, j, sample):
    """Append one COO-style entry: a 1 at row `i`, column `j + argmax(sample)`."""
    hot_index = sample.argmax().item()
    for target, value in ((ones, 1), (rows, i), (cols, j + hot_index)):
        target.append(value)
def generate_one_hot_variable(distribution, distribution_type):
    """Build a OneHotCategorical from explicit probabilities or softmax logits.

    Every resulting category probability must be strictly positive.
    """
    assert distribution_type in distribution_types
    kwargs = {distribution_type: torch.FloatTensor(distribution)}
    variable = OneHotCategorical(**kwargs)
    assert all(prob > 0 for prob in variable.probs)
    return variable
def print_matrix_stats(matrix, num_samples, num_features):
num_ones = matrix.sum()
num_positions = num_samples * num_features
num_ones_per_row = np.asarray(matrix.sum(axis=1)).ravel()
num_ones_per_column = np.asarray(matrix.sum(axis=0)).ravel()
print("Min:", matrix.min())
print("Max:", matrix.max())
print("Rows:", matrix.shape[0])
print("Columns:", matrix.shape[1])
print("Mean ones per row:", num_ones_per_row.mean())
print("Mean ones per column:", num_ones_per_column.mean())
print("Total ones:", num_ones)
print("Total positions:", num_positions)
print("Total ratio of ones:", num_ones / num_positions)
print("Empty rows:", np.sum(num_ones_per_row == 0))
print("Full rows:", np.sum(num_ones_per_row == num_features))
print("Empty columns:", np.sum(num_ones_per_column == 0))
print("Full columns:", np.sum(num_ones_per_column == num_samples))
def generate_one_hot(num_samples, num_variables, min_variable_size, max_variable_size, metadata_path, output_path,
                     class_distribution=2, class_distribution_type="uniform", seed=None):
    """Generate one-hot-encoded samples with cascade (chain) dependencies.

    A class variable is sampled first; each of the `num_variables` following
    categorical variables is sampled from a distribution conditioned on the
    value of the previous one. The generating distributions are written as
    json to `metadata_path` and the samples as a sparse uint8 csr matrix to
    `output_path` (see scipy's `save_npz`).
    """
    if seed is not None:
        np.random.seed(seed)
    assert 2 <= min_variable_size <= max_variable_size
    assert class_distribution is not None
    # "uniform" is translated into explicit per-class probabilities
    if class_distribution_type == "uniform":
        num_classes = int(class_distribution[0])
        class_distribution = [1.0 / num_classes for _ in range(num_classes)]
        class_distribution_type = "probs"
    # generate classes
    class_variable = generate_one_hot_variable(class_distribution, class_distribution_type)
    num_classes = class_variable.event_shape[0]
    # generate variables
    variables = []
    variable_sizes = [num_classes]
    num_features = num_classes
    last_variable_size = num_classes
    for _ in range(num_variables):
        if min_variable_size == max_variable_size:
            variable_size = min_variable_size
        else:
            # cast to a plain int: np.random.randint returns a numpy integer,
            # which json.dump cannot serialize when writing the metadata below
            variable_size = int(np.random.randint(low=min_variable_size, high=max_variable_size + 1))
        variable_sizes.append(variable_size)
        # one conditional distribution per possible value of the previous variable
        distributions = {}
        for input_value in range(last_variable_size):
            logits = torch.FloatTensor(size=(variable_size,)).normal_(0, 1)
            distributions[input_value] = OneHotCategorical(logits=logits)
        variables.append(Variable(distributions))
        num_features += variable_size
        last_variable_size = variable_size
    # generate metadata
    metadata = {
        "seed": seed,
        "variable_sizes": variable_sizes,
        "class_probs": class_variable.probs.tolist(),
        "variable_probs": [[sub_variable.probs.tolist() for sub_variable in variable.distributions.values()]
                           for variable in variables]
    }
    with open(metadata_path, "w") as metadata_file:
        json.dump(metadata, metadata_file, indent=2)
    # generate data
    ones = []
    rows = []
    cols = []
    for i in range(num_samples):
        j = 0
        class_sample = class_variable.sample()
        add_one(ones, rows, cols, i, j, class_sample)
        j += class_sample.shape[0]
        previous_sample = class_sample
        # walk the chain: each variable is conditioned on the previous sample
        for variable in variables:
            sample = variable.sample(previous_sample)
            add_one(ones, rows, cols, i, j, sample)
            j += sample.shape[0]
            previous_sample = sample
    output = csr_matrix((ones, (rows, cols)), shape=(num_samples, num_features), dtype=np.uint8)
    print_matrix_stats(output, num_samples, num_features)
    save_npz(output_path, output)
def main():
    """CLI entry point: generate one-hot cascade data and write metadata + samples."""
    options_parser = argparse.ArgumentParser(description="Generate one hot encoded data with cascade dependencies.")
    options_parser.add_argument("num_samples", type=int, help="Number of output samples.")
    options_parser.add_argument("num_variables", type=int, help="Number of output categorical variables.")
    options_parser.add_argument("metadata_path", type=str,
                                help="Output data file path indicating the class distribution and the variable maps.")
    options_parser.add_argument("output_path", type=str,
                                help="Output data file path in sparse format.")
    options_parser.add_argument("--min_variable_size", type=int, default=2,
                                help="Minimum random size of each categorical variable. Should be at least 2.")
    options_parser.add_argument("--max_variable_size", type=int, default=10,
                                help="Maximum random size of each categorical variable.")
    options_parser.add_argument("--seed", type=int, help="Random number generator seed.", default=42)
    options_parser.add_argument("--class_distribution", type=str, default="2",
                                help="Defines the distribution of the class variable. See 'class_distribution_type'.")
    options_parser.add_argument("--class_distribution_type", type=str, default="uniform", choices=distribution_types,
                                help="If uniform, same probability is assigned to every class;" +
                                     " the 'class_distribution' should be the number of classes." +
                                     "\nIf probs, explicit probabilities per class" +
                                     " are defined in 'class_distribution' separated by commas." +
                                     "\nIf logits, the values separated by commas defined in 'class_distribution'" +
                                     " will be used as softmax logits."
                                )
    options = options_parser.parse_args()
    # the comma-separated distribution string is parsed into a list of floats
    generate_one_hot(options.num_samples,
                     options.num_variables,
                     options.min_variable_size,
                     options.max_variable_size,
                     options.metadata_path,
                     options.output_path,
                     [float(x) for x in options.class_distribution.split(",")],
                     options.class_distribution_type,
                     options.seed
                     )


if __name__ == "__main__":
    main()
| 7,152 | 37.251337 | 118 | py |
multi-categorical-gans | multi-categorical-gans-master/multi_categorical_gans/utils/cuda.py | import torch
def to_cuda_if_available(*tensors):
    """Move every non-None argument to the GPU when CUDA is present.

    Called with a single argument, the object itself is returned; with
    several, the whole sequence is returned.
    """
    if torch.cuda.is_available():
        moved = []
        for tensor in tensors:
            moved.append(None if tensor is None else tensor.cuda())
        tensors = moved
    return tensors[0] if len(tensors) == 1 else tensors
def to_cpu_if_available(*tensors):
    """Bring every non-None argument back to the CPU when CUDA is in use.

    Called with a single argument, the object itself is returned; with
    several, the whole sequence is returned.
    """
    if torch.cuda.is_available():
        moved = []
        for tensor in tensors:
            moved.append(None if tensor is None else tensor.cpu())
        tensors = moved
    return tensors[0] if len(tensors) == 1 else tensors
def load_without_cuda(model, state_dict_path):
    """Load a saved state dict into `model`, keeping all tensors on the CPU.

    The map_location callable leaves each storage where it was deserialized,
    so GPU-trained checkpoints load on machines without CUDA.
    """
    state = torch.load(state_dict_path, map_location=lambda storage, loc: storage)
    model.load_state_dict(state)
| 620 | 27.227273 | 97 | py |
multi-categorical-gans | multi-categorical-gans-master/multi_categorical_gans/utils/categorical.py | import json
import numpy as np
import torch
import torch.nn.functional as F
def load_variable_sizes_from_metadata(metadata_path):
    """Read the list of categorical variable sizes from a metadata json file."""
    with open(metadata_path, "r") as metadata_file:
        return json.load(metadata_file)["variable_sizes"]
def categorical_variable_loss(reconstructed, original, variable_sizes):
    """Reconstruction loss for mixed categorical/continuous one-hot data.

    When `variable_sizes` is None, all features are treated as independent
    binary variables (BCE, expects `reconstructed` to hold probabilities).
    Otherwise the columns are walked in order: each size > 1 is a categorical
    variable scored with cross-entropy (so those `reconstructed` slices are
    treated as logits by F.cross_entropy), and runs of size-1 entries are
    accumulated and scored together with MSE. The per-variable losses are
    summed and returned as a scalar tensor.
    """
    # by default use loss for binary variables
    if variable_sizes is None:
        return F.binary_cross_entropy(reconstructed, original)
    # use the variable sizes when available
    else:
        loss = 0
        start = 0
        continuous_size = 0
        for variable_size in variable_sizes:
            # if it is a categorical variable
            if variable_size > 1:
                # add loss from the accumulated continuous variables
                if continuous_size > 0:
                    end = start + continuous_size
                    batch_reconstructed_variable = reconstructed[:, start:end]
                    batch_target = original[:, start:end]
                    loss += F.mse_loss(batch_reconstructed_variable, batch_target)
                    start = end
                    continuous_size = 0
                # add loss from categorical variable
                end = start + variable_size
                batch_reconstructed_variable = reconstructed[:, start:end]
                # targets are the class indices recovered from the one-hot columns
                batch_target = torch.argmax(original[:, start:end], dim=1)
                loss += F.cross_entropy(batch_reconstructed_variable, batch_target)
                start = end
            # if not, accumulate continuous variables
            else:
                continuous_size += 1
        # add loss from the remaining accumulated continuous variables
        if continuous_size > 0:
            end = start + continuous_size
            batch_reconstructed_variable = reconstructed[:, start:end]
            batch_target = original[:, start:end]
            loss += F.mse_loss(batch_reconstructed_variable, batch_target)
        return loss
def separate_categorical(data, variable_sizes, selected_index):
    """Split `data` into (features, labels) around one categorical variable.

    The one-hot columns of variable `selected_index` are removed from `data`
    and converted to integer class labels via argmax; everything else is
    returned (in original order) as the feature matrix.
    """
    if selected_index == 0:
        head = variable_sizes[0]
        labels = data[:, :head]
        features = data[:, head:]
    elif 0 < selected_index < len(variable_sizes) - 1:
        start = sum(variable_sizes[:selected_index])
        stop = start + variable_sizes[selected_index]
        labels = data[:, start:stop]
        features = np.concatenate((data[:, :start], data[:, stop:]), axis=1)
    else:
        # last variable: the labels are simply the trailing columns
        start = sum(variable_sizes[:-1])
        features = data[:, :start]
        labels = data[:, start:]
    assert data.shape[1] == features.shape[1] + labels.shape[1]
    return features, np.argmax(labels, axis=1)
| 2,835 | 37.324324 | 83 | py |
multi-categorical-gans | multi-categorical-gans-master/multi_categorical_gans/utils/initialization.py | import torch.nn as nn
from multi_categorical_gans.utils.cuda import load_without_cuda
def initialize_weights(module):
    """Weight initializer intended for `Module.apply`.

    Linear layers get Xavier-normal weights and zero biases; BatchNorm1d
    layers get weights from N(1.0, 0.02) and zero biases. Other module
    types are left untouched.
    """
    # isinstance instead of `type(...) ==` is the idiomatic type check and
    # also covers subclasses of these layer types
    if isinstance(module, nn.Linear):
        nn.init.xavier_normal_(module.weight)
        if module.bias is not None:
            nn.init.constant_(module.bias, 0.0)
    elif isinstance(module, nn.BatchNorm1d):
        # use nn.init consistently instead of mutating .data directly
        nn.init.normal_(module.weight, 1.0, 0.02)
        nn.init.constant_(module.bias, 0.0)
def load_or_initialize(module, state_dict_path):
    """Restore weights from disk when a path is given, otherwise random-init."""
    if state_dict_path is None:
        module.apply(initialize_weights)
    else:
        load_without_cuda(module, state_dict_path)
| 595 | 27.380952 | 63 | py |
pytorch-hessian-eigenthings | pytorch-hessian-eigenthings-master/setup.py | """setup.py for hessian_eigenthings"""
from setuptools import setup, find_packages
# Minimum runtime dependency versions.
install_requires = [
    'numpy>=0.14',
    'torch>=0.4',
    'scipy>=1.2.1'
]

# Package metadata for setuptools.
setup(name="hessian_eigenthings",
      author="Noah Golmant",
      install_requires=install_requires,
      packages=find_packages(),
      description='Eigendecomposition of model Hessians in PyTorch!',
      version='0.0.2')
| 394 | 22.235294 | 69 | py |
pytorch-hessian-eigenthings | pytorch-hessian-eigenthings-master/tests/random_matrix_tests.py | """
This file tests the accuracy of the power iteration methods by comparing
against np.linalg.eig results for various random matrix configurations
"""
import argparse
import functools
import numpy as np
import torch
from hessian_eigenthings.operator import LambdaOperator
from hessian_eigenthings.power_iter import deflated_power_iteration
from hessian_eigenthings.lanczos import lanczos
import matplotlib.pyplot as plt
from utils import plot_eigenval_estimates, plot_eigenvec_errors
# Command-line configuration for the random-matrix accuracy tests.
# NOTE: parsing happens at import time, so this module is script-only.
parser = argparse.ArgumentParser(description='power iteration tester')
parser.add_argument('--matrix_dim', type=int, default=100,
                    help='number of rows/columns in matrix')
parser.add_argument('--num_eigenthings', type=int, default=10,
                    help='number of eigenvalues to compute')
parser.add_argument('--power_iter_steps', default=20, type=int,
                    help='number of steps of power iteration')
parser.add_argument('--momentum', default=0, type=float,
                    help='acceleration term for stochastic power iter')
parser.add_argument('--num_trials', default=30, type=int,
                    help='number of matrices per test')
parser.add_argument('--seed', default=1, type=int)
parser.add_argument('--fp16', action='store_true')
parser.add_argument('--mode', default='power_iter',
                    choices=['power_iter', 'lanczos'])
args = parser.parse_args()
def test_matrix(mat, ntrials, mode):
    """
    Tests the accuracy of deflated power iteration on the given matrix.
    It computes the average percent eigenval error and eigenvec simliartiy err

    Runs the chosen estimator (`lanczos` or deflated power iteration,
    configured through the module-level `args`) `ntrials` times against
    numpy's exact eigendecomposition and plots both results.
    """
    tensor = torch.from_numpy(mat).float()
    # for non-gpu tests, addmv not implemented for fp16 on CPU. have to do float.
    op = LambdaOperator(lambda x: torch.matmul(tensor, x.float()), tensor.size()[:1])
    real_eigenvals, true_eigenvecs = np.linalg.eig(mat)
    # np.linalg.eig returns eigenvectors as columns; extract them one by one
    real_eigenvecs = [true_eigenvecs[:, i] for i in range(len(real_eigenvals))]
    eigenvals = []
    eigenvecs = []
    if mode == 'lanczos':
        method = lanczos
    else:
        method = functools.partial(deflated_power_iteration,
                                   power_iter_steps=args.power_iter_steps,
                                   momentum=args.momentum)
    for _ in range(ntrials):
        est_eigenvals, est_eigenvecs = method(
            op,
            num_eigenthings=args.num_eigenthings,
            use_gpu=False,
            fp16=args.fp16
        )
        # sort estimates into descending eigenvalue order
        est_inds = np.argsort(est_eigenvals)
        est_eigenvals = np.array(est_eigenvals)[est_inds][::-1]
        est_eigenvecs = np.array(est_eigenvecs)[est_inds][::-1]
        eigenvals.append(est_eigenvals)
        eigenvecs.append(est_eigenvecs)
    eigenvals = np.array(eigenvals)
    eigenvecs = np.array(eigenvecs)
    # truncate estimates
    real_inds = np.argsort(real_eigenvals)
    real_eigenvals = np.array(real_eigenvals)[real_inds][-args.num_eigenthings:][::-1]
    real_eigenvecs = np.array(real_eigenvecs)[real_inds][-args.num_eigenthings:][::-1]
    # Plot eigenvalue error
    plt.suptitle('Random Matrix Eigendecomposition Errors: %d trials' % ntrials)
    plt.subplot(1, 2, 1)
    plt.title('Eigenvalues')
    plt.plot(list(range(len(real_eigenvals))), real_eigenvals, label='True Eigenvals', linestyle='--', linewidth=5)
    plot_eigenval_estimates(eigenvals, label='Estimates')
    plt.legend()
    # Plot eigenvector L2 norm error
    plt.subplot(1, 2, 2)
    plt.title('Eigenvector cosine simliarity')
    plot_eigenvec_errors(real_eigenvecs, eigenvecs, label='Estimates')
    plt.legend()
    plt.show()
def generate_wishart(n, offset=0.0):
    """Return a random n x n Wishart (symmetric PSD) matrix scaled by 1/n.

    `offset * I` is added before scaling to shift the spectrum, which is
    useful for conditioning experiments.
    """
    base = np.random.random(size=(n, n)).astype(float)
    gram = base.T.dot(base) + offset * np.eye(n)
    return (1. / n) * gram
def test_wishart():
    """Run the estimator-accuracy test on one random Wishart matrix (CLI-sized)."""
    test_matrix(generate_wishart(args.matrix_dim), args.num_trials, mode=args.mode)


if __name__ == '__main__':
    test_wishart()
| 4,091 | 35.212389 | 115 | py |
pytorch-hessian-eigenthings | pytorch-hessian-eigenthings-master/tests/principle_eigenvec_tests.py | import argparse
import numpy as np
import torch
from hessian_eigenthings import compute_hessian_eigenthings
from utils import plot_eigenval_estimates, plot_eigenvec_errors
from torch.utils.data import DataLoader
from torch import nn
import matplotlib.pyplot as plt
from variance_tests import get_full_hessian
import scipy
def test_principal_eigenvec(model, criterion, x, y, ntrials, fp16):
    """Compare the estimated principal Hessian eigenvector against scipy.

    Builds the dense Hessian of `criterion(model(x), y)`, takes its top
    eigenvector via `scipy.linalg.eigh`, estimates it with
    `compute_hessian_eigenthings`, and prints the absolute cosine
    similarity between the two (1.0 is a perfect match).
    `ntrials` is currently unused.
    """
    loss = criterion(model(x), y)
    loss_grad = torch.autograd.grad(loss, model.parameters(), create_graph=True)
    print("computing real hessian")
    real_hessian = get_full_hessian(loss_grad, model)
    # small diagonal shift for conditioning
    real_hessian += 1e-4 * np.eye(len(real_hessian))
    samples = [(x_i, y_i) for x_i, y_i in zip(x, y)]
    # single batch containing the full dataset
    dataloader = DataLoader(samples, batch_size=len(x))
    print("computing numpy principal eigenvec of hessian")
    num_params = len(real_hessian)
    real_eigenvals, real_eigenvecs = scipy.linalg.eigh(
        real_hessian, eigvals=(num_params - 1, num_params - 1)
    )
    # BUGFIX: eigh returns eigenvectors as *columns* — take column 0, not row 0
    real_eigenvec, real_eigenval = real_eigenvecs[:, 0], real_eigenvals[0]
    est_eigenvals, est_eigenvecs = compute_hessian_eigenthings(
        model,
        dataloader,
        criterion,
        num_eigenthings=1,
        power_iter_steps=10,
        power_iter_err_threshold=1e-5,
        momentum=0,
        use_gpu=False,
        fp16=fp16
    )
    # BUGFIX: the original assignment swapped the two return values, which
    # made `est_eigenvec` a scalar and broke the dot product below
    est_eigenval, est_eigenvec = est_eigenvals[0], est_eigenvecs[0]
    # compute cosine similarity
    print(real_eigenvec, est_eigenvec)
    dotted = np.dot(real_eigenvec, est_eigenvec)
    if dotted == 0.0:
        score = 1.0  # both in nullspace... nice...
    else:
        norm = scipy.linalg.norm(real_eigenvec) * scipy.linalg.norm(est_eigenvec)
        score = abs(dotted / norm)
    print(score)
if __name__ == "__main__":
    # Script entry point: build a small random regression problem and check
    # the estimated principal Hessian eigenvector against scipy's.
    parser = argparse.ArgumentParser(description='power iteration tester')
    parser.add_argument('--data_dim', type=int, default=100)
    parser.add_argument('--hidden_dim', type=int, default=1000)
    parser.add_argument('--fp16', action='store_true')
    parser.add_argument('--mode', default='power_iter',
                        choices=['power_iter', 'lanczos'])
    args = parser.parse_args()
    indim = outdim = args.data_dim
    hidden = args.hidden_dim
    nsamples = 10
    ntrials = 1
    bs = 10
    # two-layer MLP regression model with matching input/output dimensions
    model = nn.Sequential(
        nn.Linear(indim, hidden),
        nn.ReLU(inplace=True),
        nn.Linear(hidden, outdim),
        nn.ReLU(inplace=True),
    )
    criterion = torch.nn.MSELoss()
    x = torch.rand((nsamples, indim))
    y = torch.rand((nsamples, outdim))
    test_principal_eigenvec(model, criterion, x, y, ntrials, fp16=args.fp16)
| 2,769 | 28.784946 | 81 | py |
pytorch-hessian-eigenthings | pytorch-hessian-eigenthings-master/tests/variance_tests.py | """
This test looks at the variance of eigenvalue/eigenvector estimates
(1) Full dataset should have deterministic results
(2) Compute variance of repeated trials and the effect of averaging, error
relative to full dataset
(3) Compute variance of full power iteration on a fixed mini-batch (vs.
varying the mini-batch at each step) compared to full dataset
"""
import numpy as np
import torch
from hessian_eigenthings import compute_hessian_eigenthings
from utils import plot_eigenval_estimates, plot_eigenvec_errors
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
from hessian_eigenthings.utils import progress_bar
def get_full_hessian(loss_grad, model):
    """Build the dense Hessian of the loss w.r.t. all parameters of `model`.

    `loss_grad` must be gradients obtained with `create_graph=True` so each
    entry can be differentiated a second time. Returns a numpy array of
    shape (num_params, num_params). One backward pass per parameter, so this
    is only feasible for tiny models.
    """
    # from https://discuss.pytorch.org/t/compute-the-hessian-matrix-of-a-network/15270/3
    cnt = 0
    loss_grad = list(loss_grad)
    # flatten the per-parameter gradients into a single vector g_vector
    for i, g in enumerate(loss_grad):
        progress_bar(
            i,
            len(loss_grad),
            "flattening to full gradient: %d of %d" % (i, len(loss_grad)),
        )
        g_vector = (
            g.contiguous().view(-1)
            if cnt == 0
            else torch.cat([g_vector, g.contiguous().view(-1)])
        )
        cnt = 1
    hessian_size = g_vector.size(0)
    hessian = torch.zeros(hessian_size, hessian_size)
    # each Hessian row is the gradient of one gradient entry
    for idx in range(hessian_size):
        progress_bar(
            idx, hessian_size, "full hessian columns: %d of %d" % (idx, hessian_size)
        )
        grad2rd = torch.autograd.grad(
            g_vector[idx], model.parameters(), create_graph=True
        )
        cnt = 0
        for g in grad2rd:
            g2 = (
                g.contiguous().view(-1)
                if cnt == 0
                else torch.cat([g2, g.contiguous().view(-1)])
            )
            cnt = 1
        hessian[idx] = g2
    return hessian.cpu().data.numpy()
def test_full_hessian(model, criterion, x, y, ntrials=10):
    """Compare full-dataset eigenthing estimates against the exact spectrum.

    Computes the dense Hessian of `criterion(model(x), y)`, runs
    `compute_hessian_eigenthings` `ntrials` times on the full dataset (one
    batch), and plots estimated vs. exact eigenvalues/eigenvectors to
    full.png. Returns the dense Hessian so other tests can reuse it.
    """
    loss = criterion(model(x), y)
    loss_grad = torch.autograd.grad(loss, model.parameters(), create_graph=True)
    real_hessian = get_full_hessian(loss_grad, model)
    samples = [(x_i, y_i) for x_i, y_i in zip(x, y)]
    # full dataset
    dataloader = DataLoader(samples, batch_size=len(x))
    eigenvals = []
    eigenvecs = []
    nparams = len(real_hessian)
    for _ in range(ntrials):
        est_eigenvals, est_eigenvecs = compute_hessian_eigenthings(
            model,
            dataloader,
            criterion,
            num_eigenthings=nparams,
            power_iter_steps=100,
            power_iter_err_threshold=1e-9,
            momentum=0.0,
            use_gpu=False,
        )
        # sort estimates into descending eigenvalue order (rows = vectors)
        est_inds = np.argsort(est_eigenvals)
        est_eigenvals = np.array(est_eigenvals)[est_inds][::-1]
        est_eigenvecs = np.array(est_eigenvecs)[est_inds][::-1]
        eigenvals.append(est_eigenvals)
        eigenvecs.append(est_eigenvecs)
    eigenvals = np.array(eigenvals)
    eigenvecs = np.array(eigenvecs)
    real_eigenvals, real_eigenvecs = np.linalg.eig(real_hessian)
    real_inds = np.argsort(real_eigenvals)
    real_eigenvals = np.array(real_eigenvals)[real_inds][::-1]
    # BUGFIX: np.linalg.eig returns eigenvectors as *columns*; transpose so
    # each row is one eigenvector before reordering (the row layout used by
    # the estimates above — compare test_matrix in random_matrix_tests.py)
    real_eigenvecs = np.array(real_eigenvecs).T[real_inds][::-1]
    # Plot eigenvalue error
    plt.suptitle("Hessian eigendecomposition errors: %d trials" % ntrials)
    plt.subplot(1, 2, 1)
    plt.title("Eigenvalues")
    plt.plot(list(range(nparams)), real_eigenvals, label="True Eigenvals", linewidth=3, linestyle='--')
    plot_eigenval_estimates(eigenvals, label="Estimates")
    plt.legend()
    # Plot eigenvector L2 norm error
    plt.subplot(1, 2, 2)
    plt.title("Eigenvector cosine simliarity")
    plot_eigenvec_errors(real_eigenvecs, eigenvecs, label="Estimates")
    plt.legend()
    plt.savefig("full.png")
    plt.clf()
    return real_hessian
def test_stochastic_hessian(model, criterion, real_hessian, x, y, bs=10, ntrials=10):
    """Plot eigendecomposition errors when HVPs come from mini-batches.

    Runs `compute_hessian_eigenthings` `ntrials` times with batch size `bs`
    and compares the estimates against the dense `real_hessian`. Saves the
    resulting comparison figure to ``stochastic.png``.
    """
    samples = [(x_i, y_i) for x_i, y_i in zip(x, y)]
    # mini-batches of size `bs` -> stochastic Hessian-vector products
    dataloader = DataLoader(samples, batch_size=bs)
    eigenvals = []
    eigenvecs = []

    nparams = len(real_hessian)
    for _ in range(ntrials):
        est_eigenvals, est_eigenvecs = compute_hessian_eigenthings(
            model,
            dataloader,
            criterion,
            num_eigenthings=nparams,
            power_iter_steps=100,
            power_iter_err_threshold=1e-9,
            momentum=0,
            use_gpu=False,
        )
        # Reorder estimates so the largest eigenvalue comes first.
        est_inds = np.argsort(est_eigenvals)
        est_eigenvals = np.array(est_eigenvals)[est_inds][::-1]
        est_eigenvecs = np.array(est_eigenvecs)[est_inds][::-1]

        eigenvals.append(est_eigenvals)
        eigenvecs.append(est_eigenvecs)
    eigenvals = np.array(eigenvals)
    eigenvecs = np.array(eigenvecs)

    real_eigenvals, real_eigenvecs = np.linalg.eig(real_hessian)
    real_inds = np.argsort(real_eigenvals)
    real_eigenvals = np.array(real_eigenvals)[real_inds][::-1]
    # NOTE(review): np.linalg.eig returns eigenvectors as columns (v[:, i]);
    # indexing rows here may mismatch the row-wise estimates — confirm.
    real_eigenvecs = np.array(real_eigenvecs)[real_inds][::-1]

    # Plot eigenvalue error
    plt.suptitle("Stochastic Hessian eigendecomposition errors: %d trials" % ntrials)
    plt.subplot(1, 2, 1)
    plt.title("Eigenvalues")
    plt.plot(list(range(nparams)), real_eigenvals, label="True Eigenvals", linewidth=3, linestyle='--')
    plot_eigenval_estimates(eigenvals, label="Estimates")
    plt.legend()
    # Plot eigenvector L2 norm error
    plt.subplot(1, 2, 2)
    plt.title("Eigenvector cosine simliarity")
    plot_eigenvec_errors(real_eigenvecs, eigenvecs, label="Estimates")
    plt.legend()
    plt.savefig("stochastic.png")
    plt.clf()
def test_fixed_mini(model, criterion, real_hessian, x, y, bs=10, ntrials=10):
    """Plot Lanczos eigendecomposition errors on one fixed mini-batch.

    Truncates the data to the first `bs` samples, runs the Lanczos backend
    `ntrials` times, and compares against `real_hessian` (which was computed
    on the *full* data, so some mismatch is expected). Saves the figure to
    ``fixed.png``.
    """
    x = x[:bs]
    y = y[:bs]
    samples = [(x_i, y_i) for x_i, y_i in zip(x, y)]
    # one batch holding the truncated dataset
    dataloader = DataLoader(samples, batch_size=len(x))
    eigenvals = []
    eigenvecs = []

    nparams = len(real_hessian)
    for _ in range(ntrials):
        est_eigenvals, est_eigenvecs = compute_hessian_eigenthings(
            model,
            dataloader,
            criterion,
            num_eigenthings=nparams,
            mode="lanczos",
            power_iter_steps=10,
            power_iter_err_threshold=1e-5,
            momentum=0,
            use_gpu=False,
        )
        est_eigenvals = np.array(est_eigenvals)
        # NOTE(review): `lanczos` appears to return numpy rows, on which
        # `.numpy()` would raise AttributeError — this path may be stale
        # (its call in __main__ is commented out); confirm before use.
        est_eigenvecs = np.array([t.numpy() for t in est_eigenvecs])
        est_inds = np.argsort(est_eigenvals)
        est_eigenvals = np.array(est_eigenvals)[est_inds][::-1]
        est_eigenvecs = np.array(est_eigenvecs)[est_inds][::-1]

        eigenvals.append(est_eigenvals)
        eigenvecs.append(est_eigenvecs)
    eigenvals = np.array(eigenvals)
    eigenvecs = np.array(eigenvecs)

    real_eigenvals, real_eigenvecs = np.linalg.eig(real_hessian)
    real_inds = np.argsort(real_eigenvals)
    real_eigenvals = np.array(real_eigenvals)[real_inds][::-1]
    # NOTE(review): np.linalg.eig returns eigenvectors as columns (v[:, i]);
    # indexing rows here may mismatch the row-wise estimates — confirm.
    real_eigenvecs = np.array(real_eigenvecs)[real_inds][::-1]

    # Plot eigenvalue error
    plt.suptitle(
        "Fixed mini-batch Hessian eigendecomposition errors: %d trials" % ntrials
    )
    plt.subplot(1, 2, 1)
    plt.title("Eigenvalues")
    plt.plot(list(range(nparams)), real_eigenvals, label="True Eigenvals")
    plot_eigenval_estimates(eigenvals, label="Estimates")
    plt.legend()
    # Plot eigenvector L2 norm error
    plt.subplot(1, 2, 2)
    plt.title("Eigenvector cosine simliarity")
    plot_eigenvec_errors(real_eigenvecs, eigenvecs, label="Estimates")
    plt.legend()
    plt.savefig("fixed.png")
if __name__ == "__main__":
    # Small synthetic regression problem: the Hessian of the MSE loss of a
    # single linear layer is cheap enough to form explicitly for comparison.
    indim = 100
    outdim = 1
    nsamples = 10
    ntrials = 1
    bs = 10
    model = torch.nn.Linear(indim, outdim)
    criterion = torch.nn.MSELoss()
    x = torch.rand((nsamples, indim))
    y = torch.rand((nsamples, outdim))
    # Reuse the dense Hessian from the full-batch test for the stochastic one.
    hessian = test_full_hessian(model, criterion, x, y, ntrials=ntrials)
    test_stochastic_hessian(model, criterion, hessian, x, y, bs=bs, ntrials=ntrials)
    # test_fixed_mini(model, criterion, hessian, x, y, bs=bs, ntrials=ntrials)
| 7,988 | 31.741803 | 103 | py |
pytorch-hessian-eigenthings | pytorch-hessian-eigenthings-master/hessian_eigenthings/lanczos.py | """ Use scipy/ARPACK implicitly restarted lanczos to find top k eigenthings """
from typing import Tuple
import numpy as np
import torch
import scipy.sparse.linalg as linalg
from scipy.sparse.linalg import LinearOperator as ScipyLinearOperator
from warnings import warn
import hessian_eigenthings.utils as utils
from hessian_eigenthings.operator import Operator
def lanczos(
    operator: Operator,
    num_eigenthings: int = 10,
    which: str = "LM",
    max_steps: int = 20,
    tol: float = 1e-6,
    num_lanczos_vectors: int = None,
    init_vec: np.ndarray = None,
    use_gpu: bool = False,
    fp16: bool = False,
) -> Tuple[np.ndarray, np.ndarray]:
    """
    Use the scipy.sparse.linalg.eigsh hook to the ARPACK lanczos algorithm
    to find the top k eigenvalues/eigenvectors.

    Please see scipy documentation for details on specific parameters
    such as 'which'.

    Parameters
    -------------
    operator: operator.Operator
        linear operator to solve.
    num_eigenthings : int
        number of eigenvalue/eigenvector pairs to compute
    which : str ['LM', SM', 'LA', SA']
        L,S = largest, smallest. M, A = in magnitude, algebriac
        SM = smallest in magnitude. LA = largest algebraic.
    max_steps : int
        maximum number of arnoldi updates
    tol : float
        relative accuracy of eigenvalues / stopping criterion
    num_lanczos_vectors : int
        number of lanczos vectors to compute. if None, > 2*num_eigenthings
        for stability.
    init_vec: [torch.Tensor, np.ndarray]
        if None, use random vector. this is the init vec for arnoldi updates.
    use_gpu: bool
        if true, use cuda tensors.
    fp16: bool
        if true, keep operator input/output in fp16 instead of fp32.

    Returns
    ----------------
    eigenvalues : np.ndarray
        array containing `num_eigenthings` eigenvalues of the operator
    eigenvectors : np.ndarray
        array containing `num_eigenthings` eigenvectors of the operator
    """
    if isinstance(operator.size, int):
        size = operator.size
    else:
        size = operator.size[0]
    shape = (size, size)

    if num_lanczos_vectors is None:
        num_lanczos_vectors = min(2 * num_eigenthings, size - 1)
    if num_lanczos_vectors < 2 * num_eigenthings:
        warn(
            "[lanczos] number of lanczos vectors should usually be > 2*num_eigenthings"
        )

    def _scipy_apply(x):
        # ARPACK hands us numpy vectors; route them through the torch operator.
        x = torch.from_numpy(x)
        x = utils.maybe_fp16(x, fp16)
        if use_gpu:
            x = x.cuda()
        out = operator.apply(x)
        out = utils.maybe_fp16(out, fp16)
        out = out.cpu().numpy()
        return out

    scipy_op = ScipyLinearOperator(shape, _scipy_apply)
    if init_vec is None:
        init_vec = np.random.rand(size)
    elif isinstance(init_vec, torch.Tensor):
        # eigsh expects a numpy starting vector (docstring advertises tensors).
        init_vec = init_vec.cpu().numpy()
    eigenvals, eigenvecs = linalg.eigsh(
        A=scipy_op,
        k=num_eigenthings,
        which=which,
        maxiter=max_steps,
        tol=tol,
        ncv=num_lanczos_vectors,
        return_eigenvectors=True,
        # BUG FIX: init_vec was computed but never handed to ARPACK, so the
        # parameter silently had no effect.
        v0=init_vec,
    )
    # eigsh returns eigenvectors as columns; transpose to one vector per row.
    return eigenvals, eigenvecs.T
| 3,048 | 29.49 | 87 | py |
pytorch-hessian-eigenthings | pytorch-hessian-eigenthings-master/hessian_eigenthings/hvp_operator.py | """
This module defines a linear operator to compute the hessian-vector product
for a given pytorch model using subsampled data.
"""
from typing import Callable
import torch
import torch.nn as nn
import torch.utils.data as data
import hessian_eigenthings.utils as utils
from hessian_eigenthings.operator import Operator
class HVPOperator(Operator):
    """
    Use PyTorch autograd for Hessian Vec product calculation
    model: PyTorch network to compute hessian for
    dataloader: pytorch dataloader that we get examples from to compute grads
    criterion: Loss function to descend (e.g. F.cross_entropy)
    use_gpu: use cuda or not
    fp16: if true, accumulate gradients/HVPs in half precision
    full_dataset: if true, average the HVP over every batch in the dataloader
    max_possible_gpu_samples: max number of examples per batch using all GPUs.
    """

    def __init__(
        self,
        model: nn.Module,
        dataloader: data.DataLoader,
        criterion: Callable[[torch.Tensor], torch.Tensor],
        use_gpu: bool = True,
        fp16: bool = False,
        full_dataset: bool = True,
        max_possible_gpu_samples: int = 256,
    ):
        # Operator dimension == total number of model parameters.
        size = int(sum(p.numel() for p in model.parameters()))
        super(HVPOperator, self).__init__(size)
        self.grad_vec = torch.zeros(size)
        self.model = model
        if use_gpu:
            self.model = self.model.cuda()
        self.dataloader = dataloader
        # Make a copy since we will go over it a bunch
        self.dataloader_iter = iter(dataloader)
        self.criterion = criterion
        self.use_gpu = use_gpu
        self.fp16 = fp16
        self.full_dataset = full_dataset
        self.max_possible_gpu_samples = max_possible_gpu_samples

        if not hasattr(self.dataloader, '__len__') and self.full_dataset:
            raise ValueError("For full-dataset averaging, dataloader must have '__len__'")

    def apply(self, vec: torch.Tensor):
        """
        Returns H*vec where H is the hessian of the loss w.r.t.
        the vectorized model parameters
        """
        if self.full_dataset:
            return self._apply_full(vec)
        else:
            return self._apply_batch(vec)

    def _apply_batch(self, vec: torch.Tensor) -> torch.Tensor:
        """
        Computes the Hessian-vector product for a mini-batch from the dataset.
        """
        # compute original gradient, tracking computation graph
        self._zero_grad()
        grad_vec = self._prepare_grad()
        self._zero_grad()
        # take the second gradient
        # this is the derivative of <grad_vec, v> where <,> is an inner product.
        hessian_vec_prod_dict = torch.autograd.grad(
            grad_vec, self.model.parameters(), grad_outputs=vec, only_inputs=True
        )
        # concatenate the results over the different components of the network
        hessian_vec_prod = torch.cat([g.contiguous().view(-1) for g in hessian_vec_prod_dict])
        hessian_vec_prod = utils.maybe_fp16(hessian_vec_prod, self.fp16)
        return hessian_vec_prod

    def _apply_full(self, vec: torch.Tensor) -> torch.Tensor:
        """
        Computes the Hessian-vector product averaged over all batches in the dataset.
        """
        n = len(self.dataloader)
        hessian_vec_prod = None
        for _ in range(n):
            if hessian_vec_prod is not None:
                hessian_vec_prod += self._apply_batch(vec)
            else:
                hessian_vec_prod = self._apply_batch(vec)
        hessian_vec_prod = hessian_vec_prod / n
        return hessian_vec_prod

    def _zero_grad(self):
        """
        Zeros out the gradient info for each parameter in the model
        """
        for p in self.model.parameters():
            if p.grad is not None:
                p.grad.data.zero_()

    def _prepare_grad(self) -> torch.Tensor:
        """
        Compute gradient w.r.t loss over all parameters and vectorize
        """
        try:
            all_inputs, all_targets = next(self.dataloader_iter)
        except StopIteration:
            # Wrap around: restart the epoch when the iterator is exhausted.
            self.dataloader_iter = iter(self.dataloader)
            all_inputs, all_targets = next(self.dataloader_iter)

        # BUG FIX: use ceiling division so every micro-batch holds at most
        # `max_possible_gpu_samples` examples. The previous floor division
        # could produce a single chunk larger than the limit (e.g. 300
        # samples with a limit of 256 gave 300 // 256 == 1 chunk of 300).
        num_chunks = max(
            1,
            (len(all_inputs) + self.max_possible_gpu_samples - 1)
            // self.max_possible_gpu_samples,
        )

        grad_vec = None

        # This will do the "gradient chunking trick" to create micro-batches
        # when the batch size is larger than what will fit in memory.
        # WARNING: this may interact poorly with batch normalization.

        input_microbatches = all_inputs.chunk(num_chunks)
        target_microbatches = all_targets.chunk(num_chunks)
        for inputs, targets in zip(input_microbatches, target_microbatches):
            if self.use_gpu:
                inputs = inputs.cuda()
                targets = targets.cuda()

            output = self.model(inputs)
            loss = self.criterion(output, targets)
            grad_dict = torch.autograd.grad(
                loss, self.model.parameters(), create_graph=True
            )
            if grad_vec is not None:
                grad_vec += torch.cat([g.contiguous().view(-1) for g in grad_dict])
            else:
                grad_vec = torch.cat([g.contiguous().view(-1) for g in grad_dict])
            grad_vec = utils.maybe_fp16(grad_vec, self.fp16)
        # Average over micro-batches so chunking doesn't change the scale.
        grad_vec /= num_chunks
        self.grad_vec = grad_vec
        return self.grad_vec
| 5,275 | 35.136986 | 94 | py |
pytorch-hessian-eigenthings | pytorch-hessian-eigenthings-master/hessian_eigenthings/power_iter.py | """
This module contains functions to perform power iteration with deflation
to compute the top eigenvalues and eigenvectors of a linear operator
"""
from typing import Tuple
import numpy as np
import torch
from hessian_eigenthings.operator import Operator, LambdaOperator
import hessian_eigenthings.utils as utils
def deflated_power_iteration(
    operator: Operator,
    num_eigenthings: int = 10,
    power_iter_steps: int = 20,
    power_iter_err_threshold: float = 1e-4,
    momentum: float = 0.0,
    use_gpu: bool = True,
    fp16: bool = False,
    to_numpy: bool = True,
) -> Tuple[np.ndarray, np.ndarray]:
    """
    Compute the top-k eigenpairs of `operator` via power iteration with
    sequential deflation: once a dominant eigenpair is found, its rank-one
    component is subtracted from the operator before the next solve.

    operator: linear operator that gives us access to matrix vector product
    num_eigenthings: number of eigenvalue/eigenvector pairs to compute
    power_iter_steps: number of steps per run of power iteration
    power_iter_err_threshold: early stopping threshold for power iteration
    returns: np.ndarray of top eigenvalues, np.ndarray of top eigenvectors,
             both sorted in descending eigenvalue order.
    """
    found_vals = []
    found_vecs = []
    deflated_op = operator
    warm_start = None

    utils.log("beginning deflated power iteration")
    for rank in range(num_eigenthings):
        utils.log("computing eigenvalue/vector %d of %d" % (rank + 1, num_eigenthings))
        eigenval, eigenvec = power_iteration(
            deflated_op,
            power_iter_steps,
            power_iter_err_threshold,
            momentum=momentum,
            use_gpu=use_gpu,
            fp16=fp16,
            init_vec=warm_start,
        )
        utils.log("eigenvalue %d: %.4f" % (rank + 1, eigenval))

        # Remove the newly-found rank-one component so the next run of power
        # iteration converges to the next-largest eigenpair. Defaults bind the
        # current op/val/vec so each closure deflates its own level.
        def _deflated_apply(x, op=deflated_op, val=eigenval, vec=eigenvec):
            return utils.maybe_fp16(op.apply(x), fp16) - val * vec.dot(x) * vec

        deflated_op = LambdaOperator(_deflated_apply, operator.size)
        warm_start = eigenvec
        found_vals.append(eigenval)
        eigenvec = eigenvec.cpu()
        if to_numpy:
            # Clone so that power_iteration can continue to use torch.
            found_vecs.append(eigenvec.detach().clone().numpy())
        else:
            found_vecs.append(eigenvec)

    found_vals = np.array(found_vals)
    found_vecs = np.array(found_vecs)

    # Sort descending by eigenvalue.
    order = np.argsort(found_vals)
    return found_vals[order][::-1], found_vecs[order][::-1]
def power_iteration(
    operator: Operator,
    steps: int = 20,
    error_threshold: float = 1e-4,
    momentum: float = 0.0,
    use_gpu: bool = True,
    fp16: bool = False,
    init_vec: torch.Tensor = None,
) -> Tuple[float, torch.Tensor]:
    """
    Estimate the dominant eigenpair of `operator` via power iteration.

    Repeats v <- A v - momentum * v_hat while tracking the Rayleigh-quotient
    style estimate v . (A v); stops early once the relative change of that
    estimate drops below `error_threshold`.

    operator: linear Operator giving us matrix-vector product access
    steps: number of update steps to take
    returns: (principal eigenvalue, principal eigenvector) pair
    """
    dim = operator.size  # input dimension of operator
    vec = torch.rand(dim) if init_vec is None else init_vec
    vec = utils.maybe_fp16(vec, fp16)
    if use_gpu:
        vec = vec.cuda()

    last_estimate = 0.0
    unit_vec = utils.maybe_fp16(torch.randn_like(vec), fp16)
    for step in range(steps):
        unit_vec = vec / (torch.norm(vec) + 1e-6)
        updated = utils.maybe_fp16(operator.apply(vec), fp16) - momentum * unit_vec
        # If we ended up in the nullspace of the operator, we are done.
        if torch.norm(updated).item() == 0.0:
            return 0.0, updated
        estimate = vec.dot(updated).item()
        delta = estimate - last_estimate
        vec = updated.detach() / torch.norm(updated)
        # An exactly-zero estimate (low-rank case) counts as maximal error.
        error = 1.0 if estimate == 0.0 else np.abs(delta / estimate)
        utils.progress_bar(step, steps, "power iter error: %.4f" % error)
        if error < error_threshold:
            break
        last_estimate = estimate
    return estimate, vec
| 4,190 | 32.528 | 84 | py |
pytorch-hessian-eigenthings | pytorch-hessian-eigenthings-master/hessian_eigenthings/__init__.py | """ Top-level module for hessian eigenvec computation """
from hessian_eigenthings.power_iter import power_iteration, deflated_power_iteration
from hessian_eigenthings.lanczos import lanczos
from hessian_eigenthings.hvp_operator import HVPOperator
name = "hessian_eigenthings"
def compute_hessian_eigenthings(
    model,
    dataloader,
    loss,
    num_eigenthings=10,
    full_dataset=True,
    mode="power_iter",
    use_gpu=True,
    fp16=False,
    max_possible_gpu_samples=2 ** 16,
    **kwargs
):
    """
    Compute the top `num_eigenthings` eigenvalue/eigenvector pairs of the
    Hessian of `loss` for `model`, using Hessian-vector products evaluated
    on batches drawn from `dataloader`, via either deflated power iteration
    or Lanczos.

    Parameters
    ---------------
    model : Module
        pytorch model for this netowrk
    dataloader : torch.data.DataLoader
        dataloader with x,y pairs for which we compute the loss.
    loss : torch.nn.modules.Loss | torch.nn.functional criterion
        loss function to differentiate through
    num_eigenthings : int
        number of eigenvalues/eigenvecs to compute, in order of decreasing
        eigenvalue magnitude.
    full_dataset : boolean
        if true, each power iteration call evaluates the gradient over the
        whole dataset.
    mode : str ['power_iter', 'lanczos']
        which backend algorithm to use to compute the top eigenvalues.
    use_gpu:
        if true, attempt to use cuda for all lin alg computations
    fp16: bool
        if true, store and do math with eigenvectors, gradients, etc. in fp16.
    max_possible_gpu_samples:
        the maximum number of samples that can fit on-memory; used to
        accumulate gradients for large batches.
    **kwargs:
        additional parameters passed onto lanczos or power_iter.
    """
    hvp_operator = HVPOperator(
        model,
        dataloader,
        loss,
        use_gpu=use_gpu,
        full_dataset=full_dataset,
        max_possible_gpu_samples=max_possible_gpu_samples,
    )
    # Dispatch to the selected backend; both share the same call signature.
    if mode == "power_iter":
        solver = deflated_power_iteration
    elif mode == "lanczos":
        solver = lanczos
    else:
        raise ValueError("Unsupported mode %s (must be power_iter or lanczos)" % mode)
    return solver(
        hvp_operator, num_eigenthings, use_gpu=use_gpu, fp16=fp16, **kwargs
    )
__all__ = [
"power_iteration",
"deflated_power_iteration",
"lanczos",
"HVPOperator",
"compute_hessian_eigenthings",
]
| 2,968 | 33.126437 | 86 | py |
pytorch-hessian-eigenthings | pytorch-hessian-eigenthings-master/example/main.py | """
A simple example to calculate the top eigenvectors for the hessian of
ResNet18 network for CIFAR-10
"""
import track
import skeletor
from skeletor.datasets import build_dataset
from skeletor.models import build_model
import torch
from hessian_eigenthings import compute_hessian_eigenthings
def extra_args(parser):
    """Register the example's command-line options on `parser`."""
    add = parser.add_argument
    add("--num_eigenthings", default=5, type=int,
        help="number of eigenvals/vecs to compute")
    add("--batch_size", default=128, type=int, help="train set batch size")
    add("--eval_batch_size", default=16, type=int, help="test set batch size")
    add("--momentum", default=0.0, type=float,
        help="power iteration momentum term")
    add("--num_steps", default=50, type=int,
        help="number of power iter steps")
    add("--max_samples", default=2048, type=int)
    add("--cuda", action="store_true", help="if true, use CUDA/GPUs")
    add("--full_dataset", action="store_true",
        help="if true,\
            loop over all batches in set for each gradient step")
    add("--fname", default="", type=str)
    add("--mode", type=str, choices=["power_iter", "lanczos"])
def main(args):
    """Compute and print the top Hessian eigenpairs of a CIFAR-10 classifier."""
    _trainloader, testloader = build_dataset(
        "cifar10",
        dataroot=args.dataroot,
        batch_size=args.batch_size,
        eval_batch_size=args.eval_batch_size,
        num_workers=2,
    )
    # Either resume a saved model or build a fresh ResNet18.
    if args.fname:
        print("Loading model from %s" % args.fname)
        model = torch.load(args.fname, map_location="cpu").cuda()
    else:
        model = build_model("ResNet18", num_classes=10)
    criterion = torch.nn.CrossEntropyLoss()
    eigenvals, eigenvecs = compute_hessian_eigenthings(
        model,
        testloader,
        criterion,
        args.num_eigenthings,
        mode=args.mode,
        # power_iter_steps=args.num_steps,
        max_possible_gpu_samples=args.max_samples,
        # momentum=args.momentum,
        full_dataset=args.full_dataset,
        use_gpu=args.cuda,
    )
    for header, values in (("Eigenvecs:", eigenvecs), ("Eigenvals:", eigenvals)):
        print(header)
        print(values)
    # track.metric(iteration=0, eigenvals=eigenvals)
# track.metric(iteration=0, eigenvals=eigenvals)
if __name__ == "__main__":
    # Register CLI flags with skeletor, then hand control to `main`.
    skeletor.supply_args(extra_args)
    skeletor.execute(main)
| 2,460 | 28.650602 | 85 | py |
TSCC2019 | TSCC2019-master/dqn_agent.py | import random
import numpy as np
from collections import deque
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
class DQNAgent:
    """DQN agent with a target network and epsilon-greedy exploration.

    `config` must provide 'state_size', 'action_size' and 'lane_phase_info'
    (a mapping whose first intersection supplies the traffic-light phase list).
    """

    def __init__(self, config):
        self.state_size = config['state_size']
        self.action_size = config['action_size']
        self.memory = deque(maxlen=2000)  # experience-replay buffer
        self.gamma = 0.95    # discount rate
        self.epsilon = 1.0   # exploration rate
        self.epsilon_min = 0.01
        self.epsilon_decay = 0.995
        self.learning_rate = 0.001
        self.update_target_freq = 5
        self.batch_size = 30
        self.model = self._build_model()
        self.target_model = self._build_model()
        self.update_target_network()

        intersection_id = list(config['lane_phase_info'].keys())[0]
        self.phase_list = config['lane_phase_info'][intersection_id]['phase']

    def _build_model(self):
        # Neural Net for Deep-Q learning Model: two hidden layers mapping a
        # state to one Q-value per action.
        model = Sequential()
        model.add(Dense(40, input_dim=self.state_size, activation='relu'))
        model.add(Dense(40, activation='relu'))
        model.add(Dense(self.action_size, activation='linear'))
        model.compile(loss='mse',
                      optimizer=Adam(lr=self.learning_rate))
        return model

    def update_target_network(self):
        """Copy the online network's weights into the target network."""
        weights = self.model.get_weights()
        self.target_model.set_weights(weights)

    def remember(self, state, action, reward, next_state):
        """Store a transition; `action` is a phase id, stored as its index."""
        action = self.phase_list.index(action)
        self.memory.append((state, action, reward, next_state))

    def choose_action(self, state):
        """Epsilon-greedy selection; returns an action *index*."""
        if np.random.rand() <= self.epsilon:
            return random.randrange(self.action_size)
        act_values = self.model.predict(state)
        return np.argmax(act_values[0])  # returns action

    def replay(self):
        """Fit the online network on a random minibatch of transitions.

        BUG FIX: `random.sample` raises ValueError when the buffer holds
        fewer than `batch_size` transitions, so training is skipped until
        enough experience has been collected.
        """
        if len(self.memory) < self.batch_size:
            return
        minibatch = random.sample(self.memory, self.batch_size)
        for state, action, reward, next_state in minibatch:
            # Bootstrapped Q-target uses the slow-moving target network.
            target = (reward + self.gamma *
                      np.amax(self.target_model.predict(next_state)[0]))
            target_f = self.model.predict(state)
            target_f[0][action] = target
            self.model.fit(state, target_f, epochs=1, verbose=0)
        # Anneal exploration after each training round.
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay

    def load(self, name):
        """Load network weights from `name`."""
        self.model.load_weights(name)

    def save(self, name):
        """Save network weights to `name`."""
        self.model.save_weights(name)
TTE | TTE-main/main.py | import sys
import torch
import random
import argparse
import numpy as np
import os.path as osp
import torch.backends.cudnn as cudnn
from utils.utils import (AugWrapper, get_model, print_to_log, eval_chunk,
eval_files)
# For deterministic behavior
cudnn.benchmark = False
cudnn.deterministic = True
def set_seed(device, seed=111):
    """Seed the Python, NumPy and PyTorch RNGs for reproducible runs.

    Note: the CPU-side torch generator is seeded with the fixed value 7122
    regardless of `seed`; only the Python/NumPy generators (and the CUDA
    generators when `device == 'cuda'`) follow `seed`.
    """
    for seeder in (random.seed, np.random.seed):
        seeder(seed)
    torch.manual_seed(7122)
    if device == 'cuda':
        torch.cuda.manual_seed_all(seed)
def main(args):
    """Run the test-time-ensembling evaluation described by `args`."""
    # Setup
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    set_seed(device, args.seed)

    # Model
    model = get_model(args.experiment)

    # Parameters for Gaussian (if any): only used when both are provided.
    have_gauss = (args.gauss_k is not None) and (args.gauss_s is not None)
    gauss_ps = (args.gauss_k, args.gauss_s) if have_gauss else None
    model_aug = AugWrapper(model, args.flip, args.n_crops, args.flip_crop,
                           gauss_ps).to(device)

    # Print augmentations
    print_to_log(','.join(model_aug.total_augs), args.info_log)
    # de-facto GPU usage will be increased by num of transforms!
    batch_size = int(args.batch_size / (1 + len(model_aug.total_augs)))

    # Data
    if args.num_chunk is None:  # evaluate sequentially
        log_files = [
            eval_chunk(model_aug, args.dataset, batch_size,
                       args.chunks, num_chunk, device, args)
            for num_chunk in range(1, args.chunks + 1)
        ]
        eval_files(log_files, args.final_results)
    else:  # evaluate a single chunk and exit
        log_file = eval_chunk(model_aug, args.dataset, batch_size, args.chunks,
                              args.num_chunk, device, args)
        sys.exit()
if __name__ == "__main__":
    from utils.opts import parse_settings
    args = parse_settings()
    if args.eval_files:
        # Aggregation-only mode: collect existing per-chunk result logs
        # and merge them into the final results file.
        from glob import glob
        log_files = glob(osp.join(args.logs_dir,
                                  'results_chunk*of*_*to*.txt'))
        eval_files(log_files, args.final_results)
        sys.exit()
    else:
        # Normal mode: run the full evaluation pipeline.
        main(args)
| 2,176 | 28.026667 | 80 | py |
TTE | TTE-main/experiments/gowal.py | # Copyright 2020 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WideResNet implementation in PyTorch."""
from typing import Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.utils import NormalizedWrapper
CIFAR10_MEAN = (0.4914, 0.4822, 0.4465)
CIFAR10_STD = (0.2471, 0.2435, 0.2616)
CIFAR100_MEAN = (0.5071, 0.4865, 0.4409)
CIFAR100_STD = (0.2673, 0.2564, 0.2762)
class _Swish(torch.autograd.Function):
"""Custom implementation of swish."""
@staticmethod
def forward(ctx, i):
result = i * torch.sigmoid(i)
ctx.save_for_backward(i)
return result
@staticmethod
def backward(ctx, grad_output):
i = ctx.saved_variables[0]
sigmoid_i = torch.sigmoid(i)
return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))
class Swish(nn.Module):
    """Module wrapper around the custom `_Swish` autograd function."""

    def forward(self, input_tensor):
        # Delegate to the Function so the hand-written backward is used.
        return _Swish.apply(input_tensor)
class _Block(nn.Module):
    """WideResNet pre-activation block (BN -> activation -> conv, twice)."""

    def __init__(self, in_planes, out_planes, stride, activation_fn=nn.ReLU):
        super().__init__()
        self.batchnorm_0 = nn.BatchNorm2d(in_planes)
        self.relu_0 = activation_fn()
        # We manually pad to obtain the same effect as `SAME` (necessary when
        # `stride` is different than 1).
        self.conv_0 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                                padding=0, bias=False)
        self.batchnorm_1 = nn.BatchNorm2d(out_planes)
        self.relu_1 = activation_fn()
        self.conv_1 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
                                padding=1, bias=False)
        # A 1x1 projection shortcut is needed only when channels change.
        self.has_shortcut = in_planes != out_planes
        if self.has_shortcut:
            self.shortcut = nn.Conv2d(in_planes, out_planes, kernel_size=1,
                                      stride=stride, padding=0, bias=False)
        else:
            self.shortcut = None
        self._stride = stride

    def forward(self, x):
        if self.has_shortcut:
            # Shared pre-activation feeds both the residual branch and the
            # projection shortcut, so `x` is overwritten here.
            x = self.relu_0(self.batchnorm_0(x))
        else:
            out = self.relu_0(self.batchnorm_0(x))
        v = x if self.has_shortcut else out
        # Manual padding emulating TF-style `SAME` for the strided conv:
        # symmetric for stride 1, asymmetric (right/bottom only) for stride 2.
        if self._stride == 1:
            v = F.pad(v, (1, 1, 1, 1))
        elif self._stride == 2:
            v = F.pad(v, (0, 1, 0, 1))
        else:
            raise ValueError('Unsupported `stride`.')
        out = self.conv_0(v)
        out = self.relu_1(self.batchnorm_1(out))
        out = self.conv_1(out)
        out = torch.add(self.shortcut(x) if self.has_shortcut else x, out)
        return out
class _BlockGroup(nn.Module):
    """Stack of `num_blocks` WideResNet blocks; only the first may downsample."""

    def __init__(self, num_blocks, in_planes, out_planes, stride,
                 activation_fn=nn.ReLU):
        super().__init__()
        blocks = []
        for idx in range(num_blocks):
            first = idx == 0
            blocks.append(
                _Block(in_planes if first else out_planes,
                       out_planes,
                       stride if first else 1,
                       activation_fn=activation_fn))
        self.block = nn.Sequential(*blocks)

    def forward(self, x):
        return self.block(x)
class WideResNet(nn.Module):
    """WideResNet with input normalization applied inside `forward`."""

    def __init__(self,
                 num_classes: int = 10,
                 depth: int = 28,
                 width: int = 10,
                 activation_fn: nn.Module = nn.ReLU,
                 mean: Union[Tuple[float, ...], float] = CIFAR10_MEAN,
                 std: Union[Tuple[float, ...], float] = CIFAR10_STD,
                 padding: int = 0,
                 num_input_channels: int = 3):
        super().__init__()
        # Normalization constants are applied in `forward`, so callers feed
        # un-normalized images.
        self.mean = torch.tensor(mean).view(num_input_channels, 1, 1)
        self.std = torch.tensor(std).view(num_input_channels, 1, 1)
        # Lazily-created CUDA copies of the normalization constants.
        self.mean_cuda = None
        self.std_cuda = None
        self.padding = padding
        num_channels = [16, 16 * width, 32 * width, 64 * width]
        # A WideResNet of depth d has (d - 4) / 6 blocks per group.
        assert (depth - 4) % 6 == 0
        num_blocks = (depth - 4) // 6
        self.init_conv = nn.Conv2d(num_input_channels, num_channels[0],
                                   kernel_size=3, stride=1, padding=1, bias=False)
        self.layer = nn.Sequential(
            _BlockGroup(num_blocks, num_channels[0], num_channels[1], 1,
                        activation_fn=activation_fn),
            _BlockGroup(num_blocks, num_channels[1], num_channels[2], 2,
                        activation_fn=activation_fn),
            _BlockGroup(num_blocks, num_channels[2], num_channels[3], 2,
                        activation_fn=activation_fn))
        self.batchnorm = nn.BatchNorm2d(num_channels[3])
        self.relu = activation_fn()
        self.logits = nn.Linear(num_channels[3], num_classes)
        self.num_channels = num_channels[3]

    def forward(self, x):
        if self.padding > 0:
            x = F.pad(x, (self.padding,) * 4)
        if x.is_cuda:
            if self.mean_cuda is None:
                # Cache GPU copies on the first CUDA forward pass.
                self.mean_cuda = self.mean.cuda()
                self.std_cuda = self.std.cuda()
            out = (x - self.mean_cuda) / self.std_cuda
        else:
            out = (x - self.mean) / self.std
        out = self.init_conv(out)
        out = self.layer(out)
        out = self.relu(self.batchnorm(out))
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.num_channels)
        return self.logits(out)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def get_model(experiment):
    """Build a WRN-70-16 for CIFAR-10/100 (chosen from `experiment`),
    load the matching Linf-robust checkpoint, and wrap it without any
    extra input normalization (the network normalizes internally)."""
    if '100' in experiment:
        weights_path = './weights/cifar100_linf_wrn70-16_with.pt'
        num_classes, mean, std = 100, CIFAR100_MEAN, CIFAR100_STD
    else:
        weights_path = './weights/cifar10_linf_wrn70-16_with.pt'
        num_classes, mean, std = 10, CIFAR10_MEAN, CIFAR10_STD

    net = WideResNet(num_classes=num_classes, depth=70, width=16,
                     activation_fn=Swish, mean=mean, std=std)
    net.load_state_dict(torch.load(weights_path))
    # no normalization in the wrapper!
    return NormalizedWrapper(net, mean=None, std=None)
| 6,440 | 32.201031 | 80 | py |
TTE | TTE-main/experiments/unlabeled_pretraining.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.utils import NormalizedWrapper
"""Based on code from https://github.com/yaodongyu/TRADES"""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """Pre-activation WideResNet basic block (BN -> ReLU -> 3x3 conv, twice)."""

    def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.droprate = dropRate
        self.equalInOut = (in_planes == out_planes)
        # 1x1 projection shortcut, created only when the channel count changes
        # (the `and/or` idiom yields the conv when not equalInOut, else None).
        self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                                                                padding=0, bias=False) or None

    def forward(self, x):
        if not self.equalInOut:
            # Shared pre-activation feeds both branches when projecting.
            x = self.relu1(self.bn1(x))
        else:
            out = self.relu1(self.bn1(x))
        out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        return torch.add(x if self.equalInOut else self.convShortcut(x), out)
class NetworkBlock(nn.Module):
    """Sequential container of `nb_layers` residual blocks.

    Only the first block receives `in_planes` and `stride`; the remaining
    blocks map `out_planes` -> `out_planes` with stride 1.
    """

    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)

    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
        blocks = [
            block(in_planes if idx == 0 else out_planes,
                  out_planes,
                  stride if idx == 0 else 1,
                  dropRate)
            for idx in range(int(nb_layers))
        ]
        return nn.Sequential(*blocks)

    def forward(self, x):
        return self.layer(x)
class WideResNet(nn.Module):
    """WideResNet-`depth`-`widen_factor` (default 28-10) for 32x32 inputs."""

    def __init__(self, depth=28, num_classes=10, widen_factor=10, dropRate=0.0):
        super(WideResNet, self).__init__()
        nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
        assert ((depth - 4) % 6 == 0)
        n = (depth - 4) / 6
        block = BasicBlock
        # 1st conv before any network block
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
                               padding=1, bias=False)
        # 1st block
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        # 1st sub-block
        # NOTE(review): `sub_block1` is never used in `forward`; presumably it
        # exists to keep the state-dict key/parameter ordering compatible with
        # published checkpoints (cf. the positional copy in
        # `copy_pretrained_model`) — confirm before removing.
        self.sub_block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        # 2nd block
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
        # 3rd block
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]
        # He-style init for convs; BN scale/bias to 1/0; linear bias to 0.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def forward(self, x, return_prelogit=False):
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels)
        if return_prelogit:
            # Also expose the pooled features (pre-logits) for downstream use.
            return self.fc(out), out
        else:
            return self.fc(out)
def copy_pretrained_model(model, path_to_copy_from):
    """Copy checkpoint weights into `model` positionally and return it.

    The checkpoint may be a raw state dict or wrap one under a 'state_dict'
    key. Tensors are matched by *position*, not by name, so the checkpoint
    must list its parameters in exactly the same order as
    model.state_dict() — TODO confirm for any new checkpoint format.

    Args:
        model: torch.nn.Module whose parameters/buffers are overwritten in place.
        path_to_copy_from: path to a torch.save()'d checkpoint.
    Returns:
        The same `model` instance, with weights loaded.
    Raises:
        IndexError: if the checkpoint holds fewer tensors than the model.
    """
    # Load onto CPU: copy_() moves data to each parameter's own device, and
    # the previous hard-coded map_location='cuda' crashed on CPU-only hosts.
    checkpoint = torch.load(path_to_copy_from, map_location='cpu')
    if 'state_dict' in checkpoint.keys():
        checkpoint = checkpoint['state_dict']
    source_tensors = list(checkpoint.values())
    # Fetch the target state dict once instead of once per key.
    target_state = model.state_dict()
    for count, key in enumerate(target_state.keys()):
        target_state[key].copy_(source_tensors[count].data)
    print('Pretrained model is loaded successfully')
    return model
def get_imagenet_pretrained_model():
    """Build a WideResNet, load the RST-adv checkpoint, and wrap it with
    CIFAR-10 per-channel input normalization."""
    weights_path = './weights/rst_adv.pt.ckpt'
    backbone = copy_pretrained_model(WideResNet(), weights_path)
    return NormalizedWrapper(
        backbone,
        mean=[0.4914, 0.4822, 0.4465],
        std=[0.2470, 0.2435, 0.2616],
    )
| 4,907 | 38.264 | 116 | py |
TTE | TTE-main/experiments/mart.py | # Taken from MART repo https://github.com/YisenWang/MART/blob/master/wideresnet.py
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.utils import NormalizedWrapper
class BasicBlock(nn.Module):
    """Pre-activation WideResNet basic block: (BN-ReLU-Conv3x3) twice plus a
    residual shortcut; the shortcut is a 1x1 conv when shape/stride changes."""
    def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.droprate = dropRate
        self.equalInOut = (in_planes == out_planes)
        # `and/or` idiom: 1x1 projection shortcut only when shapes differ, else None.
        self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                                                                padding=0, bias=False) or None
    def forward(self, x):
        if not self.equalInOut:
            # Shapes differ: re-bind x to the activated input so the
            # projection shortcut below also sees BN+ReLU.
            x = self.relu1(self.bn1(x))
        else:
            out = self.relu1(self.bn1(x))
        out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        # Identity shortcut when shapes match, 1x1 projection otherwise.
        return torch.add(x if self.equalInOut else self.convShortcut(x), out)
class NetworkBlock(nn.Module):
    """One WideResNet stage: `nb_layers` residual blocks chained in sequence.

    Only the first block may change channel count and stride; every later
    block maps out_planes -> out_planes at stride 1.

    Args:
        nb_layers: number of blocks in the stage (floats are truncated).
        in_planes: channels entering the stage.
        out_planes: channels produced by every block of the stage.
        block: block factory, called as block(in, out, stride, dropRate).
        stride: stride of the first block (the stage's downsampling factor).
        dropRate: dropout rate forwarded to every block.
    """
    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)
    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
        layers = []
        for i in range(int(nb_layers)):
            # Explicit conditionals replace the fragile `cond and a or b`
            # idiom, which silently picks `b` whenever `a` is falsy.
            layers.append(block(in_planes if i == 0 else out_planes,
                                out_planes,
                                stride if i == 0 else 1,
                                dropRate))
        return nn.Sequential(*layers)
    def forward(self, x):
        return self.layer(x)
class WideResNet(nn.Module):
    """WideResNet classifier for 32x32 inputs (default: WRN-28-10, 10 classes)."""
    def __init__(self, depth=28, num_classes=10, widen_factor=10, dropRate=0.0):
        super(WideResNet, self).__init__()
        nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
        # depth must decompose into 3 stages of n blocks with 2 convs each.
        assert ((depth - 4) % 6 == 0)
        n = (depth - 4) / 6
        block = BasicBlock
        # 1st conv before any network block
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
                               padding=1, bias=False)
        # 1st block
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        # 1st sub-block
        # NOTE: sub_block1 is never used in forward(); it only exists so that
        # checkpoints containing 'sub_block1.*' keys load cleanly.
        self.sub_block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        # 2nd block
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
        # 3rd block
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]
        # He-style initialization; note that `n` is re-bound here, shadowing
        # the per-stage block count computed above.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()
    def forward(self, x):
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        # Pooling kernel 8 assumes 8x8 feature maps here, i.e. 32x32 inputs.
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels)
        return self.fc(out)
def get_model():
    """Load the MART (unlabeled-data) checkpoint into a fresh WideResNet.

    The checkpoint comes from a DataParallel run, so 'module.' prefixes are
    stripped; 'num_batches_tracked' entries are dropped before loading. The
    model expects raw [0, 1] inputs, hence the identity wrapper.
    """
    weights_path = './weights/mart_unlabel.pt'
    raw = torch.load(weights_path, map_location='cpu')['state_dict']
    cleaned = {}
    for key, value in raw.items():
        if 'num_batches_tracked' in key:
            continue
        cleaned[key.replace('module.', '')] = value
    net = WideResNet()
    net.load_state_dict(cleaned)
    # identity wrapper: no input normalization
    return NormalizedWrapper(net, mean=None, std=None)
| 4,554 | 41.175926 | 116 | py |
TTE | TTE-main/experiments/hydra.py | ## Make sure to first download the model_best_dense.pth.tar from https://www.dropbox.com/sh/56yyfy16elwbnr8/AADmr7bXgFkrNdoHjKWwIFKqa?dl=0
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.utils import NormalizedWrapper
class BasicBlock(nn.Module):
    """Pre-activation WideResNet basic block built from a pluggable conv layer:
    (BN-ReLU-conv3x3) twice plus a residual shortcut (1x1 conv when the
    channel count or stride changes)."""
    def __init__(self, conv_layer, in_planes, out_planes, stride, dropRate=0.0):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = conv_layer(
            in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False
        )
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = conv_layer(
            out_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False
        )
        self.droprate = dropRate
        self.equalInOut = in_planes == out_planes
        # `and/or` idiom: 1x1 projection shortcut only when shapes differ, else None.
        self.convShortcut = (
            (not self.equalInOut)
            and conv_layer(
                in_planes,
                out_planes,
                kernel_size=1,
                stride=stride,
                padding=0,
                bias=False,
            )
            or None
        )
    def forward(self, x):
        if not self.equalInOut:
            # Shapes differ: re-bind x so the projection shortcut also sees BN+ReLU.
            x = self.relu1(self.bn1(x))
        else:
            out = self.relu1(self.bn1(x))
        out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        # Identity shortcut when shapes match, 1x1 projection otherwise.
        return torch.add(x if self.equalInOut else self.convShortcut(x), out)
class NetworkBlock(nn.Module):
    """One WideResNet stage built with a pluggable conv layer: `nb_layers`
    residual blocks chained in sequence.

    Only the first block may change channel count and stride; every later
    block maps out_planes -> out_planes at stride 1.

    Args:
        nb_layers: number of blocks in the stage (floats are truncated).
        in_planes: channels entering the stage.
        out_planes: channels produced by every block of the stage.
        block: block factory, called as block(conv_layer, in, out, stride, dropRate).
        conv_layer: convolution class forwarded to every block.
        stride: stride of the first block (the stage's downsampling factor).
        dropRate: dropout rate forwarded to every block.
    """
    def __init__(
        self, nb_layers, in_planes, out_planes, block, conv_layer, stride, dropRate=0.0
    ):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(
            conv_layer, block, in_planes, out_planes, nb_layers, stride, dropRate
        )
    def _make_layer(
        self, conv_layer, block, in_planes, out_planes, nb_layers, stride, dropRate
    ):
        layers = []
        for i in range(int(nb_layers)):
            # Explicit conditionals replace the fragile `cond and a or b`
            # idiom, which silently picks `b` whenever `a` is falsy.
            layers.append(
                block(
                    conv_layer,
                    in_planes if i == 0 else out_planes,
                    out_planes,
                    stride if i == 0 else 1,
                    dropRate,
                )
            )
        return nn.Sequential(*layers)
    def forward(self, x):
        return self.layer(x)
class WideResNet(nn.Module):
    """WideResNet classifier for 32x32 inputs with pluggable conv/linear layer
    classes (used by HYDRA for pruning-aware layers)."""
    def __init__(
        self,
        conv_layer,
        linear_layer,
        depth=34,
        num_classes=10,
        widen_factor=10,
        dropRate=0.0,
    ):
        super(WideResNet, self).__init__()
        nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
        # depth must decompose into 3 stages of n blocks with 2 convs each.
        assert (depth - 4) % 6 == 0
        n = (depth - 4) / 6
        block = BasicBlock
        # 1st conv before any network block
        self.conv1 = conv_layer(
            3, nChannels[0], kernel_size=3, stride=1, padding=1, bias=False
        )
        # 1st block
        self.block1 = NetworkBlock(
            n, nChannels[0], nChannels[1], block, conv_layer, 1, dropRate
        )
        # 1st sub-block
        # NOTE: sub_block1 is never used in forward(); it only exists so that
        # checkpoints containing 'sub_block1.*' keys load cleanly.
        self.sub_block1 = NetworkBlock(
            n, nChannels[0], nChannels[1], block, conv_layer, 1, dropRate
        )
        # 2nd block
        self.block2 = NetworkBlock(
            n, nChannels[1], nChannels[2], block, conv_layer, 2, dropRate
        )
        # 3rd block
        self.block3 = NetworkBlock(
            n, nChannels[2], nChannels[3], block, conv_layer, 2, dropRate
        )
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = linear_layer(nChannels[3], num_classes)
        self.nChannels = nChannels[3]
        # He-style initialization; note that `n` is re-bound here, shadowing
        # the per-stage block count computed above.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, linear_layer):
                m.bias.data.zero_()
    def forward(self, x):
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        # Pooling kernel 8 assumes 8x8 feature maps here, i.e. 32x32 inputs.
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels)
        return self.fc(out)
def wrn_28_10(**kwargs):
    """Factory for a WideResNet-28-10 built from plain Conv2d/Linear layers."""
    net = WideResNet(nn.Conv2d, nn.Linear, depth=28, widen_factor=10, **kwargs)
    return net
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def get_model():
    """Load the HYDRA dense checkpoint into a fresh WRN-28-10.

    'module.' DataParallel prefixes are stripped from the checkpoint keys.
    The model expects raw [0, 1] inputs, hence the identity wrapper.
    """
    # https://github.com/yaodongyu/TRADES/blob/master/evaluate_attack_cifar10.py#L104
    weights_path = './weights/hydra_model_best_dense.pth'
    raw = torch.load(weights_path, map_location='cpu')['state_dict']
    cleaned = {}
    for key, value in raw.items():
        cleaned[key.replace('module.', '')] = value
    net = wrn_28_10()
    net.load_state_dict(cleaned)
    # identity wrapper: no input normalization
    return NormalizedWrapper(net, mean=None, std=None)
TTE | TTE-main/experiments/imagenet_pretraining.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.utils import NormalizedWrapper
class BasicBlock(nn.Module):
    """Pre-activation WideResNet basic block: (BN-ReLU-Conv3x3) twice plus a
    residual shortcut; the shortcut is a 1x1 conv when shape/stride changes."""
    def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.droprate = dropRate
        self.equalInOut = (in_planes == out_planes)
        # `and/or` idiom: 1x1 projection shortcut only when shapes differ, else None.
        self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                                                                padding=0, bias=False) or None
    def forward(self, x):
        if not self.equalInOut:
            # Shapes differ: re-bind x to the activated input so the
            # projection shortcut below also sees BN+ReLU.
            x = self.relu1(self.bn1(x))
        else:
            out = self.relu1(self.bn1(x))
        if self.equalInOut:
            out = self.relu2(self.bn2(self.conv1(out)))
        else:
            out = self.relu2(self.bn2(self.conv1(x)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        # Identity shortcut when shapes match, 1x1 projection otherwise.
        if not self.equalInOut:
            return torch.add(self.convShortcut(x), out)
        else:
            return torch.add(x, out)
class NetworkBlock(nn.Module):
    """One WideResNet stage: `nb_layers` residual blocks chained in sequence.

    Only the first block may change channel count and stride; every later
    block maps out_planes -> out_planes at stride 1.

    Args:
        nb_layers: number of blocks in the stage (floats are truncated).
        in_planes: channels entering the stage.
        out_planes: channels produced by every block of the stage.
        block: block factory, called as block(in, out, stride, dropRate).
        stride: stride of the first block (the stage's downsampling factor).
        dropRate: dropout rate forwarded to every block.
    """
    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)
    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
        layers = []
        # int() makes float layer counts (e.g. (depth - 4) / 6) safe, matching
        # the sibling implementations; this variant previously required an int.
        for i in range(int(nb_layers)):
            # Explicit conditionals replace the fragile `cond and a or b`
            # idiom, which silently picks `b` whenever `a` is falsy.
            layers.append(block(in_planes if i == 0 else out_planes,
                                out_planes,
                                stride if i == 0 else 1,
                                dropRate))
        return nn.Sequential(*layers)
    def forward(self, x):
        return self.layer(x)
class WideResNet(nn.Module):
    """WideResNet classifier for 32x32 inputs (default: WRN-34-10, 10 classes).

    Unlike the sibling implementations, this variant has no unused
    sub_block1 module and uses integer division for the stage depth.
    """
    def __init__(self, depth=34, num_classes=10, widen_factor=10, dropRate=0.0):
        super(WideResNet, self).__init__()
        nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
        # depth must decompose into 3 stages of n blocks with 2 convs each.
        assert ((depth - 4) % 6 == 0)
        n = (depth - 4) // 6
        block = BasicBlock
        # 1st conv before any network block
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
                               padding=1, bias=False)
        # 1st block
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        # 2nd block
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
        # 3rd block
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]
        # self.mean = torch.tensor([0.5, 0.5, 0.5]).view(1,3,1,1).to('cuda')
        # self.std = torch.tensor([0.5, 0.5, 0.5]).view(1,3,1,1).to('cuda')
        # He-style initialization; note that `n` is re-bound here, shadowing
        # the per-stage block count computed above.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()
    def forward(self, x):
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        # Pooling kernel 8 assumes 8x8 feature maps here, i.e. 32x32 inputs.
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels)
        return self.fc(out)
def get_imagenet_pretrained_model():
    """Load the ImageNet-pretrained CIFAR-10 checkpoint and wrap it with a
    fixed mean/std = 0.5 input normalization."""
    weights_path = './weights/imagenet_pretrainet_cifar10.pt'
    net = WideResNet()
    net.load_state_dict(torch.load(weights_path))
    return NormalizedWrapper(net, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
| 4,385 | 39.990654 | 116 | py |
TTE | TTE-main/experiments/ates.py | # We took this code from
# https://github.com/chawins/ates-minimal/blob/master/lib/wideresnet.py
'''
This code is taken from
https://github.com/yaodongyu/TRADES/blob/master/models/wideresnet.py
'''
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.utils import NormalizedWrapper
class BasicBlock(nn.Module):
    """Pre-activation WideResNet basic block: (BN-ReLU-Conv3x3) twice plus a
    residual shortcut; the shortcut is a 1x1 conv when shape/stride changes."""
    def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.droprate = dropRate
        self.equalInOut = (in_planes == out_planes)
        # `and/or` idiom: 1x1 projection shortcut only when shapes differ, else None.
        self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                                                                padding=0, bias=False) or None
    def forward(self, x):
        if not self.equalInOut:
            # Shapes differ: re-bind x to the activated input so the
            # projection shortcut below also sees BN+ReLU.
            x = self.relu1(self.bn1(x))
        else:
            out = self.relu1(self.bn1(x))
        out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        # Identity shortcut when shapes match, 1x1 projection otherwise.
        return torch.add(x if self.equalInOut else self.convShortcut(x), out)
class NetworkBlock(nn.Module):
    """One WideResNet stage: `nb_layers` residual blocks chained in sequence.

    Only the first block may change channel count and stride; every later
    block maps out_planes -> out_planes at stride 1.

    Args:
        nb_layers: number of blocks in the stage (floats are truncated).
        in_planes: channels entering the stage.
        out_planes: channels produced by every block of the stage.
        block: block factory, called as block(in, out, stride, dropRate).
        stride: stride of the first block (the stage's downsampling factor).
        dropRate: dropout rate forwarded to every block.
    """
    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(
            block, in_planes, out_planes, nb_layers, stride, dropRate)
    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
        layers = []
        for i in range(int(nb_layers)):
            # Explicit conditionals replace the fragile `cond and a or b`
            # idiom, which silently picks `b` whenever `a` is falsy.
            layers.append(block(in_planes if i == 0 else out_planes,
                                out_planes,
                                stride if i == 0 else 1,
                                dropRate))
        return nn.Sequential(*layers)
    def forward(self, x):
        return self.layer(x)
class WideResNet(nn.Module):
    """WideResNet classifier for 32x32 inputs (default: WRN-34-10, 10 classes)."""
    def __init__(self, depth=34, num_classes=10, widen_factor=10, dropRate=0.0):
        super(WideResNet, self).__init__()
        nChannels = [16, 16 * widen_factor,
                     32 * widen_factor, 64 * widen_factor]
        # depth must decompose into 3 stages of n blocks with 2 convs each.
        assert ((depth - 4) % 6 == 0)
        n = (depth - 4) / 6
        block = BasicBlock
        # 1st conv before any network block
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
                               padding=1, bias=False)
        # 1st block
        self.block1 = NetworkBlock(
            n, nChannels[0], nChannels[1], block, 1, dropRate)
        # 1st sub-block
        # NOTE: sub_block1 is never used in forward(); it only exists so that
        # checkpoints containing 'sub_block1.*' keys load cleanly.
        self.sub_block1 = NetworkBlock(
            n, nChannels[0], nChannels[1], block, 1, dropRate)
        # 2nd block
        self.block2 = NetworkBlock(
            n, nChannels[1], nChannels[2], block, 2, dropRate)
        # 3rd block
        self.block3 = NetworkBlock(
            n, nChannels[2], nChannels[3], block, 2, dropRate)
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]
        # He-style initialization; note that `n` is re-bound here, shadowing
        # the per-stage block count computed above.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()
    def forward(self, x):
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        # Pooling kernel 8 assumes 8x8 feature maps here, i.e. 32x32 inputs.
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels)
        return self.fc(out)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def get_model(experiment):
    """Load an ATES-trained WideResNet checkpoint.

    Args:
        experiment: 'ates' (CIFAR-10, 10 classes) or 'ates_cif100'
            (CIFAR-100, 100 classes).
    Returns:
        The loaded model wrapped in an identity NormalizedWrapper (the ATES
        models expect raw [0, 1] inputs, so no normalization is applied).
    Raises:
        ValueError: if `experiment` is not one of the two supported names.
    """
    # https://github.com/yaodongyu/TRADES/blob/master/evaluate_attack_cifar10.py#L104
    if experiment == 'ates':  # cifar10
        weights_path = './weights/ates_cifar10_wrn.pt'
        num_classes = 10
    elif experiment == 'ates_cif100':  # cifar100
        weights_path = './weights/ates_cifar100_wrn.pt'
        num_classes = 100
    else:
        # Previously an unknown name crashed later with a confusing
        # UnboundLocalError; fail fast with an explicit message instead.
        raise ValueError(f"unknown experiment '{experiment}'; "
                         "expected 'ates' or 'ates_cif100'")
    model = WideResNet(num_classes=num_classes)
    # load weights
    model.load_state_dict(torch.load(weights_path))
    # identity wrapper: no input normalization
    model = NormalizedWrapper(model, mean=None, std=None)
    return model
| 5,123 | 38.415385 | 116 | py |
TTE | TTE-main/experiments/adv_weight_pert_cif100.py | # Taken from AWP repo
# https://github.com/csdongxian/AWP/blob/main/AT_AWP/wideresnet.py
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.utils import NormalizedWrapper
class BasicBlock(nn.Module):
    """Pre-activation WideResNet basic block: (BN-ReLU-Conv3x3) twice plus a
    residual shortcut; the shortcut is a 1x1 conv when shape/stride changes."""
    def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.droprate = dropRate
        self.equalInOut = (in_planes == out_planes)
        # `and/or` idiom: 1x1 projection shortcut only when shapes differ, else None.
        self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                                                                padding=0, bias=False) or None
    def forward(self, x):
        if not self.equalInOut:
            # Shapes differ: re-bind x to the activated input so the
            # projection shortcut below also sees BN+ReLU.
            x = self.relu1(self.bn1(x))
        else:
            out = self.relu1(self.bn1(x))
        out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        # Identity shortcut when shapes match, 1x1 projection otherwise.
        return torch.add(x if self.equalInOut else self.convShortcut(x), out)
class NetworkBlock(nn.Module):
    """One WideResNet stage: `nb_layers` residual blocks chained in sequence.

    Only the first block may change channel count and stride; every later
    block maps out_planes -> out_planes at stride 1.

    Args:
        nb_layers: number of blocks in the stage (floats are truncated).
        in_planes: channels entering the stage.
        out_planes: channels produced by every block of the stage.
        block: block factory, called as block(in, out, stride, dropRate).
        stride: stride of the first block (the stage's downsampling factor).
        dropRate: dropout rate forwarded to every block.
    """
    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)
    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
        layers = []
        for i in range(int(nb_layers)):
            # Explicit conditionals replace the fragile `cond and a or b`
            # idiom, which silently picks `b` whenever `a` is falsy.
            layers.append(block(in_planes if i == 0 else out_planes,
                                out_planes,
                                stride if i == 0 else 1,
                                dropRate))
        return nn.Sequential(*layers)
    def forward(self, x):
        return self.layer(x)
class WideResNet(nn.Module):
    """WideResNet classifier for 32x32 inputs (AWP variant: required
    depth/num_classes arguments, Kaiming conv initialization, no sub_block1)."""
    def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
        super(WideResNet, self).__init__()
        nChannels = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor]
        # depth must decompose into 3 stages of n blocks with 2 convs each.
        assert((depth - 4) % 6 == 0)
        n = (depth - 4) / 6
        block = BasicBlock
        # 1st conv before any network block
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
                               padding=1, bias=False)
        # 1st block
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        # 2nd block
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
        # 3rd block
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]
        # Kaiming (fan-out) init for convs, unlike the math.sqrt(2/n) siblings.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()
    def forward(self, x):
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        # Pooling kernel 8 assumes 8x8 feature maps here, i.e. 32x32 inputs.
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels)
        return self.fc(out)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def get_model():
    """Load the AT-AWP CIFAR-100 WRN-34-10 checkpoint and wrap it with
    CIFAR-100 per-channel input normalization.

    Normalization constants follow
    https://github.com/csdongxian/AWP/blob/main/auto_attacks/eval.py#L73
    """
    # https://github.com/yaodongyu/TRADES/blob/master/evaluate_attack_cifar10.py#L104
    weights_path = './weights/newmodel1_CIF100_AT-AWP_cifar100_linf_wrn34-10.pth'
    net = WideResNet(depth=34, num_classes=100, widen_factor=10)
    cleaned = filter_state_dict(torch.load(weights_path))
    net.load_state_dict(cleaned)
    cifar100_mean = (0.5070751592371323, 0.48654887331495095, 0.4409178433670343)
    cifar100_std = (0.2673342858792401, 0.2564384629170883, 0.27615047132568404)
    return NormalizedWrapper(net, mean=cifar100_mean, std=cifar100_std)
def filter_state_dict(state_dict):
    """Clean a checkpoint state dict for load_state_dict().

    Unwraps a nested 'state_dict' entry, drops keys containing 'sub_block',
    and strips the 7-character 'module.' DataParallel prefix.
    """
    from collections import OrderedDict
    if 'state_dict' in state_dict.keys():
        state_dict = state_dict['state_dict']
    cleaned = OrderedDict()
    for key, tensor in state_dict.items():
        if 'sub_block' in key:
            continue
        # NOTE: substring test, exactly as upstream — any key containing
        # 'module' loses its first seven characters.
        new_key = key[7:] if 'module' in key else key
        cleaned[new_key] = tensor
    return cleaned
| 5,199 | 41.276423 | 116 | py |
TTE | TTE-main/experiments/adv_weight_pert.py | # Taken from AWP repo
# https://github.com/csdongxian/AWP/blob/main/AT_AWP/wideresnet.py
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.utils import NormalizedWrapper
class BasicBlock(nn.Module):
    """Pre-activation WideResNet basic block: (BN-ReLU-Conv3x3) twice plus a
    residual shortcut; the shortcut is a 1x1 conv when shape/stride changes."""
    def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.droprate = dropRate
        self.equalInOut = (in_planes == out_planes)
        # `and/or` idiom: 1x1 projection shortcut only when shapes differ, else None.
        self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                                                                padding=0, bias=False) or None
    def forward(self, x):
        if not self.equalInOut:
            # Shapes differ: re-bind x to the activated input so the
            # projection shortcut below also sees BN+ReLU.
            x = self.relu1(self.bn1(x))
        else:
            out = self.relu1(self.bn1(x))
        out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        # Identity shortcut when shapes match, 1x1 projection otherwise.
        return torch.add(x if self.equalInOut else self.convShortcut(x), out)
class NetworkBlock(nn.Module):
    """One WideResNet stage: `nb_layers` residual blocks chained in sequence.

    Only the first block may change channel count and stride; every later
    block maps out_planes -> out_planes at stride 1.

    Args:
        nb_layers: number of blocks in the stage (floats are truncated).
        in_planes: channels entering the stage.
        out_planes: channels produced by every block of the stage.
        block: block factory, called as block(in, out, stride, dropRate).
        stride: stride of the first block (the stage's downsampling factor).
        dropRate: dropout rate forwarded to every block.
    """
    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)
    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
        layers = []
        for i in range(int(nb_layers)):
            # Explicit conditionals replace the fragile `cond and a or b`
            # idiom, which silently picks `b` whenever `a` is falsy.
            layers.append(block(in_planes if i == 0 else out_planes,
                                out_planes,
                                stride if i == 0 else 1,
                                dropRate))
        return nn.Sequential(*layers)
    def forward(self, x):
        return self.layer(x)
class WideResNet(nn.Module):
    """WideResNet classifier for 32x32 inputs (AWP variant: required
    depth/num_classes arguments, Kaiming conv initialization, no sub_block1)."""
    def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
        super(WideResNet, self).__init__()
        nChannels = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor]
        # depth must decompose into 3 stages of n blocks with 2 convs each.
        assert((depth - 4) % 6 == 0)
        n = (depth - 4) / 6
        block = BasicBlock
        # 1st conv before any network block
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
                               padding=1, bias=False)
        # 1st block
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        # 2nd block
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
        # 3rd block
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]
        # Kaiming (fan-out) init for convs, unlike the math.sqrt(2/n) siblings.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()
    def forward(self, x):
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        # Pooling kernel 8 assumes 8x8 feature maps here, i.e. 32x32 inputs.
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels)
        return self.fc(out)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def get_model():
    """Load the RST-AWP CIFAR-10 WRN-28-10 checkpoint.

    The model expects raw [0, 1] inputs, hence the identity wrapper — see
    https://github.com/csdongxian/AWP/blob/main/auto_attacks/eval.py#L75
    """
    # https://github.com/yaodongyu/TRADES/blob/master/evaluate_attack_cifar10.py#L104
    weights_path = './weights/newmodel1_RST-AWP_cifar10_linf_wrn28-10.pt'
    net = WideResNet(depth=28, num_classes=10, widen_factor=10)
    cleaned = filter_state_dict(torch.load(weights_path))
    net.load_state_dict(cleaned)
    # identity wrapper: no input normalization
    return NormalizedWrapper(net, mean=None, std=None)
def filter_state_dict(state_dict):
    """Clean a checkpoint state dict for load_state_dict().

    Unwraps a nested 'state_dict' entry, drops keys containing 'sub_block',
    and strips the 7-character 'module.' DataParallel prefix.
    """
    from collections import OrderedDict
    if 'state_dict' in state_dict.keys():
        state_dict = state_dict['state_dict']
    cleaned = OrderedDict()
    for key, tensor in state_dict.items():
        if 'sub_block' in key:
            continue
        # NOTE: substring test, exactly as upstream — any key containing
        # 'module' loses its first seven characters.
        new_key = key[7:] if 'module' in key else key
        cleaned[new_key] = tensor
    return cleaned
| 5,066 | 40.876033 | 116 | py |
TTE | TTE-main/experiments/eval.py | # Copyright 2020 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluates a PyTorch checkpoint on CIFAR-10/100 or MNIST."""
from absl import app
from absl import flags
import torch
from torch.utils import data
from torchvision import datasets
from torchvision import transforms
import tqdm
from adversarial_robustness.pytorch import model_zoo
# Command-line flags for the standalone evaluation script; parsed by absl
# when app.run(main) executes.
_CKPT = flags.DEFINE_string(
    'ckpt', None, 'Path to checkpoint.')
_DATASET = flags.DEFINE_enum(
    'dataset', 'cifar10', ['cifar10', 'cifar100', 'mnist'],
    'Dataset on which the checkpoint is evaluated.')
_WIDTH = flags.DEFINE_integer(
    'width', 16, 'Width of WideResNet.')
_DEPTH = flags.DEFINE_integer(
    'depth', 70, 'Depth of WideResNet.')
_USE_CUDA = flags.DEFINE_boolean(
    'use_cuda', True, 'Whether to use CUDA.')
_BATCH_SIZE = flags.DEFINE_integer(
    'batch_size', 100, 'Batch size.')
_NUM_BATCHES = flags.DEFINE_integer(
    'num_batches', 0,
    'Number of batches to evaluate (zero means the whole dataset).')
def main(unused_argv):
  """Evaluate a WideResNet checkpoint's clean accuracy on the chosen dataset.

  Builds the model for --dataset, loads --ckpt (or skips loading when the
  value is the literal 'dummy'), runs the test split, and prints accuracy.
  """
  print(f'Loading "{_CKPT.value}"')
  print(f'Using a WideResNet with depth {_DEPTH.value} and width '
        f'{_WIDTH.value}.')
  # Create model and dataset.
  if _DATASET.value == 'mnist':
    model = model_zoo.WideResNet(
        num_classes=10, depth=_DEPTH.value, width=_WIDTH.value,
        activation_fn=model_zoo.Swish, mean=.5, std=.5, padding=2,
        num_input_channels=1)
    dataset_fn = datasets.MNIST
  elif _DATASET.value == 'cifar10':
    model = model_zoo.WideResNet(
        num_classes=10, depth=_DEPTH.value, width=_WIDTH.value,
        activation_fn=model_zoo.Swish, mean=model_zoo.CIFAR10_MEAN,
        std=model_zoo.CIFAR10_STD)
    dataset_fn = datasets.CIFAR10
  else:
    assert _DATASET.value == 'cifar100'
    model = model_zoo.WideResNet(
        num_classes=100, depth=_DEPTH.value, width=_WIDTH.value,
        activation_fn=model_zoo.Swish, mean=model_zoo.CIFAR100_MEAN,
        std=model_zoo.CIFAR100_STD)
    dataset_fn = datasets.CIFAR100
  # Load model.
  if _CKPT.value != 'dummy':
    params = torch.load(_CKPT.value)
    model.load_state_dict(params)
  if _USE_CUDA.value:
    model.cuda()
  model.eval()
  print('Successfully loaded.')
  # Load dataset.
  transform_chain = transforms.Compose([transforms.ToTensor()])
  ds = dataset_fn(root='/tmp/data', train=False, transform=transform_chain,
                  download=True)
  test_loader = data.DataLoader(ds, batch_size=_BATCH_SIZE.value, shuffle=False,
                                num_workers=0)
  # Evaluation.
  correct = 0
  total = 0
  batch_count = 0
  # Progress-bar total: the whole test set unless --num_batches caps it.
  # (Previously min(..., 0) made the bar's total 0 for the default flag value.)
  full_batches = (len(ds) - 1) // _BATCH_SIZE.value + 1
  if _NUM_BATCHES.value > 0:
    total_batches = min(full_batches, _NUM_BATCHES.value)
  else:
    total_batches = full_batches
  with torch.no_grad():
    for images, labels in tqdm.tqdm(test_loader, total=total_batches):
      if _USE_CUDA.value:
        # Bug fix: inputs must live on the same device as the model; the
        # original left them on CPU while the model was moved to CUDA.
        images = images.cuda()
        labels = labels.cuda()
      outputs = model(images)
      _, predicted = torch.max(outputs.data, 1)
      total += labels.size(0)
      correct += (predicted == labels).sum().item()
      batch_count += 1
      if _NUM_BATCHES.value > 0 and batch_count >= _NUM_BATCHES.value:
        break
  print(f'Accuracy on the {total} test images: {100 * correct / total:.2f}%')
# Script entry point: --ckpt is mandatory; absl parses flags and calls main.
if __name__ == '__main__':
  flags.mark_flag_as_required('ckpt')
  app.run(main)
| 3,712 | 33.700935 | 80 | py |
TTE | TTE-main/experiments/trades.py | # Taken from TRADES repo
# https://github.com/yaodongyu/TRADES/blob/master/models/wideresnet.py
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.utils import NormalizedWrapper
class BasicBlock(nn.Module):
    """Pre-activation WideResNet basic block: (BN-ReLU-Conv3x3) twice plus a
    residual shortcut; the shortcut is a 1x1 conv when shape/stride changes."""
    def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.droprate = dropRate
        self.equalInOut = (in_planes == out_planes)
        # `and/or` idiom: 1x1 projection shortcut only when shapes differ, else None.
        self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                                                                padding=0, bias=False) or None
    def forward(self, x):
        if not self.equalInOut:
            # Shapes differ: re-bind x to the activated input so the
            # projection shortcut below also sees BN+ReLU.
            x = self.relu1(self.bn1(x))
        else:
            out = self.relu1(self.bn1(x))
        out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        # Identity shortcut when shapes match, 1x1 projection otherwise.
        return torch.add(x if self.equalInOut else self.convShortcut(x), out)
class NetworkBlock(nn.Module):
    """One WideResNet stage: `nb_layers` residual blocks chained in sequence.

    Only the first block may change channel count and stride; every later
    block maps out_planes -> out_planes at stride 1.

    Args:
        nb_layers: number of blocks in the stage (floats are truncated).
        in_planes: channels entering the stage.
        out_planes: channels produced by every block of the stage.
        block: block factory, called as block(in, out, stride, dropRate).
        stride: stride of the first block (the stage's downsampling factor).
        dropRate: dropout rate forwarded to every block.
    """
    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)
    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
        layers = []
        for i in range(int(nb_layers)):
            # Explicit conditionals replace the fragile `cond and a or b`
            # idiom, which silently picks `b` whenever `a` is falsy.
            layers.append(block(in_planes if i == 0 else out_planes,
                                out_planes,
                                stride if i == 0 else 1,
                                dropRate))
        return nn.Sequential(*layers)
    def forward(self, x):
        return self.layer(x)
class WideResNet(nn.Module):
    """WideResNet classifier for 32x32 inputs (default: WRN-34-10, 10 classes)."""
    def __init__(self, depth=34, num_classes=10, widen_factor=10, dropRate=0.0):
        super(WideResNet, self).__init__()
        nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
        # depth must decompose into 3 stages of n blocks with 2 convs each.
        assert ((depth - 4) % 6 == 0)
        n = (depth - 4) / 6
        block = BasicBlock
        # 1st conv before any network block
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
                               padding=1, bias=False)
        # 1st block
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        # 1st sub-block
        # NOTE: sub_block1 is never used in forward(); it only exists so that
        # checkpoints containing 'sub_block1.*' keys load cleanly.
        self.sub_block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        # 2nd block
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
        # 3rd block
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]
        # He-style initialization; note that `n` is re-bound here, shadowing
        # the per-stage block count computed above.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()
    def forward(self, x):
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        # Pooling kernel 8 assumes 8x8 feature maps here, i.e. 32x32 inputs.
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels)
        return self.fc(out)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def get_model(experiment):
    """Load a pretrained TRADES WideResNet-34-10 for the given experiment.

    Args:
        experiment: one of 'trades', 'noflip_trades', 'nocrop_trades'.

    Returns:
        The WideResNet wrapped in a NormalizedWrapper (no normalization).

    Raises:
        ValueError: if *experiment* is not a known variant.
    """
    # https://github.com/yaodongyu/TRADES/blob/master/evaluate_attack_cifar10.py#L104
    if experiment == 'trades':
        WEIGHTS_PATH = './weights/official_trades_cifar_wrn34_10.pth'
    elif experiment == 'noflip_trades':
        WEIGHTS_PATH = './weights/trades_noflip.pth'
    elif experiment == 'nocrop_trades':
        WEIGHTS_PATH = './weights/trades_nocrop.pth'
    else:
        # Previously an unknown name fell through and crashed with a
        # NameError on WEIGHTS_PATH; fail fast with a clear message.
        raise ValueError(f'Unknown experiment: {experiment!r}')
    model = WideResNet()
    # load weights
    model.load_state_dict(torch.load(WEIGHTS_PATH))
    # place inside normalizing wrapper
    model = NormalizedWrapper(model, mean=None, std=None) # no normalization!
    return model
| 4,910 | 40.974359 | 116 | py |
TTE | TTE-main/utils/resnet.py | '''ResNet in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class FakeReLU(torch.autograd.Function):
    """ReLU in the forward pass, identity in the backward pass.

    Gradients flow through unchanged, as if the clamp were not applied.
    """

    @staticmethod
    def forward(ctx, input):
        # standard ReLU: zero out the negative entries
        return torch.clamp(input, min=0)

    @staticmethod
    def backward(ctx, grad_output):
        # pretend the forward op was the identity
        return grad_output
class SequentialWithArgs(torch.nn.Sequential):
    """nn.Sequential that forwards extra args/kwargs to its LAST module only."""

    def forward(self, input, *args, **kwargs):
        modules = list(self._modules.values())
        if not modules:
            return input
        # all but the last module receive the plain input
        for module in modules[:-1]:
            input = module(input)
        # only the final module sees the extra arguments
        return modules[-1](input, *args, **kwargs)
class BasicBlock(nn.Module):
    """Two-conv residual block (ResNet-18/34 style).

    Both convs are 3x3; the shortcut is a 1x1 conv + BN projection whenever
    the stride or channel count changes, otherwise an identity.
    """
    # output channels = planes * expansion
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        # identity shortcut unless the output shape differs from the input
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1,
                          stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes))

    def forward(self, x, fake_relu=False):
        """Run the block; with fake_relu the final ReLU clamps in the forward
        pass but passes gradients straight through (see FakeReLU)."""
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += self.shortcut(x)
        if fake_relu:
            return FakeReLU.apply(out)
        return F.relu(out)
class Bottleneck(nn.Module):
    """Three-conv residual bottleneck block (ResNet-50/101/152 style).

    1x1 reduce -> 3x3 (possibly strided) -> 1x1 expand to planes*expansion,
    with a projection shortcut whenever the shape changes.
    """
    # output channels = planes * expansion
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion*planes)

        # identity shortcut unless the output shape differs from the input
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1,
                          stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )

    def forward(self, x, fake_relu=False):
        """Run the block; with fake_relu the final ReLU clamps in the forward
        pass but passes gradients straight through (see FakeReLU)."""
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += self.shortcut(x)
        if fake_relu:
            return FakeReLU.apply(out)
        return F.relu(out)
class ResNet(nn.Module):
    """CIFAR-style ResNet that returns both logits and pooled features.

    ``forward`` returns the tuple ``(logits, embeddings)``.
    """
    # feat_scale lets us deal with CelebA, other non-32x32 datasets
    def __init__(self, block, num_blocks, num_classes=10, feat_scale=1, wd=1):
        """
        Args:
            block: residual block class (BasicBlock or Bottleneck).
            num_blocks: number of blocks in each of the four stages.
            num_classes: size of the final linear layer.
            feat_scale: multiplier for the flattened feature size
                (for non-32x32 inputs).
            wd: width multiplier applied to all stage channel counts.
        """
        super(ResNet, self).__init__()

        # stage widths, scaled by the width multiplier
        widths = [64, 128, 256, 512]
        widths = [int(w * wd) for w in widths]

        self.in_planes = widths[0]
        self.conv1 = nn.Conv2d(3, self.in_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(self.in_planes)
        self.layer1 = self._make_layer(block, widths[0], num_blocks[0],
                                       stride=1)
        self.layer2 = self._make_layer(block, widths[1], num_blocks[1],
                                       stride=2)
        self.layer3 = self._make_layer(block, widths[2], num_blocks[2],
                                       stride=2)
        self.layer4 = self._make_layer(block, widths[3], num_blocks[3],
                                       stride=2)
        self.linear = nn.Linear(feat_scale*widths[3]*block.expansion,
                                num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Build one stage: first block uses *stride*, the rest stride 1."""
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        # SequentialWithArgs so fake_relu can reach the last block only
        return SequentialWithArgs(*layers)

    def forward(self, x, fake_relu=False, no_relu=False):
        """Return (logits, embeddings); embeddings are the pooled features."""
        assert not no_relu, "no_relu not yet supported for this architecture"
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out, fake_relu=fake_relu)
        out = F.avg_pool2d(out, 4)
        embeddings = out.view(out.size(0), -1)
        logits = self.linear(embeddings)
        return logits, embeddings
def ResNet18(**kwargs):
    """ResNet-18: BasicBlock stages of depth [2, 2, 2, 2]."""
    return ResNet(BasicBlock, num_blocks=[2, 2, 2, 2], **kwargs)


def ResNet18Wide(**kwargs):
    """ResNet-18 with 1.5x channel widths."""
    return ResNet(BasicBlock, num_blocks=[2, 2, 2, 2], wd=1.5, **kwargs)


def ResNet18Thin(**kwargs):
    """ResNet-18 with 0.75x channel widths."""
    return ResNet(BasicBlock, num_blocks=[2, 2, 2, 2], wd=.75, **kwargs)


def ResNet34(**kwargs):
    """ResNet-34: BasicBlock stages of depth [3, 4, 6, 3]."""
    return ResNet(BasicBlock, num_blocks=[3, 4, 6, 3], **kwargs)


def ResNet50(**kwargs):
    """ResNet-50: Bottleneck stages of depth [3, 4, 6, 3]."""
    return ResNet(Bottleneck, num_blocks=[3, 4, 6, 3], **kwargs)


def ResNet101(**kwargs):
    """ResNet-101: Bottleneck stages of depth [3, 4, 23, 3]."""
    return ResNet(Bottleneck, num_blocks=[3, 4, 23, 3], **kwargs)


def ResNet152(**kwargs):
    """ResNet-152: Bottleneck stages of depth [3, 8, 36, 3]."""
    return ResNet(Bottleneck, num_blocks=[3, 8, 36, 3], **kwargs)


# lowercase aliases for the common variants
resnet50 = ResNet50
resnet18 = ResNet18
resnet101 = ResNet101
resnet152 = ResNet152
# resnet18thin = ResNet18Thin
# resnet18wide = ResNet18Wide
def test():
    """Smoke test: forward a random CIFAR-shaped batch through ResNet-18."""
    net = ResNet18()
    # forward() returns the tuple (logits, embeddings); the old code called
    # .size() on the tuple, which raised AttributeError.
    logits, _ = net(torch.randn(1, 3, 32, 32))
    print(logits.size())
| 5,715 | 33.433735 | 79 | py |
TTE | TTE-main/utils/utils.py | import os
import random
import argparse
import numpy as np
import os.path as osp
from tqdm import tqdm
from scipy import ndimage
from autoattack import AutoAttack
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.datasets import CIFAR10, CIFAR100, ImageFolder
from torch.utils.data import TensorDataset
from torchvision.transforms import Compose, ToTensor
from torch.utils.data import DataLoader, random_split
import torchvision.models as models
class DiffCrop(nn.Module):
    """Differentiable random crop: pad the input, then take a fixed window
    whose origin is drawn once at construction time.
    """

    def __init__(self, inp_size=32, crop_size=32, pad_size=4):
        super(DiffCrop, self).__init__()
        # same padding on all four sides (up/down/left/right)
        self.pad = tuple(pad_size for _ in range(4))
        # largest valid origin keeping the window inside the padded image
        max_origin = inp_size + int(2 * pad_size) - crop_size
        # draw x first, then y (call order matters for RNG reproducibility)
        self.orig_x = np.random.randint(max_origin)
        self.orig_y = np.random.randint(max_origin)
        # window end coordinates
        self.end_x = self.orig_x + crop_size
        self.end_y = self.orig_y + crop_size

    def forward(self, x):
        padded = F.pad(x, pad=self.pad)
        return padded[:, :, self.orig_x:self.end_x, self.orig_y:self.end_y]
class DiffFlip(nn.Module):
    """Differentiable horizontal flip (mirrors the width dimension)."""

    def __init__(self):
        super(DiffFlip, self).__init__()

    def forward(self, x):
        # dim 3 is the width axis of an NCHW batch
        return torch.flip(x, dims=[3])
class GaussianLayer(nn.Module):
    """Fixed (non-learned) Gaussian blur implemented as a depthwise conv.

    A 3-channel depthwise convolution is initialised with a Gaussian kernel
    from scipy.ndimage; reflection padding keeps the spatial size unchanged.
    """
    # Code taken from (and slightly modified)
    # https://discuss.pytorch.org/t/gaussian-kernel-layer/37619
    def __init__(self, kernel_size=5, sigma=1):
        super(GaussianLayer, self).__init__()
        assert kernel_size % 2 != 0, 'kernel_size should be odd'
        self.sigma = sigma
        self.kernel_size = kernel_size
        # groups=3 makes the conv depthwise: each channel is blurred alone
        self.seq = nn.Sequential(
            nn.ReflectionPad2d(self.kernel_size // 2),
            nn.Conv2d(3, 3, kernel_size=self.kernel_size,
                      stride=1, padding=0, bias=None,
                      groups=3)
        )
        self.weights_init()

    def forward(self, x):
        return self.seq(x)

    def weights_init(self):
        """Fill the depthwise conv with a (kernel_size, sigma) Gaussian kernel."""
        n = np.zeros((self.kernel_size, self.kernel_size))
        center = self.kernel_size // 2
        n[center, center] = 1
        # the impulse response of a Gaussian filter IS the Gaussian kernel
        k = ndimage.gaussian_filter(n, sigma=self.sigma)
        for _, f in self.named_parameters():
            f.data.copy_(torch.from_numpy(k))
class NormalizedWrapper(nn.Module):
    """Wrap a model, optionally normalizing inputs by (mean, std) first.

    If *mean* is None, inputs are passed through unchanged.
    """

    def __init__(self, model, mean=None, std=None):
        super(NormalizedWrapper, self).__init__()
        self.model = model
        self.normalize = mean is not None
        if self.normalize:
            assert std is not None
            # stored as frozen parameters so .to(device)/state_dict work
            self.mean = nn.Parameter(torch.tensor(mean).view(1, 3, 1, 1),
                                     requires_grad=False)
            self.std = nn.Parameter(torch.tensor(std).view(1, 3, 1, 1),
                                    requires_grad=False)

    def forward(self, x):
        if self.normalize:
            x = (x - self.mean) / self.std
        return self.model(x)
class AugWrapper(nn.Module):
    """Wrap a classifier so its logits are averaged over a fixed set of
    differentiable test-time augmentations (identity, flip, crops, blur).
    """

    def __init__(self, model, flip=False, n_crops=0, flip_crop=False,
                 gauss_ps=None):
        super(AugWrapper, self).__init__()
        self.model = model
        # transforms always include the identity so the clean image
        # contributes to the averaged prediction
        self.transforms = [lambda x: x]  # the identity
        self.total_augs = self._init_augs(flip, n_crops, gauss_ps, flip_crop)
        if len(self.total_augs) != 0:  # whether augmentations are used
            print('Using augmentations: ' + ','.join(self.total_augs))
        else:
            print('NOT using augmentations!')
        print(f'{len(self.transforms)} transforms: {self.transforms}')

    def _init_augs(self, flip, n_crops, gauss_ps, flip_crop):
        """Populate self.transforms; return the list of enabled aug names."""
        total_augs = []
        if flip:
            total_augs.append('flip')
            # flip augmentations
            flip_f = DiffFlip()
            self.transforms.append(lambda x: flip_f(x))
        if n_crops != 0:
            total_augs.append(f'crops n={n_crops}')
            # crop augmentations
            crops_fs = [DiffCrop() for _ in range(n_crops)]
            # BUGFIX: bind each crop as a default argument. The previous
            # `lambda x: f(x)` captured the loop variable `f` by reference,
            # so every lambda applied the LAST crop only.
            self.transforms.extend([lambda x, f=f: f(x) for f in crops_fs])
        if flip and (n_crops != 0) and flip_crop:
            total_augs.append(f'flipped-crops n={n_crops}')
            # flip-crop augmentations (same late-binding fix as above)
            self.transforms.extend(
                [lambda x, f=f: f(flip_f(x)) for f in crops_fs])
        if gauss_ps is not None:
            kernel_size, sigma = gauss_ps
            total_augs.append(f'gauss k={kernel_size}, s={sigma}')
            self.gauss_layer = GaussianLayer(kernel_size=kernel_size,
                                             sigma=sigma)
            self.transforms.append(lambda x: self.gauss_layer(x))
        return total_augs

    def forward(self, x):
        # stack the augmented copies along a new leading dim, flatten into
        # one big batch, score, then average the logits per input image
        x = torch.cat([t(x).unsqueeze(0) for t in self.transforms])
        x = x.view(-1, x.size(2), x.size(3), x.size(4))
        scores = self.model(x)
        scores = scores[0] if isinstance(scores, tuple) else scores  # resnet case
        scores = scores.view(len(self.transforms), -1, scores.size(1))
        scores = torch.mean(scores, dim=0)  # average across augmentations
        return scores
def get_data_utils(dataset_name, batch_size, chunks, num_chunk):
    """Build a test DataLoader over chunk *num_chunk* (1-based) of *chunks*.

    The test set is split into `chunks` equal parts and only the requested
    part is materialized into a TensorDataset.

    Returns:
        (testloader, start_ind, end_ind): loader over the chunk plus the
        [start, end) indices of the chunk within the full test set.
    """
    if dataset_name == 'imagenet':
        from torchvision import transforms
        path = '/local/reference/CV/ILSVR/classification-localization/data/jpeg/val'
        # NOTE(review): train-style augmentations (random crop/flip) applied
        # to the val split look suspicious for evaluation — confirm intent.
        transform = transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor()
        ])
        dataset = ImageFolder(path, transform)
    else:
        dataset_fun = CIFAR10 if dataset_name == 'cifar10' else CIFAR100
        dataset = dataset_fun(root='./data', train=False, download=True,
                              transform=Compose([ToTensor()]))
    tot_instances = len(dataset)
    assert 1 <= num_chunk <= chunks
    assert tot_instances % chunks == 0
    # inds of current chunk
    inds = np.linspace(0, tot_instances, chunks+1, dtype=int)
    start_ind, end_ind = inds[num_chunk-1], inds[num_chunk]
    # extract data and put in new dataset
    data = [dataset[i] for i in range(start_ind, end_ind)]
    imgs = torch.cat([x.unsqueeze(0) for (x, y) in data], 0)
    labels = torch.cat([torch.tensor(y).unsqueeze(0) for (x, y) in data], 0)
    testset = TensorDataset(imgs, labels)
    testloader = DataLoader(testset, batch_size=batch_size, shuffle=False,
                            num_workers=2, pin_memory=True, drop_last=False)
    return testloader, start_ind, end_ind
def get_clean_acc(model, testloader, device):
    """Evaluate and print top-1 accuracy (in percent) on clean data."""
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for inputs, targets in testloader:
            inputs, targets = inputs.to(device), targets.to(device)
            preds = model(inputs).max(1)[1]
            correct += (preds == targets).sum().item()
            total += targets.size(0)
    acc = 100. * correct / total
    print(f'Clean accuracy: {acc:.4f}')
    return acc
def get_adversary(model, cheap, seed, eps):
    """Build an Linf AutoAttack adversary for *model*.

    Args:
        model: classifier whose ``forward`` maps images to logits.
        cheap: if True, run a heavily reduced attack budget.
        seed: RNG seed for the attack.
        eps: Linf perturbation budget.
    """
    model.eval()
    adversary = AutoAttack(model.forward, norm='Linf', eps=eps, verbose=False)
    adversary.seed = seed
    if cheap:
        # print('Running CHEAP attack')
        # based on
        # https://github.com/fra31/auto-attack/blob/master/autoattack/autoattack.py#L230
        # adversary.attacks_to_run = ['apgd-ce', 'apgd-t', 'fab-t', 'square']
        # NOTE(review): 2 iterations / 2 queries is debug-level cheap — these
        # numbers give no meaningful robustness estimate.
        adversary.attacks_to_run = ['apgd-ce', 'square']
        adversary.apgd.n_iter = 2
        adversary.apgd.n_restarts = 1
        adversary.fab.n_restarts = 1
        adversary.apgd_targeted.n_restarts = 1
        adversary.fab.n_target_classes = 2
        adversary.apgd_targeted.n_target_classes = 2
        adversary.square.n_queries = 2
    return adversary
def compute_advs(model, testloader, device, batch_size, cheap, seed, eps):
    """Run AutoAttack over the whole loader.

    Returns:
        (advs, labels): dict of per-attack adversarial images, and the
        ground-truth labels aligned with them.
    """
    model.eval()
    adversary = get_adversary(model, cheap, seed, eps)
    # materialize the whole split so AutoAttack can batch it itself
    all_imgs = torch.cat([batch for (batch, _) in testloader], 0)
    all_labels = torch.cat([lab for (_, lab) in testloader], 0)
    advs = adversary.run_standard_evaluation_individual(all_imgs, all_labels,
                                                        bs=batch_size)
    return advs, all_labels
def compute_adv_accs(model, advs, labels, device, batch_size):
    """Accuracy of *model* on each set of adversarial examples.

    Args:
        advs: dict mapping attack name -> tensor of adversarial images,
            aligned with *labels*.

    Returns:
        dict of per-attack accuracies (percent) plus 'rob acc', the
        worst-case accuracy: fraction of images classified correctly
        under EVERY attack simultaneously.
    """
    accs = {}
    all_preds = []
    for attack_name, curr_advs in advs.items():
        dataset = TensorDataset(curr_advs, labels)
        dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False,
                                num_workers=1, pin_memory=True, drop_last=False)
        total_corr = 0
        curr_preds = []
        with torch.no_grad():
            for img, lab in dataloader:
                img, lab = img.to(device), lab.to(device)
                output = model(img)
                pred = output.max(1)[1]
                curr_preds.append(pred)
                total_corr += (pred == lab).sum().item()
        curr_preds = torch.cat(curr_preds)
        all_preds.append(curr_preds)
        curr_acc = 100. * total_corr / labels.size(0)
        accs.update({ attack_name : curr_acc })
    # compute worst case for each image
    all_preds = torch.cat([x.unsqueeze(0) for x in all_preds])
    temp_labels = labels.unsqueeze(0).expand(len(advs), -1).to(device)
    # an image counts as robust only if correct under all attacks
    where_all_correct = torch.prod(all_preds==temp_labels, dim=0) # logical AND
    worst_acc = 100. * where_all_correct.sum().item() / labels.size(0)
    accs.update({ 'rob acc' : worst_acc })
    return accs
def print_to_log(text, txt_file_path):
    """Append *text* as one line to the log file at *txt_file_path*."""
    with open(txt_file_path, 'a') as log_f:
        log_f.write(str(text) + '\n')
def print_training_params(args, txt_file_path):
    """Log all argparse parameters as a single ' | '-joined line."""
    params = vars(args)
    text = ' | '.join(f'{key}: {params[key]}' for key in params)
    # Print to log and console
    print_to_log(text, txt_file_path)
    print(text)
def get_model(experiment):
    """Load the pretrained model for a named robustness experiment.

    Args:
        experiment: identifier of the training scheme/checkpoint to load.

    Returns:
        The model (already wrapped/normalized where required).

    Raises:
        ValueError: if *experiment* is not a known identifier.
    """
    if experiment == 'local_trades':
        from utils.resnet import ResNet18
        model = ResNet18(num_classes=10)
        state_dict = torch.load('./weights/local_trades_best.pth')['state_dict']
        state_dict = { k.replace('model.' ,'') : v
                       for k, v in state_dict.items() }
        model.load_state_dict(state_dict, strict=False)
    elif experiment in ['trades', 'noflip_trades', 'nocrop_trades']:
        from experiments.trades import get_model
        model = get_model(experiment)
    elif experiment == 'awp': # Adversarial Weight Perturbation
        from experiments.adv_weight_pert import get_model
        model = get_model()
    elif experiment == 'awp_cif100': # Adversarial Weight Perturbation for CIFAR100
        from experiments.adv_weight_pert_cif100 import get_model
        model = get_model()
    elif experiment == 'imagenet_pretraining': # ImageNet preatraining
        from experiments.imagenet_pretraining import get_imagenet_pretrained_model
        model = get_imagenet_pretrained_model()
    elif experiment == 'unlabeled_pretraining': # Unlabeled preatraining
        from experiments.unlabeled_pretraining import get_imagenet_pretrained_model
        model = get_imagenet_pretrained_model()
    elif experiment == 'hydra': # HYDRA
        from experiments.hydra import get_model
        model = get_model()
    elif experiment == 'mart': # MART
        from experiments.mart import get_model
        model = get_model()
    elif experiment in ['ates', 'ates_cif100']: # Adversarial Training with Early Stopping
        from experiments.ates import get_model
        model = get_model(experiment)
    elif experiment == 'imagenet_nominal_training':
        model = models.resnet18(pretrained=True)
        model = NormalizedWrapper(model, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    elif experiment in ['gowal', 'gowal_c100']:
        from experiments.gowal import get_model
        model = get_model(experiment)
    else:
        # Previously an unknown name fell through to `return model` and
        # crashed with a NameError; fail fast with a clear message instead.
        raise ValueError(f'Unknown experiment: {experiment!r}')
    return model
def save_results(advs, labels, accs, args, num_chunk, start_ind, end_ind):
    """Persist adversarial examples and accuracy logs for one chunk.

    Saves ``advs_<chunk>.pth`` (dict with 'advs' and 'labels') under
    args.adv_dir and ``results_<chunk>.txt`` under args.logs_dir.

    Returns:
        Path of the written log file.
    """
    filename = f'chunk{num_chunk}of{args.chunks}_{start_ind}to{end_ind}'
    # Save adversaries to file
    # BUGFIX: `filename` was computed but never used — the paths contained
    # a literal placeholder instead of the chunk identifier.
    data_file = osp.join(args.adv_dir, f'advs_{filename}.pth')
    data = {'advs' : advs, 'labels' : labels} # advs is a dict
    torch.save(data, data_file)
    # Log stuff
    log_file = osp.join(args.logs_dir, f'results_{filename}.txt')
    info = '\n'.join([f'{k}:{v}' if k == 'n_instances' else f'{k}:{v:4.2f}'
                      for k, v in accs.items()])
    print_to_log(info, log_file)
    print('==> Accuracies: \n', info)
    print(f'Evaluation for chunk {num_chunk} out of {args.chunks} finished.\n'
          f'==> Adversaries saved to {data_file}.\n'
          f'==> Log file saved to {log_file}.\n'
          + 50 * '-' + '\n')
    return log_file
def eval_chunk(model, dataset, batch_size, chunks, num_chunk, device, args):
    """Full evaluation pipeline for one data chunk.

    Loads the chunk, measures clean accuracy, crafts AutoAttack
    adversaries, measures robust accuracy, and writes everything to disk.

    Returns:
        Path of the per-chunk results log file.
    """
    testloader, start_ind, end_ind = get_data_utils(dataset, batch_size, chunks,
                                                    num_chunk)
    # Clean acc
    clean_acc = get_clean_acc(model, testloader, device)
    # Compute adversarial instances
    advs, labels = compute_advs(model, testloader, device, batch_size,
                                args.cheap, args.seed, args.eps)
    # Compute robustness
    accs = compute_adv_accs(model, advs, labels, device, batch_size)
    # Send everything to file
    accs.update({'clean' : clean_acc , 'n_instances' : len(testloader.dataset)})
    log_file = save_results(advs, labels, accs, args, num_chunk, start_ind,
                            end_ind)
    return log_file
def eval_files(log_files, final_log):
    """Merge per-chunk result logs into one instance-weighted summary file.

    Each input file contains 'name:value' lines including 'n_instances';
    per-attack accuracies are averaged weighted by instance count.
    """
    print(f'Evaluating based on these {len(log_files)} files: ', log_files)
    total_n = 0
    correct_by_attack = {}
    for path in log_files:
        with open(path, 'r') as fh:
            entries = {}
            for raw in fh.readlines():
                parts = raw.strip().split(':')
                entries[parts[0]] = float(parts[1])
        n = int(entries.pop('n_instances'))
        total_n += n
        # accumulate weighted correct counts per attack
        for attack, acc in entries.items():
            correct_by_attack[attack] = correct_by_attack.get(attack, 0) + acc * n
    accs = {attack: float(corr) / total_n
            for attack, corr in correct_by_attack.items()}
    accs.update({'n_instances': total_n})
    info = '\n'.join([f'{k}:{v}' if k == 'n_instances' else f'{k}:{v:4.2f}'
                      for k, v in accs.items()])
    print_to_log(info, final_log)
    print(f'Saved all results to {final_log}')
| 14,696 | 38.087766 | 95 | py |
GFocalV2 | GFocalV2-master/setup.py | #!/usr/bin/env python
import os
from setuptools import find_packages, setup
import torch
from torch.utils.cpp_extension import (BuildExtension, CppExtension,
CUDAExtension)
def readme():
    """Return the contents of README.md decoded as UTF-8."""
    with open('README.md', encoding='utf-8') as f:
        return f.read()
version_file = 'mmdet/version.py'
def get_version():
    """Return mmdet's ``__version__`` by executing its version file.

    Executes the file named by the module-level ``version_file`` in the
    local namespace and reads ``__version__`` back out, so the package
    itself does not need to be importable at build time.
    """
    with open(version_file, 'r') as f:
        exec(compile(f.read(), version_file, 'exec'))
    return locals()['__version__']
def make_cuda_ext(name, module, sources, sources_cuda=[]):
    """Build a torch C++/CUDA extension description for setup().

    Args:
        name: extension name (relative to *module*).
        module: dotted package path the source files live under.
        sources: C++ source files, relative to *module*.
        sources_cuda: extra CUDA sources, compiled only when CUDA is used.

    Returns:
        A CUDAExtension when CUDA is available (or FORCE_CUDA=1 is set),
        otherwise a CppExtension.
    """
    define_macros = []
    extra_compile_args = {'cxx': []}

    if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':
        define_macros += [('WITH_CUDA', None)]
        extension = CUDAExtension
        extra_compile_args['nvcc'] = [
            '-D__CUDA_NO_HALF_OPERATORS__',
            '-D__CUDA_NO_HALF_CONVERSIONS__',
            '-D__CUDA_NO_HALF2_OPERATORS__',
        ]
        # BUGFIX: use concatenation instead of `sources += sources_cuda`,
        # which extended the CALLER's list in place (and touched the shared
        # mutable default) on every CUDA build.
        sources = sources + sources_cuda
    else:
        print(f'Compiling {name} without CUDA')
        extension = CppExtension

    return extension(
        name=f'{module}.{name}',
        sources=[os.path.join(*module.split('.'), p) for p in sources],
        define_macros=define_macros,
        extra_compile_args=extra_compile_args)
def parse_requirements(fname='requirements.txt', with_version=True):
    """Parse the package dependencies listed in a requirements file but strips
    specific versioning information.
    Args:
        fname (str): path to requirements file
        with_version (bool, default=False): if True include version specs
    Returns:
        List[str]: list of requirements items
    CommandLine:
        python -c "import setup; print(setup.parse_requirements())"
    """
    import sys
    from os.path import exists
    import re
    require_fpath = fname

    def parse_line(line):
        """Parse information from a line in a requirements text file."""
        if line.startswith('-r '):
            # Allow specifying requirements in other files
            target = line.split(' ')[1]
            for info in parse_require_file(target):
                yield info
        else:
            info = {'line': line}
            if line.startswith('-e '):
                info['package'] = line.split('#egg=')[1]
            elif '@git+' in line:
                info['package'] = line
            else:
                # Remove versioning from the package
                pat = '(' + '|'.join(['>=', '==', '>']) + ')'
                parts = re.split(pat, line, maxsplit=1)
                parts = [p.strip() for p in parts]
                info['package'] = parts[0]
                if len(parts) > 1:
                    op, rest = parts[1:]
                    if ';' in rest:
                        # Handle platform specific dependencies
                        # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
                        version, platform_deps = map(str.strip,
                                                     rest.split(';'))
                        info['platform_deps'] = platform_deps
                    else:
                        version = rest  # NOQA
                    info['version'] = (op, version)
            yield info

    def parse_require_file(fpath):
        """Yield parsed requirement dicts from every non-comment line."""
        with open(fpath, 'r') as f:
            for line in f.readlines():
                line = line.strip()
                if line and not line.startswith('#'):
                    for info in parse_line(line):
                        yield info

    def gen_packages_items():
        """Render parsed requirement dicts back into pip-style strings."""
        if exists(require_fpath):
            for info in parse_require_file(require_fpath):
                parts = [info['package']]
                if with_version and 'version' in info:
                    parts.extend(info['version'])
                if not sys.version.startswith('3.4'):
                    # apparently package_deps are broken in 3.4
                    platform_deps = info.get('platform_deps')
                    if platform_deps is not None:
                        parts.append(';' + platform_deps)
                item = ''.join(parts)
                yield item

    packages = list(gen_packages_items())
    return packages
if __name__ == '__main__':
    # Package metadata and build configuration for the mmdet distribution;
    # requirement lists are read from the requirements/ directory.
    setup(
        name='mmdet',
        version=get_version(),
        description='OpenMMLab Detection Toolbox and Benchmark',
        long_description=readme(),
        long_description_content_type='text/markdown',
        author='OpenMMLab',
        author_email='openmmlab@gmail.com',
        keywords='computer vision, object detection',
        url='https://github.com/open-mmlab/mmdetection',
        packages=find_packages(exclude=('configs', 'tools', 'demo')),
        classifiers=[
            'Development Status :: 5 - Production/Stable',
            'License :: OSI Approved :: Apache Software License',
            'Operating System :: OS Independent',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.6',
            'Programming Language :: Python :: 3.7',
            'Programming Language :: Python :: 3.8',
        ],
        license='Apache License 2.0',
        setup_requires=parse_requirements('requirements/build.txt'),
        tests_require=parse_requirements('requirements/tests.txt'),
        install_requires=parse_requirements('requirements/runtime.txt'),
        extras_require={
            'all': parse_requirements('requirements.txt'),
            'tests': parse_requirements('requirements/tests.txt'),
            'build': parse_requirements('requirements/build.txt'),
            'optional': parse_requirements('requirements/optional.txt'),
        },
        ext_modules=[],
        cmdclass={'build_ext': BuildExtension},
        zip_safe=False)
| 5,864 | 35.203704 | 125 | py |
GFocalV2 | GFocalV2-master/tools/test.py | import argparse
import os
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.cnn import fuse_conv_bn
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
wrap_fp16_model)
from mmdet.apis import multi_gpu_test, single_gpu_test
from mmdet.datasets import (build_dataloader, build_dataset,
replace_ImageToTensor)
from mmdet.models import build_detector
def parse_args():
    """Parse CLI arguments for the MMDet test/eval script.

    Also propagates --local_rank into the LOCAL_RANK env var and maps the
    deprecated --options flag onto --eval-options.
    """
    parser = argparse.ArgumentParser(
        description='MMDet test (and eval) a model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--out', help='output result file in pickle format')
    parser.add_argument(
        '--fuse-conv-bn',
        action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase'
        'the inference speed')
    parser.add_argument(
        '--format-only',
        action='store_true',
        help='Format the output results without perform evaluation. It is'
        'useful when you want to format the result to a specific format and '
        'submit it to the test server')
    parser.add_argument(
        '--eval',
        type=str,
        nargs='+',
        help='evaluation metrics, which depends on the dataset, e.g., "bbox",'
        ' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument(
        '--show-dir', help='directory where painted images will be saved')
    parser.add_argument(
        '--show-score-thr',
        type=float,
        default=0.3,
        help='score threshold (default: 0.3)')
    parser.add_argument(
        '--gpu-collect',
        action='store_true',
        help='whether to use gpu to collect results.')
    parser.add_argument(
        '--tmpdir',
        help='tmp directory used for collecting results from multiple '
        'workers, available when gpu-collect is not specified')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file.')
    parser.add_argument(
        '--options',
        nargs='+',
        action=DictAction,
        help='custom options for evaluation, the key-value pair in xxx=yyy '
        'format will be kwargs for dataset.evaluate() function (deprecate), '
        'change to --eval-options instead.')
    parser.add_argument(
        '--eval-options',
        nargs='+',
        action=DictAction,
        help='custom options for evaluation, the key-value pair in xxx=yyy '
        'format will be kwargs for dataset.evaluate() function')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    # distributed launchers read the rank from the environment
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)

    # --options is deprecated; keep backward compatibility via --eval-options
    if args.options and args.eval_options:
        raise ValueError(
            '--options and --eval-options cannot be both '
            'specified, --options is deprecated in favor of --eval-options')
    if args.options:
        warnings.warn('--options is deprecated in favor of --eval-options')
        args.eval_options = args.options
    return args
def main():
    """Entry point: build dataset/model from the config, run (distributed)
    inference, then optionally dump, format and evaluate the results."""
    args = parse_args()

    # at least one output/eval/visualization action must be requested
    assert args.out or args.eval or args.format_only or args.show \
        or args.show_dir, \
        ('Please specify at least one operation (save/eval/format/show the '
         'results / save the results) with the argument "--out", "--eval"'
         ', "--format-only", "--show" or "--show-dir"')

    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # import modules from string list.
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # pretrained weights are irrelevant at test time (checkpoint is loaded)
    cfg.model.pretrained = None
    if cfg.model.get('neck'):
        if isinstance(cfg.model.neck, list):
            for neck_cfg in cfg.model.neck:
                if neck_cfg.get('rfp_backbone'):
                    if neck_cfg.rfp_backbone.get('pretrained'):
                        neck_cfg.rfp_backbone.pretrained = None
        elif cfg.model.neck.get('rfp_backbone'):
            if cfg.model.neck.rfp_backbone.get('pretrained'):
                cfg.model.neck.rfp_backbone.pretrained = None

    # in case the test dataset is concatenated
    if isinstance(cfg.data.test, dict):
        cfg.data.test.test_mode = True
    elif isinstance(cfg.data.test, list):
        for ds_cfg in cfg.data.test:
            ds_cfg.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
    if samples_per_gpu > 1:
        # Replace 'ImageToTensor' to 'DefaultFormatBundle'
        cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=samples_per_gpu,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_conv_bn(model)
    # old versions did not save class info in checkpoints, this walkaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show, args.show_dir,
                                  args.show_score_thr)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)

    # only rank 0 writes/evaluates the gathered results
    rank, _ = get_dist_info()
    if rank == 0:
        if args.out:
            print(f'\nwriting results to {args.out}')
            mmcv.dump(outputs, args.out)
        kwargs = {} if args.eval_options is None else args.eval_options
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            eval_kwargs = cfg.get('evaluation', {}).copy()
            # hard-code way to remove EvalHook args
            for key in ['interval', 'tmpdir', 'start', 'gpu_collect']:
                eval_kwargs.pop(key, None)
            eval_kwargs.update(dict(metric=args.eval, **kwargs))
            print(dataset.evaluate(outputs, **eval_kwargs))


if __name__ == '__main__':
    main()
| 7,967 | 37.679612 | 79 | py |
GFocalV2 | GFocalV2-master/tools/benchmark.py | import argparse
import time
import torch
from mmcv import Config
from mmcv.cnn import fuse_conv_bn
from mmcv.parallel import MMDataParallel
from mmcv.runner import load_checkpoint, wrap_fp16_model
from mmdet.datasets import (build_dataloader, build_dataset,
replace_ImageToTensor)
from mmdet.models import build_detector
def parse_args():
    """Parse CLI arguments for the benchmark script.

    Returns:
        argparse.Namespace with config/checkpoint paths, the logging
        interval, and the conv-bn fusion flag.
    """
    parser = argparse.ArgumentParser(description='MMDet benchmark a model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    # BUGFIX: type=int is required — without it a user-supplied value arrives
    # as a string, and `(i + 1) % args.log_interval` in main() raises
    # TypeError.
    parser.add_argument(
        '--log-interval', type=int, default=50, help='interval of logging')
    parser.add_argument(
        '--fuse-conv-bn',
        action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase'
        'the inference speed')
    args = parser.parse_args()
    return args
def main():
    """Benchmark single-image inference speed (img/s) of a detector."""
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # import modules from string list.
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # build the dataloader
    samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
    if samples_per_gpu > 1:
        # Replace 'ImageToTensor' to 'DefaultFormatBundle'
        cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=False,
        shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_conv_bn(model)
    model = MMDataParallel(model, device_ids=[0])
    model.eval()

    # the first several iterations may be very slow so skip them
    num_warmup = 5
    pure_inf_time = 0

    # benchmark with 2000 image and take the average
    for i, data in enumerate(data_loader):
        # synchronize so the timer measures the full GPU computation
        torch.cuda.synchronize()
        start_time = time.perf_counter()
        with torch.no_grad():
            model(return_loss=False, rescale=True, **data)
        torch.cuda.synchronize()
        elapsed = time.perf_counter() - start_time
        if i >= num_warmup:
            pure_inf_time += elapsed
            if (i + 1) % args.log_interval == 0:
                fps = (i + 1 - num_warmup) / pure_inf_time
                print(f'Done image [{i + 1:<3}/ 2000], fps: {fps:.1f} img / s')
        if (i + 1) == 2000:
            pure_inf_time += elapsed
            fps = (i + 1 - num_warmup) / pure_inf_time
            print(f'Overall fps: {fps:.1f} img / s')
            break


if __name__ == '__main__':
    main()
| 3,176 | 30.455446 | 79 | py |
GFocalV2 | GFocalV2-master/tools/get_flops.py | import argparse
import torch
from mmcv import Config
from mmdet.models import build_detector
try:
from mmcv.cnn import get_model_complexity_info
except ImportError:
raise ImportError('Please upgrade mmcv to >0.6.2')
def parse_args():
    """Parse command-line arguments for the FLOPs counter script."""
    arg_parser = argparse.ArgumentParser(description='Train a detector')
    arg_parser.add_argument('config', help='train config file path')
    arg_parser.add_argument(
        '--shape',
        type=int,
        nargs='+',
        default=[1280, 800],
        help='input image size')
    return arg_parser.parse_args()
def main():
    """Build a detector from a config file and report its FLOPs/params."""
    args = parse_args()

    # Turn the CLI shape into a (C, H, W) dummy-input shape.
    shape = tuple(args.shape)
    if len(shape) == 1:
        input_shape = (3, shape[0], shape[0])
    elif len(shape) == 2:
        input_shape = (3, ) + shape
    else:
        raise ValueError('invalid input shape')

    cfg = Config.fromfile(args.config)
    # import modules from string list.
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])

    model = build_detector(
        cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
    if torch.cuda.is_available():
        model.cuda()
    model.eval()

    # The complexity counter calls plain ``forward``; detectors expose a
    # ``forward_dummy`` that accepts a bare tensor, so route through it.
    if not hasattr(model, 'forward_dummy'):
        raise NotImplementedError(
            'FLOPs counter is currently not currently supported with {}'.
            format(model.__class__.__name__))
    model.forward = model.forward_dummy

    flops, params = get_model_complexity_info(model, input_shape)
    split_line = '=' * 30
    print(f'{split_line}\nInput shape: {input_shape}\n'
          f'Flops: {flops}\nParams: {params}\n{split_line}')
    print('!!!Please be cautious if you use the results in papers. '
          'You may need to check if all ops are supported and verify that the '
          'flops computation is correct.')
# Script entry point.
if __name__ == '__main__':
    main()
| 1,932 | 27.426471 | 79 | py |
GFocalV2 | GFocalV2-master/tools/publish_model.py | import argparse
import subprocess
import torch
def parse_args():
    """Parse the input/output checkpoint paths from the command line."""
    cli = argparse.ArgumentParser(
        description='Process a checkpoint to be published')
    cli.add_argument('in_file', help='input checkpoint filename')
    cli.add_argument('out_file', help='output checkpoint filename')
    return cli.parse_args()
def process_checkpoint(in_file, out_file):
    """Strip training state from a checkpoint and publish it.

    Loads ``in_file``, drops the optimizer state (useless for inference and
    it inflates the file), saves the result to ``out_file``, then renames it
    to ``<out_file stem>-<first 8 sha256 hex chars>.pth`` so the published
    name encodes a content hash.
    """
    # Local imports keep this tool's top-of-file imports untouched.
    import hashlib
    import os

    checkpoint = torch.load(in_file, map_location='cpu')
    # remove optimizer for smaller file size
    if 'optimizer' in checkpoint:
        del checkpoint['optimizer']
    # if it is necessary to remove some sensitive data in checkpoint['meta'],
    # add the code here.
    torch.save(checkpoint, out_file)

    # Hash the saved file in-process instead of shelling out to `sha256sum`:
    # portable (no coreutils needed) and no output parsing.
    hasher = hashlib.sha256()
    with open(out_file, 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):
            hasher.update(chunk)
    sha = hasher.hexdigest()

    if out_file.endswith('.pth'):
        out_file_name = out_file[:-4]
    else:
        out_file_name = out_file
    final_file = out_file_name + f'-{sha[:8]}.pth'
    # os.replace completes before returning — unlike the previous
    # fire-and-forget `subprocess.Popen(['mv', ...])`, which raced with any
    # caller inspecting the result (and was POSIX-only).
    os.replace(out_file, final_file)
def main():
    """CLI entry point: read paths from argv and publish the checkpoint."""
    args = parse_args()
    process_checkpoint(args.in_file, args.out_file)
# Script entry point.
if __name__ == '__main__':
    main()
| 1,125 | 27.15 | 77 | py |
GFocalV2 | GFocalV2-master/tools/regnet2mmdet.py | import argparse
from collections import OrderedDict
import torch
def convert_stem(model_key, model_weight, state_dict, converted_names):
    """Rename a pycls stem parameter to torchvision style and store it."""
    renamed = model_key.replace('stem.conv', 'conv1').replace('stem.bn', 'bn1')
    state_dict[renamed] = model_weight
    converted_names.add(model_key)
    print(f'Convert {model_key} to {renamed}')
def convert_head(model_key, model_weight, state_dict, converted_names):
    """Rename a pycls classifier-head parameter (``head.fc`` -> ``fc``)."""
    renamed = model_key.replace('head.fc', 'fc')
    state_dict[renamed] = model_weight
    converted_names.add(model_key)
    print(f'Convert {model_key} to {renamed}')
def convert_reslayer(model_key, model_weight, state_dict, converted_names):
    """Rename a pycls residual-stage parameter to mmdet ResNet style.

    pycls keys look like ``s<stage>.b<block>.<module>...``; mmdet expects
    ``layer<stage>.<block - 1>.<submodule>...``.

    Raises:
        ValueError: If the key does not match any known layout.
    """
    # Map pycls bottleneck sub-module names to mmdet conv/bn names.
    f_submodule_map = {
        'a_bn': 'bn1',
        'b_bn': 'bn2',
        'c_bn': 'bn3',
        'a': 'conv1',
        'b': 'conv2',
        'c': 'conv3',
    }
    split_keys = model_key.split('.')
    layer, block, module = split_keys[:3]
    block_id = int(block[1:])
    layer_name = f'layer{int(layer[1:])}'
    block_name = f'{block_id - 1}'
    if block_id == 1 and module == 'bn':
        # First block's projection BN becomes the downsample BN.
        new_key = f'{layer_name}.{block_name}.downsample.1.{split_keys[-1]}'
    elif block_id == 1 and module == 'proj':
        # First block's projection conv becomes the downsample conv.
        new_key = f'{layer_name}.{block_name}.downsample.0.{split_keys[-1]}'
    elif module == 'f':
        try:
            module_name = f_submodule_map[split_keys[3]]
        except KeyError:
            # Previously an unknown sub-module fell through the elif ladder
            # with ``module_name`` unbound and crashed with NameError.
            raise ValueError(f'Unsupported conversion of key {model_key}')
        new_key = f'{layer_name}.{block_name}.{module_name}.{split_keys[-1]}'
    else:
        raise ValueError(f'Unsupported conversion of key {model_key}')
    print(f'Convert {model_key} to {new_key}')
    state_dict[new_key] = model_weight
    converted_names.add(model_key)
def convert(src, dst):
    """Convert keys in pycls pretrained RegNet models to mmdet style.

    Args:
        src (str): Path to the pycls checkpoint (contains ``model_state``).
        dst (str): Path where the converted mmdet-style checkpoint is saved.
    """
    # load caffe model
    regnet_model = torch.load(src)
    blobs = regnet_model['model_state']
    # convert to pytorch style
    state_dict = OrderedDict()
    converted_names = set()  # tracks every source key that was translated
    for key, weight in blobs.items():
        # Dispatch on the pycls naming scheme: stem / head / stage blocks.
        if 'stem' in key:
            convert_stem(key, weight, state_dict, converted_names)
        elif 'head' in key:
            convert_head(key, weight, state_dict, converted_names)
        elif key.startswith('s'):
            convert_reslayer(key, weight, state_dict, converted_names)
    # check if all layers are converted; leftovers are reported, not fatal
    for key in blobs:
        if key not in converted_names:
            print(f'not converted: {key}')
    # save checkpoint
    checkpoint = dict()
    checkpoint['state_dict'] = state_dict
    torch.save(checkpoint, dst)
def main():
    """CLI entry point: convert the checkpoint named on the command line."""
    cli = argparse.ArgumentParser(description='Convert model keys')
    cli.add_argument('src', help='src detectron model path')
    cli.add_argument('dst', help='save path')
    opts = cli.parse_args()
    convert(opts.src, opts.dst)
# Script entry point.
if __name__ == '__main__':
    main()
| 3,015 | 32.511111 | 77 | py |
GFocalV2 | GFocalV2-master/tools/pytorch2onnx.py | import argparse
import os.path as osp
import numpy as np
import onnx
import onnxruntime as rt
import torch
from mmdet.core import (build_model_from_cfg, generate_inputs_and_wrap_model,
preprocess_example_input)
def pytorch2onnx(config_path,
                 checkpoint_path,
                 input_img,
                 input_shape,
                 opset_version=11,
                 show=False,
                 output_file='tmp.onnx',
                 verify=False,
                 normalize_cfg=None):
    """Export an MMDetection model to ONNX and optionally verify the export.

    Args:
        config_path (str): Path to the model config file.
        checkpoint_path (str): Path to the trained checkpoint.
        input_img (str): Image file used to build the dummy input.
        input_shape (tuple): Shape of the dummy input, e.g. ``(1, 3, H, W)``.
        opset_version (int): ONNX opset to export with.
        show (bool): If True, print the traced graph during export.
        output_file (str): Destination ``.onnx`` file path.
        verify (bool): If True, run the exported graph with onnxruntime and
            compare its first detected box against the PyTorch output.
        normalize_cfg (dict | None): Mean/std used to preprocess the image.
    """
    input_config = {
        'input_shape': input_shape,
        'input_path': input_img,
        'normalize_cfg': normalize_cfg
    }

    # prepare original model and meta for verifying the onnx model
    orig_model = build_model_from_cfg(config_path, checkpoint_path)
    one_img, one_meta = preprocess_example_input(input_config)
    # generate_inputs_and_wrap_model patches ``forward`` for tracing.
    model, tensor_data = generate_inputs_and_wrap_model(
        config_path, checkpoint_path, input_config)

    torch.onnx.export(
        model,
        tensor_data,
        output_file,
        export_params=True,
        keep_initializers_as_inputs=True,
        verbose=show,
        opset_version=opset_version)
    # Restore the original forward so the model can be called normally below.
    model.forward = orig_model.forward
    print(f'Successfully exported ONNX model: {output_file}')
    if verify:
        # check by onnx
        onnx_model = onnx.load(output_file)
        onnx.checker.check_model(onnx_model)

        # check the numerical value
        # get pytorch output
        pytorch_result = model(tensor_data, [[one_meta]], return_loss=False)

        # get onnx output
        input_all = [node.name for node in onnx_model.graph.input]
        input_initializer = [
            node.name for node in onnx_model.graph.initializer
        ]
        # Graph inputs minus initializers leaves the actual feed input(s);
        # exactly one image tensor is expected.
        net_feed_input = list(set(input_all) - set(input_initializer))
        assert (len(net_feed_input) == 1)
        sess = rt.InferenceSession(output_file)
        from mmdet.core import bbox2result
        det_bboxes, det_labels = sess.run(
            None, {net_feed_input[0]: one_img.detach().numpy()})
        # only compare a part of result
        bbox_results = bbox2result(det_bboxes, det_labels, 1)
        onnx_results = bbox_results[0]
        assert np.allclose(
            pytorch_result[0][0][0][:4], onnx_results[0]
            [:4]), 'The outputs are different between Pytorch and ONNX'
        print('The numerical values are the same between Pytorch and ONNX')
def parse_args():
    """Parse CLI options for the MMDetection -> ONNX export tool."""
    cli = argparse.ArgumentParser(
        description='Convert MMDetection models to ONNX')
    cli.add_argument('config', help='test config file path')
    cli.add_argument('checkpoint', help='checkpoint file')
    cli.add_argument('--input-img', type=str, help='Images for input')
    cli.add_argument('--show', action='store_true', help='show onnx graph')
    cli.add_argument('--output-file', type=str, default='tmp.onnx')
    cli.add_argument('--opset-version', type=int, default=11)
    cli.add_argument(
        '--verify',
        action='store_true',
        help='verify the onnx model output against pytorch output')
    cli.add_argument(
        '--shape',
        type=int,
        nargs='+',
        default=[800, 1216],
        help='input image size')
    cli.add_argument(
        '--mean',
        type=float,
        nargs='+',
        default=[123.675, 116.28, 103.53],
        help='mean value used for preprocess input data')
    cli.add_argument(
        '--std',
        type=float,
        nargs='+',
        default=[58.395, 57.12, 57.375],
        help='variance value used for preprocess input data')
    return cli.parse_args()
# Script driver: validate CLI options, build the dummy-input shape and
# normalization config, then run the export.
if __name__ == '__main__':
    args = parse_args()

    assert args.opset_version == 11, 'MMDet only support opset 11 now'

    if not args.input_img:
        # Fall back to a small test image shipped with the repo.
        args.input_img = osp.join(
            osp.dirname(__file__), '../tests/data/color.jpg')

    # One value means a square input; two values are (H, W).
    if len(args.shape) == 1:
        input_shape = (1, 3, args.shape[0], args.shape[0])
    elif len(args.shape) == 2:
        input_shape = (1, 3) + tuple(args.shape)
    else:
        raise ValueError('invalid input shape')

    assert len(args.mean) == 3
    assert len(args.std) == 3

    normalize_cfg = {'mean': args.mean, 'std': args.std}

    # convert model to onnx file
    pytorch2onnx(
        args.config,
        args.checkpoint,
        args.input_img,
        input_shape,
        opset_version=args.opset_version,
        show=args.show,
        output_file=args.output_file,
        verify=args.verify,
        normalize_cfg=normalize_cfg)
| 4,585 | 31.06993 | 78 | py |
GFocalV2 | GFocalV2-master/tools/upgrade_model_version.py | import argparse
import re
import tempfile
from collections import OrderedDict
import torch
from mmcv import Config
def is_head(key):
    """Return True if ``key`` belongs to one of the known head modules."""
    head_prefixes = ('bbox_head', 'mask_head', 'semantic_head', 'grid_head',
                     'mask_iou_head')
    # str.startswith accepts a tuple of prefixes, matching any of them.
    return key.startswith(head_prefixes)
def parse_config(config_strings):
    """Infer model-family flags from a config string stored in a checkpoint.

    Writes ``config_strings`` to a temporary ``.py`` file (mmcv's
    ``Config.fromfile`` only accepts file paths), loads it, and returns a
    tuple ``(is_two_stage, is_ssd, is_retina, reg_cls_agnostic)``.
    """
    import os

    temp_file = tempfile.NamedTemporaryFile()
    config_path = f'{temp_file.name}.py'
    try:
        with open(config_path, 'w') as f:
            f.write(config_strings)
        config = Config.fromfile(config_path)
    finally:
        # The side-car ``.py`` file was previously leaked: closing the
        # NamedTemporaryFile only deletes ``temp_file.name`` itself, and
        # nothing was cleaned up when Config.fromfile raised.
        if os.path.exists(config_path):
            os.remove(config_path)
        temp_file.close()
    is_two_stage = True
    is_ssd = False
    is_retina = False
    reg_cls_agnostic = False
    if 'rpn_head' not in config.model:
        # single-stage detector
        is_two_stage = False
        # check whether it is SSD
        if config.model.bbox_head.type == 'SSDHead':
            is_ssd = True
        elif config.model.bbox_head.type == 'RetinaHead':
            is_retina = True
    elif isinstance(config.model['bbox_head'], list):
        # a list of heads (cascade-style) implies class-agnostic regression
        reg_cls_agnostic = True
    elif 'reg_class_agnostic' in config.model.bbox_head:
        reg_cls_agnostic = config.model.bbox_head.reg_class_agnostic
    return is_two_stage, is_ssd, is_retina, reg_cls_agnostic
def reorder_cls_channel(val, num_classes=81):
    """Move the background class channel from index 0 to the last index.

    Works on both bias vectors and weight tensors; tensors whose channel
    count is unrelated to ``num_classes`` are returned unchanged.
    """
    # Bias vector: rotate the first (background) entry to the end.
    if val.dim() == 1:
        return torch.cat((val[1:], val[:1]), dim=0)
    out_channels, in_channels = val.shape[:2]
    if out_channels != num_classes and out_channels % num_classes == 0:
        # conv_cls for softmax output: rotate within each anchor group.
        grouped = val.reshape(-1, num_classes, in_channels, *val.shape[2:])
        rotated = torch.cat((grouped[:, 1:], grouped[:, :1]), dim=1)
        return rotated.reshape(val.size())
    if out_channels == num_classes:
        # fc_cls: one channel per class, rotate directly.
        return torch.cat((val[1:], val[:1]), dim=0)
    # agnostic | retina_cls | rpn_cls: nothing to reorder.
    return val
def truncate_cls_channel(val, num_classes=81):
    """Drop the background class channel of a mask-head tensor.

    Tensors whose channel count is not a multiple of ``num_classes`` are
    treated as class-agnostic and returned untouched.
    """
    if val.dim() == 1:
        # bias: keep the first ``num_classes - 1`` entries
        if val.size(0) % num_classes == 0:
            return val[:num_classes - 1]
        return val
    out_channels, in_channels = val.shape[:2]
    if out_channels % num_classes == 0:
        # conv_logits: drop the leading (background) class group
        kept = val.reshape(num_classes, in_channels, *val.shape[2:])[1:]
        return kept.reshape(-1, *val.shape[1:])
    # agnostic head: leave as-is
    return val
def truncate_reg_channel(val, num_classes=81):
    """Strip one class group from a class-aware box-regression tensor.

    Class-agnostic tensors (channel count not a multiple of
    ``num_classes``) are returned unchanged.
    """
    if val.dim() == 1:
        # fc_reg | rpn_reg bias
        if val.size(0) % num_classes == 0:
            per_class = val.reshape(num_classes, -1)
            return per_class[:num_classes - 1].reshape(-1)
        return val
    out_channels, in_channels = val.shape[:2]
    if out_channels % num_classes == 0:
        # fc_reg | rpn_reg weight
        per_class = val.reshape(num_classes, -1, in_channels, *val.shape[2:])
        return per_class[1:].reshape(-1, *val.shape[1:])
    # agnostic regressor
    return val
def convert(in_file, out_file, num_classes):
    """Convert keys in checkpoints.

    There can be some breaking changes during the development of mmdetection,
    and this tool is used for upgrading checkpoints trained with old versions
    to the latest one.

    Args:
        in_file (str): Path to the old checkpoint.
        out_file (str): Path where the upgraded checkpoint is saved.
        num_classes (int): Number of classes (including background) the
            original model was trained with.
    """
    checkpoint = torch.load(in_file)
    in_state_dict = checkpoint.pop('state_dict')
    out_state_dict = OrderedDict()
    meta_info = checkpoint['meta']
    is_two_stage, is_ssd, is_retina, reg_cls_agnostic = parse_config(
        meta_info['config'])
    # NOTE(review): these are lexicographic string comparisons, not semantic
    # version comparisons (e.g. '0.10.0' < '0.5.3' here) — verify the
    # version strings involved never hit that case.
    if meta_info['mmdet_version'] <= '0.5.3' and is_retina:
        upgrade_retina = True
    else:
        upgrade_retina = False

    # MMDetection v2.5.0 unifies the class order in RPN
    # if the model is trained in version<v2.5.0
    # The RPN model should be upgraded to be used in version>=2.5.0
    if meta_info['mmdet_version'] < '2.5.0':
        upgrade_rpn = True
    else:
        upgrade_rpn = False

    for key, val in in_state_dict.items():
        new_key = key
        new_val = val
        # Two-stage heads moved under the ``roi_head`` namespace.
        if is_two_stage and is_head(key):
            new_key = 'roi_head.{}'.format(key)

        # classification: rotate the background channel to the end
        if upgrade_rpn:
            m = re.search(
                r'(conv_cls|retina_cls|rpn_cls|fc_cls|fcos_cls|'
                r'fovea_cls).(weight|bias)', new_key)
        else:
            m = re.search(
                r'(conv_cls|retina_cls|fc_cls|fcos_cls|'
                r'fovea_cls).(weight|bias)', new_key)
        if m is not None:
            print(f'reorder cls channels of {new_key}')
            new_val = reorder_cls_channel(val, num_classes)

        # regression: drop one class group for class-aware regressors
        if upgrade_rpn:
            m = re.search(r'(fc_reg).(weight|bias)', new_key)
        else:
            m = re.search(r'(fc_reg|rpn_reg).(weight|bias)', new_key)
        if m is not None and not reg_cls_agnostic:
            print(f'truncate regression channels of {new_key}')
            new_val = truncate_reg_channel(val, num_classes)

        # mask head: drop the background mask channel
        m = re.search(r'(conv_logits).(weight|bias)', new_key)
        if m is not None:
            print(f'truncate mask prediction channels of {new_key}')
            new_val = truncate_cls_channel(val, num_classes)

        m = re.search(r'(cls_convs|reg_convs).\d.(weight|bias)', key)
        # Legacy issues in RetinaNet since V1.x
        # Use ConvModule instead of nn.Conv2d in RetinaNet
        # cls_convs.0.weight -> cls_convs.0.conv.weight
        if m is not None and upgrade_retina:
            param = m.groups()[1]
            new_key = key.replace(param, f'conv.{param}')
            out_state_dict[new_key] = val
            print(f'rename the name of {key} to {new_key}')
            continue

        m = re.search(r'(cls_convs).\d.(weight|bias)', key)
        if m is not None and is_ssd:
            print(f'reorder cls channels of {new_key}')
            new_val = reorder_cls_channel(val, num_classes)

        out_state_dict[new_key] = new_val
    checkpoint['state_dict'] = out_state_dict
    torch.save(checkpoint, out_file)
def main():
    """CLI entry point: upgrade the checkpoint named on the command line."""
    cli = argparse.ArgumentParser(description='Upgrade model version')
    cli.add_argument('in_file', help='input checkpoint file')
    cli.add_argument('out_file', help='output checkpoint file')
    cli.add_argument(
        '--num-classes',
        type=int,
        default=81,
        help='number of classes of the original model')
    opts = cli.parse_args()
    convert(opts.in_file, opts.out_file, opts.num_classes)
# Script entry point.
if __name__ == '__main__':
    main()
| 6,794 | 31.357143 | 79 | py |
GFocalV2 | GFocalV2-master/tools/test_robustness.py | import argparse
import copy
import os
import os.path as osp
import mmcv
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
wrap_fp16_model)
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from robustness_eval import get_results
from mmdet import datasets
from mmdet.apis import multi_gpu_test, set_random_seed, single_gpu_test
from mmdet.core import eval_map
from mmdet.datasets import build_dataloader, build_dataset
from mmdet.models import build_detector
def coco_eval_with_return(result_files,
                          result_types,
                          coco,
                          max_dets=(100, 300, 1000)):
    """Run COCO evaluation and return the metrics instead of only printing.

    Args:
        result_files (dict): Maps each result type to its ``.json`` file.
        result_types (list[str]): Subset of
            ``['proposal', 'bbox', 'segm', 'keypoints']``.
        coco (COCO | str): A COCO API object or a path to an annotation file.
        max_dets (tuple): ``maxDets`` used for proposal evaluation.

    Returns:
        dict: Per result type, either a name->value metric dict (bbox/segm)
        or the raw ``cocoEval.stats`` array.
    """
    for res_type in result_types:
        assert res_type in ['proposal', 'bbox', 'segm', 'keypoints']

    if mmcv.is_str(coco):
        coco = COCO(coco)
    assert isinstance(coco, COCO)

    eval_results = {}
    for res_type in result_types:
        result_file = result_files[res_type]
        assert result_file.endswith('.json')

        coco_dets = coco.loadRes(result_file)
        img_ids = coco.getImgIds()
        # proposals are evaluated with bbox IoU but without category labels
        iou_type = 'bbox' if res_type == 'proposal' else res_type
        cocoEval = COCOeval(coco, coco_dets, iou_type)
        cocoEval.params.imgIds = img_ids
        if res_type == 'proposal':
            cocoEval.params.useCats = 0
            cocoEval.params.maxDets = list(max_dets)
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
        if res_type == 'segm' or res_type == 'bbox':
            # name the 12 standard COCO summary metrics
            metric_names = [
                'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'AR1', 'AR10',
                'AR100', 'ARs', 'ARm', 'ARl'
            ]
            eval_results[res_type] = {
                metric_names[i]: cocoEval.stats[i]
                for i in range(len(metric_names))
            }
        else:
            eval_results[res_type] = cocoEval.stats

    return eval_results
def voc_eval_with_return(result_file,
                         dataset,
                         iou_thr=0.5,
                         logger='print',
                         only_ap=True):
    """Run Pascal-VOC mAP evaluation and return the results.

    Args:
        result_file (str): Pickled detection results (one list per image).
        dataset: Dataset providing ``get_ann_info`` and class names.
        iou_thr (float): IoU threshold used for matching.
        logger (str | None): Passed through to ``eval_map``.
        only_ap (bool): If True, strip per-class results down to their AP.

    Returns:
        tuple: ``(mean_ap, eval_results)``.
    """
    det_results = mmcv.load(result_file)
    annotations = [dataset.get_ann_info(i) for i in range(len(dataset))]
    # VOC2007 uses the legacy 11-point AP metric
    if hasattr(dataset, 'year') and dataset.year == 2007:
        dataset_name = 'voc07'
    else:
        dataset_name = dataset.CLASSES
    mean_ap, eval_results = eval_map(
        det_results,
        annotations,
        scale_ranges=None,
        iou_thr=iou_thr,
        dataset=dataset_name,
        logger=logger)
    if only_ap:
        eval_results = [{
            'ap': eval_results[i]['ap']
        } for i in range(len(eval_results))]

    return mean_ap, eval_results
def parse_args():
    """Parse CLI arguments for the robustness benchmark tool."""
    parser = argparse.ArgumentParser(description='MMDet test detector')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--out', help='output result file')
    parser.add_argument(
        '--corruptions',
        type=str,
        nargs='+',
        default='benchmark',
        choices=[
            'all', 'benchmark', 'noise', 'blur', 'weather', 'digital',
            'holdout', 'None', 'gaussian_noise', 'shot_noise', 'impulse_noise',
            'defocus_blur', 'glass_blur', 'motion_blur', 'zoom_blur', 'snow',
            'frost', 'fog', 'brightness', 'contrast', 'elastic_transform',
            'pixelate', 'jpeg_compression', 'speckle_noise', 'gaussian_blur',
            'spatter', 'saturate'
        ],
        help='corruptions')
    parser.add_argument(
        '--severities',
        type=int,
        nargs='+',
        default=[0, 1, 2, 3, 4, 5],
        help='corruption severity levels')
    parser.add_argument(
        '--eval',
        type=str,
        nargs='+',
        choices=['proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'],
        help='eval types')
    parser.add_argument(
        '--iou-thr',
        type=float,
        default=0.5,
        help='IoU threshold for pascal voc evaluation')
    # NOTE(review): argparse ``type=bool`` turns any non-empty string into
    # True (bool('False') is True); ``action='store_true'`` would be safer.
    parser.add_argument(
        '--summaries',
        type=bool,
        default=False,
        help='Print summaries for every corruption and severity')
    parser.add_argument(
        '--workers', type=int, default=32, help='workers per gpu')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument(
        '--show-dir', help='directory where painted images will be saved')
    parser.add_argument(
        '--show-score-thr',
        type=float,
        default=0.3,
        help='score threshold (default: 0.3)')
    parser.add_argument('--tmpdir', help='tmp dir for writing some results')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument(
        '--final-prints',
        type=str,
        nargs='+',
        choices=['P', 'mPC', 'rPC'],
        default='mPC',
        help='corruption benchmark metric to print at the end')
    parser.add_argument(
        '--final-prints-aggregate',
        type=str,
        choices=['all', 'benchmark'],
        default='benchmark',
        help='aggregate all results or only those for benchmark corruptions')
    args = parser.parse_args()
    # Mirror the launcher-provided rank into the environment for code that
    # reads LOCAL_RANK directly.
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args
def main():
    """Benchmark a detector's robustness on corrupted copies of the test set.

    For every requested corruption type and severity the test pipeline is
    patched with a ``Corrupt`` transform, the model is evaluated, and the
    per-corruption results are aggregated and finally summarized.
    """
    args = parse_args()

    assert args.out or args.show or args.show_dir, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out", "--show" or "show-dir"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # import modules from string list.
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    if args.workers == 0:
        args.workers = cfg.data.workers_per_gpu

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # set random seeds
    if args.seed is not None:
        set_random_seed(args.seed)

    # Expand shorthand corruption groups into concrete corruption names.
    if 'all' in args.corruptions:
        corruptions = [
            'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
            'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
            'brightness', 'contrast', 'elastic_transform', 'pixelate',
            'jpeg_compression', 'speckle_noise', 'gaussian_blur', 'spatter',
            'saturate'
        ]
    elif 'benchmark' in args.corruptions:
        corruptions = [
            'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
            'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
            'brightness', 'contrast', 'elastic_transform', 'pixelate',
            'jpeg_compression'
        ]
    elif 'noise' in args.corruptions:
        corruptions = ['gaussian_noise', 'shot_noise', 'impulse_noise']
    elif 'blur' in args.corruptions:
        corruptions = [
            'defocus_blur', 'glass_blur', 'motion_blur', 'zoom_blur'
        ]
    elif 'weather' in args.corruptions:
        corruptions = ['snow', 'frost', 'fog', 'brightness']
    elif 'digital' in args.corruptions:
        corruptions = [
            'contrast', 'elastic_transform', 'pixelate', 'jpeg_compression'
        ]
    elif 'holdout' in args.corruptions:
        corruptions = ['speckle_noise', 'gaussian_blur', 'spatter', 'saturate']
    elif 'None' in args.corruptions:
        corruptions = ['None']
        args.severities = [0]
    else:
        corruptions = args.corruptions

    rank, _ = get_dist_info()
    aggregated_results = {}
    for corr_i, corruption in enumerate(corruptions):
        aggregated_results[corruption] = {}
        for sev_i, corruption_severity in enumerate(args.severities):
            # evaluate severity 0 (= no corruption) only once
            if corr_i > 0 and corruption_severity == 0:
                aggregated_results[corruption][0] = \
                    aggregated_results[corruptions[0]][0]
                continue

            test_data_cfg = copy.deepcopy(cfg.data.test)
            # assign corruption and severity
            if corruption_severity > 0:
                corruption_trans = dict(
                    type='Corrupt',
                    corruption=corruption,
                    severity=corruption_severity)
                # TODO: hard coded "1", we assume that the first step is
                # loading images, which needs to be fixed in the future
                test_data_cfg['pipeline'].insert(1, corruption_trans)

            # print info
            print(f'\nTesting {corruption} at severity {corruption_severity}')

            # build the dataloader
            # TODO: support multiple images per gpu
            # (only minor changes are needed)
            dataset = build_dataset(test_data_cfg)
            data_loader = build_dataloader(
                dataset,
                samples_per_gpu=1,
                workers_per_gpu=args.workers,
                dist=distributed,
                shuffle=False)

            # build the model and load checkpoint
            model = build_detector(
                cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
            fp16_cfg = cfg.get('fp16', None)
            if fp16_cfg is not None:
                wrap_fp16_model(model)
            checkpoint = load_checkpoint(
                model, args.checkpoint, map_location='cpu')
            # old versions did not save class info in checkpoints,
            # this walkaround is for backward compatibility
            if 'CLASSES' in checkpoint['meta']:
                model.CLASSES = checkpoint['meta']['CLASSES']
            else:
                model.CLASSES = dataset.CLASSES

            if not distributed:
                model = MMDataParallel(model, device_ids=[0])
                show_dir = args.show_dir
                if show_dir is not None:
                    show_dir = osp.join(show_dir, corruption)
                    show_dir = osp.join(show_dir, str(corruption_severity))
                    if not osp.exists(show_dir):
                        # BUG FIX: ``os.path`` has no ``makedirs``; the
                        # original ``osp.makedirs`` raised AttributeError.
                        os.makedirs(show_dir)
                outputs = single_gpu_test(model, data_loader, args.show,
                                          show_dir, args.show_score_thr)
            else:
                model = MMDistributedDataParallel(
                    model.cuda(),
                    device_ids=[torch.cuda.current_device()],
                    broadcast_buffers=False)
                outputs = multi_gpu_test(model, data_loader, args.tmpdir)

            if args.out and rank == 0:
                eval_results_filename = (
                    osp.splitext(args.out)[0] + '_results' +
                    osp.splitext(args.out)[1])
                mmcv.dump(outputs, args.out)
                eval_types = args.eval
                if cfg.dataset_type == 'VOCDataset':
                    if eval_types:
                        for eval_type in eval_types:
                            if eval_type == 'bbox':
                                test_dataset = mmcv.runner.obj_from_dict(
                                    cfg.data.test, datasets)
                                logger = 'print' if args.summaries else None
                                mean_ap, eval_results = \
                                    voc_eval_with_return(
                                        args.out, test_dataset,
                                        args.iou_thr, logger)
                                aggregated_results[corruption][
                                    corruption_severity] = eval_results
                            else:
                                print('\nOnly "bbox" evaluation \
is supported for pascal voc')
                else:
                    if eval_types:
                        print(f'Starting evaluate {" and ".join(eval_types)}')
                        if eval_types == ['proposal_fast']:
                            result_file = args.out
                        else:
                            if not isinstance(outputs[0], dict):
                                result_files = dataset.results2json(
                                    outputs, args.out)
                            else:
                                for name in outputs[0]:
                                    print(f'\nEvaluating {name}')
                                    outputs_ = [out[name] for out in outputs]
                                    # BUG FIX: the suffix used to sit on its
                                    # own statement (a no-op unary ``+``), so
                                    # every head overwrote the same file.
                                    result_file = args.out + f'.{name}'
                                    result_files = dataset.results2json(
                                        outputs_, result_file)
                        # NOTE(review): when eval_types == ['proposal_fast'],
                        # only ``result_file`` is assigned and
                        # ``result_files`` may be unbound here — verify
                        # against upstream intent.
                        eval_results = coco_eval_with_return(
                            result_files, eval_types, dataset.coco)
                        aggregated_results[corruption][
                            corruption_severity] = eval_results
                    else:
                        print('\nNo task was selected for evaluation;'
                              '\nUse --eval to select a task')

                # save results after each evaluation
                mmcv.dump(aggregated_results, eval_results_filename)

    if rank == 0:
        # print final results
        print('\nAggregated results:')
        prints = args.final_prints
        aggregate = args.final_prints_aggregate

        if cfg.dataset_type == 'VOCDataset':
            get_results(
                eval_results_filename,
                dataset='voc',
                prints=prints,
                aggregate=aggregate)
        else:
            get_results(
                eval_results_filename,
                dataset='coco',
                prints=prints,
                aggregate=aggregate)
# Script entry point.
if __name__ == '__main__':
    main()
| 14,711 | 37.920635 | 79 | py |
GFocalV2 | GFocalV2-master/tools/train.py | import argparse
import copy
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.runner import init_dist
from mmcv.utils import get_git_hash
from mmdet import __version__
from mmdet.apis import set_random_seed, train_detector
from mmdet.datasets import build_dataset
from mmdet.models import build_detector
from mmdet.utils import collect_env, get_root_logger
def parse_args():
    """Parse training CLI arguments.

    Maps the deprecated ``--options`` flag onto ``--cfg-options`` and makes
    sure ``LOCAL_RANK`` is present in the environment for the distributed
    launchers.
    """
    parser = argparse.ArgumentParser(description='Train a detector')
    parser.add_argument('config', help='train config file path')
    parser.add_argument('--work-dir', help='the dir to save logs and models')
    parser.add_argument(
        '--resume-from', help='the checkpoint file to resume from')
    parser.add_argument(
        '--no-validate',
        action='store_true',
        help='whether not to evaluate the checkpoint during training')
    group_gpus = parser.add_mutually_exclusive_group()
    group_gpus.add_argument(
        '--gpus',
        type=int,
        help='number of gpus to use '
        '(only applicable to non-distributed training)')
    group_gpus.add_argument(
        '--gpu-ids',
        type=int,
        nargs='+',
        help='ids of gpus to use '
        '(only applicable to non-distributed training)')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument(
        '--deterministic',
        action='store_true',
        help='whether to set deterministic options for CUDNN backend.')
    parser.add_argument(
        '--options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file (deprecate), '
        'change to --cfg-options instead.')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file.')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    # Mirror the launcher-provided rank into the environment for code that
    # reads LOCAL_RANK directly.
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)

    if args.options and args.cfg_options:
        raise ValueError(
            '--options and --cfg-options cannot be both '
            'specified, --options is deprecated in favor of --cfg-options')
    if args.options:
        warnings.warn('--options is deprecated in favor of --cfg-options')
        args.cfg_options = args.options

    return args
def main():
    """Train a detector: load config, set up env/logging, then run training."""
    args = parse_args()

    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # import modules from string list.
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True

    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    if args.gpu_ids is not None:
        cfg.gpu_ids = args.gpu_ids
    else:
        cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # dump config
    cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
    # init the logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)

    # init the meta dict to record some important information such as
    # environment info and seed, which will be logged
    meta = dict()
    # log env info
    env_info_dict = collect_env()
    env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' +
                dash_line)
    meta['env_info'] = env_info
    meta['config'] = cfg.pretty_text
    # log some basic info
    logger.info(f'Distributed training: {distributed}')
    logger.info(f'Config:\n{cfg.pretty_text}')

    # set random seeds
    if args.seed is not None:
        logger.info(f'Set random seed to {args.seed}, '
                    f'deterministic: {args.deterministic}')
        set_random_seed(args.seed, deterministic=args.deterministic)
    cfg.seed = args.seed
    meta['seed'] = args.seed
    meta['exp_name'] = osp.basename(args.config)

    model = build_detector(
        cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)

    datasets = [build_dataset(cfg.data.train)]
    # A two-step workflow ('train', 'val') also needs a validation dataset
    # that shares the training pipeline.
    if len(cfg.workflow) == 2:
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmdet_version=__version__ + get_git_hash()[:7],
            CLASSES=datasets[0].CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES
    train_detector(
        model,
        datasets,
        cfg,
        distributed=distributed,
        validate=(not args.no_validate),
        timestamp=timestamp,
        meta=meta)
# Script entry point.
if __name__ == '__main__':
    main()
| 6,435 | 34.955307 | 79 | py |
GFocalV2 | GFocalV2-master/tools/detectron2pytorch.py | import argparse
from collections import OrderedDict
import mmcv
import torch
arch_settings = {50: (3, 4, 6, 3), 101: (3, 4, 23, 3)}
def convert_bn(blobs, state_dict, caffe_name, torch_name, converted_names):
    """Translate a Detectron affine-channel layer into BatchNorm parameters.

    Detectron folds BN into a per-channel affine transform, so only the
    shift (``_b``) and scale (``_s``) blobs exist; running statistics are
    filled with the identity (mean 0, var 1).
    """
    bias = torch.from_numpy(blobs[caffe_name + '_b'])
    weight = torch.from_numpy(blobs[caffe_name + '_s'])
    state_dict[torch_name + '.bias'] = bias
    state_dict[torch_name + '.weight'] = weight
    state_dict[torch_name + '.running_mean'] = torch.zeros(weight.size())
    state_dict[torch_name + '.running_var'] = torch.ones(weight.size())
    converted_names.update({caffe_name + '_b', caffe_name + '_s'})
def convert_conv_fc(blobs, state_dict, caffe_name, torch_name,
                    converted_names):
    """Copy a conv/fc weight blob (and optional bias) into the state dict."""
    state_dict[torch_name + '.weight'] = torch.from_numpy(
        blobs[caffe_name + '_w'])
    converted_names.add(caffe_name + '_w')
    bias_key = caffe_name + '_b'
    if bias_key in blobs:
        state_dict[torch_name + '.bias'] = torch.from_numpy(blobs[bias_key])
        converted_names.add(bias_key)
def convert(src, dst, depth):
    """Convert keys in detectron pretrained ResNet models to pytorch style.

    Args:
        src (str): path to the detectron (caffe2) pickled weights file.
        dst (str): path where the converted pytorch checkpoint is written.
        depth (int): ResNet depth; must be a key of ``arch_settings``.

    Raises:
        ValueError: if ``depth`` is not 50 or 101.
    """
    # load arch_settings
    if depth not in arch_settings:
        raise ValueError('Only support ResNet-50 and ResNet-101 currently')
    block_nums = arch_settings[depth]
    # load caffe model
    caffe_model = mmcv.load(src, encoding='latin1')
    blobs = caffe_model['blobs'] if 'blobs' in caffe_model else caffe_model
    # convert to pytorch style
    state_dict = OrderedDict()
    converted_names = set()
    # stem: conv1 + its (affine-channel) bn
    convert_conv_fc(blobs, state_dict, 'conv1', 'conv1', converted_names)
    convert_bn(blobs, state_dict, 'res_conv1_bn', 'bn1', converted_names)
    for i in range(1, len(block_nums) + 1):
        for j in range(block_nums[i - 1]):
            # the first block of each stage has a projection shortcut
            # (detectron 'branch1' -> pytorch 'downsample')
            if j == 0:
                convert_conv_fc(blobs, state_dict, f'res{i + 1}_{j}_branch1',
                                f'layer{i}.{j}.downsample.0', converted_names)
                convert_bn(blobs, state_dict, f'res{i + 1}_{j}_branch1_bn',
                           f'layer{i}.{j}.downsample.1', converted_names)
            # bottleneck branch2a/b/c map to conv1/2/3 (+ matching bn)
            for k, letter in enumerate(['a', 'b', 'c']):
                convert_conv_fc(blobs, state_dict,
                                f'res{i + 1}_{j}_branch2{letter}',
                                f'layer{i}.{j}.conv{k+1}', converted_names)
                convert_bn(blobs, state_dict,
                           f'res{i + 1}_{j}_branch2{letter}_bn',
                           f'layer{i}.{j}.bn{k + 1}', converted_names)
    # check if all layers are converted
    for key in blobs:
        if key not in converted_names:
            print(f'Not Convert: {key}')
    # save checkpoint
    checkpoint = dict()
    checkpoint['state_dict'] = state_dict
    torch.save(checkpoint, dst)
def main():
    """Parse command-line arguments and run the key conversion."""
    parser = argparse.ArgumentParser(description='Convert model keys')
    parser.add_argument('src', help='src detectron model path')
    parser.add_argument('dst', help='save path')
    parser.add_argument(
        'depth', type=int, help='ResNet model depth')
    cli_args = parser.parse_args()
    convert(cli_args.src, cli_args.dst, cli_args.depth)
# Script entry point: convert a detectron checkpoint from the command line.
if __name__ == '__main__':
    main()
| 3,530 | 41.542169 | 78 | py |
GFocalV2 | GFocalV2-master/tests/async_benchmark.py | import asyncio
import os
import shutil
import urllib
import mmcv
import torch
from mmdet.apis import (async_inference_detector, inference_detector,
init_detector, show_result)
from mmdet.utils.contextmanagers import concurrent
from mmdet.utils.profiling import profile_time
async def main():
    """Benchmark between async and synchronous inference interfaces.
    Sample runs for 20 demo images on K80 GPU, model - mask_rcnn_r50_fpn_1x:
    async       sync
    7981.79 ms  9660.82 ms
    8074.52 ms  9660.94 ms
    7976.44 ms  9406.83 ms
    Async variant takes about 0.83-0.85 of the time of the synchronous
    interface.
    """
    project_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
    config_file = os.path.join(project_dir,
                               'configs/mask_rcnn_r50_fpn_1x_coco.py')
    checkpoint_file = os.path.join(
        project_dir, 'checkpoints/mask_rcnn_r50_fpn_1x_20181010-069fa190.pth')
    # download the checkpoint on first run, otherwise reuse the local copy
    if not os.path.exists(checkpoint_file):
        url = ('https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection'
               '/models/mask_rcnn_r50_fpn_1x_20181010-069fa190.pth')
        print(f'Downloading {url} ...')
        local_filename, _ = urllib.request.urlretrieve(url)
        os.makedirs(os.path.dirname(checkpoint_file), exist_ok=True)
        shutil.move(local_filename, checkpoint_file)
        print(f'Saved as {checkpoint_file}')
    else:
        print(f'Using existing checkpoint {checkpoint_file}')
    device = 'cuda:0'
    model = init_detector(
        config_file, checkpoint=checkpoint_file, device=device)
    # queue is used for concurrent inference of multiple images
    streamqueue = asyncio.Queue()
    # queue size defines concurrency level
    streamqueue_size = 4
    for _ in range(streamqueue_size):
        streamqueue.put_nowait(torch.cuda.Stream(device=device))
    # test a single image and show the results
    img = mmcv.imread(os.path.join(project_dir, 'demo/demo.jpg'))
    # warmup
    await async_inference_detector(model, img)
    # each detect() borrows one CUDA stream from the queue for its duration
    async def detect(img):
        async with concurrent(streamqueue):
            return await async_inference_detector(model, img)
    num_of_images = 20
    with profile_time('benchmark', 'async'):
        tasks = [
            asyncio.create_task(detect(img)) for _ in range(num_of_images)
        ]
        async_results = await asyncio.gather(*tasks)
    # synchronous baseline on the default CUDA stream
    with torch.cuda.stream(torch.cuda.default_stream()):
        with profile_time('benchmark', 'sync'):
            sync_results = [
                inference_detector(model, img) for _ in range(num_of_images)
            ]
    # render one result from each variant for a quick visual sanity check
    result_dir = os.path.join(project_dir, 'demo')
    show_result(
        img,
        async_results[0],
        model.CLASSES,
        score_thr=0.5,
        show=False,
        out_file=os.path.join(result_dir, 'result_async.jpg'))
    show_result(
        img,
        sync_results[0],
        model.CLASSES,
        score_thr=0.5,
        show=False,
        out_file=os.path.join(result_dir, 'result_sync.jpg'))
# Script entry point: drive the async benchmark coroutine to completion.
if __name__ == '__main__':
    asyncio.run(main())
| 3,126 | 29.960396 | 79 | py |
GFocalV2 | GFocalV2-master/tests/test_anchor.py | """
CommandLine:
pytest tests/test_anchor.py
xdoctest tests/test_anchor.py zero
"""
import torch
def test_standard_anchor_generator():
    """Smoke test: the default AnchorGenerator can be built from a cfg."""
    from mmdet.core.anchor import build_anchor_generator

    cfg = dict(
        type='AnchorGenerator',
        scales=[8],
        ratios=[0.5, 1.0, 2.0],
        strides=[4, 8])
    generator = build_anchor_generator(cfg)
    assert generator is not None
def test_strides():
    """Anchor centres follow the stride, both square and rectangular."""
    from mmdet.core import AnchorGenerator

    # Square strides: 10 px in both directions
    generator = AnchorGenerator([10], [1.], [1.], [10])
    anchors = generator.grid_anchors([(2, 2)], device='cpu')
    expected = torch.tensor([[-5., -5., 5., 5.], [5., -5., 15., 5.],
                             [-5., 5., 5., 15.], [5., 5., 15., 15.]])
    assert torch.equal(anchors[0], expected)

    # Rectangular strides: 10 px in x, 20 px in y
    generator = AnchorGenerator([(10, 20)], [1.], [1.], [10])
    anchors = generator.grid_anchors([(2, 2)], device='cpu')
    expected = torch.tensor([[-5., -5., 5., 5.], [5., -5., 15., 5.],
                             [-5., 15., 5., 25.], [5., 15., 15., 25.]])
    assert torch.equal(anchors[0], expected)
def test_ssd_anchor_generator():
    """Check SSDAnchorGenerator base anchors, valid flags and grid output
    against precomputed reference values for the standard SSD-300 setting."""
    from mmdet.core.anchor import build_anchor_generator
    if torch.cuda.is_available():
        device = 'cuda'
    else:
        device = 'cpu'
    anchor_generator_cfg = dict(
        type='SSDAnchorGenerator',
        scale_major=False,
        input_size=300,
        basesize_ratio_range=(0.15, 0.9),
        strides=[8, 16, 32, 64, 100, 300],
        ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]])
    featmap_sizes = [(38, 38), (19, 19), (10, 10), (5, 5), (3, 3), (1, 1)]
    anchor_generator = build_anchor_generator(anchor_generator_cfg)
    # check base anchors
    expected_base_anchors = [
        torch.Tensor([[-6.5000, -6.5000, 14.5000, 14.5000],
                      [-11.3704, -11.3704, 19.3704, 19.3704],
                      [-10.8492, -3.4246, 18.8492, 11.4246],
                      [-3.4246, -10.8492, 11.4246, 18.8492]]),
        torch.Tensor([[-14.5000, -14.5000, 30.5000, 30.5000],
                      [-25.3729, -25.3729, 41.3729, 41.3729],
                      [-23.8198, -7.9099, 39.8198, 23.9099],
                      [-7.9099, -23.8198, 23.9099, 39.8198],
                      [-30.9711, -4.9904, 46.9711, 20.9904],
                      [-4.9904, -30.9711, 20.9904, 46.9711]]),
        torch.Tensor([[-33.5000, -33.5000, 65.5000, 65.5000],
                      [-45.5366, -45.5366, 77.5366, 77.5366],
                      [-54.0036, -19.0018, 86.0036, 51.0018],
                      [-19.0018, -54.0036, 51.0018, 86.0036],
                      [-69.7365, -12.5788, 101.7365, 44.5788],
                      [-12.5788, -69.7365, 44.5788, 101.7365]]),
        torch.Tensor([[-44.5000, -44.5000, 108.5000, 108.5000],
                      [-56.9817, -56.9817, 120.9817, 120.9817],
                      [-76.1873, -22.0937, 140.1873, 86.0937],
                      [-22.0937, -76.1873, 86.0937, 140.1873],
                      [-100.5019, -12.1673, 164.5019, 76.1673],
                      [-12.1673, -100.5019, 76.1673, 164.5019]]),
        torch.Tensor([[-53.5000, -53.5000, 153.5000, 153.5000],
                      [-66.2185, -66.2185, 166.2185, 166.2185],
                      [-96.3711, -23.1855, 196.3711, 123.1855],
                      [-23.1855, -96.3711, 123.1855, 196.3711]]),
        torch.Tensor([[19.5000, 19.5000, 280.5000, 280.5000],
                      [6.6342, 6.6342, 293.3658, 293.3658],
                      [-34.5549, 57.7226, 334.5549, 242.2774],
                      [57.7226, -34.5549, 242.2774, 334.5549]]),
    ]
    base_anchors = anchor_generator.base_anchors
    for i, base_anchor in enumerate(base_anchors):
        assert base_anchor.allclose(expected_base_anchors[i])
    # check valid flags
    expected_valid_pixels = [5776, 2166, 600, 150, 36, 4]
    multi_level_valid_flags = anchor_generator.valid_flags(
        featmap_sizes, (300, 300), device)
    for i, single_level_valid_flag in enumerate(multi_level_valid_flags):
        assert single_level_valid_flag.sum() == expected_valid_pixels[i]
    # check number of base anchors for each level
    assert anchor_generator.num_base_anchors == [4, 6, 6, 6, 4, 4]
    # check anchor generation
    anchors = anchor_generator.grid_anchors(featmap_sizes, device)
    assert len(anchors) == 6
def test_anchor_generator_with_tuples():
    """Scalar strides and equivalent (w, h) tuple strides must agree."""
    from mmdet.core.anchor import build_anchor_generator

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    featmap_sizes = [(38, 38), (19, 19), (10, 10), (5, 5), (3, 3), (1, 1)]

    base_cfg = dict(
        type='SSDAnchorGenerator',
        scale_major=False,
        input_size=300,
        basesize_ratio_range=(0.15, 0.9),
        ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]])
    scalar_cfg = dict(base_cfg, strides=[8, 16, 32, 64, 100, 300])
    tuple_cfg = dict(
        base_cfg,
        strides=[(8, 8), (16, 16), (32, 32), (64, 64), (100, 100), (300,
                                                                    300)])

    scalar_anchors = build_anchor_generator(scalar_cfg).grid_anchors(
        featmap_sizes, device)
    tuple_anchors = build_anchor_generator(tuple_cfg).grid_anchors(
        featmap_sizes, device)
    for lvl_scalar, lvl_tuple in zip(scalar_anchors, tuple_anchors):
        assert torch.equal(lvl_scalar, lvl_tuple)
def test_yolo_anchor_generator():
    """Check YOLOAnchorGenerator base anchors and grid output against
    precomputed reference values (standard YOLOv3 anchor sizes)."""
    from mmdet.core.anchor import build_anchor_generator
    if torch.cuda.is_available():
        device = 'cuda'
    else:
        device = 'cpu'
    anchor_generator_cfg = dict(
        type='YOLOAnchorGenerator',
        strides=[32, 16, 8],
        base_sizes=[
            [(116, 90), (156, 198), (373, 326)],
            [(30, 61), (62, 45), (59, 119)],
            [(10, 13), (16, 30), (33, 23)],
        ])
    featmap_sizes = [(14, 18), (28, 36), (56, 72)]
    anchor_generator = build_anchor_generator(anchor_generator_cfg)
    # check base anchors
    expected_base_anchors = [
        torch.Tensor([[-42.0000, -29.0000, 74.0000, 61.0000],
                      [-62.0000, -83.0000, 94.0000, 115.0000],
                      [-170.5000, -147.0000, 202.5000, 179.0000]]),
        torch.Tensor([[-7.0000, -22.5000, 23.0000, 38.5000],
                      [-23.0000, -14.5000, 39.0000, 30.5000],
                      [-21.5000, -51.5000, 37.5000, 67.5000]]),
        torch.Tensor([[-1.0000, -2.5000, 9.0000, 10.5000],
                      [-4.0000, -11.0000, 12.0000, 19.0000],
                      [-12.5000, -7.5000, 20.5000, 15.5000]])
    ]
    base_anchors = anchor_generator.base_anchors
    for i, base_anchor in enumerate(base_anchors):
        assert base_anchor.allclose(expected_base_anchors[i])
    # check number of base anchors for each level
    assert anchor_generator.num_base_anchors == [3, 3, 3]
    # check anchor generation
    anchors = anchor_generator.grid_anchors(featmap_sizes, device)
    assert len(anchors) == 3
def test_retina_anchor():
    """Build a RetinaSepBNHead from a config and validate its internal
    anchor generator (base anchors, valid flags, grid output) against
    precomputed reference values."""
    from mmdet.models import build_head
    if torch.cuda.is_available():
        device = 'cuda'
    else:
        device = 'cpu'
    # head configs modified from
    # configs/nas_fpn/retinanet_r50_fpn_crop640_50e.py
    bbox_head = dict(
        type='RetinaSepBNHead',
        num_classes=4,
        num_ins=5,
        in_channels=4,
        stacked_convs=1,
        feat_channels=4,
        anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=4,
            scales_per_octave=3,
            ratios=[0.5, 1.0, 2.0],
            strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]))
    retina_head = build_head(bbox_head)
    assert retina_head.anchor_generator is not None
    # use the featmap sizes in NASFPN setting to test retina head
    featmap_sizes = [(80, 80), (40, 40), (20, 20), (10, 10), (5, 5)]
    # check base anchors (9 anchors per level: 3 ratios x 3 octave scales)
    expected_base_anchors = [
        torch.Tensor([[-22.6274, -11.3137, 22.6274, 11.3137],
                      [-28.5088, -14.2544, 28.5088, 14.2544],
                      [-35.9188, -17.9594, 35.9188, 17.9594],
                      [-16.0000, -16.0000, 16.0000, 16.0000],
                      [-20.1587, -20.1587, 20.1587, 20.1587],
                      [-25.3984, -25.3984, 25.3984, 25.3984],
                      [-11.3137, -22.6274, 11.3137, 22.6274],
                      [-14.2544, -28.5088, 14.2544, 28.5088],
                      [-17.9594, -35.9188, 17.9594, 35.9188]]),
        torch.Tensor([[-45.2548, -22.6274, 45.2548, 22.6274],
                      [-57.0175, -28.5088, 57.0175, 28.5088],
                      [-71.8376, -35.9188, 71.8376, 35.9188],
                      [-32.0000, -32.0000, 32.0000, 32.0000],
                      [-40.3175, -40.3175, 40.3175, 40.3175],
                      [-50.7968, -50.7968, 50.7968, 50.7968],
                      [-22.6274, -45.2548, 22.6274, 45.2548],
                      [-28.5088, -57.0175, 28.5088, 57.0175],
                      [-35.9188, -71.8376, 35.9188, 71.8376]]),
        torch.Tensor([[-90.5097, -45.2548, 90.5097, 45.2548],
                      [-114.0350, -57.0175, 114.0350, 57.0175],
                      [-143.6751, -71.8376, 143.6751, 71.8376],
                      [-64.0000, -64.0000, 64.0000, 64.0000],
                      [-80.6349, -80.6349, 80.6349, 80.6349],
                      [-101.5937, -101.5937, 101.5937, 101.5937],
                      [-45.2548, -90.5097, 45.2548, 90.5097],
                      [-57.0175, -114.0350, 57.0175, 114.0350],
                      [-71.8376, -143.6751, 71.8376, 143.6751]]),
        torch.Tensor([[-181.0193, -90.5097, 181.0193, 90.5097],
                      [-228.0701, -114.0350, 228.0701, 114.0350],
                      [-287.3503, -143.6751, 287.3503, 143.6751],
                      [-128.0000, -128.0000, 128.0000, 128.0000],
                      [-161.2699, -161.2699, 161.2699, 161.2699],
                      [-203.1873, -203.1873, 203.1873, 203.1873],
                      [-90.5097, -181.0193, 90.5097, 181.0193],
                      [-114.0350, -228.0701, 114.0350, 228.0701],
                      [-143.6751, -287.3503, 143.6751, 287.3503]]),
        torch.Tensor([[-362.0387, -181.0193, 362.0387, 181.0193],
                      [-456.1401, -228.0701, 456.1401, 228.0701],
                      [-574.7006, -287.3503, 574.7006, 287.3503],
                      [-256.0000, -256.0000, 256.0000, 256.0000],
                      [-322.5398, -322.5398, 322.5398, 322.5398],
                      [-406.3747, -406.3747, 406.3747, 406.3747],
                      [-181.0193, -362.0387, 181.0193, 362.0387],
                      [-228.0701, -456.1401, 228.0701, 456.1401],
                      [-287.3503, -574.7006, 287.3503, 574.7006]])
    ]
    base_anchors = retina_head.anchor_generator.base_anchors
    for i, base_anchor in enumerate(base_anchors):
        assert base_anchor.allclose(expected_base_anchors[i])
    # check valid flags
    expected_valid_pixels = [57600, 14400, 3600, 900, 225]
    multi_level_valid_flags = retina_head.anchor_generator.valid_flags(
        featmap_sizes, (640, 640), device)
    for i, single_level_valid_flag in enumerate(multi_level_valid_flags):
        assert single_level_valid_flag.sum() == expected_valid_pixels[i]
    # check number of base anchors for each level
    assert retina_head.anchor_generator.num_base_anchors == [9, 9, 9, 9, 9]
    # check anchor generation
    anchors = retina_head.anchor_generator.grid_anchors(featmap_sizes, device)
    assert len(anchors) == 5
def test_guided_anchor():
    """Build a GARetinaHead from a config and validate both of its anchor
    generators: the dense approx generator (9 anchors/location) and the
    square generator (1 anchor/location)."""
    from mmdet.models import build_head
    if torch.cuda.is_available():
        device = 'cuda'
    else:
        device = 'cpu'
    # head configs modified from
    # configs/guided_anchoring/ga_retinanet_r50_fpn_1x_coco.py
    bbox_head = dict(
        type='GARetinaHead',
        num_classes=8,
        in_channels=4,
        stacked_convs=1,
        feat_channels=4,
        approx_anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=4,
            scales_per_octave=3,
            ratios=[0.5, 1.0, 2.0],
            strides=[8, 16, 32, 64, 128]),
        square_anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            scales=[4],
            strides=[8, 16, 32, 64, 128]))
    ga_retina_head = build_head(bbox_head)
    assert ga_retina_head.approx_anchor_generator is not None
    # use the featmap sizes in NASFPN setting to test ga_retina_head
    featmap_sizes = [(100, 152), (50, 76), (25, 38), (13, 19), (7, 10)]
    # check base anchors
    expected_approxs = [
        torch.Tensor([[-22.6274, -11.3137, 22.6274, 11.3137],
                      [-28.5088, -14.2544, 28.5088, 14.2544],
                      [-35.9188, -17.9594, 35.9188, 17.9594],
                      [-16.0000, -16.0000, 16.0000, 16.0000],
                      [-20.1587, -20.1587, 20.1587, 20.1587],
                      [-25.3984, -25.3984, 25.3984, 25.3984],
                      [-11.3137, -22.6274, 11.3137, 22.6274],
                      [-14.2544, -28.5088, 14.2544, 28.5088],
                      [-17.9594, -35.9188, 17.9594, 35.9188]]),
        torch.Tensor([[-45.2548, -22.6274, 45.2548, 22.6274],
                      [-57.0175, -28.5088, 57.0175, 28.5088],
                      [-71.8376, -35.9188, 71.8376, 35.9188],
                      [-32.0000, -32.0000, 32.0000, 32.0000],
                      [-40.3175, -40.3175, 40.3175, 40.3175],
                      [-50.7968, -50.7968, 50.7968, 50.7968],
                      [-22.6274, -45.2548, 22.6274, 45.2548],
                      [-28.5088, -57.0175, 28.5088, 57.0175],
                      [-35.9188, -71.8376, 35.9188, 71.8376]]),
        torch.Tensor([[-90.5097, -45.2548, 90.5097, 45.2548],
                      [-114.0350, -57.0175, 114.0350, 57.0175],
                      [-143.6751, -71.8376, 143.6751, 71.8376],
                      [-64.0000, -64.0000, 64.0000, 64.0000],
                      [-80.6349, -80.6349, 80.6349, 80.6349],
                      [-101.5937, -101.5937, 101.5937, 101.5937],
                      [-45.2548, -90.5097, 45.2548, 90.5097],
                      [-57.0175, -114.0350, 57.0175, 114.0350],
                      [-71.8376, -143.6751, 71.8376, 143.6751]]),
        torch.Tensor([[-181.0193, -90.5097, 181.0193, 90.5097],
                      [-228.0701, -114.0350, 228.0701, 114.0350],
                      [-287.3503, -143.6751, 287.3503, 143.6751],
                      [-128.0000, -128.0000, 128.0000, 128.0000],
                      [-161.2699, -161.2699, 161.2699, 161.2699],
                      [-203.1873, -203.1873, 203.1873, 203.1873],
                      [-90.5097, -181.0193, 90.5097, 181.0193],
                      [-114.0350, -228.0701, 114.0350, 228.0701],
                      [-143.6751, -287.3503, 143.6751, 287.3503]]),
        torch.Tensor([[-362.0387, -181.0193, 362.0387, 181.0193],
                      [-456.1401, -228.0701, 456.1401, 228.0701],
                      [-574.7006, -287.3503, 574.7006, 287.3503],
                      [-256.0000, -256.0000, 256.0000, 256.0000],
                      [-322.5398, -322.5398, 322.5398, 322.5398],
                      [-406.3747, -406.3747, 406.3747, 406.3747],
                      [-181.0193, -362.0387, 181.0193, 362.0387],
                      [-228.0701, -456.1401, 228.0701, 456.1401],
                      [-287.3503, -574.7006, 287.3503, 574.7006]])
    ]
    approxs = ga_retina_head.approx_anchor_generator.base_anchors
    for i, base_anchor in enumerate(approxs):
        assert base_anchor.allclose(expected_approxs[i])
    # check valid flags
    expected_valid_pixels = [136800, 34200, 8550, 2223, 630]
    multi_level_valid_flags = ga_retina_head.approx_anchor_generator \
        .valid_flags(featmap_sizes, (800, 1216), device)
    for i, single_level_valid_flag in enumerate(multi_level_valid_flags):
        assert single_level_valid_flag.sum() == expected_valid_pixels[i]
    # check number of base anchors for each level
    assert ga_retina_head.approx_anchor_generator.num_base_anchors == [
        9, 9, 9, 9, 9
    ]
    # check approx generation
    squares = ga_retina_head.square_anchor_generator.grid_anchors(
        featmap_sizes, device)
    assert len(squares) == 5
    expected_squares = [
        torch.Tensor([[-16., -16., 16., 16.]]),
        torch.Tensor([[-32., -32., 32., 32]]),
        torch.Tensor([[-64., -64., 64., 64.]]),
        torch.Tensor([[-128., -128., 128., 128.]]),
        torch.Tensor([[-256., -256., 256., 256.]])
    ]
    squares = ga_retina_head.square_anchor_generator.base_anchors
    for i, base_anchor in enumerate(squares):
        assert base_anchor.allclose(expected_squares[i])
    # square_anchor_generator does not check valid flags
    # check number of base anchors for each level
    assert (ga_retina_head.square_anchor_generator.num_base_anchors == [
        1, 1, 1, 1, 1
    ])
    # check square generation
    anchors = ga_retina_head.square_anchor_generator.grid_anchors(
        featmap_sizes, device)
    assert len(anchors) == 5
| 17,722 | 42.121655 | 79 | py |
GFocalV2 | GFocalV2-master/tests/test_async.py | """Tests for async interface."""
import asyncio
import os
import sys
import asynctest
import mmcv
import torch
from mmdet.apis import async_inference_detector, init_detector
if sys.version_info >= (3, 7):
from mmdet.utils.contextmanagers import concurrent
class AsyncTestCase(asynctest.TestCase):
    """asynctest TestCase that awaits coroutine test methods with a timeout."""
    # run each test in a fresh event loop; forbid implicit loop access
    use_default_loop = False
    forbid_get_event_loop = True
    # per-test timeout in seconds, overridable via ASYNCIO_TEST_TIMEOUT
    TEST_TIMEOUT = int(os.getenv('ASYNCIO_TEST_TIMEOUT', '30'))
    def _run_test_method(self, method):
        # If the method is a coroutine function, drive its coroutine to
        # completion on the case's loop; plain methods already executed.
        result = method()
        if asyncio.iscoroutine(result):
            self.loop.run_until_complete(
                asyncio.wait_for(result, timeout=self.TEST_TIMEOUT))
class MaskRCNNDetector:
    """Async wrapper around an mmdet detector with a pool of CUDA streams."""
    def __init__(self,
                 model_config,
                 checkpoint=None,
                 streamqueue_size=3,
                 device='cuda:0'):
        # number of CUDA streams == max number of concurrent inferences
        self.streamqueue_size = streamqueue_size
        self.device = device
        # build the model and load checkpoint
        # (fix: the given ``checkpoint`` used to be ignored — ``None`` was
        # always passed to init_detector regardless of the argument)
        self.model = init_detector(
            model_config, checkpoint=checkpoint, device=self.device)
        self.streamqueue = None
    async def init(self):
        """Fill the stream pool; must be awaited before ``apredict``."""
        self.streamqueue = asyncio.Queue()
        for _ in range(self.streamqueue_size):
            stream = torch.cuda.Stream(device=self.device)
            self.streamqueue.put_nowait(stream)
    if sys.version_info >= (3, 7):
        async def apredict(self, img):
            """Run async inference on ``img`` (file path or loaded array),
            borrowing one CUDA stream from the pool for the duration."""
            if isinstance(img, str):
                img = mmcv.imread(img)
            async with concurrent(self.streamqueue):
                result = await async_inference_detector(self.model, img)
            return result
class AsyncInferenceTestCase(AsyncTestCase):
    """End-to-end async inference smoke test (requires CUDA)."""
    if sys.version_info >= (3, 7):
        async def test_simple_inference(self):
            if not torch.cuda.is_available():
                import pytest
                pytest.skip('test requires GPU and torch+cuda')
            ori_grad_enabled = torch.is_grad_enabled()
            # NOTE(review): this dirnames ``__name__`` (a module-name
            # string), not ``__file__``, so both calls yield '' and the
            # paths below resolve relative to the CWD — confirm intended.
            root_dir = os.path.dirname(os.path.dirname(__name__))
            model_config = os.path.join(
                root_dir, 'configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py')
            detector = MaskRCNNDetector(model_config)
            await detector.init()
            img_path = os.path.join(root_dir, 'demo/demo.jpg')
            bboxes, _ = await detector.apredict(img_path)
            self.assertTrue(bboxes)
            # async inference detector hacks the global grad mode,
            # so restore here to avoid it influencing other tests
            torch.set_grad_enabled(ori_grad_enabled)
| 2,560 | 29.855422 | 75 | py |
GFocalV2 | GFocalV2-master/tests/test_config.py | from os.path import dirname, exists, join, relpath
import pytest
import torch
from mmcv.runner import build_optimizer
from mmdet.core import BitmapMasks, PolygonMasks
def _get_config_directory():
    """Locate the ``configs`` directory of the mmdetection repository."""
    try:
        # normal case: running from within the source checkout
        repo_root = dirname(dirname(__file__))
    except NameError:
        # __file__ is undefined (e.g. interactive IPython); fall back to
        # the installed package location
        import mmdet
        repo_root = dirname(dirname(mmdet.__file__))
    config_dpath = join(repo_root, 'configs')
    if exists(config_dpath):
        return config_dpath
    raise Exception('Cannot find config path')
def test_config_build_detector():
    """Test that all detection models defined in the configs can be
    initialized."""
    from mmcv import Config
    from mmdet.models import build_detector
    config_dpath = _get_config_directory()
    print(f'Found config_dpath = {config_dpath}')
    import glob
    # '**' without recursive=True matches exactly one directory level,
    # which is where the config files live
    config_fpaths = list(glob.glob(join(config_dpath, '**', '*.py')))
    # _base_ files are fragments, not standalone model configs
    config_fpaths = [p for p in config_fpaths if p.find('_base_') == -1]
    config_names = [relpath(p, config_dpath) for p in config_fpaths]
    print(f'Using {len(config_names)} config files')
    for config_fname in config_names:
        config_fpath = join(config_dpath, config_fname)
        config_mod = Config.fromfile(config_fpath)
        # touch the required top-level keys so a missing one fails loudly
        config_mod.model
        config_mod.train_cfg
        config_mod.test_cfg
        print(f'Building detector, config_fpath = {config_fpath}')
        # Remove pretrained keys to allow for testing in an offline environment
        if 'pretrained' in config_mod.model:
            config_mod.model['pretrained'] = None
        detector = build_detector(
            config_mod.model,
            train_cfg=config_mod.train_cfg,
            test_cfg=config_mod.test_cfg)
        assert detector is not None
        optimizer = build_optimizer(detector, config_mod.optimizer)
        assert isinstance(optimizer, torch.optim.Optimizer)
        if 'roi_head' in config_mod.model.keys():
            # for two stage detector
            # detectors must have bbox head
            assert detector.roi_head.with_bbox and detector.with_bbox
            assert detector.roi_head.with_mask == detector.with_mask
            head_config = config_mod.model['roi_head']
            _check_roi_head(head_config, detector.roi_head)
        # else:
        #     # for single stage detector
        #     # detectors must have bbox head
        #     # assert detector.with_bbox
        #     head_config = config_mod.model['bbox_head']
        #     _check_bbox_head(head_config, detector.bbox_head)
def _check_roi_head(config, head):
    """Check that a built RoI head is consistent with its config: class,
    bbox/mask extractors and heads, plus architecture-specific extras."""
    # check consistency between head_config and roi_head
    assert config['type'] == head.__class__.__name__
    # check roi_align
    bbox_roi_cfg = config.bbox_roi_extractor
    bbox_roi_extractor = head.bbox_roi_extractor
    _check_roi_extractor(bbox_roi_cfg, bbox_roi_extractor)
    # check bbox head infos
    bbox_cfg = config.bbox_head
    bbox_head = head.bbox_head
    _check_bbox_head(bbox_cfg, bbox_head)
    if head.with_mask:
        # check roi_align
        # (a falsy mask_roi_extractor means the mask branch shares
        # features with the bbox branch)
        if config.mask_roi_extractor:
            mask_roi_cfg = config.mask_roi_extractor
            mask_roi_extractor = head.mask_roi_extractor
            _check_roi_extractor(mask_roi_cfg, mask_roi_extractor,
                                 bbox_roi_extractor)
        # check mask head infos
        mask_head = head.mask_head
        mask_cfg = config.mask_head
        _check_mask_head(mask_cfg, mask_head)
    # check arch specific settings, e.g., cascade/htc
    if config['type'] in ['CascadeRoIHead', 'HybridTaskCascadeRoIHead']:
        # cascade heads keep one sub-head/extractor per stage
        assert config.num_stages == len(head.bbox_head)
        assert config.num_stages == len(head.bbox_roi_extractor)
        if head.with_mask:
            assert config.num_stages == len(head.mask_head)
            assert config.num_stages == len(head.mask_roi_extractor)
    elif config['type'] in ['MaskScoringRoIHead']:
        assert (hasattr(head, 'mask_iou_head')
                and head.mask_iou_head is not None)
        mask_iou_cfg = config.mask_iou_head
        mask_iou_head = head.mask_iou_head
        assert (mask_iou_cfg.fc_out_channels ==
                mask_iou_head.fc_mask_iou.in_features)
    elif config['type'] in ['GridRoIHead']:
        grid_roi_cfg = config.grid_roi_extractor
        grid_roi_extractor = head.grid_roi_extractor
        _check_roi_extractor(grid_roi_cfg, grid_roi_extractor,
                             bbox_roi_extractor)
        config.grid_head.grid_points = head.grid_head.grid_points
def _check_roi_extractor(config, roi_extractor, prev_roi_extractor=None):
    """Check a (possibly per-stage ModuleList) RoI extractor against its
    config; optionally also check alignment flags match a previous stage."""
    import torch.nn as nn
    # cascade heads wrap per-stage extractors in a ModuleList; the stages
    # share settings, so checking the first one is sufficient
    if isinstance(roi_extractor, nn.ModuleList):
        if prev_roi_extractor:
            prev_roi_extractor = prev_roi_extractor[0]
        roi_extractor = roi_extractor[0]
    assert (len(config.featmap_strides) == len(roi_extractor.roi_layers))
    assert (config.out_channels == roi_extractor.out_channels)
    from torch.nn.modules.utils import _pair
    assert (_pair(config.roi_layer.output_size) ==
            roi_extractor.roi_layers[0].output_size)
    if 'use_torchvision' in config.roi_layer:
        assert (config.roi_layer.use_torchvision ==
                roi_extractor.roi_layers[0].use_torchvision)
    elif 'aligned' in config.roi_layer:
        assert (
            config.roi_layer.aligned == roi_extractor.roi_layers[0].aligned)
    # successive extractors in a head must use the same RoI-align variant
    if prev_roi_extractor:
        assert (roi_extractor.roi_layers[0].aligned ==
                prev_roi_extractor.roi_layers[0].aligned)
        assert (roi_extractor.roi_layers[0].use_torchvision ==
                prev_roi_extractor.roi_layers[0].use_torchvision)
def _check_mask_head(mask_cfg, mask_head):
    """Check a mask head (or per-stage list of heads) against its config,
    recursing over list/ModuleList pairings until single heads remain."""
    import torch.nn as nn
    if isinstance(mask_cfg, list):
        # one config per stage, paired with one head per stage
        for single_mask_cfg, single_mask_head in zip(mask_cfg, mask_head):
            _check_mask_head(single_mask_cfg, single_mask_head)
    elif isinstance(mask_head, nn.ModuleList):
        # a shared config applied to every per-stage head
        for single_mask_head in mask_head:
            _check_mask_head(mask_cfg, single_mask_head)
    else:
        assert mask_cfg['type'] == mask_head.__class__.__name__
        assert mask_cfg.in_channels == mask_head.in_channels
        class_agnostic = mask_cfg.get('class_agnostic', False)
        # class-agnostic heads predict a single mask channel
        out_dim = (1 if class_agnostic else mask_cfg.num_classes)
        if hasattr(mask_head, 'conv_logits'):
            # fully convolutional mask branch
            assert (mask_cfg.conv_out_channels ==
                    mask_head.conv_logits.in_channels)
            assert mask_head.conv_logits.out_channels == out_dim
        else:
            # fully connected mask branch
            assert mask_cfg.fc_out_channels == mask_head.fc_logits.in_features
            assert (mask_head.fc_logits.out_features == out_dim *
                    mask_head.output_area)
def _check_bbox_head(bbox_cfg, bbox_head):
    """Check a bbox head (or per-stage list of heads) against its config,
    recursing over list/ModuleList pairings until single heads remain."""
    import torch.nn as nn
    if isinstance(bbox_cfg, list):
        # one config per stage, paired with one head per stage
        for single_bbox_cfg, single_bbox_head in zip(bbox_cfg, bbox_head):
            _check_bbox_head(single_bbox_cfg, single_bbox_head)
    elif isinstance(bbox_head, nn.ModuleList):
        # a shared config applied to every per-stage head
        for single_bbox_head in bbox_head:
            _check_bbox_head(bbox_cfg, single_bbox_head)
    else:
        assert bbox_cfg['type'] == bbox_head.__class__.__name__
        if bbox_cfg['type'] == 'SABLHead':
            # SABL splits classification and regression input channels
            assert bbox_cfg.cls_in_channels == bbox_head.cls_in_channels
            assert bbox_cfg.reg_in_channels == bbox_head.reg_in_channels
            cls_out_channels = bbox_cfg.get('cls_out_channels', 1024)
            assert (cls_out_channels == bbox_head.fc_cls.in_features)
            # +1 for the implicit background class
            assert (bbox_cfg.num_classes + 1 == bbox_head.fc_cls.out_features)
        else:
            assert bbox_cfg.in_channels == bbox_head.in_channels
            with_cls = bbox_cfg.get('with_cls', True)
            if with_cls:
                fc_out_channels = bbox_cfg.get('fc_out_channels', 2048)
                assert (fc_out_channels == bbox_head.fc_cls.in_features)
                assert (bbox_cfg.num_classes +
                        1 == bbox_head.fc_cls.out_features)
            with_reg = bbox_cfg.get('with_reg', True)
            if with_reg:
                # class-agnostic regression shares 4 deltas across classes
                out_dim = (4 if bbox_cfg.reg_class_agnostic else 4 *
                           bbox_cfg.num_classes)
                assert bbox_head.fc_reg.out_features == out_dim
def _check_anchorhead(config, head):
    """Check consistency between an anchor-head config and the built head."""
    # check consistency between head_config and roi_head
    assert config['type'] == head.__class__.__name__
    assert config.in_channels == head.in_channels
    # sigmoid-based classification losses drop the implicit background class
    num_classes = (
        config.num_classes -
        1 if config.loss_cls.get('use_sigmoid', False) else config.num_classes)
    if config['type'] == 'ATSSHead':
        assert (config.feat_channels == head.atss_cls.in_channels)
        assert (config.feat_channels == head.atss_reg.in_channels)
        assert (config.feat_channels == head.atss_centerness.in_channels)
    elif config['type'] == 'SABLRetinaHead':
        assert (config.feat_channels == head.retina_cls.in_channels)
        assert (config.feat_channels == head.retina_bbox_reg.in_channels)
        assert (config.feat_channels == head.retina_bbox_cls.in_channels)
    else:
        assert (config.in_channels == head.conv_cls.in_channels)
        assert (config.in_channels == head.conv_reg.in_channels)
        assert (head.conv_cls.out_channels == num_classes * head.num_anchors)
        # bug fix: the regression branch of AnchorHead is ``conv_reg``;
        # ``fc_reg`` does not exist on anchor heads, so the old check
        # ``head.fc_reg.out_channels`` could never pass
        assert head.conv_reg.out_channels == 4 * head.num_anchors
# Only tests a representative subset of configurations
# TODO: test pipelines using Albu, current Albu throw None given empty GT
@pytest.mark.parametrize(
    'config_rpath',
    [
        'wider_face/ssd300_wider_face.py',
        'pascal_voc/ssd300_voc0712.py',
        'pascal_voc/ssd512_voc0712.py',
        # 'albu_example/mask_rcnn_r50_fpn_1x.py',
        'foveabox/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py',
        'mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py',
        'mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain_1x_coco.py',
        'fp16/mask_rcnn_r50_fpn_fp16_1x_coco.py'
    ])
def test_config_data_pipeline(config_rpath):
    """Test whether the data pipeline is valid and can process corner cases.
    CommandLine:
        xdoctest -m tests/test_config.py test_config_build_data_pipeline
    """
    from mmcv import Config
    from mmdet.datasets.pipelines import Compose
    import numpy as np
    config_dpath = _get_config_directory()
    print(f'Found config_dpath = {config_dpath}')
    # Build random ground-truth masks: bitmap (uint8 per-pixel) or
    # polygon (two flat coordinate arrays per object).
    def dummy_masks(h, w, num_obj=3, mode='bitmap'):
        assert mode in ('polygon', 'bitmap')
        if mode == 'bitmap':
            masks = np.random.randint(0, 2, (num_obj, h, w), dtype=np.uint8)
            masks = BitmapMasks(masks, h, w)
        else:
            masks = []
            for i in range(num_obj):
                masks.append([])
                masks[-1].append(
                    np.random.uniform(0, min(h - 1, w - 1), (8 + 4 * i, )))
                masks[-1].append(
                    np.random.uniform(0, min(h - 1, w - 1), (10 + 4 * i, )))
            masks = PolygonMasks(masks, h, w)
        return masks
    config_fpath = join(config_dpath, config_rpath)
    cfg = Config.fromfile(config_fpath)
    # remove loading pipeline (the test feeds in-memory data instead of
    # reading images/annotations from disk)
    loading_pipeline = cfg.train_pipeline.pop(0)
    loading_ann_pipeline = cfg.train_pipeline.pop(0)
    cfg.test_pipeline.pop(0)
    train_pipeline = Compose(cfg.train_pipeline)
    test_pipeline = Compose(cfg.test_pipeline)
    print(f'Building data pipeline, config_fpath = {config_fpath}')
    print(f'Test training data pipeline: \n{train_pipeline!r}')
    img = np.random.randint(0, 255, size=(888, 666, 3), dtype=np.uint8)
    # mimic what the removed loading transforms would have produced
    if loading_pipeline.get('to_float32', False):
        img = img.astype(np.float32)
    mode = 'bitmap' if loading_ann_pipeline.get('poly2mask',
                                                True) else 'polygon'
    results = dict(
        filename='test_img.png',
        ori_filename='test_img.png',
        img=img,
        img_shape=img.shape,
        ori_shape=img.shape,
        gt_bboxes=np.array([[35.2, 11.7, 39.7, 15.7]], dtype=np.float32),
        gt_labels=np.array([1], dtype=np.int64),
        gt_masks=dummy_masks(img.shape[0], img.shape[1], mode=mode),
    )
    results['img_fields'] = ['img']
    results['bbox_fields'] = ['gt_bboxes']
    results['mask_fields'] = ['gt_masks']
    output_results = train_pipeline(results)
    assert output_results is not None
    print(f'Test testing data pipeline: \n{test_pipeline!r}')
    results = dict(
        filename='test_img.png',
        ori_filename='test_img.png',
        img=img,
        img_shape=img.shape,
        ori_shape=img.shape,
        gt_bboxes=np.array([[35.2, 11.7, 39.7, 15.7]], dtype=np.float32),
        gt_labels=np.array([1], dtype=np.int64),
        gt_masks=dummy_masks(img.shape[0], img.shape[1], mode=mode),
    )
    results['img_fields'] = ['img']
    results['bbox_fields'] = ['gt_bboxes']
    results['mask_fields'] = ['gt_masks']
    output_results = test_pipeline(results)
    assert output_results is not None
    # test empty GT: pipelines must not crash on images without objects
    print('Test empty GT with training data pipeline: '
          f'\n{train_pipeline!r}')
    results = dict(
        filename='test_img.png',
        ori_filename='test_img.png',
        img=img,
        img_shape=img.shape,
        ori_shape=img.shape,
        gt_bboxes=np.zeros((0, 4), dtype=np.float32),
        gt_labels=np.array([], dtype=np.int64),
        gt_masks=dummy_masks(img.shape[0], img.shape[1], num_obj=0, mode=mode),
    )
    results['img_fields'] = ['img']
    results['bbox_fields'] = ['gt_bboxes']
    results['mask_fields'] = ['gt_masks']
    output_results = train_pipeline(results)
    assert output_results is not None
    print(f'Test empty GT with testing data pipeline: \n{test_pipeline!r}')
    results = dict(
        filename='test_img.png',
        ori_filename='test_img.png',
        img=img,
        img_shape=img.shape,
        ori_shape=img.shape,
        gt_bboxes=np.zeros((0, 4), dtype=np.float32),
        gt_labels=np.array([], dtype=np.int64),
        gt_masks=dummy_masks(img.shape[0], img.shape[1], num_obj=0, mode=mode),
    )
    results['img_fields'] = ['img']
    results['bbox_fields'] = ['gt_bboxes']
    results['mask_fields'] = ['gt_masks']
    output_results = test_pipeline(results)
    assert output_results is not None
| 14,537 | 38.505435 | 79 | py |
GFocalV2 | GFocalV2-master/tests/test_coder.py | import torch
from mmdet.core.bbox.coder import YOLOBBoxCoder
def test_yolo_bbox_coder():
coder = YOLOBBoxCoder()
bboxes = torch.Tensor([[-42., -29., 74., 61.], [-10., -29., 106., 61.],
[22., -29., 138., 61.], [54., -29., 170., 61.]])
pred_bboxes = torch.Tensor([[0.4709, 0.6152, 0.1690, -0.4056],
[0.5399, 0.6653, 0.1162, -0.4162],
[0.4654, 0.6618, 0.1548, -0.4301],
[0.4786, 0.6197, 0.1896, -0.4479]])
grid_size = 32
expected_decode_bboxes = torch.Tensor(
[[-53.6102, -10.3096, 83.7478, 49.6824],
[-15.8700, -8.3901, 114.4236, 50.9693],
[11.1822, -8.0924, 146.6034, 50.4476],
[41.2068, -8.9232, 181.4236, 48.5840]])
assert expected_decode_bboxes.allclose(
coder.decode(bboxes, pred_bboxes, grid_size))
| 896 | 39.772727 | 75 | py |
GFocalV2 | GFocalV2-master/tests/test_masks.py | import numpy as np
import pytest
import torch
from mmdet.core import BitmapMasks, PolygonMasks
def dummy_raw_bitmap_masks(size):
"""
Args:
size (tuple): expected shape of dummy masks, (H, W) or (N, H, W)
Return:
ndarray: dummy mask
"""
return np.random.randint(0, 2, size, dtype=np.uint8)
def dummy_raw_polygon_masks(size):
"""
Args:
size (tuple): expected shape of dummy masks, (N, H, W)
Return:
list[list[ndarray]]: dummy mask
"""
num_obj, heigt, width = size
polygons = []
for _ in range(num_obj):
num_points = np.random.randint(5) * 2 + 6
polygons.append([np.random.uniform(0, min(heigt, width), num_points)])
return polygons
def dummy_bboxes(num, max_height, max_width):
x1y1 = np.random.randint(0, min(max_height // 2, max_width // 2), (num, 2))
wh = np.random.randint(0, min(max_height // 2, max_width // 2), (num, 2))
x2y2 = x1y1 + wh
return np.concatenate([x1y1, x2y2], axis=1).squeeze().astype(np.float32)
def test_bitmap_mask_init():
# init with empty ndarray masks
raw_masks = np.empty((0, 28, 28), dtype=np.uint8)
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
assert len(bitmap_masks) == 0
assert bitmap_masks.height == 28
assert bitmap_masks.width == 28
# init with empty list masks
raw_masks = []
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
assert len(bitmap_masks) == 0
assert bitmap_masks.height == 28
assert bitmap_masks.width == 28
# init with ndarray masks contain 3 instances
raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
assert len(bitmap_masks) == 3
assert bitmap_masks.height == 28
assert bitmap_masks.width == 28
# init with list masks contain 3 instances
raw_masks = [dummy_raw_bitmap_masks((28, 28)) for _ in range(3)]
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
assert len(bitmap_masks) == 3
assert bitmap_masks.height == 28
assert bitmap_masks.width == 28
# init with raw masks of unsupported type
with pytest.raises(AssertionError):
raw_masks = [[dummy_raw_bitmap_masks((28, 28))]]
BitmapMasks(raw_masks, 28, 28)
def test_bitmap_mask_rescale():
# rescale with empty bitmap masks
raw_masks = dummy_raw_bitmap_masks((0, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
rescaled_masks = bitmap_masks.rescale((56, 72))
assert len(rescaled_masks) == 0
assert rescaled_masks.height == 56
assert rescaled_masks.width == 56
# rescale with bitmap masks contain 1 instances
raw_masks = np.array([[[1, 0, 0, 0], [0, 1, 0, 1]]])
bitmap_masks = BitmapMasks(raw_masks, 2, 4)
rescaled_masks = bitmap_masks.rescale((8, 8))
assert len(rescaled_masks) == 1
assert rescaled_masks.height == 4
assert rescaled_masks.width == 8
truth = np.array([[[1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 1, 1], [0, 0, 1, 1, 0, 0, 1, 1]]])
assert (rescaled_masks.masks == truth).all()
def test_bitmap_mask_resize():
# resize with empty bitmap masks
raw_masks = dummy_raw_bitmap_masks((0, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
resized_masks = bitmap_masks.resize((56, 72))
assert len(resized_masks) == 0
assert resized_masks.height == 56
assert resized_masks.width == 72
# resize with bitmap masks contain 1 instances
raw_masks = np.diag(np.ones(4, dtype=np.uint8))[np.newaxis, ...]
bitmap_masks = BitmapMasks(raw_masks, 4, 4)
resized_masks = bitmap_masks.resize((8, 8))
assert len(resized_masks) == 1
assert resized_masks.height == 8
assert resized_masks.width == 8
truth = np.array([[[1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 0, 0, 1, 1]]])
assert (resized_masks.masks == truth).all()
def test_bitmap_mask_flip():
# flip with empty bitmap masks
raw_masks = dummy_raw_bitmap_masks((0, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
flipped_masks = bitmap_masks.flip(flip_direction='horizontal')
assert len(flipped_masks) == 0
assert flipped_masks.height == 28
assert flipped_masks.width == 28
# horizontally flip with bitmap masks contain 3 instances
raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
flipped_masks = bitmap_masks.flip(flip_direction='horizontal')
flipped_flipped_masks = flipped_masks.flip(flip_direction='horizontal')
assert flipped_masks.masks.shape == (3, 28, 28)
assert (bitmap_masks.masks == flipped_flipped_masks.masks).all()
assert (flipped_masks.masks == raw_masks[:, :, ::-1]).all()
# vertically flip with bitmap masks contain 3 instances
raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
flipped_masks = bitmap_masks.flip(flip_direction='vertical')
flipped_flipped_masks = flipped_masks.flip(flip_direction='vertical')
assert len(flipped_masks) == 3
assert flipped_masks.height == 28
assert flipped_masks.width == 28
assert (bitmap_masks.masks == flipped_flipped_masks.masks).all()
assert (flipped_masks.masks == raw_masks[:, ::-1, :]).all()
# diagonal flip with bitmap masks contain 3 instances
raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
flipped_masks = bitmap_masks.flip(flip_direction='diagonal')
flipped_flipped_masks = flipped_masks.flip(flip_direction='diagonal')
assert len(flipped_masks) == 3
assert flipped_masks.height == 28
assert flipped_masks.width == 28
assert (bitmap_masks.masks == flipped_flipped_masks.masks).all()
assert (flipped_masks.masks == raw_masks[:, ::-1, ::-1]).all()
def test_bitmap_mask_pad():
# pad with empty bitmap masks
raw_masks = dummy_raw_bitmap_masks((0, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
padded_masks = bitmap_masks.pad((56, 56))
assert len(padded_masks) == 0
assert padded_masks.height == 56
assert padded_masks.width == 56
# pad with bitmap masks contain 3 instances
raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
padded_masks = bitmap_masks.pad((56, 56))
assert len(padded_masks) == 3
assert padded_masks.height == 56
assert padded_masks.width == 56
assert (padded_masks.masks[:, 28:, 28:] == 0).all()
def test_bitmap_mask_crop():
# crop with empty bitmap masks
dummy_bbox = np.array([0, 10, 10, 27], dtype=np.int)
raw_masks = dummy_raw_bitmap_masks((0, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
cropped_masks = bitmap_masks.crop(dummy_bbox)
assert len(cropped_masks) == 0
assert cropped_masks.height == 17
assert cropped_masks.width == 10
# crop with bitmap masks contain 3 instances
raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
cropped_masks = bitmap_masks.crop(dummy_bbox)
assert len(cropped_masks) == 3
assert cropped_masks.height == 17
assert cropped_masks.width == 10
x1, y1, x2, y2 = dummy_bbox
assert (cropped_masks.masks == raw_masks[:, y1:y2, x1:x2]).all()
# crop with invalid bbox
with pytest.raises(AssertionError):
dummy_bbox = dummy_bboxes(2, 28, 28)
bitmap_masks.crop(dummy_bbox)
def test_bitmap_mask_crop_and_resize():
dummy_bbox = dummy_bboxes(5, 28, 28)
inds = np.random.randint(0, 3, (5, ))
# crop and resize with empty bitmap masks
raw_masks = dummy_raw_bitmap_masks((0, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
cropped_resized_masks = bitmap_masks.crop_and_resize(
dummy_bbox, (56, 56), inds)
assert len(cropped_resized_masks) == 0
assert cropped_resized_masks.height == 56
assert cropped_resized_masks.width == 56
# crop and resize with bitmap masks contain 3 instances
raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
cropped_resized_masks = bitmap_masks.crop_and_resize(
dummy_bbox, (56, 56), inds)
assert len(cropped_resized_masks) == 5
assert cropped_resized_masks.height == 56
assert cropped_resized_masks.width == 56
def test_bitmap_mask_expand():
# expand with empty bitmap masks
raw_masks = dummy_raw_bitmap_masks((0, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
expanded_masks = bitmap_masks.expand(56, 56, 12, 14)
assert len(expanded_masks) == 0
assert expanded_masks.height == 56
assert expanded_masks.width == 56
# expand with bitmap masks contain 3 instances
raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
expanded_masks = bitmap_masks.expand(56, 56, 12, 14)
assert len(expanded_masks) == 3
assert expanded_masks.height == 56
assert expanded_masks.width == 56
assert (expanded_masks.masks[:, :12, :14] == 0).all()
assert (expanded_masks.masks[:, 12 + 28:, 14 + 28:] == 0).all()
def test_bitmap_mask_area():
# area of empty bitmap mask
raw_masks = dummy_raw_bitmap_masks((0, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
assert bitmap_masks.areas.sum() == 0
# area of bitmap masks contain 3 instances
raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
areas = bitmap_masks.areas
assert len(areas) == 3
assert (areas == raw_masks.sum((1, 2))).all()
def test_bitmap_mask_to_ndarray():
# empty bitmap masks to ndarray
raw_masks = dummy_raw_bitmap_masks((0, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
ndarray_masks = bitmap_masks.to_ndarray()
assert isinstance(ndarray_masks, np.ndarray)
assert ndarray_masks.shape == (0, 28, 28)
# bitmap masks contain 3 instances to ndarray
raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
ndarray_masks = bitmap_masks.to_ndarray()
assert isinstance(ndarray_masks, np.ndarray)
assert ndarray_masks.shape == (3, 28, 28)
assert (ndarray_masks == raw_masks).all()
def test_bitmap_mask_to_tensor():
# empty bitmap masks to tensor
raw_masks = dummy_raw_bitmap_masks((0, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
tensor_masks = bitmap_masks.to_tensor(dtype=torch.uint8, device='cpu')
assert isinstance(tensor_masks, torch.Tensor)
assert tensor_masks.shape == (0, 28, 28)
# bitmap masks contain 3 instances to tensor
raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
tensor_masks = bitmap_masks.to_tensor(dtype=torch.uint8, device='cpu')
assert isinstance(tensor_masks, torch.Tensor)
assert tensor_masks.shape == (3, 28, 28)
assert (tensor_masks.numpy() == raw_masks).all()
def test_bitmap_mask_index():
raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
assert (bitmap_masks[0].masks == raw_masks[0]).all()
assert (bitmap_masks[range(2)].masks == raw_masks[range(2)]).all()
def test_bitmap_mask_iter():
raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
for i, bitmap_mask in enumerate(bitmap_masks):
assert bitmap_mask.shape == (28, 28)
assert (bitmap_mask == raw_masks[i]).all()
def test_polygon_mask_init():
# init with empty masks
raw_masks = []
polygon_masks = BitmapMasks(raw_masks, 28, 28)
assert len(polygon_masks) == 0
assert polygon_masks.height == 28
assert polygon_masks.width == 28
# init with masks contain 3 instances
raw_masks = dummy_raw_polygon_masks((3, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
assert isinstance(polygon_masks.masks, list)
assert isinstance(polygon_masks.masks[0], list)
assert isinstance(polygon_masks.masks[0][0], np.ndarray)
assert len(polygon_masks) == 3
assert polygon_masks.height == 28
assert polygon_masks.width == 28
assert polygon_masks.to_ndarray().shape == (3, 28, 28)
# init with raw masks of unsupported type
with pytest.raises(AssertionError):
raw_masks = [[[]]]
PolygonMasks(raw_masks, 28, 28)
raw_masks = [dummy_raw_polygon_masks((3, 28, 28))]
PolygonMasks(raw_masks, 28, 28)
def test_polygon_mask_rescale():
# rescale with empty polygon masks
raw_masks = dummy_raw_polygon_masks((0, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
rescaled_masks = polygon_masks.rescale((56, 72))
assert len(rescaled_masks) == 0
assert rescaled_masks.height == 56
assert rescaled_masks.width == 56
assert rescaled_masks.to_ndarray().shape == (0, 56, 56)
# rescale with polygon masks contain 3 instances
raw_masks = [[np.array([1, 1, 3, 1, 4, 3, 2, 4, 1, 3], dtype=np.float)]]
polygon_masks = PolygonMasks(raw_masks, 5, 5)
rescaled_masks = polygon_masks.rescale((12, 10))
assert len(rescaled_masks) == 1
assert rescaled_masks.height == 10
assert rescaled_masks.width == 10
assert rescaled_masks.to_ndarray().shape == (1, 10, 10)
truth = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
np.uint8)
assert (rescaled_masks.to_ndarray() == truth).all()
def test_polygon_mask_resize():
# resize with empty polygon masks
raw_masks = dummy_raw_polygon_masks((0, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
resized_masks = polygon_masks.resize((56, 72))
assert len(resized_masks) == 0
assert resized_masks.height == 56
assert resized_masks.width == 72
assert resized_masks.to_ndarray().shape == (0, 56, 72)
# resize with polygon masks contain 1 instance 1 part
raw_masks1 = [[np.array([1, 1, 3, 1, 4, 3, 2, 4, 1, 3], dtype=np.float)]]
polygon_masks1 = PolygonMasks(raw_masks1, 5, 5)
resized_masks1 = polygon_masks1.resize((10, 10))
assert len(resized_masks1) == 1
assert resized_masks1.height == 10
assert resized_masks1.width == 10
assert resized_masks1.to_ndarray().shape == (1, 10, 10)
truth1 = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
np.uint8)
assert (resized_masks1.to_ndarray() == truth1).all()
# resize with polygon masks contain 1 instance 2 part
raw_masks2 = [[
np.array([0., 0., 1., 0., 1., 1.]),
np.array([1., 1., 2., 1., 2., 2., 1., 2.])
]]
polygon_masks2 = PolygonMasks(raw_masks2, 3, 3)
resized_masks2 = polygon_masks2.resize((6, 6))
assert len(resized_masks2) == 1
assert resized_masks2.height == 6
assert resized_masks2.width == 6
assert resized_masks2.to_ndarray().shape == (1, 6, 6)
truth2 = np.array(
[[0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0],
[0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]], np.uint8)
assert (resized_masks2.to_ndarray() == truth2).all()
# resize with polygon masks contain 2 instances
raw_masks3 = [raw_masks1[0], raw_masks2[0]]
polygon_masks3 = PolygonMasks(raw_masks3, 5, 5)
resized_masks3 = polygon_masks3.resize((10, 10))
assert len(resized_masks3) == 2
assert resized_masks3.height == 10
assert resized_masks3.width == 10
assert resized_masks3.to_ndarray().shape == (2, 10, 10)
truth3 = np.stack([truth1, np.pad(truth2, ((0, 4), (0, 4)), 'constant')])
assert (resized_masks3.to_ndarray() == truth3).all()
def test_polygon_mask_flip():
# flip with empty polygon masks
raw_masks = dummy_raw_polygon_masks((0, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
flipped_masks = polygon_masks.flip(flip_direction='horizontal')
assert len(flipped_masks) == 0
assert flipped_masks.height == 28
assert flipped_masks.width == 28
assert flipped_masks.to_ndarray().shape == (0, 28, 28)
# TODO: fixed flip correctness checking after v2.0_coord is merged
# horizontally flip with polygon masks contain 3 instances
raw_masks = dummy_raw_polygon_masks((3, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
flipped_masks = polygon_masks.flip(flip_direction='horizontal')
flipped_flipped_masks = flipped_masks.flip(flip_direction='horizontal')
assert len(flipped_masks) == 3
assert flipped_masks.height == 28
assert flipped_masks.width == 28
assert flipped_masks.to_ndarray().shape == (3, 28, 28)
assert (polygon_masks.to_ndarray() == flipped_flipped_masks.to_ndarray()
).all()
# vertically flip with polygon masks contain 3 instances
raw_masks = dummy_raw_polygon_masks((3, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
flipped_masks = polygon_masks.flip(flip_direction='vertical')
flipped_flipped_masks = flipped_masks.flip(flip_direction='vertical')
assert len(flipped_masks) == 3
assert flipped_masks.height == 28
assert flipped_masks.width == 28
assert flipped_masks.to_ndarray().shape == (3, 28, 28)
assert (polygon_masks.to_ndarray() == flipped_flipped_masks.to_ndarray()
).all()
# diagonal flip with polygon masks contain 3 instances
raw_masks = dummy_raw_polygon_masks((3, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
flipped_masks = polygon_masks.flip(flip_direction='diagonal')
flipped_flipped_masks = flipped_masks.flip(flip_direction='diagonal')
assert len(flipped_masks) == 3
assert flipped_masks.height == 28
assert flipped_masks.width == 28
assert flipped_masks.to_ndarray().shape == (3, 28, 28)
assert (polygon_masks.to_ndarray() == flipped_flipped_masks.to_ndarray()
).all()
def test_polygon_mask_crop():
dummy_bbox = np.array([0, 10, 10, 27], dtype=np.int)
# crop with empty polygon masks
raw_masks = dummy_raw_polygon_masks((0, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
cropped_masks = polygon_masks.crop(dummy_bbox)
assert len(cropped_masks) == 0
assert cropped_masks.height == 17
assert cropped_masks.width == 10
assert cropped_masks.to_ndarray().shape == (0, 17, 10)
# crop with polygon masks contain 1 instances
raw_masks = [[np.array([1., 3., 5., 1., 5., 6., 1, 6])]]
polygon_masks = PolygonMasks(raw_masks, 7, 7)
bbox = np.array([0, 0, 3, 4])
cropped_masks = polygon_masks.crop(bbox)
assert len(cropped_masks) == 1
assert cropped_masks.height == 4
assert cropped_masks.width == 3
assert cropped_masks.to_ndarray().shape == (1, 4, 3)
truth = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 1], [0, 1, 1]])
assert (cropped_masks.to_ndarray() == truth).all()
# crop with invalid bbox
with pytest.raises(AssertionError):
dummy_bbox = dummy_bboxes(2, 28, 28)
polygon_masks.crop(dummy_bbox)
def test_polygon_mask_pad():
# pad with empty polygon masks
raw_masks = dummy_raw_polygon_masks((0, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
padded_masks = polygon_masks.pad((56, 56))
assert len(padded_masks) == 0
assert padded_masks.height == 56
assert padded_masks.width == 56
assert padded_masks.to_ndarray().shape == (0, 56, 56)
# pad with polygon masks contain 3 instances
raw_masks = dummy_raw_polygon_masks((3, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
padded_masks = polygon_masks.pad((56, 56))
assert len(padded_masks) == 3
assert padded_masks.height == 56
assert padded_masks.width == 56
assert padded_masks.to_ndarray().shape == (3, 56, 56)
assert (padded_masks.to_ndarray()[:, 28:, 28:] == 0).all()
def test_polygon_mask_expand():
with pytest.raises(NotImplementedError):
raw_masks = dummy_raw_polygon_masks((0, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
polygon_masks.expand(56, 56, 10, 17)
def test_polygon_mask_crop_and_resize():
dummy_bbox = dummy_bboxes(5, 28, 28)
inds = np.random.randint(0, 3, (5, ))
# crop and resize with empty polygon masks
raw_masks = dummy_raw_polygon_masks((0, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
cropped_resized_masks = polygon_masks.crop_and_resize(
dummy_bbox, (56, 56), inds)
assert len(cropped_resized_masks) == 0
assert cropped_resized_masks.height == 56
assert cropped_resized_masks.width == 56
assert cropped_resized_masks.to_ndarray().shape == (0, 56, 56)
# crop and resize with polygon masks contain 3 instances
raw_masks = dummy_raw_polygon_masks((3, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
cropped_resized_masks = polygon_masks.crop_and_resize(
dummy_bbox, (56, 56), inds)
assert len(cropped_resized_masks) == 5
assert cropped_resized_masks.height == 56
assert cropped_resized_masks.width == 56
assert cropped_resized_masks.to_ndarray().shape == (5, 56, 56)
def test_polygon_mask_area():
# area of empty polygon masks
raw_masks = dummy_raw_polygon_masks((0, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
assert polygon_masks.areas.sum() == 0
# area of polygon masks contain 1 instance
# here we hack a case that the gap between the area of bitmap and polygon
# is minor
raw_masks = [[np.array([1, 1, 5, 1, 3, 4])]]
polygon_masks = PolygonMasks(raw_masks, 6, 6)
polygon_area = polygon_masks.areas
bitmap_area = polygon_masks.to_bitmap().areas
assert len(polygon_area) == 1
assert np.isclose(polygon_area, bitmap_area).all()
def test_polygon_mask_to_bitmap():
# polygon masks contain 3 instances to bitmap
raw_masks = dummy_raw_polygon_masks((3, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
bitmap_masks = polygon_masks.to_bitmap()
assert (polygon_masks.to_ndarray() == bitmap_masks.to_ndarray()).all()
def test_polygon_mask_to_ndarray():
# empty polygon masks to ndarray
raw_masks = dummy_raw_polygon_masks((0, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
ndarray_masks = polygon_masks.to_ndarray()
assert isinstance(ndarray_masks, np.ndarray)
assert ndarray_masks.shape == (0, 28, 28)
# polygon masks contain 3 instances to ndarray
raw_masks = dummy_raw_polygon_masks((3, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
ndarray_masks = polygon_masks.to_ndarray()
assert isinstance(ndarray_masks, np.ndarray)
assert ndarray_masks.shape == (3, 28, 28)
def test_polygon_to_tensor():
# empty polygon masks to tensor
raw_masks = dummy_raw_polygon_masks((0, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
tensor_masks = polygon_masks.to_tensor(dtype=torch.uint8, device='cpu')
assert isinstance(tensor_masks, torch.Tensor)
assert tensor_masks.shape == (0, 28, 28)
# polygon masks contain 3 instances to tensor
raw_masks = dummy_raw_polygon_masks((3, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
tensor_masks = polygon_masks.to_tensor(dtype=torch.uint8, device='cpu')
assert isinstance(tensor_masks, torch.Tensor)
assert tensor_masks.shape == (3, 28, 28)
assert (tensor_masks.numpy() == polygon_masks.to_ndarray()).all()
def test_polygon_mask_index():
raw_masks = dummy_raw_polygon_masks((3, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
# index by integer
polygon_masks[0]
# index by list
polygon_masks[[0, 1]]
# index by ndarray
polygon_masks[np.asarray([0, 1])]
with pytest.raises(ValueError):
# invalid index
polygon_masks[torch.Tensor([1, 2])]
def test_polygon_mask_iter():
raw_masks = dummy_raw_polygon_masks((3, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
for i, polygon_mask in enumerate(polygon_masks):
assert np.equal(polygon_mask, raw_masks[i]).all()
| 24,825 | 38.343899 | 79 | py |
GFocalV2 | GFocalV2-master/tests/test_iou2d_calculator.py | import numpy as np
import pytest
import torch
from mmdet.core import BboxOverlaps2D, bbox_overlaps
def test_bbox_overlaps_2d(eps=1e-7):
def _construct_bbox(num_bbox=None):
img_h = int(np.random.randint(3, 1000))
img_w = int(np.random.randint(3, 1000))
if num_bbox is None:
num_bbox = np.random.randint(1, 10)
x1y1 = torch.rand((num_bbox, 2))
x2y2 = torch.max(torch.rand((num_bbox, 2)), x1y1)
bboxes = torch.cat((x1y1, x2y2), -1)
bboxes[:, 0::2] *= img_w
bboxes[:, 1::2] *= img_h
return bboxes, num_bbox
# is_aligned is True, bboxes.size(-1) == 5 (include score)
self = BboxOverlaps2D()
bboxes1, num_bbox = _construct_bbox()
bboxes2, _ = _construct_bbox(num_bbox)
bboxes1 = torch.cat((bboxes1, torch.rand((num_bbox, 1))), 1)
bboxes2 = torch.cat((bboxes2, torch.rand((num_bbox, 1))), 1)
gious = self(bboxes1, bboxes2, 'giou', True)
assert gious.size() == (num_bbox, ), gious.size()
assert torch.all(gious >= -1) and torch.all(gious <= 1)
# is_aligned is True, bboxes1.size(-2) == 0
bboxes1 = torch.empty((0, 4))
bboxes2 = torch.empty((0, 4))
gious = self(bboxes1, bboxes2, 'giou', True)
assert gious.size() == (0, ), gious.size()
assert torch.all(gious == torch.empty((0, )))
assert torch.all(gious >= -1) and torch.all(gious <= 1)
# is_aligned is True, and bboxes.ndims > 2
bboxes1, num_bbox = _construct_bbox()
bboxes2, _ = _construct_bbox(num_bbox)
bboxes1 = bboxes1.unsqueeze(0).repeat(2, 1, 1)
# test assertion when batch dim is not the same
with pytest.raises(AssertionError):
self(bboxes1, bboxes2.unsqueeze(0).repeat(3, 1, 1), 'giou', True)
bboxes2 = bboxes2.unsqueeze(0).repeat(2, 1, 1)
gious = self(bboxes1, bboxes2, 'giou', True)
assert torch.all(gious >= -1) and torch.all(gious <= 1)
assert gious.size() == (2, num_bbox)
bboxes1 = bboxes1.unsqueeze(0).repeat(2, 1, 1, 1)
bboxes2 = bboxes2.unsqueeze(0).repeat(2, 1, 1, 1)
gious = self(bboxes1, bboxes2, 'giou', True)
assert torch.all(gious >= -1) and torch.all(gious <= 1)
assert gious.size() == (2, 2, num_bbox)
# is_aligned is False
bboxes1, num_bbox1 = _construct_bbox()
bboxes2, num_bbox2 = _construct_bbox()
gious = self(bboxes1, bboxes2, 'giou')
assert torch.all(gious >= -1) and torch.all(gious <= 1)
assert gious.size() == (num_bbox1, num_bbox2)
# is_aligned is False, and bboxes.ndims > 2
bboxes1 = bboxes1.unsqueeze(0).repeat(2, 1, 1)
bboxes2 = bboxes2.unsqueeze(0).repeat(2, 1, 1)
gious = self(bboxes1, bboxes2, 'giou')
assert torch.all(gious >= -1) and torch.all(gious <= 1)
assert gious.size() == (2, num_bbox1, num_bbox2)
bboxes1 = bboxes1.unsqueeze(0)
bboxes2 = bboxes2.unsqueeze(0)
gious = self(bboxes1, bboxes2, 'giou')
assert torch.all(gious >= -1) and torch.all(gious <= 1)
assert gious.size() == (1, 2, num_bbox1, num_bbox2)
# is_aligned is False, bboxes1.size(-2) == 0
gious = self(torch.empty(1, 2, 0, 4), bboxes2, 'giou')
assert torch.all(gious == torch.empty(1, 2, 0, bboxes2.size(-2)))
assert torch.all(gious >= -1) and torch.all(gious <= 1)
# test allclose between bbox_overlaps and the original official
# implementation.
bboxes1 = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[32, 32, 38, 42],
])
bboxes2 = torch.FloatTensor([
[0, 0, 10, 20],
[0, 10, 10, 19],
[10, 10, 20, 20],
])
gious = bbox_overlaps(bboxes1, bboxes2, 'giou', is_aligned=True, eps=eps)
gious = gious.numpy().round(4)
# the gt is got with four decimal precision.
expected_gious = np.array([0.5000, -0.0500, -0.8214])
assert np.allclose(gious, expected_gious, rtol=0, atol=eps)
# test mode 'iof'
ious = bbox_overlaps(bboxes1, bboxes2, 'iof', is_aligned=True, eps=eps)
assert torch.all(ious >= -1) and torch.all(ious <= 1)
assert ious.size() == (bboxes1.size(0), )
ious = bbox_overlaps(bboxes1, bboxes2, 'iof', eps=eps)
assert torch.all(ious >= -1) and torch.all(ious <= 1)
assert ious.size() == (bboxes1.size(0), bboxes2.size(0))
| 4,230 | 38.915094 | 77 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.