repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
CRB-active-3Ddet | CRB-active-3Ddet-main/tools/train_utils/train_st_utils.py | import torch
import os
import glob
import tqdm
from torch.nn.utils import clip_grad_norm_
from pcdet.utils import common_utils
from pcdet.utils import self_training_utils
from pcdet.config import cfg
from .train_utils import save_checkpoint, checkpoint_state
def train_one_epoch_st(model, optimizer, source_reader, target_loader, model_func, lr_scheduler,
                       accumulated_iter, optim_cfg, rank, tbar, total_it_each_epoch,
                       dataloader_iter, tb_log=None, leave_pbar=False, ema_model=None):
    """Train `model` for one self-training epoch on pseudo-labelled target-domain data.

    Args:
        model: detector being trained (set to train mode each iteration).
        optimizer: torch optimizer (or fastai-style wrapper exposing `.lr`).
        source_reader: source-domain reader; currently unused in the loop but
            kept for interface compatibility with the caller.
        target_loader: target-domain dataloader providing pseudo-labelled batches.
        model_func: callable (model, batch) -> (loss, tb_dict, disp_dict).
        lr_scheduler: per-iteration LR scheduler, stepped with `accumulated_iter`.
        accumulated_iter: global iteration counter carried across epochs.
        optim_cfg: optimization config (uses GRAD_NORM_CLIP).
        rank: distributed rank; progress bars / logging happen only on rank 0.
        tbar: outer (epoch-level) tqdm bar to refresh with stats.
        total_it_each_epoch: number of iterations to run this epoch.
        dataloader_iter: iterator over `target_loader` (re-created on exhaustion).
        tb_log: optional TensorBoard writer.
        leave_pbar: whether to keep the inner progress bar after the epoch.
        ema_model: unused here; kept for interface compatibility.

    Returns:
        Updated `accumulated_iter`.
    """
    if total_it_each_epoch == len(target_loader):
        dataloader_iter = iter(target_loader)
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train', dynamic_ncols=True)
    # meters tracking pseudo-label usage and the self-training loss
    ps_bbox_meter = common_utils.AverageMeter()
    ignore_ps_bbox_meter = common_utils.AverageMeter()
    st_loss_meter = common_utils.AverageMeter()
    disp_dict = {}
    for cur_it in range(total_it_each_epoch):
        lr_scheduler.step(accumulated_iter)
        try:
            cur_lr = float(optimizer.lr)
        except (AttributeError, TypeError):
            # plain torch optimizers have no `.lr` attribute (was a bare except)
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
        model.train()
        optimizer.zero_grad()
        try:
            target_batch = next(dataloader_iter)
        except StopIteration:
            # loader exhausted mid-epoch (merged-iters mode): restart it
            dataloader_iter = iter(target_loader)
            target_batch = next(dataloader_iter)
            print('new iters')
        # parameters for save pseudo label on the fly
        st_loss, st_tb_dict, st_disp_dict = model_func(model, target_batch)
        st_loss.backward()
        st_loss_meter.update(st_loss.item())
        # count number of used ps bboxes in this batch
        pos_pseudo_bbox = target_batch['pos_ps_bbox'].mean().item()
        ign_pseudo_bbox = target_batch['ign_ps_bbox'].mean().item()
        ps_bbox_meter.update(pos_pseudo_bbox)
        ignore_ps_bbox_meter.update(ign_pseudo_bbox)
        st_tb_dict = common_utils.add_prefix_to_dict(st_tb_dict, 'st_')
        disp_dict.update(common_utils.add_prefix_to_dict(st_disp_dict, 'st_'))
        disp_dict.update({'st_loss': "{:.3f}({:.3f})".format(st_loss_meter.val, st_loss_meter.avg),
                          'pos_ps_box': ps_bbox_meter.avg,
                          'ign_ps_box': ignore_ps_bbox_meter.avg})
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter += 1
        # log to console and tensorboard
        if rank == 0:
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter, pos_ps_box=ps_bbox_meter.val,
                                  ign_ps_box=ignore_ps_bbox_meter.val))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
                # log the python float, not the autograd tensor
                tb_log.add_scalar('train/st_loss', st_loss.item(), accumulated_iter)
                tb_log.add_scalar('train/pos_ps_bbox', ps_bbox_meter.val, accumulated_iter)
                tb_log.add_scalar('train/ign_ps_bbox', ignore_ps_bbox_meter.val, accumulated_iter)
                for key, val in st_tb_dict.items():
                    tb_log.add_scalar('train/' + key, val, accumulated_iter)
    if rank == 0:
        pbar.close()
        if tb_log is not None:  # guard added: tb_log may legitimately be None
            tb_log.add_scalar('train/epoch_ign_ps_box', ignore_ps_bbox_meter.avg, accumulated_iter)
            tb_log.add_scalar('train/epoch_pos_ps_box', ps_bbox_meter.avg, accumulated_iter)
    return accumulated_iter
def train_model_active(model, optimizer, source_loader, target_loader, model_func, lr_scheduler, optim_cfg,
                       start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir, ps_label_dir,
                       source_sampler=None, target_sampler=None, lr_warmup_scheduler=None, ckpt_save_interval=1,
                       max_ckpt_save_num=50, merge_all_iters_to_one_epoch=False, logger=None, ema_model=None):
    """Placeholder for active training within the self-training pipeline.

    NOTE(review): intentionally unimplemented in this module — calling it is a
    no-op (returns None). A same-named, fully implemented version exists in
    train_active_utils; presumably that one is the entry point actually used.
    """
    pass
def train_model_st(model, optimizer, source_loader, target_loader, model_func, lr_scheduler, optim_cfg,
                   start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir, ps_label_dir,
                   source_sampler=None, target_sampler=None, lr_warmup_scheduler=None, ckpt_save_interval=1,
                   max_ckpt_save_num=50, merge_all_iters_to_one_epoch=False, logger=None, ema_model=None):
    """Self-training driver: alternates pseudo-label (re)generation on the target
    domain with epochs of training via `train_one_epoch_st`.

    Per epoch it may: refresh pseudo labels (per cfg.SELF_TRAIN.UPDATE_PSEUDO_LABEL
    / *_INTERVAL), re-scale the data augmentation (cfg.SELF_TRAIN.PROG_AUG), train
    one epoch, and save a checkpoint every `ckpt_save_interval` epochs on rank 0
    (keeping at most `max_ckpt_save_num` checkpoints).

    Returns: None. Side effects: writes checkpoints under `ckpt_save_dir` and
    pseudo-label files under `ps_label_dir`.
    """
    accumulated_iter = start_iter
    # wrap the source loader; the reader is passed through to train_one_epoch_st
    source_reader = common_utils.DataReader(source_loader, source_sampler)
    source_reader.construct_iter()
    # for continue training.
    # if already exist generated pseudo label result
    ps_pkl = self_training_utils.check_already_exsit_pseudo_label(ps_label_dir, start_epoch)
    if ps_pkl is not None:
        logger.info('==> Loading pseudo labels from {}'.format(ps_pkl))
    # for continue training: replay progressive-augmentation updates that would
    # have happened in the epochs already completed before this resume
    if cfg.SELF_TRAIN.get('PROG_AUG', None) and cfg.SELF_TRAIN.PROG_AUG.ENABLED and \
            start_epoch > 0:
        for cur_epoch in range(start_epoch):
            if cur_epoch in cfg.SELF_TRAIN.PROG_AUG.UPDATE_AUG:
                target_loader.dataset.data_augmentor.re_prepare(
                    augmentor_configs=None, intensity=cfg.SELF_TRAIN.PROG_AUG.SCALE)
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True,
                     leave=(rank == 0)) as tbar:
        total_it_each_epoch = len(target_loader)
        if merge_all_iters_to_one_epoch:
            assert hasattr(target_loader.dataset, 'merge_all_iters_to_one_epoch')
            target_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
            total_it_each_epoch = len(target_loader) // max(total_epochs, 1)
        dataloader_iter = iter(target_loader)
        for cur_epoch in tbar:
            if target_sampler is not None:
                target_sampler.set_epoch(cur_epoch)
            source_reader.set_cur_epoch(cur_epoch)
            # train one epoch: pick warmup scheduler during warmup epochs
            if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            # update pseudo label (at listed epochs, or every INTERVAL epochs after 0)
            if (cur_epoch in cfg.SELF_TRAIN.UPDATE_PSEUDO_LABEL) or \
                    ((cur_epoch % cfg.SELF_TRAIN.UPDATE_PSEUDO_LABEL_INTERVAL == 0)
                     and cur_epoch != 0):
                # switch dataset to eval mode while generating labels
                target_loader.dataset.eval()
                self_training_utils.save_pseudo_label_epoch(
                    model, target_loader, rank,
                    leave_pbar=True, ps_label_dir=ps_label_dir, cur_epoch=cur_epoch
                )
                target_loader.dataset.train()
            # curriculum data augmentation: increase augmentation intensity
            if cfg.SELF_TRAIN.get('PROG_AUG', None) and cfg.SELF_TRAIN.PROG_AUG.ENABLED and \
                    (cur_epoch in cfg.SELF_TRAIN.PROG_AUG.UPDATE_AUG):
                target_loader.dataset.data_augmentor.re_prepare(
                    augmentor_configs=None, intensity=cfg.SELF_TRAIN.PROG_AUG.SCALE)
            accumulated_iter = train_one_epoch_st(
                model, optimizer, source_reader, target_loader, model_func,
                lr_scheduler=cur_scheduler,
                accumulated_iter=accumulated_iter, optim_cfg=optim_cfg,
                rank=rank, tbar=tbar, tb_log=tb_log,
                leave_pbar=(cur_epoch + 1 == total_epochs),
                total_it_each_epoch=total_it_each_epoch,
                dataloader_iter=dataloader_iter, ema_model=ema_model
            )
            # save trained model (rank 0 only), pruning oldest checkpoints so at
            # most max_ckpt_save_num remain after this save
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                if ckpt_list.__len__() >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                state = checkpoint_state(model, optimizer, trained_epoch, accumulated_iter)
                save_checkpoint(state, filename=ckpt_name)
| 8,363 | 45.466667 | 108 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/tools/train_utils/train_utils.py | import glob
import os
import torch
import tqdm
import time
from torch.nn.utils import clip_grad_norm_
from pcdet.datasets import build_active_dataloader
from pcdet.utils import common_utils, commu_utils
import pickle
def train_one_epoch(model, optimizer, train_loader, model_func, lr_scheduler, accumulated_iter, optim_cfg,
                    rank, tbar, total_it_each_epoch, dataloader_iter, tb_log=None, leave_pbar=False):
    """Run one standard supervised training epoch.

    Args:
        model: network to train (set to train mode every iteration).
        optimizer: torch optimizer (or wrapper exposing `.lr`).
        train_loader: training dataloader.
        model_func: callable (model, batch) -> (loss, tb_dict, disp_dict).
        lr_scheduler: per-iteration scheduler, stepped with `accumulated_iter`.
        accumulated_iter: global iteration counter carried across epochs.
        optim_cfg: optimization config (uses GRAD_NORM_CLIP).
        rank: distributed rank; progress/logging only on rank 0.
        tbar: outer epoch-level tqdm bar.
        total_it_each_epoch: iterations to run this epoch.
        dataloader_iter: iterator over `train_loader` (restarted on exhaustion).
        tb_log: optional TensorBoard writer.
        leave_pbar: keep the inner progress bar after the epoch.

    Returns:
        Updated `accumulated_iter`.
    """
    if total_it_each_epoch == len(train_loader):
        dataloader_iter = iter(train_loader)
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train', dynamic_ncols=True)
    # timing meters: data loading, forward+loss, and full batch time
    data_time = common_utils.AverageMeter()
    batch_time = common_utils.AverageMeter()
    forward_time = common_utils.AverageMeter()
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        try:
            batch = next(dataloader_iter)
        except StopIteration:
            # loader exhausted mid-epoch (merged-iters mode): restart it
            dataloader_iter = iter(train_loader)
            batch = next(dataloader_iter)
            print('new iters')
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter)
        try:
            cur_lr = float(optimizer.lr)
        except (AttributeError, TypeError):
            # plain torch optimizers have no `.lr` attribute (was a bare except)
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
        model.train()
        optimizer.zero_grad()
        loss, tb_dict, disp_dict = model_func(model, batch)
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter += 1
        cur_batch_time = time.time() - end
        # average reduce the timings across distributed ranks
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr': cur_lr, 'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})', 'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                # log the python float, not the autograd tensor
                tb_log.add_scalar('train/loss', loss.item(), accumulated_iter)
                tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
                for key, val in tb_dict.items():
                    tb_log.add_scalar('train/' + key, val, accumulated_iter)
    if rank == 0:
        pbar.close()
    return accumulated_iter
def train_model(model, optimizer, train_loader, model_func, lr_scheduler, optim_cfg,
                start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir, train_sampler=None,
                lr_warmup_scheduler=None, ckpt_save_interval=1, max_ckpt_save_num=50,
                merge_all_iters_to_one_epoch=False):
    """Plain training driver: run `train_one_epoch` for epochs [start_epoch,
    total_epochs) and periodically checkpoint on rank 0, keeping at most
    `max_ckpt_save_num` checkpoint files in `ckpt_save_dir`.
    """
    accumulated_iter = start_iter
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar:
        iters_per_epoch = len(train_loader)
        if merge_all_iters_to_one_epoch:
            assert hasattr(train_loader.dataset, 'merge_all_iters_to_one_epoch')
            train_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
            iters_per_epoch = len(train_loader) // max(total_epochs, 1)
        dataloader_iter = iter(train_loader)
        for cur_epoch in tbar:
            if train_sampler is not None:
                train_sampler.set_epoch(cur_epoch)
            # use the warmup scheduler during warmup epochs, if one is given
            in_warmup = lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH
            cur_scheduler = lr_warmup_scheduler if in_warmup else lr_scheduler
            accumulated_iter = train_one_epoch(
                model, optimizer, train_loader, model_func,
                lr_scheduler=cur_scheduler,
                accumulated_iter=accumulated_iter, optim_cfg=optim_cfg,
                rank=rank, tbar=tbar, tb_log=tb_log,
                leave_pbar=(cur_epoch + 1 == total_epochs),
                total_it_each_epoch=iters_per_epoch,
                dataloader_iter=dataloader_iter
            )
            # checkpoint on rank 0 every `ckpt_save_interval` epochs
            trained_epoch = cur_epoch + 1
            if rank == 0 and trained_epoch % ckpt_save_interval == 0:
                existing = sorted(glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth')),
                                  key=os.path.getmtime)
                if len(existing) >= max_ckpt_save_num:
                    # drop the oldest so at most max_ckpt_save_num remain after saving
                    surplus = len(existing) - max_ckpt_save_num + 1
                    for stale_ckpt in existing[:surplus]:
                        os.remove(stale_ckpt)
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                save_checkpoint(
                    checkpoint_state(model, optimizer, trained_epoch, accumulated_iter),
                    filename=ckpt_name,
                )
def model_state_to_cpu(model_state):
    """Return a copy of `model_state` (same mapping type, e.g. OrderedDict)
    with every tensor value moved to CPU memory."""
    cpu_state = type(model_state)()  # preserve the ordered-dict type
    for key, tensor in model_state.items():
        cpu_state[key] = tensor.cpu()
    return cpu_state
def checkpoint_state(model=None, optimizer=None, epoch=None, it=None, lr_scheduler=None):
    """Assemble a serializable checkpoint dict.

    Args:
        model: network whose weights to store; DDP models are unwrapped and the
            state dict is moved to CPU so it can be loaded anywhere.
        optimizer: optimizer whose state_dict to store (or None).
        epoch: epoch number to record (or None).
        it: accumulated iteration counter to record (or None).
        lr_scheduler: scheduler to store; included only if it has `state_dict`.

    Returns:
        dict with keys 'epoch', 'it', 'model_state', 'optimizer_state',
        'version' (pcdet version string or 'none'), 'lr_scheduler'.
    """
    optim_state = optimizer.state_dict() if optimizer is not None else None
    if model is not None:
        if isinstance(model, torch.nn.parallel.DistributedDataParallel):
            # unwrap DDP and move weights to CPU for portable checkpoints
            model_state = model_state_to_cpu(model.module.state_dict())
        else:
            model_state = model.state_dict()
    else:
        model_state = None
    if lr_scheduler is not None and hasattr(lr_scheduler, 'state_dict'):
        lr_scheduler_state = lr_scheduler.state_dict()
    else:
        lr_scheduler_state = None
    try:
        import pcdet
        version = 'pcdet+' + pcdet.__version__
    except (ImportError, AttributeError):
        # narrowed from a bare except: pcdet missing or without __version__
        version = 'none'
    return {'epoch': epoch, 'it': it, 'model_state': model_state, 'optimizer_state': optim_state, 'version': version, 'lr_scheduler': lr_scheduler_state}
def save_checkpoint(state, filename='checkpoint'):
    """Serialize a checkpoint dict to disk as '<filename>.pth'.

    Args:
        state: picklable checkpoint dict (see `checkpoint_state`).
        filename: path prefix (str or Path-like); '.pth' is appended.
    """
    # dead commented-out optimizer-splitting code removed
    filename = '{}.pth'.format(filename)
    torch.save(state, filename)
def resume_datset(labelled_loader, unlabelled_loader, selected_frames_list, dist_train, logger, cfg):
    """Rebuild the labelled/unlabelled dataloaders after resuming active training.

    Replays the previously selected frame ids (pickled per selection round) by
    moving them from the unlabelled pool into the labelled pool, then constructs
    fresh dataloaders via `build_active_dataloader`.

    Args:
        labelled_loader / unlabelled_loader: current dataloaders (consumed;
            their datasets are released and rebuilt).
        selected_frames_list: paths of 'selected_frames_epoch_*.pkl' files, each
            containing a dict with a 'frame_id' sequence.
        dist_train: whether distributed training is enabled.
        logger: logger passed through to the dataloader builder.
        cfg: global config (DATA_CONFIG.DATASET distinguishes Kitti vs Waymo).

    Returns:
        (labelled_loader, unlabelled_loader) freshly built.
    """
    labelled_set = labelled_loader.dataset
    unlabelled_set = unlabelled_loader.dataset
    # Kitti and Waymo datasets expose ids/infos under different attribute names
    if cfg.DATA_CONFIG.DATASET == 'KittiDataset':
        pairs = list(zip(unlabelled_set.sample_id_list, unlabelled_set.kitti_infos))
        selected_id_list, selected_infos = list(labelled_set.sample_id_list), \
                                           list(labelled_set.kitti_infos)
        unselected_id_list, unselected_infos = list(unlabelled_set.sample_id_list), \
                                               list(unlabelled_set.kitti_infos)
    else:  # Waymo
        pairs = list(zip(unlabelled_set.frame_ids, unlabelled_set.infos))
        selected_id_list, selected_infos = list(labelled_set.frame_ids), \
                                           list(labelled_set.infos)
        unselected_id_list, unselected_infos = list(unlabelled_set.frame_ids), \
                                               list(unlabelled_set.infos)
    # collect all previously selected frame ids; a set makes the per-frame
    # membership test below O(1) instead of O(n)
    selected_frames_files = set()
    for file_dir in selected_frames_list:
        # context manager closes the pickle file (was leaked before)
        with open(file_dir, 'rb') as f:
            pkl_file = pickle.load(f)
        selected_frames_files.update(str(i) for i in pkl_file['frame_id'])
        print('successfully load the selected frames from {}'.format(file_dir.split('/')[-1]))
    # move every previously selected frame from the unlabelled to the labelled pool
    for frame_id, info in pairs:
        if frame_id in selected_frames_files:
            selected_id_list.append(frame_id)
            selected_infos.append(info)
            unselected_id_list.remove(frame_id)
            unselected_infos.remove(info)
    active_training = [tuple(selected_id_list), tuple(selected_infos),
                       tuple(unselected_id_list), tuple(unselected_infos)]
    # Create new dataloaders with the same batch size / workers as before
    batch_size = unlabelled_loader.batch_size
    print("Batch_size of a single loader: %d" % (batch_size))
    workers = unlabelled_loader.num_workers
    # delete old sets and loaders, save space for new sets and loaders
    del labelled_loader.dataset, unlabelled_loader.dataset, \
        labelled_loader, unlabelled_loader
    labelled_set, unlabelled_set, \
    labelled_loader, unlabelled_loader, \
    sampler_labelled, sampler_unlabelled = build_active_dataloader(
        cfg.DATA_CONFIG,
        cfg.CLASS_NAMES,
        batch_size,
        dist_train,
        workers=workers,
        logger=logger,
        training=True,
        active_training=active_training
    )
    return labelled_loader, unlabelled_loader
| 9,878 | 38.834677 | 153 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/tools/train_utils/train_active_utils.py | import torch
import os
import glob
import tqdm
import torch.nn as nn
from torch.nn.utils import clip_grad_norm_
from pcdet.utils import common_utils
from pcdet.utils import self_training_utils
from pcdet.utils import active_training_utils
from pcdet.config import cfg
from .train_utils import save_checkpoint, checkpoint_state, resume_datset
from train_utils.optimization import build_scheduler, build_optimizer
from train_utils.train_utils import model_state_to_cpu
import wandb
from pcdet.models import build_network
def train_one_epoch(model, optimizer, train_loader, model_func, lr_scheduler, accumulated_iter, optim_cfg,
                    rank, tbar, total_it_each_epoch, dataloader_iter, tb_log=None, leave_pbar=False, history_accumulated_iter=None):
    """Run one training epoch for the active-learning pipeline.

    Args:
        model: network to train (caller is expected to have set train mode;
            `model.train()` is also called once below before the loop).
        optimizer: torch optimizer (or wrapper exposing `.lr`).
        train_loader: labelled dataloader.
        model_func: callable (model, batch) -> (loss, tb_dict, disp_dict).
        lr_scheduler: per-iteration scheduler, stepped with `accumulated_iter`.
        accumulated_iter: iteration counter for scheduler stepping (reset to 1
            after each selection round by the caller).
        optim_cfg: optimization config (uses GRAD_NORM_CLIP).
        rank: distributed rank; progress/logging only on rank 0.
        tbar: outer epoch-level tqdm bar.
        total_it_each_epoch: iterations to run this epoch.
        dataloader_iter: iterator over `train_loader` (restarted on exhaustion).
        tb_log: optional TensorBoard writer (wandb is logged alongside it).
        leave_pbar: keep the inner progress bar after the epoch.
        history_accumulated_iter: optional global counter that never resets;
            when given it is used for logging steps and returned as well.

    Returns:
        `accumulated_iter`, or `(accumulated_iter, history_accumulated_iter)`
        when `history_accumulated_iter` was provided.
    """
    if total_it_each_epoch == len(train_loader):
        dataloader_iter = iter(train_loader)
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train', dynamic_ncols=True)
    model.train()
    for cur_it in range(total_it_each_epoch):
        try:
            batch = next(dataloader_iter)
        except StopIteration:
            # loader exhausted mid-epoch: restart it
            dataloader_iter = iter(train_loader)
            batch = next(dataloader_iter)
            print('new iters')
        try:
            cur_lr = float(optimizer.lr)
        except (AttributeError, TypeError):
            # plain torch optimizers have no `.lr` attribute (was a bare except)
            cur_lr = optimizer.param_groups[0]['lr']
        optimizer.zero_grad()
        loss, tb_dict, disp_dict = model_func(model, batch)
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        # note: scheduler is stepped after optimizer.step(), unlike the other
        # train_one_epoch variants in this project
        lr_scheduler.step(accumulated_iter)
        if history_accumulated_iter is not None:
            history_accumulated_iter += 1
            accumulated_iter += 1
            log_accumulated_iter = history_accumulated_iter
        else:
            accumulated_iter += 1
            log_accumulated_iter = accumulated_iter
        disp_dict.update({'loss': loss.item(), 'lr': cur_lr})
        # log to console and tensorboard
        if rank == 0:
            pbar.update()
            pbar.set_postfix(dict(total_it=log_accumulated_iter))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                # log python floats, not the autograd tensor
                tb_log.add_scalar('train/loss', loss.item(), log_accumulated_iter)
                tb_log.add_scalar('meta_data/learning_rate', cur_lr, log_accumulated_iter)
                wandb.log({'train/loss': loss.item(), 'meta_data/learning_rate': cur_lr}, step=log_accumulated_iter)
                for key, val in tb_dict.items():
                    tb_log.add_scalar('train/' + key, val, log_accumulated_iter)
                    wandb.log({'train/' + key: val}, step=log_accumulated_iter)
    if rank == 0:
        pbar.close()
    if history_accumulated_iter is not None:
        return accumulated_iter, history_accumulated_iter
    else:
        return accumulated_iter
def train_model_active(model, optimizer, labelled_loader, unlabelled_loader, model_func, lr_scheduler, optim_cfg,
                       start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir, active_label_dir, backbone_dir,
                       labelled_sampler=None, unlabelled_sampler=None, lr_warmup_scheduler=None, ckpt_save_interval=1,
                       max_ckpt_save_num=50, merge_all_iters_to_one_epoch=False, logger=None, ema_model=None, dist_train=False):
    """Active-learning training driver.

    Stages (see numbered comments below):
      1. Pre-train on the initially labelled pool for
         cfg.ACTIVE_TRAIN.PRE_TRAIN_EPOCH_NUMS epochs (optionally resuming from
         checkpoints under `backbone_dir` / `ckpt_save_dir`).
      2. Derive the total epoch count from the labelling budget.
      3./4. Loop: every SELECT_LABEL_EPOCH_INTERVAL epochs, query new frames via
         `active_training_utils.select_active_labels` (optionally training the
         'llal' loss net first), reload the initial backbone weights, rebuild
         the schedulers, then train on the enlarged labelled pool and
         checkpoint on rank 0.

    Side effects: writes checkpoints under `backbone_dir`/`ckpt_save_dir` and
    selection pickles under `active_label_dir`. Returns None.
    """
    # 1. Pre-train the labelled_loader for a number of epochs (cfg.ACTIVE_TRAIN.PRE_TRAIN_EPOCH_NUMS)
    accumulated_iter = start_iter
    active_pre_train_epochs = cfg.ACTIVE_TRAIN.PRE_TRAIN_EPOCH_NUMS
    logger.info("***** Start Active Pre-train *****")
    pretrain_resumed = False
    selected_frames_list = []
    # snapshot the untrained weights once; they are reloaded after every selection
    backbone_init_ckpt = str(backbone_dir / 'init_checkpoint.pth')
    if not os.path.isfile(backbone_init_ckpt):
        if isinstance(model, torch.nn.parallel.DistributedDataParallel):
            model_state = model_state_to_cpu(model.module.state_dict())
        else:
            model_state = model.state_dict()
        torch.save(model_state, backbone_init_ckpt)
        logger.info("**init backbone weights saved...**")
    if cfg.ACTIVE_TRAIN.TRAIN_RESUME:
        # try:
        backbone_ckpt_list = [i for i in glob.glob(str(backbone_dir / 'checkpoint_epoch_*.pth'))]
        assert(len(backbone_ckpt_list) > 0) # otherwise nothing to resume
        ckpt_list = [i for i in glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))]
        # filter out the backbones trained after active_pre_train_epochs
        backbone_ckpt_list = [i for i in backbone_ckpt_list if int(i.split('_')[-1].split('.')[0]) <= active_pre_train_epochs]
        if len(ckpt_list) < 1:
            # just at the pretrain stage: resume from the newest backbone checkpoint
            backbone_ckpt_list.sort(key=os.path.getmtime)
            last_epoch = int(backbone_ckpt_list[-1].split('_')[-1].split('.')[0])
            if last_epoch >= active_pre_train_epochs:
                pretrain_resumed = True
            elif cfg.ACTIVE_TRAIN.METHOD=='llal':
                # llal requires a fully pre-trained backbone before selection
                logger.info("need to finish the backbone pre-training first...")
                raise NotImplementedError
            logger.info('found {}th epoch pretrain model weights, start resumeing...'.format(last_epoch))
            model_str = str(backbone_dir / backbone_ckpt_list[-1])
            ckpt_last_epoch = torch.load(str(backbone_dir / backbone_ckpt_list[-1]))
        else:
            # at the active train stage: resume from the newest active-train
            # checkpoint and replay the recorded frame selections
            pretrain_resumed = True
            ckpt_list.sort(key=os.path.getmtime)
            last_epoch = int(ckpt_list[-1].split('_')[-1].split('.')[0])
            model_str = str(ckpt_save_dir / ckpt_list[-1])
            ckpt_last_epoch = torch.load(str(ckpt_save_dir / ckpt_list[-1]))
            selected_frames_list = [str(active_label_dir / i) for i in glob.glob(str(active_label_dir / 'selected_frames_epoch_*.pkl'))]
        # llal stores extra loss-net weights, hence the non-strict load there
        if isinstance(model, torch.nn.parallel.DistributedDataParallel) or isinstance(model, torch.nn.DataParallel):
            model.module.load_state_dict(ckpt_last_epoch['model_state'], strict=cfg.ACTIVE_TRAIN.METHOD!='llal')
        else:
            model.load_state_dict(ckpt_last_epoch['model_state'], strict=cfg.ACTIVE_TRAIN.METHOD!='llal')
        # optimizer.load_state_dict(ckpt_last_epoch['optimizer_state'])
        if ckpt_last_epoch['lr_scheduler'] is not None:
            lr_scheduler.load_state_dict(ckpt_last_epoch['lr_scheduler'])
        start_epoch = int(model_str.split('_')[-1].split('.')[0])
        cur_epoch = start_epoch
        accumulated_iter = ckpt_last_epoch['it']
        if len(selected_frames_list) > 0:
            # move previously selected frames back into the labelled pool
            labelled_loader, unlabelled_loader = resume_datset(labelled_loader, unlabelled_loader, selected_frames_list, dist_train, logger, cfg)
            trained_steps = (cur_epoch - cfg.ACTIVE_TRAIN.PRE_TRAIN_EPOCH_NUMS) % cfg.ACTIVE_TRAIN.SELECT_LABEL_EPOCH_INTERVAL
            # NOTE(review): new_accumulated_iter set here is overwritten below
            # after the next selection round — verify this resume path
            new_accumulated_iter = len(labelled_loader) * trained_steps
    # except Exception:
    #     cfg.ACTIVE_TRAIN.TRAIN_RESUME = False
    #     logger.info('no backbone found. training from strach.')
    #     pass
    if not pretrain_resumed:
        # stage 1: supervised pre-training on the initial labelled pool
        with tqdm.trange(start_epoch, active_pre_train_epochs, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar:
            total_it_each_epoch = len(labelled_loader)
            if merge_all_iters_to_one_epoch:
                assert hasattr(labelled_loader.dataset, 'merge_all_iters_to_one_epoch')
                labelled_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=active_pre_train_epochs)
                total_it_each_epoch = len(labelled_loader) // max(active_pre_train_epochs, 1)
            dataloader_iter = iter(labelled_loader)
            for cur_epoch in tbar:
                if labelled_sampler is not None:
                    labelled_sampler.set_epoch(cur_epoch)
                # train one epoch
                if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                    cur_scheduler = lr_warmup_scheduler # TODO: currently not related
                else:
                    cur_scheduler = lr_scheduler
                accumulated_iter = train_one_epoch(
                    model, optimizer, labelled_loader, model_func,
                    lr_scheduler=cur_scheduler,
                    accumulated_iter=accumulated_iter, optim_cfg=optim_cfg,
                    rank=rank, tbar=tbar, tb_log=tb_log,
                    leave_pbar=(cur_epoch + 1 == active_pre_train_epochs),
                    total_it_each_epoch=total_it_each_epoch,
                    dataloader_iter=dataloader_iter
                )
                # save pre-trained model (only from epoch 20 on, rank 0 only)
                trained_epoch = cur_epoch + 1
                if trained_epoch > 19 and trained_epoch % ckpt_save_interval == 0 and rank == 0:
                    backbone_ckpt_list = glob.glob(str(backbone_dir / 'checkpoint_epoch_*.pth'))
                    backbone_ckpt_list.sort(key=os.path.getmtime)
                    if backbone_ckpt_list.__len__() >= max_ckpt_save_num:
                        for cur_file_idx in range(0, len(backbone_ckpt_list) - max_ckpt_save_num + 1):
                            os.remove(backbone_ckpt_list[cur_file_idx])
                    ckpt_name = backbone_dir / ('checkpoint_epoch_%d' % trained_epoch)
                    save_checkpoint(
                        checkpoint_state(model, optimizer, trained_epoch, accumulated_iter, lr_scheduler), filename=ckpt_name,
                    )
    start_epoch = active_pre_train_epochs
    logger.info("***** Complete Active Pre-train *****")
    # 2. Compute total epochs
    # (Total budgets / selection number each time) * number of epochs needed for each selection
    total_epochs = active_pre_train_epochs + \
                   int((cfg.ACTIVE_TRAIN.TOTAL_BUDGET_NUMS / cfg.ACTIVE_TRAIN.SELECT_NUMS) # 50 + (300 / 50) * 10
                       * (cfg.ACTIVE_TRAIN.SELECT_LABEL_EPOCH_INTERVAL))
    # 3 & 4. Active training loop
    logger.info("***** Start Active Train Loop *****")
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True,
                     leave=(rank == 0)) as tbar:
        #
        # if merge_all_iters_to_one_epoch:
        #     assert hasattr(unlabelled_loader.dataset, 'merge_all_iters_to_one_epoch')
        #     unlabelled_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
        #     total_it_each_epoch = len(unlabelled_loader) // max(total_epochs, 1)
        selection_num = 0
        for cur_epoch in tbar:
            # 1) query, select and move active labelled samples
            if cur_epoch == active_pre_train_epochs or \
                    ((cur_epoch - active_pre_train_epochs)
                     % cfg.ACTIVE_TRAIN.SELECT_LABEL_EPOCH_INTERVAL == 0):
                # train loss_net for llal
                if cfg.ACTIVE_TRAIN.METHOD=='llal':
                    if os.path.isfile(os.path.join(ckpt_save_dir, 'init_loss_net_checkpoint.pth')):
                        loss_net_ckpt = torch.load(os.path.join(ckpt_save_dir, 'pretrained_loss_net_{}.pth'.format(cur_epoch)))
                        model.roi_head.loss_net.load_state_dict(loss_net_ckpt['model_state'])
                    else:
                        # first round: snapshot the untrained loss net
                        ckpt_name = os.path.join(ckpt_save_dir, 'init_loss_net_checkpoint.pth')
                        save_checkpoint(
                            checkpoint_state(model.roi_head.loss_net, optimizer=None, epoch=cur_epoch, it=accumulated_iter, lr_scheduler=None), filename=ckpt_name,
                        )
                    model.train()
                    logger.info('**Epoch {}**: start training the loss net...'.format(cur_epoch))
                    # NOTE(review): this inner `tbar` shadows the outer epoch bar
                    with tqdm.trange(0, cfg.ACTIVE_TRAIN.LOSS_NET_TRAIN_EPOCH, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar:
                        total_it_each_epoch = len(labelled_loader)
                        if merge_all_iters_to_one_epoch:
                            assert hasattr(labelled_loader.dataset, 'merge_all_iters_to_one_epoch')
                            labelled_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=active_pre_train_epochs)
                            total_it_each_epoch = len(labelled_loader) // max(active_pre_train_epochs, 1)
                        dataloader_iter = iter(labelled_loader)
                        # train model's loss net only
                        for param in model.roi_head.loss_net.parameters():
                            param.requires_grad = True
                        loss_net_optimizer = build_optimizer(model.roi_head.loss_net, cfg.OPTIMIZATION)
                        loss_net_scheduler, _ = build_scheduler(loss_net_optimizer, total_iters_each_epoch=total_it_each_epoch, total_epochs=cfg.ACTIVE_TRAIN.LOSS_NET_TRAIN_EPOCH, last_epoch=None, optim_cfg=cfg.OPTIMIZATION)
                        for temp_cur_epoch in tbar:
                            if labelled_sampler is not None:
                                labelled_sampler.set_epoch(temp_cur_epoch)
                            # train one epoch
                            accumulated_iter = train_one_epoch(
                                model, loss_net_optimizer, labelled_loader, model_func,
                                lr_scheduler=loss_net_scheduler,
                                accumulated_iter=accumulated_iter, optim_cfg=optim_cfg,
                                rank=rank, tbar=tbar, tb_log=tb_log,
                                # leave_pbar=(cur_epoch + 1 == cfg.ACTIVE_TRAIN.LOSS_NET_TRAIN_EPOCH),
                                total_it_each_epoch=total_it_each_epoch,
                                dataloader_iter=dataloader_iter
                            )
                        # save pre-trained model
                        if rank == 0:
                            ckpt_name = os.path.join(ckpt_save_dir, 'pretrained_loss_net_{}'.format(cur_epoch))
                            save_checkpoint(
                                checkpoint_state(model.roi_head.loss_net, optimizer=None, epoch=cur_epoch, it=accumulated_iter, lr_scheduler=None), filename=ckpt_name,
                            )
                    # prevent loss net to be trained with other parts
                    for param in model.roi_head.loss_net.parameters():
                        param.requires_grad = False
                selection_num = selection_num + cfg.ACTIVE_TRAIN.SELECT_NUMS
                # select new active samples
                # unlabelled_loader.dataset.eval()
                labelled_loader, unlabelled_loader \
                    = active_training_utils.select_active_labels(
                    model,
                    labelled_loader,
                    unlabelled_loader,
                    rank,
                    logger,
                    method = cfg.ACTIVE_TRAIN.METHOD,
                    leave_pbar=True,
                    cur_epoch=cur_epoch,
                    dist_train=dist_train,
                    active_label_dir=active_label_dir,
                    accumulated_iter=accumulated_iter
                )
                # reset the lr of optimizer and lr scheduler after each selection round
                total_iters_each_epoch = len(labelled_loader)
                # decay_rate = cur_epoch // cfg.ACTIVE_TRAIN.SELECT_LABEL_EPOCH_INTERVAL + 2
                # training from scratch
                logger.info("**finished selection: reload init weights of the model")
                backbone_init_ckpt = torch.load(str(backbone_dir / 'init_checkpoint.pth'))
                model.load_state_dict(backbone_init_ckpt, strict=cfg.ACTIVE_TRAIN.METHOD!='llal') # strict=cfg.ACTIVE_TRAIN.METHOD!='llal'
                # rebuild optimizer and scheduler
                for g in optimizer.param_groups:
                    g['lr'] = cfg.OPTIMIZATION.LR #/ decay_rate
                lr_scheduler, lr_warmup_scheduler = build_scheduler(
                    optimizer, total_iters_each_epoch=total_iters_each_epoch, total_epochs=cfg.ACTIVE_TRAIN.SELECT_LABEL_EPOCH_INTERVAL,
                    last_epoch=None, optim_cfg=cfg.OPTIMIZATION
                )
                # iter counter to zero (per-round counter used for LR stepping)
                new_accumulated_iter = 1
                if labelled_sampler is not None:
                    labelled_sampler.set_epoch(cur_epoch)
            # 2) train one epoch
            # labelled_loader.dataset.train()
            total_it_each_epoch = len(labelled_loader)
            logger.info("currently {} iterations to learn per epoch".format(total_it_each_epoch))
            dataloader_iter = iter(labelled_loader)
            if labelled_sampler is not None:
                labelled_sampler.set_epoch(cur_epoch)
            # train one epoch
            if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler # TODO - currently not related
            else:
                cur_scheduler = lr_scheduler
            # per-round counter drives the scheduler; history counter drives logging
            new_accumulated_iter, accumulated_iter = train_one_epoch(
                model, optimizer, labelled_loader, model_func,
                lr_scheduler=cur_scheduler,
                accumulated_iter=new_accumulated_iter, optim_cfg=optim_cfg,
                rank=rank, tbar=tbar, tb_log=tb_log,
                # leave_pbar=(cur_epoch + 1 == active_pre_train_epochs),
                total_it_each_epoch=total_it_each_epoch,
                dataloader_iter=dataloader_iter,
                history_accumulated_iter=accumulated_iter
            )
            # 3) save trained model (rank 0, pruning to max_ckpt_save_num files)
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                if ckpt_list.__len__() >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % (trained_epoch))
                state = checkpoint_state(model, optimizer, trained_epoch, accumulated_iter, lr_scheduler)
                save_checkpoint(state, filename=ckpt_name)
                logger.info('***checkpoint {} has been saved***'.format(trained_epoch))
| 19,002 | 49.272487 | 224 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/tools/train_utils/optimization/fastai_optim.py | # This file is modified from https://github.com/traveller59/second.pytorch
try:
from collections.abc import Iterable
except:
from collections import Iterable
import torch
from torch import nn
from torch._utils import _unflatten_dense_tensors
from torch.nn.utils import parameters_to_vector
bn_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d, nn.SyncBatchNorm)
def split_bn_bias(layer_groups):
    "Split the layers in `layer_groups` into batchnorm (`bn_types`) and non-batchnorm groups."
    result = []
    for group in layer_groups:
        non_bn = [child for child in group.children() if not isinstance(child, bn_types)]
        bn = [child for child in group.children() if isinstance(child, bn_types)]
        # each input group yields two groups: non-batchnorm first, batchnorm second
        result.extend([nn.Sequential(*non_bn), nn.Sequential(*bn)])
    return result
def get_master(layer_groups, flat_master: bool = False):
    "Return two lists, one for the model parameters in FP16 and one for the master parameters in FP32."
    split_groups = split_bn_bias(layer_groups)
    model_params = [[p for p in group.parameters() if p.requires_grad] for group in split_groups]
    if not flat_master:
        # one FP32 master copy per model parameter, detached but trainable
        master_params = [[p.clone().float().detach() for p in group] for group in model_params]
        for group in master_params:
            for p in group:
                p.requires_grad = True
        return model_params, master_params
    # flat_master: one flattened FP32 vector per group
    master_params = []
    for group in model_params:
        if len(group) == 0:
            master_params.append([])
            continue
        flat = parameters_to_vector([p.data.float() for p in group])
        flat = torch.nn.Parameter(flat, requires_grad=True)
        if flat.grad is None:
            flat.grad = flat.new(*flat.size())
        master_params.append([flat])
    return model_params, master_params
def model_g2master_g(model_params, master_params, flat_master: bool = False) -> None:
    "Copy the `model_params` gradients to `master_params` for the optimizer step."
    for model_group, master_group in zip(model_params, master_params):
        if flat_master:
            # flatten all model grads into the single master grad vector
            if len(master_group) != 0:
                flat_grads = parameters_to_vector([p.grad.data.float() for p in model_group])
                master_group[0].grad.data.copy_(flat_grads)
        else:
            for model, master in zip(model_group, master_group):
                if model.grad is None:
                    master.grad = None
                    continue
                if master.grad is None:
                    master.grad = master.data.new(*master.data.size())
                master.grad.data.copy_(model.grad.data)
def master2model(model_params, master_params, flat_master: bool = False) -> None:
    "Copy `master_params` to `model_params`."
    if flat_master:
        # Un-flatten the single master vector back into the model tensors.
        for m_group, flat_group in zip(model_params, master_params):
            if len(m_group) != 0:
                chunks = _unflatten_dense_tensors(flat_group[0].data, m_group)
                for m_param, chunk in zip(m_group, chunks):
                    m_param.data.copy_(chunk)
    else:
        for m_group, master_group in zip(model_params, master_params):
            for m_param, master in zip(m_group, master_group):
                m_param.data.copy_(master.data)
def listify(p=None, q=None):
    "Make `p` listy and the same length as `q`."
    if p is None:
        p = []
    elif isinstance(p, str) or not isinstance(p, Iterable):
        # Strings and scalars both become single-element lists.
        p = [p]
    # Target length: q itself if it is an int, else len(q) (or len(p) if q is None).
    n = q if type(q) == int else len(p) if q is None else len(q)
    if len(p) == 1:
        p = p * n
    assert len(p) == n, f'List len mismatch ({len(p)} vs {n})'
    return list(p)
def trainable_params(m: nn.Module):
    "Return list of trainable params in `m`."
    # Lazily yields only the parameters that still require gradients.
    return (p for p in m.parameters() if p.requires_grad)
def is_tuple(x) -> bool:
    "Return True when `x` is a tuple (including tuple subclasses)."
    return isinstance(x, tuple)
# copy from fastai.
class OptimWrapper():
    "Basic wrapper around `opt` to simplify hyper-parameters changes."
    # Ported from fastai. The wrapped optimizer is expected to keep its param
    # groups in (non-BN, BN) pairs as produced by `split_bn_bias`, which is why
    # the hyper-parameter helpers below stride over param_groups[::2] / [1::2].
    def __init__(self, opt, wd, true_wd: bool = False, bn_wd: bool = True):
        # true_wd: apply decoupled (AdamW-style) weight decay manually in step().
        # bn_wd: whether weight decay is also applied to the BatchNorm groups.
        self.opt, self.true_wd, self.bn_wd = opt, true_wd, bn_wd
        self.opt_keys = list(self.opt.param_groups[0].keys())
        self.opt_keys.remove('params')
        self.read_defaults()
        self.wd = wd
    @classmethod
    def create(cls, opt_func, lr,
               layer_groups, **kwargs):
        "Create an `optim.Optimizer` from `opt_func` with `lr`. Set lr on `layer_groups`."
        split_groups = split_bn_bias(layer_groups)
        # lr starts at 0 here; the real per-group lrs are assigned just below.
        opt = opt_func([{'params': trainable_params(l), 'lr': 0} for l in split_groups])
        opt = cls(opt, **kwargs)
        opt.lr, opt.opt_func = listify(lr, layer_groups), opt_func
        return opt
    def new(self, layer_groups):
        "Create a new `OptimWrapper` from `self` with another `layer_groups` but the same hyper-parameters."
        opt_func = getattr(self, 'opt_func', self.opt.__class__)
        split_groups = split_bn_bias(layer_groups)
        # NOTE(review): `opt` built here is never used -- `create` rebuilds the
        # optimizer itself. Kept as-is to preserve behavior byte-for-byte.
        opt = opt_func([{'params': trainable_params(l), 'lr': 0} for l in split_groups])
        return self.create(opt_func, self.lr, layer_groups, wd=self.wd, true_wd=self.true_wd, bn_wd=self.bn_wd)
    def __repr__(self) -> str:
        return f'OptimWrapper over {repr(self.opt)}.\nTrue weight decay: {self.true_wd}'
    # Pytorch optimizer methods
    def step(self) -> None:
        "Set weight decay and step optimizer."
        # weight decay outside of optimizer step (AdamW)
        if self.true_wd:
            # Decay weights directly (decoupled from the gradient update), then
            # zero the inner optimizer's weight_decay so it is not applied twice.
            for lr, wd, pg1, pg2 in zip(self._lr, self._wd, self.opt.param_groups[::2], self.opt.param_groups[1::2]):
                for p in pg1['params']:
                    # When some parameters are fixed: Shaoshuai Shi
                    if p.requires_grad is False:
                        continue
                    p.data.mul_(1 - wd * lr)
                if self.bn_wd:
                    for p in pg2['params']:
                        # When some parameters are fixed: Shaoshuai Shi
                        if p.requires_grad is False:
                            continue
                        p.data.mul_(1 - wd * lr)
            self.set_val('weight_decay', listify(0, self._wd))
        self.opt.step()
    def zero_grad(self) -> None:
        "Clear optimizer gradients."
        self.opt.zero_grad()
    # Passthrough to the inner opt.
    def __getattr__(self, k: str):
        # NOTE(review): silently returns None (instead of raising
        # AttributeError) when the inner optimizer lacks the attribute too.
        return getattr(self.opt, k, None)
    def clear(self):
        "Reset the state of the inner optimizer."
        sd = self.state_dict()
        sd['state'] = {}
        self.load_state_dict(sd)
    # Hyperparameters as properties
    @property
    def lr(self) -> float:
        return self._lr[-1]
    @lr.setter
    def lr(self, val: float) -> None:
        self._lr = self.set_val('lr', listify(val, self._lr))
    @property
    def mom(self) -> float:
        return self._mom[-1]
    @mom.setter
    def mom(self, val: float) -> None:
        # Adam-style optimizers store momentum as the first element of `betas`.
        if 'momentum' in self.opt_keys:
            self.set_val('momentum', listify(val, self._mom))
        elif 'betas' in self.opt_keys:
            self.set_val('betas', (listify(val, self._mom), self._beta))
        self._mom = listify(val, self._mom)
    @property
    def beta(self) -> float:
        return None if self._beta is None else self._beta[-1]
    @beta.setter
    def beta(self, val: float) -> None:
        "Set beta (or alpha as makes sense for given optimizer)."
        if val is None: return
        if 'betas' in self.opt_keys:
            self.set_val('betas', (self._mom, listify(val, self._beta)))
        elif 'alpha' in self.opt_keys:
            self.set_val('alpha', listify(val, self._beta))
        self._beta = listify(val, self._beta)
    @property
    def wd(self) -> float:
        return self._wd[-1]
    @wd.setter
    def wd(self, val: float) -> None:
        "Set weight decay."
        # Under true_wd the inner optimizer's weight_decay is left untouched
        # here; decay is applied manually in step() instead.
        if not self.true_wd: self.set_val('weight_decay', listify(val, self._wd), bn_groups=self.bn_wd)
        self._wd = listify(val, self._wd)
    # Helper functions
    def read_defaults(self) -> None:
        "Read the values inside the optimizer for the hyper-parameters."
        self._beta = None
        if 'lr' in self.opt_keys: self._lr = self.read_val('lr')
        if 'momentum' in self.opt_keys: self._mom = self.read_val('momentum')
        if 'alpha' in self.opt_keys: self._beta = self.read_val('alpha')
        if 'betas' in self.opt_keys: self._mom, self._beta = self.read_val('betas')
        if 'weight_decay' in self.opt_keys: self._wd = self.read_val('weight_decay')
    def set_val(self, key: str, val, bn_groups: bool = True):
        "Set `val` inside the optimizer dictionary at `key`."
        # Tuples (e.g. betas) arrive as a pair of per-group lists; re-zip them
        # into one (v1, v2) tuple per param-group pair.
        if is_tuple(val): val = [(v1, v2) for v1, v2 in zip(*val)]
        for v, pg1, pg2 in zip(val, self.opt.param_groups[::2], self.opt.param_groups[1::2]):
            pg1[key] = v
            if bn_groups: pg2[key] = v
        return val
    def read_val(self, key: str):
        "Read a hyperparameter `key` in the optimizer dictionary."
        val = [pg[key] for pg in self.opt.param_groups[::2]]
        if is_tuple(val[0]): val = [o[0] for o in val], [o[1] for o in val]
        return val
class FastAIMixedOptim(OptimWrapper):
    # Mixed-precision variant: the model keeps FP16 parameters while the
    # wrapped optimizer steps on FP32 "master" copies; gradients are assumed
    # to have been multiplied by `loss_scale` during backward and are
    # un-scaled here before stepping.
    @classmethod
    def create(cls, opt_func, lr,
               layer_groups, model, flat_master=False, loss_scale=512.0, **kwargs):
        "Create an `optim.Optimizer` from `opt_func` with `lr`. Set lr on `layer_groups`."
        opt = OptimWrapper.create(opt_func, lr, layer_groups, **kwargs)
        opt.model_params, opt.master_params = get_master(layer_groups, flat_master)
        opt.flat_master = flat_master
        opt.loss_scale = loss_scale
        opt.model = model
        # Changes the optimizer so that the optimization step is done in FP32.
        # opt = self.learn.opt
        mom, wd, beta = opt.mom, opt.wd, opt.beta
        # Each logical layer group was split into a (non-BN, BN) pair, so two
        # consecutive param groups share every learning rate.
        lrs = [lr for lr in opt._lr for _ in range(2)]
        opt_params = [{'params': mp, 'lr': lr} for mp, lr in zip(opt.master_params, lrs)]
        opt.opt = opt_func(opt_params)
        # Re-apply the saved hyper-parameters onto the rebuilt FP32 optimizer.
        opt.mom, opt.wd, opt.beta = mom, wd, beta
        return opt
    def step(self):
        # Copy (scaled) FP16 grads onto the FP32 masters, undo the loss
        # scaling, step in FP32, then push the updated masters back.
        model_g2master_g(self.model_params, self.master_params, self.flat_master)
        for group in self.master_params:
            for param in group: param.grad.div_(self.loss_scale)
        super(FastAIMixedOptim, self).step()
        self.model.zero_grad()
        # Update the params from master to model.
        master2model(self.model_params, self.master_params, self.flat_master)
| 10,535 | 38.758491 | 117 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/tools/train_utils/optimization/learning_schedules_fastai.py | # This file is modified from https://github.com/traveller59/second.pytorch
import math
from functools import partial
import numpy as np
import torch.optim.lr_scheduler as lr_sched
from .fastai_optim import OptimWrapper
class LRSchedulerStep(object):
    """Step-based scheduler that anneals lr/momentum through configured phases.

    Each entry of ``lr_phases`` / ``mom_phases`` is ``(start, lambda_func)``
    where ``start`` is a fraction of ``total_step`` and ``lambda_func`` maps
    the in-phase progress (0..1) to a value.  ``lambda_func`` may also be a
    string, which is then ``eval``-ed.
    """
    def __init__(self, fai_optimizer: OptimWrapper, total_step, lr_phases,
                 mom_phases):
        # if not isinstance(fai_optimizer, OptimWrapper):
        #     raise TypeError('{} is not a fastai OptimWrapper'.format(
        #         type(fai_optimizer).__name__))
        self.optimizer = fai_optimizer
        self.total_step = total_step
        # The lr and momentum phase lists were built by two identical copies of
        # the same loop; deduplicated into a single helper.
        self.lr_phases = self._build_phases(lr_phases, total_step)
        self.mom_phases = self._build_phases(mom_phases, total_step)

    @staticmethod
    def _build_phases(phases, total_step):
        """Convert fractional ``(start, func)`` pairs into ``(start_step, end_step, func)``."""
        built = []
        for i, (start, lambda_func) in enumerate(phases):
            if len(built) != 0:
                # NOTE(review): compares the previous start in *steps* with the
                # current start as a *fraction*; only holds in general for
                # two-phase configs (the only usage here, via OneCycle).
                assert built[-1][0] < start
            if isinstance(lambda_func, str):
                # SECURITY: eval on a config-provided string; configs must be trusted.
                lambda_func = eval(lambda_func)
            # A phase ends where the next one starts, or at the final step.
            end = int(phases[i + 1][0] * total_step) if i < len(phases) - 1 else total_step
            built.append((int(start * total_step), end, lambda_func))
        assert built[0][0] == 0
        return built

    def step(self, step):
        """Update optimizer lr/momentum for global step index `step`."""
        # Later phases overwrite earlier ones, so the last phase whose start
        # has been reached wins.
        for start, end, func in self.lr_phases:
            if step >= start:
                self.optimizer.lr = func((step - start) / (end - start))
        for start, end, func in self.mom_phases:
            if step >= start:
                self.optimizer.mom = func((step - start) / (end - start))
def annealing_cos(start, end, pct):
    "Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0."
    # Interpolation weight follows half a cosine wave: 1 at pct=0, 0 at pct=1.
    weight = (np.cos(np.pi * pct) + 1) / 2
    return end + (start - end) * weight
class OneCycle(LRSchedulerStep):
    """One-cycle policy: lr ramps low->max over the first `pct_start` of
    training and then anneals max->~0, while momentum follows the inverse
    pattern (high->low->high).

    Args:
        fai_optimizer: wrapped optimizer exposing `.lr` / `.mom`.
        total_step: total number of optimization steps.
        lr_max: peak learning rate.
        moms: (high, low) momentum pair.
        div_factor: starting lr is lr_max / div_factor.
        pct_start: fraction of steps spent in the warm-up phase.
    """
    def __init__(self, fai_optimizer, total_step, lr_max, moms, div_factor,
                 pct_start):
        self.lr_max = lr_max
        self.moms = moms
        self.div_factor = div_factor
        self.pct_start = pct_start
        # FIX: removed dead locals `a1 = int(total_step * pct_start)` and
        # `a2 = total_step - a1` -- they were computed but never used.
        low_lr = self.lr_max / self.div_factor
        # Phase 1: cosine ramp low_lr -> lr_max; phase 2: anneal to ~lr_max/1e5.
        lr_phases = ((0, partial(annealing_cos, low_lr, self.lr_max)),
                     (self.pct_start,
                      partial(annealing_cos, self.lr_max, low_lr / 1e4)))
        # Momentum moves opposite to the lr: high -> low, then low -> high.
        mom_phases = ((0, partial(annealing_cos, *self.moms)),
                      (self.pct_start, partial(annealing_cos,
                                               *self.moms[::-1])))
        # Start the optimizer at the beginning-of-cycle values.
        fai_optimizer.lr, fai_optimizer.mom = low_lr, self.moms[0]
        super().__init__(fai_optimizer, total_step, lr_phases, mom_phases)
class CosineWarmupLR(lr_sched._LRScheduler):
    """Warm the learning rate up from `eta_min` to each base lr along a cosine curve."""
    def __init__(self, optimizer, T_max, eta_min=0, last_epoch=-1):
        self.T_max = T_max
        self.eta_min = eta_min
        super(CosineWarmupLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        # (1 - cos(pi * t / T_max)) / 2 rises smoothly from 0 to 1 over T_max steps.
        progress = (1 - math.cos(math.pi * self.last_epoch / self.T_max)) / 2
        return [self.eta_min + (base_lr - self.eta_min) * progress
                for base_lr in self.base_lrs]
class FakeOptim:
    """Stand-in optimizer exposing only the `lr`/`mom` attributes schedulers mutate."""
    def __init__(self):
        self.lr, self.mom = 0, 0
if __name__ == "__main__":
    # Quick visual sanity check: plot the one-cycle lr and momentum curves.
    import matplotlib.pyplot as plt
    opt = FakeOptim()  # 3e-3, wd=0.4, div_factor=10
    schd = OneCycle(opt, 100, 3e-3, (0.95, 0.85), 10.0, 0.1)
    lrs = []
    moms = []
    for i in range(100):
        schd.step(i)
        lrs.append(opt.lr)
        moms.append(opt.mom)
    plt.plot(lrs)
    # plt.plot(moms)
    plt.show()
    plt.plot(moms)
    plt.show()
| 4,169 | 35.26087 | 118 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/tools/train_utils/optimization/__init__.py | from functools import partial
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_sched
from .fastai_optim import OptimWrapper
from .learning_schedules_fastai import CosineWarmupLR, OneCycle
def build_optimizer(model, optim_cfg):
    """Build the optimizer selected by ``optim_cfg.OPTIMIZER``.

    Supported: 'adam', 'sgd', and fastai-style 'adam_onecycle'.  When
    ``LOSS_NET_SKIP`` is set and the model has a ``roi_head``, the loss-net
    parameters are frozen first so they are excluded from optimization.

    Raises:
        NotImplementedError: for an unknown optimizer name.
    """
    if optim_cfg.get('LOSS_NET_SKIP', False) and hasattr(model, 'roi_head'):
        for param in model.roi_head.loss_net.parameters():
            param.requires_grad = False
    if optim_cfg.OPTIMIZER == 'adam':
        optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
                               lr=optim_cfg.LR, weight_decay=optim_cfg.WEIGHT_DECAY)
    elif optim_cfg.OPTIMIZER == 'sgd':
        # FIX: filter out frozen parameters here too, consistent with the adam
        # branch (relevant when LOSS_NET_SKIP froze the loss net above).
        optimizer = optim.SGD(
            filter(lambda p: p.requires_grad, model.parameters()),
            lr=optim_cfg.LR, weight_decay=optim_cfg.WEIGHT_DECAY,
            momentum=optim_cfg.MOMENTUM
        )
    elif optim_cfg.OPTIMIZER == 'adam_onecycle':
        def children(m: nn.Module):
            return list(m.children())
        def num_children(m: nn.Module) -> int:
            return len(children(m))
        # Flatten the module tree into its leaf modules, then wrap them all in
        # a single layer group for the fastai OptimWrapper.
        flatten_model = lambda m: sum(map(flatten_model, m.children()), []) if num_children(m) else [m]
        get_layer_groups = lambda m: [nn.Sequential(*flatten_model(m))]
        optimizer_func = partial(optim.Adam, betas=(0.9, 0.99))
        # NOTE: the 3e-3 lr here is only an initial value; the OneCycle
        # scheduler assigns the actual lr before training starts.
        optimizer = OptimWrapper.create(
            optimizer_func, 3e-3, get_layer_groups(model), wd=optim_cfg.WEIGHT_DECAY, true_wd=True, bn_wd=True
        )
    else:
        raise NotImplementedError
    return optimizer
def build_scheduler(optimizer, total_iters_each_epoch, total_epochs, last_epoch, optim_cfg):
    """Build the LR scheduler (and optional warm-up scheduler) for `optimizer`.

    Args:
        optimizer: torch optimizer (or OptimWrapper for 'adam_onecycle').
        total_iters_each_epoch: number of iterations per epoch (int).
        total_epochs: number of training epochs.
        last_epoch: index to resume from (-1 for a fresh run).
        optim_cfg: OPTIMIZATION config node.

    Returns:
        (lr_scheduler, lr_warmup_scheduler) where the warm-up may be None.
    """
    decay_steps = [x * total_iters_each_epoch for x in optim_cfg.DECAY_STEP_LIST]
    def lr_lbmd(cur_epoch):
        # Piecewise-constant decay over accumulated iterations, clipped below
        # so the lr never drops under LR_CLIP.
        cur_decay = 1
        for decay_step in decay_steps:
            if cur_epoch >= decay_step:
                cur_decay = cur_decay * optim_cfg.LR_DECAY
        return max(cur_decay, optim_cfg.LR_CLIP / optim_cfg.LR)
    lr_warmup_scheduler = None
    total_steps = total_iters_each_epoch * total_epochs
    if optim_cfg.OPTIMIZER == 'adam_onecycle':
        lr_scheduler = OneCycle(
            optimizer, total_steps, optim_cfg.LR, list(optim_cfg.MOMS), optim_cfg.DIV_FACTOR, optim_cfg.PCT_START
        )
    elif optim_cfg.OPTIMIZER == 'adam':
        lr_scheduler = lr_sched.CosineAnnealingLR(optimizer, T_max=total_steps, last_epoch=-1)
        # lr_scheduler = lr_sched.StepLR(optimizer, step_size=(total_epochs // 3) * total_iters_each_epoch, gamma=0.2)
    else:
        lr_scheduler = lr_sched.LambdaLR(optimizer, lr_lbmd, last_epoch=last_epoch)
    if optim_cfg.LR_WARMUP:
        # BUGFIX: total_iters_each_epoch is an int; the original called
        # len() on it, raising TypeError whenever LR_WARMUP was enabled.
        lr_warmup_scheduler = CosineWarmupLR(
            optimizer, T_max=optim_cfg.WARMUP_EPOCH * total_iters_each_epoch,
            eta_min=optim_cfg.LR / optim_cfg.DIV_FACTOR
        )
    return lr_scheduler, lr_warmup_scheduler
| 2,868 | 38.847222 | 139 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/tools/visual_utils/open3d_vis_utils.py | """
Open3d visualization tool box
Written by Jihan YANG
All rights preserved from 2021 - present.
"""
import open3d
import torch
import matplotlib
import numpy as np
box_colormap = [
[1, 1, 1],
[0, 1, 0],
[0, 1, 1],
[1, 1, 0],
]
def get_coor_colors(obj_labels):
    """
    Args:
        obj_labels: 1 is ground, labels > 1 indicates different instance cluster
    Returns:
        rgb: [N, 3]. color for each point.
    """
    palette = list(matplotlib.colors.XKCD_COLORS.values())
    # One color per label id up to the largest label present.
    needed = obj_labels.max() + 1
    rgba_list = [matplotlib.colors.to_rgba_array(c) for c in palette[:needed]]
    label_rgba = np.array(rgba_list)[obj_labels]
    # Drop the alpha channel and the singleton axis, keep RGB only.
    return label_rgba.squeeze()[:, :3]
def draw_scenes(points, gt_boxes=None, ref_boxes=None, ref_labels=None, ref_scores=None, point_colors=None, draw_origin=True):
    """Open a blocking Open3D window showing the point cloud with optional boxes.

    Args:
        points: (N, >=3) tensor/array; only the first three columns (xyz) are used.
        gt_boxes: (M, 7) boxes [x, y, z, dx, dy, dz, heading], drawn in blue.
        ref_boxes: (K, 7) predicted boxes, drawn in green or per `ref_labels`.
        ref_labels: (K,) class indices into `box_colormap`.
        ref_scores: (K,) confidence scores (passed through to draw_box).
        point_colors: (N, 3) per-point RGB; defaults to all-white.
        draw_origin: whether to draw a coordinate frame at the origin.
    """
    # Accept torch tensors transparently.
    if isinstance(points, torch.Tensor):
        points = points.cpu().numpy()
    if isinstance(gt_boxes, torch.Tensor):
        gt_boxes = gt_boxes.cpu().numpy()
    if isinstance(ref_boxes, torch.Tensor):
        ref_boxes = ref_boxes.cpu().numpy()
    vis = open3d.visualization.Visualizer()
    vis.create_window()
    vis.get_render_option().point_size = 1.0
    vis.get_render_option().background_color = np.zeros(3)
    # draw origin
    if draw_origin:
        axis_pcd = open3d.geometry.TriangleMesh.create_coordinate_frame(size=1.0, origin=[0, 0, 0])
        vis.add_geometry(axis_pcd)
    pts = open3d.geometry.PointCloud()
    pts.points = open3d.utility.Vector3dVector(points[:, :3])
    vis.add_geometry(pts)
    if point_colors is None:
        pts.colors = open3d.utility.Vector3dVector(np.ones((points.shape[0], 3)))
    else:
        pts.colors = open3d.utility.Vector3dVector(point_colors)
    if gt_boxes is not None:
        vis = draw_box(vis, gt_boxes, (0, 0, 1))
    if ref_boxes is not None:
        vis = draw_box(vis, ref_boxes, (0, 1, 0), ref_labels, ref_scores)
    # Blocks until the user closes the window.
    vis.run()
    vis.destroy_window()
def translate_boxes_to_open3d_instance(gt_boxes):
    """Convert one 7-dof box into an Open3D (LineSet, OrientedBoundingBox) pair.

    Corner layout of the generated line set:
             4-------- 6
           /|         /|
          5 -------- 3 .
          | |        | |
          . 7 -------- 1
          |/         |/
          2 -------- 0

    Args:
        gt_boxes: (7,) [x, y, z, dx, dy, dz, heading].
    """
    center = gt_boxes[0:3]
    lwh = gt_boxes[3:6]
    # Tiny epsilon keeps the axis-angle vector non-zero for heading == 0.
    axis_angles = np.array([0, 0, gt_boxes[6] + 1e-10])
    rot = open3d.geometry.get_rotation_matrix_from_axis_angle(axis_angles)
    box3d = open3d.geometry.OrientedBoundingBox(center, rot, lwh)
    line_set = open3d.geometry.LineSet.create_from_oriented_bounding_box(box3d)
    # import ipdb; ipdb.set_trace(context=20)
    lines = np.asarray(line_set.lines)
    # Add two extra edges (1-4, 7-6), presumably to mark the heading face -- verify.
    lines = np.concatenate([lines, np.array([[1, 4], [7, 6]])], axis=0)
    line_set.lines = open3d.utility.Vector2iVector(lines)
    return line_set, box3d
def draw_box(vis, gt_boxes, color=(0, 1, 0), ref_labels=None, score=None):
    """Add a wireframe LineSet per box to the Open3D visualizer `vis`.

    Args:
        vis: open3d Visualizer to draw into.
        gt_boxes: (N, 7) boxes [x, y, z, dx, dy, dz, heading].
        color: uniform RGB used when `ref_labels` is None.
        ref_labels: (N,) class indices into `box_colormap`.
        score: per-box scores (currently unused; see commented code).
    """
    for i in range(gt_boxes.shape[0]):
        line_set, box3d = translate_boxes_to_open3d_instance(gt_boxes[i])
        if ref_labels is None:
            line_set.paint_uniform_color(color)
        else:
            line_set.paint_uniform_color(box_colormap[ref_labels[i]])
        vis.add_geometry(line_set)
        # if score is not None:
        #     corners = box3d.get_box_points()
        #     vis.add_3d_label(corners[5], '%.2f' % score[i])
    return vis
| 3,413 | 28.179487 | 126 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/tools/visual_utils/visualize_utils.py | import mayavi.mlab as mlab
import numpy as np
import torch
box_colormap = [
[1, 1, 1],
[0, 1, 0],
[0, 1, 1],
[1, 1, 0],
]
def check_numpy_to_torch(x):
    """Return (tensor, was_numpy): numpy input is converted to a float32 tensor."""
    was_numpy = isinstance(x, np.ndarray)
    tensor = torch.from_numpy(x).float() if was_numpy else x
    return tensor, was_numpy
def rotate_points_along_z(points, angle):
    """
    Args:
        points: (B, N, 3 + C)
        angle: (B), angle along z-axis, angle increases x ==> y
    Returns:
        rotated points, as numpy if the input was numpy
    """
    points, is_numpy = check_numpy_to_torch(points)
    angle, _ = check_numpy_to_torch(angle)
    cosa, sina = torch.cos(angle), torch.sin(angle)
    zeros = angle.new_zeros(points.shape[0])
    ones = angle.new_ones(points.shape[0])
    # Per-batch row-major 3x3 rotation matrix about +z.
    rot_matrix = torch.stack((
        cosa, sina, zeros,
        -sina, cosa, zeros,
        zeros, zeros, ones
    ), dim=1).view(-1, 3, 3).float()
    # Rotate xyz only; carry any extra feature columns through untouched.
    xyz_rot = torch.matmul(points[:, :, 0:3], rot_matrix)
    out = torch.cat((xyz_rot, points[:, :, 3:]), dim=-1)
    return out.numpy() if is_numpy else out
def boxes_to_corners_3d(boxes3d):
    """
        7 -------- 4
       /|         /|
      6 -------- 5 .
      | |        | |
      . 3 -------- 0
      |/         |/
      2 -------- 1
    Args:
        boxes3d:  (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
    Returns:
        corners3d: (N, 8, 3), numpy if the input was numpy
    """
    boxes3d, is_numpy = check_numpy_to_torch(boxes3d)
    # Signs of a unit cube's corners; half-extent scaling happens below.
    template = boxes3d.new_tensor((
        [1, 1, -1], [1, -1, -1], [-1, -1, -1], [-1, 1, -1],
        [1, 1, 1], [1, -1, 1], [-1, -1, 1], [-1, 1, 1],
    )) / 2
    # Scale corners by box size, rotate by heading, then shift to the center.
    corners = boxes3d[:, None, 3:6].repeat(1, 8, 1) * template[None, :, :]
    corners = rotate_points_along_z(corners.view(-1, 8, 3), boxes3d[:, 6]).view(-1, 8, 3)
    corners += boxes3d[:, None, 0:3]
    return corners.numpy() if is_numpy else corners
def visualize_pts(pts, fig=None, bgcolor=(0, 0, 0), fgcolor=(1.0, 1.0, 1.0),
                  show_intensity=False, size=(600, 600), draw_origin=True):
    """Render a point cloud into a mayavi figure.

    Args:
        pts: (N, >=3) points; column 3 is used as color scalar when requested.
        fig: existing mayavi figure to draw into; created when None.
        bgcolor: figure background color.
        fgcolor: figure foreground color.
        show_intensity: color points by their 4th column instead of uniformly.
        size: window size in pixels.
        draw_origin: draw an origin cube plus x/y/z axes (blue/green/red).

    Returns:
        The mayavi figure.
    """
    if not isinstance(pts, np.ndarray):
        pts = pts.cpu().numpy()
    if fig is None:
        fig = mlab.figure(figure=None, bgcolor=bgcolor, fgcolor=fgcolor, engine=None, size=size)
    if show_intensity:
        G = mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], pts[:, 3], mode='point',
                          colormap='gnuplot', scale_factor=1, figure=fig)
    else:
        G = mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], mode='point',
                          colormap='gnuplot', scale_factor=1, figure=fig)
    if draw_origin:
        # Origin marker plus color-coded axes: x blue, y green, z red.
        mlab.points3d(0, 0, 0, color=(1, 1, 1), mode='cube', scale_factor=0.2)
        mlab.plot3d([0, 3], [0, 0], [0, 0], color=(0, 0, 1), tube_radius=0.1)
        mlab.plot3d([0, 0], [0, 3], [0, 0], color=(0, 1, 0), tube_radius=0.1)
        mlab.plot3d([0, 0], [0, 0], [0, 3], color=(1, 0, 0), tube_radius=0.1)
    return fig
def draw_sphere_pts(pts, color=(0, 1, 0), fig=None, bgcolor=(0, 0, 0), scale_factor=0.2):
    """Draw points as spheres; `color` is either one RGB tuple or a per-point array.

    Args:
        pts: (N, >=3) points (numpy or torch).
        color: RGB tuple, or np.ndarray of per-point colors (0-255).
        fig: existing mayavi figure; created when None.
        bgcolor: figure background color.
        scale_factor: sphere radius.

    Returns:
        The mayavi figure.
    """
    if not isinstance(pts, np.ndarray):
        pts = pts.cpu().numpy()
    if fig is None:
        fig = mlab.figure(figure=None, bgcolor=bgcolor, fgcolor=None, engine=None, size=(600, 600))
    if isinstance(color, np.ndarray) and color.shape[0] == 1:
        # A single-row color array degenerates to one uniform color in [0, 1].
        color = color[0]
        color = (color[0] / 255.0, color[1] / 255.0, color[2] / 255.0)
    if isinstance(color, np.ndarray):
        # Per-point colors: build an RGBA lookup table indexed by point id.
        pts_color = np.zeros((pts.__len__(), 4), dtype=np.uint8)
        pts_color[:, 0:3] = color
        pts_color[:, 3] = 255
        G = mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], np.arange(0, pts_color.__len__()), mode='sphere',
                          scale_factor=scale_factor, figure=fig)
        G.glyph.color_mode = 'color_by_scalar'
        G.glyph.scale_mode = 'scale_by_vector'
        G.module_manager.scalar_lut_manager.lut.table = pts_color
    else:
        mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], mode='sphere', color=color,
                      colormap='gnuplot', scale_factor=scale_factor, figure=fig)
    # Origin cube and coordinate axes for spatial reference.
    mlab.points3d(0, 0, 0, color=(1, 1, 1), mode='cube', scale_factor=0.2)
    mlab.plot3d([0, 3], [0, 0], [0, 0], color=(0, 0, 1), line_width=3, tube_radius=None, figure=fig)
    mlab.plot3d([0, 0], [0, 3], [0, 0], color=(0, 1, 0), line_width=3, tube_radius=None, figure=fig)
    mlab.plot3d([0, 0], [0, 0], [0, 3], color=(1, 0, 0), line_width=3, tube_radius=None, figure=fig)
    return fig
def draw_grid(x1, y1, x2, y2, fig, tube_radius=None, color=(0.5, 0.5, 0.5)):
    """Draw the four edges of one axis-aligned grid cell at z=0."""
    edges = (
        ([x1, x1], [y1, y2]),
        ([x2, x2], [y1, y2]),
        ([x1, x2], [y1, y1]),
        ([x1, x2], [y2, y2]),
    )
    for xs, ys in edges:
        mlab.plot3d(xs, ys, [0, 0], color=color, tube_radius=tube_radius, line_width=1, figure=fig)
    return fig
def draw_multi_grid_range(fig, grid_size=20, bv_range=(-60, -60, 60, 60)):
    """Tile the bird's-eye-view range `bv_range` with `grid_size` cells."""
    x_min, y_min, x_max, y_max = bv_range
    for gx in range(x_min, x_max, grid_size):
        for gy in range(y_min, y_max, grid_size):
            fig = draw_grid(gx, gy, gx + grid_size, gy + grid_size, fig)
    return fig
def draw_scenes(points, gt_boxes=None, ref_boxes=None, ref_scores=None, ref_labels=None):
    """Visualize points with GT boxes (blue) and predictions (green or per class).

    Args:
        points: (N, >=3) points (numpy or torch).
        gt_boxes: (M, 7) ground-truth boxes [x, y, z, dx, dy, dz, heading].
        ref_boxes: (K, 7) predicted boxes.
        ref_scores: (K,) confidence scores rendered next to each prediction.
        ref_labels: (K,) class ids used to pick a color per class.

    Returns:
        The mayavi figure.
    """
    # Normalize every tensor input to numpy.
    if not isinstance(points, np.ndarray):
        points = points.cpu().numpy()
    if ref_boxes is not None and not isinstance(ref_boxes, np.ndarray):
        ref_boxes = ref_boxes.cpu().numpy()
    if gt_boxes is not None and not isinstance(gt_boxes, np.ndarray):
        gt_boxes = gt_boxes.cpu().numpy()
    if ref_scores is not None and not isinstance(ref_scores, np.ndarray):
        ref_scores = ref_scores.cpu().numpy()
    if ref_labels is not None and not isinstance(ref_labels, np.ndarray):
        ref_labels = ref_labels.cpu().numpy()
    fig = visualize_pts(points)
    fig = draw_multi_grid_range(fig, bv_range=(0, -40, 80, 40))
    if gt_boxes is not None:
        corners3d = boxes_to_corners_3d(gt_boxes)
        fig = draw_corners3d(corners3d, fig=fig, color=(0, 0, 1), max_num=100)
    if ref_boxes is not None and len(ref_boxes) > 0:
        ref_corners3d = boxes_to_corners_3d(ref_boxes)
        if ref_labels is None:
            fig = draw_corners3d(ref_corners3d, fig=fig, color=(0, 1, 0), cls=ref_scores, max_num=100)
        else:
            # One color per class id, cycling through box_colormap.
            for k in range(ref_labels.min(), ref_labels.max() + 1):
                cur_color = tuple(box_colormap[k % len(box_colormap)])
                mask = (ref_labels == k)
                fig = draw_corners3d(ref_corners3d[mask], fig=fig, color=cur_color, cls=ref_scores[mask], max_num=100)
    mlab.view(azimuth=-179, elevation=54.0, distance=104.0, roll=90.0)
    return fig
def draw_corners3d(corners3d, fig, color=(1, 1, 1), line_width=2, cls=None, tag='', max_num=500, tube_radius=None):
    """
    :param corners3d: (N, 8, 3)
    :param fig: mayavi figure to draw into
    :param color: RGB tuple for the wireframe
    :param line_width: line width of the box edges
    :param cls: per-box scores (ndarray, formatted '%.2f') or labels (formatted '%s')
    :param tag: unused
    :param max_num: draw at most this many boxes
    :return: the figure
    """
    import mayavi.mlab as mlab
    num = min(max_num, len(corners3d))
    for n in range(num):
        b = corners3d[n]  # (8, 3)
        if cls is not None:
            # Render the score/label as 3D text near corner 6.
            if isinstance(cls, np.ndarray):
                mlab.text3d(b[6, 0], b[6, 1], b[6, 2], '%.2f' % cls[n], scale=(0.3, 0.3, 0.3), color=color, figure=fig)
            else:
                mlab.text3d(b[6, 0], b[6, 1], b[6, 2], '%s' % cls[n], scale=(0.3, 0.3, 0.3), color=color, figure=fig)
        # Bottom face edge, top face edge, and vertical edge for each k.
        for k in range(0, 4):
            i, j = k, (k + 1) % 4
            mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
                        line_width=line_width, figure=fig)
            i, j = k + 4, (k + 1) % 4 + 4
            mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
                        line_width=line_width, figure=fig)
            i, j = k, k + 4
            mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
                        line_width=line_width, figure=fig)
        # Diagonal cross on one face (corners 0-5 and 1-4), presumably marking
        # the heading direction -- verify against the corner layout.
        i, j = 0, 5
        mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
                    line_width=line_width, figure=fig)
        i, j = 1, 4
        mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
                    line_width=line_width, figure=fig)
    return fig
| 8,540 | 38.541667 | 121 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/tools/backups/test_backup.py | import _init_path
import argparse
import datetime
import glob
import os
import re
import time
from pathlib import Path
import numpy as np
import torch
from tensorboardX import SummaryWriter
from eval_utils import eval_utils
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from pcdet.datasets import build_dataloader
from pcdet.models import build_network
from pcdet.utils import common_utils
import wandb
def parse_config():
    """Parse CLI arguments, load the YAML config into the global `cfg`, and seed numpy.

    Returns:
        (args, cfg): the parsed argument namespace and the populated config.
    """
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    parser.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training')
    parser.add_argument('--workers', type=int, default=4, help='number of workers for dataloader')
    parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
    parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                        help='set extra config keys if needed')
    parser.add_argument('--max_waiting_mins', type=int, default=30, help='max waiting minutes')
    parser.add_argument('--start_epoch', type=int, default=0, help='')
    parser.add_argument('--eval_tag', type=str, default='default', help='eval tag for this experiment')
    parser.add_argument('--eval_all', action='store_true', default=False, help='whether to evaluate all checkpoints')
    parser.add_argument('--ckpt_dir', type=str, default=None, help='specify a ckpt directory to be evaluated if needed')
    parser.add_argument('--save_to_file', action='store_true', default=False, help='')
    args = parser.parse_args()
    # Mutates the module-global cfg in place.
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    # Fixed numpy seed for reproducibility across runs.
    np.random.seed(1024)
    if args.set_cfgs is not None:
        # --set KEY VALUE ... overrides take precedence over the YAML file.
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
def eval_single_ckpt(model, test_loader, args, eval_output_dir, logger, epoch_id, dist_test=False):
    """Load the checkpoint at `args.ckpt` into `model` and evaluate it once."""
    # load checkpoint
    model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=dist_test)
    model.cuda()
    # start evaluation
    eval_utils.eval_one_epoch(
        cfg, model, test_loader, epoch_id, logger, dist_test=dist_test,
        result_dir=eval_output_dir, save_to_file=args.save_to_file
    )
def get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args):
    """Find the oldest checkpoint in `ckpt_dir` that has not been evaluated yet.

    Args:
        ckpt_dir: directory containing '*checkpoint_epoch_*.pth' files.
        ckpt_record_file: text file listing already-evaluated epoch ids, one per line.
        args: namespace providing `start_epoch`.

    Returns:
        (epoch_id, ckpt_path) of the next unevaluated checkpoint (sorted by
        mtime), or (-1, None) when every checkpoint is already evaluated.
    """
    ckpt_list = glob.glob(os.path.join(ckpt_dir, '*checkpoint_epoch_*.pth'))
    ckpt_list.sort(key=os.path.getmtime)
    # FIX: close the record file deterministically instead of leaking the handle.
    with open(ckpt_record_file, 'r') as f:
        evaluated_ckpt_list = [float(x.strip()) for x in f.readlines()]
    for cur_ckpt in ckpt_list:
        num_list = re.findall('checkpoint_epoch_(.*).pth', cur_ckpt)
        if num_list.__len__() == 0:
            continue
        epoch_id = num_list[-1]
        if "select" in epoch_id:  # active training epochs
            # e.g. '40_select_1' -> '40'
            epoch_id = epoch_id.split('_')[0]
        if 'optim' in epoch_id:
            continue
        if float(epoch_id) not in evaluated_ckpt_list and int(float(epoch_id)) >= args.start_epoch:
            return epoch_id, cur_ckpt
    return -1, None
def repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=False):
    """Poll `ckpt_dir` and evaluate every new checkpoint as it appears.

    Loops until no unevaluated checkpoint shows up for `args.max_waiting_mins`
    minutes (after at least one evaluation round).  Evaluated epoch ids are
    appended to a record file so restarts skip finished work.
    """
    # evaluated ckpt record
    ckpt_record_file = eval_output_dir / ('eval_list_%s.txt' % cfg.DATA_CONFIG.DATA_SPLIT['test'])
    # Touch the record file so later reads never fail.
    with open(ckpt_record_file, 'a'):
        pass
    # tensorboard log
    if cfg.LOCAL_RANK == 0:
        tb_log = SummaryWriter(log_dir=str(eval_output_dir / ('tensorboard_%s' % cfg.DATA_CONFIG.DATA_SPLIT['test'])))
        wandb.init(project=cfg.DATA_CONFIG._BASE_CONFIG_.split('/')[-1].split('.')[0], entity='user')
    total_time = 0
    first_eval = True
    while True:
        # check whether there is checkpoint which is not evaluated
        cur_epoch_id, cur_ckpt = get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args)
        if cur_epoch_id == -1 or int(float(cur_epoch_id)) < args.start_epoch:
            wait_second = 30
            if cfg.LOCAL_RANK == 0:
                print('Wait %s seconds for next check (progress: %.1f / %d minutes): %s \r'
                      % (wait_second, total_time * 1.0 / 60, args.max_waiting_mins, ckpt_dir), end='', flush=True)
            time.sleep(wait_second)
            total_time += 30
            # Stop once the waiting budget is exhausted, but always complete
            # at least one evaluation round first.
            if total_time > args.max_waiting_mins * 60 and (first_eval is False):
                break
            continue
        total_time = 0
        first_eval = False
        model.load_params_from_file(filename=cur_ckpt, logger=logger, to_cpu=dist_test)
        model.cuda()
        # start evaluation
        cur_result_dir = eval_output_dir / ('epoch_%s' % cur_epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']
        tb_dict = eval_utils.eval_one_epoch(
            cfg, model, test_loader, cur_epoch_id, logger, dist_test=dist_test,
            result_dir=cur_result_dir, save_to_file=args.save_to_file
        )
        if cfg.LOCAL_RANK == 0:
            for key, val in tb_dict.items():
                tb_log.add_scalar(key, val, cur_epoch_id)
                wandb.log({key: val}, step=int(cur_epoch_id))
        # record this epoch which has been evaluated
        with open(ckpt_record_file, 'a') as f:
            print('%s' % cur_epoch_id, file=f)
        logger.info('Epoch %s has been evaluated' % cur_epoch_id)
def main():
    """Entry point: parse config, build dataloader and model, then evaluate
    either a single checkpoint or all checkpoints in a directory."""
    args, cfg = parse_config()
    if args.launcher == 'none':
        dist_test = False
        total_gpus = 1
    else:
        # Distributed evaluation: initialize the chosen launcher backend.
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_test = True
    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    output_dir.mkdir(parents=True, exist_ok=True)
    eval_output_dir = output_dir / 'eval'
    if not args.eval_all:
        # Derive the epoch id from the checkpoint filename for the output path.
        num_list = re.findall(r'\d+', args.ckpt) if args.ckpt is not None else []
        epoch_id = num_list[-1] if num_list.__len__() > 0 else 'no_number'
        eval_output_dir = eval_output_dir / ('epoch_%s' % epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']
    else:
        eval_output_dir = eval_output_dir / 'eval_all_default'
    if args.eval_tag is not None:
        eval_output_dir = eval_output_dir / args.eval_tag
    eval_output_dir.mkdir(parents=True, exist_ok=True)
    log_file = eval_output_dir / ('log_eval_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    if dist_test:
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    ckpt_dir = args.ckpt_dir if args.ckpt_dir is not None else output_dir / 'ckpt'
    test_set, test_loader, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_test, workers=args.workers, logger=logger, training=False
    )
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=test_set)
    with torch.no_grad():
        if args.eval_all:
            repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=dist_test)
        else:
            # epoch_id was derived above (only set on this branch).
            eval_single_ckpt(model, test_loader, args, eval_output_dir, logger, epoch_id, dist_test=dist_test)
if __name__ == '__main__':
    # Script entry point.
    main()
| 8,549 | 40.707317 | 120 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/__init__.py | from collections import namedtuple
import numpy as np
import torch
from .detectors import build_detector
try:
    # kornia is only required by camera-based models (e.g. CaDDN); keep it optional.
    import kornia
except:
    pass
    # print('Warning: kornia is not installed. This package is only required by CaDDN')
def build_network(model_cfg, num_class, dataset):
    """Thin factory wrapper that delegates to the detector builder."""
    return build_detector(
        model_cfg=model_cfg, num_class=num_class, dataset=dataset
    )
def load_data_to_gpu(batch_dict):
    """Move every numpy array in `batch_dict` onto the GPU in-place.

    Non-array values and bookkeeping keys are left untouched; images go
    through kornia, image shapes become int tensors, everything else float.
    """
    passthrough = ['frame_id', 'metadata', 'calib', 'sample_id_list']
    for key, val in batch_dict.items():
        if not isinstance(val, np.ndarray):
            continue
        if key in passthrough:
            continue
        if key in ['images']:
            batch_dict[key] = kornia.image_to_tensor(val).float().cuda().contiguous()
        elif key in ['image_shape']:
            batch_dict[key] = torch.from_numpy(val).int().cuda()
        else:
            batch_dict[key] = torch.from_numpy(val).float().cuda()
def model_fn_decorator():
    """Build the train-step closure consumed by the training loop."""
    ModelReturn = namedtuple('ModelReturn', ['loss', 'tb_dict', 'disp_dict'])

    def model_func(model, batch_dict):
        # Move the batch to GPU in-place, run the model, reduce the loss.
        load_data_to_gpu(batch_dict)
        ret_dict, tb_dict, disp_dict = model(batch_dict)
        loss = ret_dict['loss'].mean()
        # DataParallel wraps the detector; reach through `.module` when needed.
        owner = model if hasattr(model, 'update_global_step') else model.module
        owner.update_global_step()
        return ModelReturn(loss, tb_dict, disp_dict)
    return model_func
| 1,448 | 26.339623 | 87 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/detectors/detector3d_template.py | import os
import torch
import torch.nn as nn
from ...ops.iou3d_nms import iou3d_nms_utils
from ...utils.spconv_utils import find_all_spconv_keys
from .. import backbones_2d, backbones_3d, dense_heads, roi_heads
from ..backbones_2d import map_to_bev
from ..backbones_3d import pfe, vfe
from ..model_utils import model_nms_utils
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
class Detector3DTemplate(nn.Module):
def __init__(self, model_cfg, num_class, dataset):
super().__init__()
self.model_cfg = model_cfg
self.num_class = num_class
self.dataset = dataset
self.class_names = dataset.class_names
self.register_buffer('global_step', torch.LongTensor(1).zero_())
# add loss net for LLAL
self.module_topology = [
'vfe', 'backbone_3d', 'map_to_bev_module', 'pfe',
'backbone_2d', 'dense_head', 'point_head', 'roi_head'
]
# if model_cfg.get('LOSS_NET', None):
# self.module_topology.append('loss_net')
@property
def mode(self):
return 'TRAIN' if self.training else 'TEST'
def update_global_step(self):
self.global_step += 1
def build_networks(self):
model_info_dict = {
'module_list': [],
'num_rawpoint_features': self.dataset.point_feature_encoder.num_point_features,
'num_point_features': self.dataset.point_feature_encoder.num_point_features,
'grid_size': self.dataset.grid_size,
'point_cloud_range': self.dataset.point_cloud_range,
'voxel_size': self.dataset.voxel_size,
'depth_downsample_factor': self.dataset.depth_downsample_factor
}
for module_name in self.module_topology:
module, model_info_dict = getattr(self, 'build_%s' % module_name)(
model_info_dict=model_info_dict
)
self.add_module(module_name, module)
return model_info_dict['module_list']
# def build_loss_net(self, model_info_dict):
# loss_net_module = LossNet(model_cfg=self.model_cfg.LOSS_NET)
# model_info_dict['module_list'].append(loss_net_module)
# return loss_net_module, model_info_dict
def build_vfe(self, model_info_dict):
if self.model_cfg.get('VFE', None) is None:
return None, model_info_dict
vfe_module = vfe.__all__[self.model_cfg.VFE.NAME](
model_cfg=self.model_cfg.VFE,
num_point_features=model_info_dict['num_rawpoint_features'],
point_cloud_range=model_info_dict['point_cloud_range'],
voxel_size=model_info_dict['voxel_size'],
grid_size=model_info_dict['grid_size'],
depth_downsample_factor=model_info_dict['depth_downsample_factor']
)
model_info_dict['num_point_features'] = vfe_module.get_output_feature_dim()
model_info_dict['module_list'].append(vfe_module)
return vfe_module, model_info_dict
def build_backbone_3d(self, model_info_dict):
if self.model_cfg.get('BACKBONE_3D', None) is None:
return None, model_info_dict
backbone_3d_module = backbones_3d.__all__[self.model_cfg.BACKBONE_3D.NAME](
model_cfg=self.model_cfg.BACKBONE_3D,
input_channels=model_info_dict['num_point_features'],
grid_size=model_info_dict['grid_size'],
voxel_size=model_info_dict['voxel_size'],
point_cloud_range=model_info_dict['point_cloud_range']
)
model_info_dict['module_list'].append(backbone_3d_module)
model_info_dict['num_point_features'] = backbone_3d_module.num_point_features
model_info_dict['backbone_channels'] = backbone_3d_module.backbone_channels \
if hasattr(backbone_3d_module, 'backbone_channels') else None
return backbone_3d_module, model_info_dict
def build_map_to_bev_module(self, model_info_dict):
if self.model_cfg.get('MAP_TO_BEV', None) is None:
return None, model_info_dict
map_to_bev_module = map_to_bev.__all__[self.model_cfg.MAP_TO_BEV.NAME](
model_cfg=self.model_cfg.MAP_TO_BEV,
grid_size=model_info_dict['grid_size']
)
model_info_dict['module_list'].append(map_to_bev_module)
model_info_dict['num_bev_features'] = map_to_bev_module.num_bev_features
return map_to_bev_module, model_info_dict
def build_backbone_2d(self, model_info_dict):
if self.model_cfg.get('BACKBONE_2D', None) is None:
return None, model_info_dict
backbone_2d_module = backbones_2d.__all__[self.model_cfg.BACKBONE_2D.NAME](
model_cfg=self.model_cfg.BACKBONE_2D,
input_channels=model_info_dict['num_bev_features']
)
model_info_dict['module_list'].append(backbone_2d_module)
model_info_dict['num_bev_features'] = backbone_2d_module.num_bev_features
return backbone_2d_module, model_info_dict
def build_pfe(self, model_info_dict):
if self.model_cfg.get('PFE', None) is None:
return None, model_info_dict
pfe_module = pfe.__all__[self.model_cfg.PFE.NAME](
model_cfg=self.model_cfg.PFE,
voxel_size=model_info_dict['voxel_size'],
point_cloud_range=model_info_dict['point_cloud_range'],
num_bev_features=model_info_dict['num_bev_features'],
num_rawpoint_features=model_info_dict['num_rawpoint_features']
)
model_info_dict['module_list'].append(pfe_module)
model_info_dict['num_point_features'] = pfe_module.num_point_features
model_info_dict['num_point_features_before_fusion'] = pfe_module.num_point_features_before_fusion
return pfe_module, model_info_dict
def build_dense_head(self, model_info_dict):
if self.model_cfg.get('DENSE_HEAD', None) is None:
return None, model_info_dict
dense_head_module = dense_heads.__all__[self.model_cfg.DENSE_HEAD.NAME](
model_cfg=self.model_cfg.DENSE_HEAD,
input_channels=model_info_dict['num_bev_features'],
num_class=self.num_class if not self.model_cfg.DENSE_HEAD.CLASS_AGNOSTIC else 1,
class_names=self.class_names,
grid_size=model_info_dict['grid_size'],
point_cloud_range=model_info_dict['point_cloud_range'],
predict_boxes_when_training=self.model_cfg.get('ROI_HEAD', False),
voxel_size=model_info_dict.get('voxel_size', False)
)
model_info_dict['module_list'].append(dense_head_module)
return dense_head_module, model_info_dict
def build_point_head(self, model_info_dict):
if self.model_cfg.get('POINT_HEAD', None) is None:
return None, model_info_dict
if self.model_cfg.POINT_HEAD.get('USE_POINT_FEATURES_BEFORE_FUSION', False):
num_point_features = model_info_dict['num_point_features_before_fusion']
else:
num_point_features = model_info_dict['num_point_features']
point_head_module = dense_heads.__all__[self.model_cfg.POINT_HEAD.NAME](
model_cfg=self.model_cfg.POINT_HEAD,
input_channels=num_point_features,
num_class=self.num_class if not self.model_cfg.POINT_HEAD.CLASS_AGNOSTIC else 1,
predict_boxes_when_training=self.model_cfg.get('ROI_HEAD', False)
)
model_info_dict['module_list'].append(point_head_module)
return point_head_module, model_info_dict
def build_roi_head(self, model_info_dict):
if self.model_cfg.get('ROI_HEAD', None) is None:
return None, model_info_dict
point_head_module = roi_heads.__all__[self.model_cfg.ROI_HEAD.NAME](
model_cfg=self.model_cfg.ROI_HEAD,
input_channels=model_info_dict['num_point_features'],
backbone_channels=model_info_dict['backbone_channels'],
point_cloud_range=model_info_dict['point_cloud_range'],
voxel_size=model_info_dict['voxel_size'],
num_class=self.num_class if not self.model_cfg.ROI_HEAD.CLASS_AGNOSTIC else 1,
)
model_info_dict['module_list'].append(point_head_module)
return point_head_module, model_info_dict
    def forward(self, **kwargs):
        """Subclasses must implement the detector-specific forward pass."""
        raise NotImplementedError
    def post_processing(self, batch_dict):
        """Per-sample NMS post-processing, extended with active-learning statistics.

        Besides the usual (scores, labels, boxes) outputs, each record also
        carries per-class GT box/point statistics, optional loss predictions,
        RCNN ensemble means, shared-feature embeddings, and per-box point
        density — all consumed by the active-learning query strategies.

        Args:
            batch_dict:
                batch_size:
                batch_cls_preds: (B, num_boxes, num_classes | 1) or (N1+N2+..., num_classes | 1)
                                or [(B, num_boxes, num_class1), (B, num_boxes, num_class2) ...]
                multihead_label_mapping: [(num_class1), (num_class2), ...]
                batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C)
                cls_preds_normalized: indicate whether batch_cls_preds is normalized
                batch_index: optional (N1+N2+...)
                has_class_labels: True/False
                roi_labels: (B, num_rois)  1 .. num_classes
                batch_pred_labels: (B, num_boxes, 1)
        Returns:
            (pred_dicts, recall_dict): one record per sample plus accumulated recall stats.
        """
        post_process_cfg = self.model_cfg.POST_PROCESSING
        batch_size = batch_dict['batch_size']
        recall_dict = {}
        pred_dicts = []
        bbox_classes = [i['class_name'] for i in self.model_cfg.DENSE_HEAD.ANCHOR_GENERATOR_CONFIG]
        # mean over MC-dropout/ensemble axis when rcnn outputs carry an extra leading dim
        rcnn_cls = torch.mean(torch.sigmoid(batch_dict['rcnn_cls']), 0).view(batch_size, -1, 1) if 'rcnn_cls' in batch_dict.keys() and len(batch_dict['rcnn_cls'].shape) > 2 else None
        rcnn_reg = torch.mean(batch_dict['rcnn_reg'], 0).view(batch_size, -1, 7) if 'rcnn_reg' in batch_dict.keys() and len(batch_dict['rcnn_cls'].shape) > 2 else None
        # assume self.model_cfg.POINT_SOURCE == 'raw_points':
        src_points = batch_dict['points'][:, 1:4]
        batch_indices = batch_dict['points'][:, 0].long()
        for index in range(batch_size):
            if batch_dict.get('batch_index', None) is not None:
                assert batch_dict['batch_box_preds'].shape.__len__() == 2
                batch_mask = (batch_dict['batch_index'] == index)
            else:
                assert batch_dict['batch_box_preds'].shape.__len__() == 3 or batch_dict['batch_box_preds'].shape.__len__() == 4
                batch_mask = index
            box_preds = batch_dict['batch_box_preds'][:, batch_mask, :, :] if batch_dict['batch_box_preds'].shape.__len__() == 4 else batch_dict['batch_box_preds'][batch_mask]
            src_box_preds = box_preds
            # per-sample rcnn cls/reg (ensemble means), when available
            batch_rcnn_cls = rcnn_cls[batch_mask, :] if rcnn_cls is not None else None
            batch_rcnn_reg = rcnn_reg[batch_mask, :] if rcnn_reg is not None else None
            # full (pre-NMS) classification logits for this sample
            full_cls_scores = batch_dict['full_cls_scores'][batch_mask]
            bs_mask = (batch_indices == index)
            # get point clouds for each sample (to validate active label)
            sampled_points = src_points[bs_mask]
            sampled_points = sampled_points.reshape(1, sampled_points.shape[0], 3)
            # last gt_boxes column is the class label; 0 means padding
            gt_box_label = batch_dict['gt_boxes'][batch_mask][:, -1]
            num_bbox = {}
            mean_points = {}
            median_points = {}
            variance_points = {}
            gt_box = batch_dict['gt_boxes'][batch_mask][:, :-1]
            # per-class GT statistics: box count and point-count mean/median/variance
            for cls_idx in range(len(bbox_classes)):
                # gt_box_label = 0 -> empty
                cls_mask = (gt_box_label == cls_idx + 1)
                num_cls_bbox = cls_mask.sum()
                if num_cls_bbox > 0:
                    num_bbox[bbox_classes[cls_idx]] = num_cls_bbox
                    gt_cls_box = gt_box[cls_mask].reshape(1, -1, 7)
                    box_idxs_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu(sampled_points, gt_cls_box).long().squeeze(dim=0)
                    unique_pts_counts = torch.tensor([(box_idxs_of_pts==i).sum() for i in torch.unique(box_idxs_of_pts)])[1:].float() # skip the -1
                    # nan guards: stats of an empty count tensor are nan
                    mean_points[bbox_classes[cls_idx]] = 0 if torch.isnan(torch.mean(unique_pts_counts)) else torch.mean(unique_pts_counts)
                    median_points[bbox_classes[cls_idx]] = 0 if torch.isnan(torch.median(unique_pts_counts)) else torch.median(unique_pts_counts)
                    variance_points[bbox_classes[cls_idx]] = 0 if torch.isnan(torch.var(unique_pts_counts, unbiased=False)) else torch.var(unique_pts_counts, unbiased=False)
                else:
                    num_bbox[bbox_classes[cls_idx]] = 0
                    mean_points[bbox_classes[cls_idx]] = 0
                    median_points[bbox_classes[cls_idx]] = 0
                    variance_points[bbox_classes[cls_idx]] = 0
            # get loss predictions for llal
            loss_predictions = batch_dict['loss_predictions'] if 'loss_predictions' in batch_dict.keys() else None
            # get embeddings for Coreset
            shared_features = batch_dict['shared_features'] if 'shared_features' in batch_dict.keys() else None
            if not isinstance(batch_dict['batch_cls_preds'], list):
                cls_preds = batch_dict['batch_cls_preds'][batch_mask]
                src_cls_preds = cls_preds
                # assert cls_preds.shape[1] in [1, self.num_class]
                if not batch_dict['cls_preds_normalized']:
                    cls_preds = torch.sigmoid(cls_preds)
            else:
                cls_preds = [x[batch_mask] for x in batch_dict['batch_cls_preds']]
                src_cls_preds = cls_preds
                if not batch_dict['cls_preds_normalized']:
                    cls_preds = [torch.sigmoid(x) for x in cls_preds]
            if post_process_cfg.NMS_CONFIG.MULTI_CLASSES_NMS:
                # NOTE(review): this branch never defines `selected`/`final_logits`,
                # which are read below — it looks unreachable/unfinished for the
                # configs used here; confirm before enabling MULTI_CLASSES_NMS.
                if not isinstance(cls_preds, list):
                    cls_preds = [cls_preds]
                    multihead_label_mapping = [torch.arange(1, self.num_class, device=cls_preds[0].device)]
                else:
                    multihead_label_mapping = batch_dict['multihead_label_mapping']
                cur_start_idx = 0
                pred_scores, pred_labels, pred_boxes = [], [], []
                for cur_cls_preds, cur_label_mapping in zip(cls_preds, multihead_label_mapping):
                    assert cur_cls_preds.shape[1] == len(cur_label_mapping)
                    cur_box_preds = box_preds[cur_start_idx: cur_start_idx + cur_cls_preds.shape[0]]
                    cur_pred_scores, cur_pred_labels, cur_pred_boxes = model_nms_utils.multi_classes_nms(
                        cls_scores=cur_cls_preds, box_preds=cur_box_preds,
                        nms_config=post_process_cfg.NMS_CONFIG,
                        score_thresh=post_process_cfg.SCORE_THRESH
                    )
                    cur_pred_labels = cur_label_mapping[cur_pred_labels]
                    pred_scores.append(cur_pred_scores)
                    pred_labels.append(cur_pred_labels)
                    pred_boxes.append(cur_pred_boxes)
                    cur_start_idx += cur_cls_preds.shape[0]
                final_scores = torch.cat(pred_scores, dim=0)
                final_labels = torch.cat(pred_labels, dim=0)
                final_boxes = torch.cat(pred_boxes, dim=0)
                # TODO: add how to save final logits
            else:
                # argmax confidences feed class-agnostic NMS; label_preds may be
                # replaced by roi labels just below
                if not isinstance(cls_preds, list):
                    cls_confs, label_preds = torch.max(cls_preds, dim=-1)
                else:
                    cls_confs = [torch.max(i, dim=-1)[0] for i in cls_preds]
                    label_preds = [torch.max(i, dim=-1)[1] for i in cls_preds]
                if batch_dict.get('has_class_labels', False):
                    label_key = 'roi_labels' if 'roi_labels' in batch_dict else 'batch_pred_labels'
                    label_preds = batch_dict[label_key][index]
                else:
                    label_preds = label_preds + 1
                if not isinstance(cls_preds, list):
                    selected, selected_scores = model_nms_utils.class_agnostic_nms(
                        box_scores=cls_confs, box_preds=box_preds,
                        nms_config=post_process_cfg.NMS_CONFIG,
                        score_thresh=post_process_cfg.SCORE_THRESH
                    )
                else:
                    output = [model_nms_utils.class_agnostic_nms(
                        box_scores=cls_confs[i], box_preds=box_preds[i],
                        nms_config=post_process_cfg.NMS_CONFIG,
                        score_thresh=post_process_cfg.SCORE_THRESH
                    ) for i in range(len(cls_confs))]
                    selected = [i[0] for i in output]
                    selected_scores = [i[1] for i in output]
                if post_process_cfg.OUTPUT_RAW_SCORE:
                    max_cls_preds, _ = torch.max(src_cls_preds, dim=-1)
                    selected_scores = max_cls_preds[selected]
                final_scores = selected_scores
                if not isinstance(selected, list):
                    final_logits = full_cls_scores[selected]
                    final_labels = label_preds[selected]
                    final_boxes = box_preds[selected]
                else:
                    final_logits = [full_cls_scores[value] for value in selected]
                    final_labels = [label_preds[value] for value in selected]
                    final_boxes = [box_preds[i][value] for i, value in enumerate(selected)]
            if not isinstance(selected, list):
                recall_dict = self.generate_recall_record(
                    box_preds=final_boxes if 'rois' not in batch_dict else src_box_preds,
                    recall_dict=recall_dict, batch_index=index, data_dict=batch_dict,
                    thresh_list=post_process_cfg.RECALL_THRESH_LIST
                )
            else:
                recall_dict = self.generate_recall_record(
                    box_preds=final_boxes[0] if 'rois' not in batch_dict else src_box_preds[0],
                    recall_dict=recall_dict, batch_index=index, data_dict=batch_dict,
                    thresh_list=post_process_cfg.RECALL_THRESH_LIST
                )
            # per-predicted-box point density = points inside box / box volume
            pred_boxes_reshaped = final_boxes.reshape(1, -1, 7)
            box_volumes = final_boxes[:, 3] * final_boxes[:, 4] * final_boxes[:, 5]
            pred_box_idxs_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu(sampled_points,
                                                                             pred_boxes_reshaped).long().squeeze(dim=0)
            # one extra slot absorbs the background index -1 (dropped below)
            pts_counts = torch.zeros(final_boxes.shape[0] + 1).cuda()
            unique_box_id, pred_box_unique_pts_counts = torch.unique(pred_box_idxs_of_pts, return_counts=True) # remove background: -1
            for i, box_id in enumerate(unique_box_id):
                pts_counts[box_id] = pred_box_unique_pts_counts[i]
            pred_box_unique_density = pts_counts[:-1].cuda() / box_volumes
            record_dict = {
                'confidence': cls_preds,
                'rpn_preds': batch_dict['rpn_preds'],
                'num_bbox': num_bbox,
                'mean_points': mean_points,
                'median_points': median_points,
                'variance_points': variance_points,
                'loss_predictions': loss_predictions,
                'batch_rcnn_cls': batch_rcnn_cls,
                'batch_rcnn_reg': batch_rcnn_reg,
                'embeddings': shared_features,
                'pred_logits': final_logits,
                'pred_boxes': final_boxes,
                'pred_scores': final_scores,
                'pred_labels': final_labels,
                'pred_box_unique_density': pred_box_unique_density
            }
            pred_dicts.append(record_dict)
        return pred_dicts, recall_dict
@staticmethod
def generate_recall_record(box_preds, recall_dict, batch_index, data_dict=None, thresh_list=None):
if 'gt_boxes' not in data_dict:
return recall_dict
rois = data_dict['rois'][batch_index] if 'rois' in data_dict else None
gt_boxes = data_dict['gt_boxes'][batch_index]
if recall_dict.__len__() == 0:
recall_dict = {'gt': 0}
for cur_thresh in thresh_list:
recall_dict['roi_%s' % (str(cur_thresh))] = 0
recall_dict['rcnn_%s' % (str(cur_thresh))] = 0
cur_gt = gt_boxes
k = cur_gt.__len__() - 1
while k > 0 and cur_gt[k].sum() == 0:
k -= 1
cur_gt = cur_gt[:k + 1]
if cur_gt.shape[0] > 0:
if box_preds.shape[0] > 0:
iou3d_rcnn = iou3d_nms_utils.boxes_iou3d_gpu(box_preds[:, 0:7], cur_gt[:, 0:7])
else:
iou3d_rcnn = torch.zeros((0, cur_gt.shape[0]))
if rois is not None:
iou3d_roi = iou3d_nms_utils.boxes_iou3d_gpu(rois[:, 0:7], cur_gt[:, 0:7])
for cur_thresh in thresh_list:
if iou3d_rcnn.shape[0] == 0:
recall_dict['rcnn_%s' % str(cur_thresh)] += 0
else:
rcnn_recalled = (iou3d_rcnn.max(dim=0)[0] > cur_thresh).sum().item()
recall_dict['rcnn_%s' % str(cur_thresh)] += rcnn_recalled
if rois is not None:
roi_recalled = (iou3d_roi.max(dim=0)[0] > cur_thresh).sum().item()
recall_dict['roi_%s' % str(cur_thresh)] += roi_recalled
recall_dict['gt'] += cur_gt.shape[0]
else:
gt_iou = box_preds.new_zeros(box_preds.shape[0])
return recall_dict
    def _load_state_dict(self, model_state_disk, *, strict=True):
        """Load a checkpoint state dict into this model.

        Adapts spconv weight layouts between spconv versions when the stored
        shape disagrees with the module's, and silently skips any entry whose
        shape still does not match.

        Args:
            model_state_disk: state dict loaded from a checkpoint.
            strict: if True, load only the matching subset directly; otherwise
                merge it into the current state dict first (keeps local values
                for keys missing from the checkpoint).

        Returns:
            (state_dict, update_model_state): the local state dict and the
            subset of checkpoint entries actually applied.
        """
        state_dict = self.state_dict()  # local cache of state_dict
        spconv_keys = find_all_spconv_keys(self)

        update_model_state = {}
        for key, val in model_state_disk.items():
            if key in spconv_keys and key in state_dict and state_dict[key].shape != val.shape:
                # with different spconv versions, we need to adapt weight shapes for spconv blocks
                # adapt spconv weights from version 1.x to version 2.x if you used weights from spconv 1.x

                val_native = val.transpose(-1, -2)  # (k1, k2, k3, c_in, c_out) to (k1, k2, k3, c_out, c_in)
                if val_native.shape == state_dict[key].shape:
                    val = val_native.contiguous()
                else:
                    assert val.shape.__len__() == 5, 'currently only spconv 3D is supported'
                    val_implicit = val.permute(4, 0, 1, 2, 3)  # (k1, k2, k3, c_in, c_out) to (c_out, k1, k2, k3, c_in)
                    if val_implicit.shape == state_dict[key].shape:
                        val = val_implicit.contiguous()

            if key in state_dict and state_dict[key].shape == val.shape:
                update_model_state[key] = val
                # logger.info('Update weight %s: %s' % (key, str(val.shape)))

        if strict:
            self.load_state_dict(update_model_state)
        else:
            state_dict.update(update_model_state)
            self.load_state_dict(state_dict)
        return state_dict, update_model_state
def load_params_from_file(self, filename, logger, to_cpu=False):
if not os.path.isfile(filename):
raise FileNotFoundError
logger.info('==> Loading parameters from checkpoint %s to %s' % (filename, 'CPU' if to_cpu else 'GPU'))
loc_type = torch.device('cpu') if to_cpu else None
checkpoint = torch.load(filename, map_location=loc_type)
model_state_disk = checkpoint['model_state']
version = checkpoint.get("version", None)
if version is not None:
logger.info('==> Checkpoint trained from version: %s' % version)
state_dict, update_model_state = self._load_state_dict(model_state_disk, strict=False)
for key in state_dict:
if key not in update_model_state:
logger.info('Not updated weight %s: %s' % (key, str(state_dict[key].shape)))
logger.info('==> Done (loaded %d/%d)' % (len(update_model_state), len(state_dict)))
def load_params_with_optimizer(self, filename, to_cpu=False, optimizer=None, logger=None):
if not os.path.isfile(filename):
raise FileNotFoundError
logger.info('==> Loading parameters from checkpoint %s to %s' % (filename, 'CPU' if to_cpu else 'GPU'))
loc_type = torch.device('cpu') if to_cpu else None
checkpoint = torch.load(filename, map_location=loc_type)
epoch = checkpoint.get('epoch', -1)
it = checkpoint.get('it', 0.0)
self._load_state_dict(checkpoint['model_state'], strict=True)
if optimizer is not None:
if 'optimizer_state' in checkpoint and checkpoint['optimizer_state'] is not None:
logger.info('==> Loading optimizer parameters from checkpoint %s to %s'
% (filename, 'CPU' if to_cpu else 'GPU'))
optimizer.load_state_dict(checkpoint['optimizer_state'])
else:
assert filename[-4] == '.', filename
src_file, ext = filename[:-4], filename[-3:]
optimizer_filename = '%s_optim.%s' % (src_file, ext)
if os.path.exists(optimizer_filename):
optimizer_ckpt = torch.load(optimizer_filename, map_location=loc_type)
optimizer.load_state_dict(optimizer_ckpt['optimizer_state'])
if 'version' in checkpoint:
print('==> Checkpoint trained from version: %s' % checkpoint['version'])
logger.info('==> Done')
return it, epoch
| 26,062 | 47.534451 | 182 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/detectors/second_net_iou.py | import torch
from .detector3d_template import Detector3DTemplate
from ..model_utils.model_nms_utils import class_agnostic_nms
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
class SECONDNetIoU(Detector3DTemplate):
    def __init__(self, model_cfg, num_class, dataset):
        """SECOND detector with an IoU-scoring second stage; builds all stages."""
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()
def forward(self, batch_dict):
batch_dict['dataset_cfg'] = self.dataset.dataset_cfg
for cur_module in self.module_list:
batch_dict = cur_module(batch_dict)
if self.training:
loss, tb_dict, disp_dict = self.get_training_loss()
ret_dict = {
'loss': loss
}
return ret_dict, tb_dict, disp_dict
else:
pred_dicts, recall_dicts = self.post_processing(batch_dict)
return pred_dicts, recall_dicts
def get_training_loss(self):
disp_dict = {}
loss_rpn, tb_dict = self.dense_head.get_loss()
loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)
loss = loss_rpn + loss_rcnn
return loss, tb_dict, disp_dict
@staticmethod
def cal_scores_by_npoints(cls_scores, iou_scores, num_points_in_gt, cls_thresh=10, iou_thresh=100):
"""
Args:
cls_scores: (N)
iou_scores: (N)
num_points_in_gt: (N, 7+c)
cls_thresh: scalar
iou_thresh: scalar
"""
assert iou_thresh >= cls_thresh
alpha = torch.zeros(cls_scores.shape, dtype=torch.float32).cuda()
alpha[num_points_in_gt <= cls_thresh] = 0
alpha[num_points_in_gt >= iou_thresh] = 1
mask = ((num_points_in_gt > cls_thresh) & (num_points_in_gt < iou_thresh))
alpha[mask] = (num_points_in_gt[mask] - 10) / (iou_thresh - cls_thresh)
scores = (1 - alpha) * cls_scores + alpha * iou_scores
return scores
def set_nms_score_by_class(self, iou_preds, cls_preds, label_preds, score_by_class):
n_classes = torch.unique(label_preds).shape[0]
nms_scores = torch.zeros(iou_preds.shape, dtype=torch.float32).cuda()
for i in range(n_classes):
mask = label_preds == (i + 1)
class_name = self.class_names[i]
score_type = score_by_class[class_name]
if score_type == 'iou':
nms_scores[mask] = iou_preds[mask]
elif score_type == 'cls':
nms_scores[mask] = cls_preds[mask]
else:
raise NotImplementedError
return nms_scores
    def post_processing(self, batch_dict):
        """Class-agnostic NMS post-processing for the IoU-head detector.

        The NMS score can be the IoU prediction, the classification score, a
        weighted mix, a point-count-dependent blend, or a per-class choice,
        selected by POST_PROCESSING.NMS_CONFIG.

        Args:
            batch_dict:
                batch_size:
                batch_cls_preds: (B, num_boxes, num_classes | 1) or (N1+N2+..., num_classes | 1)
                batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C)
                cls_preds_normalized: indicate whether batch_cls_preds is normalized
                batch_index: optional (N1+N2+...)
                roi_labels: (B, num_rois)  1 .. num_classes
        Returns:
            (pred_dicts, recall_dict): one record per sample plus recall stats.
        """
        post_process_cfg = self.model_cfg.POST_PROCESSING
        batch_size = batch_dict['batch_size']
        recall_dict = {}
        pred_dicts = []
        for index in range(batch_size):
            if batch_dict.get('batch_index', None) is not None:
                assert batch_dict['batch_cls_preds'].shape.__len__() == 2
                batch_mask = (batch_dict['batch_index'] == index)
            else:
                assert batch_dict['batch_cls_preds'].shape.__len__() == 3
                batch_mask = index

            box_preds = batch_dict['batch_box_preds'][batch_mask]
            iou_preds = batch_dict['batch_cls_preds'][batch_mask]
            cls_preds = batch_dict['roi_scores'][batch_mask]

            src_iou_preds = iou_preds
            src_box_preds = box_preds
            src_cls_preds = cls_preds
            assert iou_preds.shape[1] in [1, self.num_class]

            if not batch_dict['cls_preds_normalized']:
                iou_preds = torch.sigmoid(iou_preds)
                cls_preds = torch.sigmoid(cls_preds)

            if post_process_cfg.NMS_CONFIG.MULTI_CLASSES_NMS:
                raise NotImplementedError
            else:
                iou_preds, label_preds = torch.max(iou_preds, dim=-1)
                # prefer RoI labels when the second stage provides them
                label_preds = batch_dict['roi_labels'][index] if batch_dict.get('has_class_labels', False) else label_preds + 1

                # choose the score NMS ranks boxes by, per NMS_CONFIG
                if post_process_cfg.NMS_CONFIG.get('SCORE_BY_CLASS', None) and \
                        post_process_cfg.NMS_CONFIG.SCORE_TYPE == 'score_by_class':
                    nms_scores = self.set_nms_score_by_class(
                        iou_preds, cls_preds, label_preds, post_process_cfg.NMS_CONFIG.SCORE_BY_CLASS
                    )
                elif post_process_cfg.NMS_CONFIG.get('SCORE_TYPE', None) == 'iou' or \
                        post_process_cfg.NMS_CONFIG.get('SCORE_TYPE', None) is None:
                    nms_scores = iou_preds
                elif post_process_cfg.NMS_CONFIG.SCORE_TYPE == 'cls':
                    nms_scores = cls_preds
                elif post_process_cfg.NMS_CONFIG.SCORE_TYPE == 'weighted_iou_cls':
                    nms_scores = post_process_cfg.NMS_CONFIG.SCORE_WEIGHTS.iou * iou_preds + \
                                 post_process_cfg.NMS_CONFIG.SCORE_WEIGHTS.cls * cls_preds
                elif post_process_cfg.NMS_CONFIG.SCORE_TYPE == 'num_pts_iou_cls':
                    # blend cls/iou by how many raw points fall inside each box
                    point_mask = (batch_dict['points'][:, 0] == batch_mask)
                    batch_points = batch_dict['points'][point_mask][:, 1:4]

                    num_pts_in_gt = roiaware_pool3d_utils.points_in_boxes_cpu(
                        batch_points.cpu(), box_preds[:, 0:7].cpu()
                    ).sum(dim=1).float().cuda()

                    score_thresh_cfg = post_process_cfg.NMS_CONFIG.SCORE_THRESH
                    nms_scores = self.cal_scores_by_npoints(
                        cls_preds, iou_preds, num_pts_in_gt,
                        score_thresh_cfg.cls, score_thresh_cfg.iou
                    )
                else:
                    raise NotImplementedError

                selected, selected_scores = class_agnostic_nms(
                    box_scores=nms_scores, box_preds=box_preds,
                    nms_config=post_process_cfg.NMS_CONFIG,
                    score_thresh=post_process_cfg.SCORE_THRESH
                )

                if post_process_cfg.OUTPUT_RAW_SCORE:
                    raise NotImplementedError

                final_scores = selected_scores
                final_labels = label_preds[selected]
                final_boxes = box_preds[selected]

            recall_dict = self.generate_recall_record(
                box_preds=final_boxes if 'rois' not in batch_dict else src_box_preds,
                recall_dict=recall_dict, batch_index=index, data_dict=batch_dict,
                thresh_list=post_process_cfg.RECALL_THRESH_LIST
            )

            record_dict = {
                'pred_boxes': final_boxes,
                'pred_scores': final_scores,
                'pred_labels': final_labels,
                'pred_cls_scores': cls_preds[selected],
                'pred_iou_scores': iou_preds[selected]
            }
            pred_dicts.append(record_dict)

        return pred_dicts, recall_dict
| 7,499 | 41.134831 | 127 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/backbones_3d/spconv_unet.py | from functools import partial
import torch
import torch.nn as nn
from ...utils.spconv_utils import replace_feature, spconv
from ...utils import common_utils
from .spconv_backbone import post_act_block
class SparseBasicBlock(spconv.SparseModule):
    """Residual block built from two 3x3 submanifold sparse convolutions."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, indice_key=None, norm_fn=None):
        super(SparseBasicBlock, self).__init__()
        self.conv1 = spconv.SubMConv3d(
            inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False, indice_key=indice_key
        )
        self.bn1 = norm_fn(planes)
        self.relu = nn.ReLU()
        self.conv2 = spconv.SubMConv3d(
            planes, planes, kernel_size=3, stride=1, padding=1, bias=False, indice_key=indice_key
        )
        self.bn2 = norm_fn(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Conv-BN-ReLU -> Conv-BN, then add the (optionally downsampled) input."""
        assert x.features.dim() == 2, 'x.features.dim()=%d' % x.features.dim()
        shortcut = x.features

        y = self.conv1(x)
        y = replace_feature(y, self.relu(self.bn1(y.features)))
        y = self.conv2(y)
        y = replace_feature(y, self.bn2(y.features))

        if self.downsample is not None:
            shortcut = self.downsample(x)
        y = replace_feature(y, y.features + shortcut)
        return replace_feature(y, self.relu(y.features))
class UNetV2(nn.Module):
"""
Sparse Convolution based UNet for point-wise feature learning.
Reference Paper: https://arxiv.org/abs/1907.03670 (Shaoshuai Shi, et. al)
From Points to Parts: 3D Object Detection from Point Cloud with Part-aware and Part-aggregation Network
"""
    def __init__(self, model_cfg, input_channels, grid_size, voxel_size, point_cloud_range, **kwargs):
        """Build the sparse UNet: a 4-level strided encoder, an optional
        detection-head output conv, and a symmetric decoder whose up-blocks
        reuse the encoder indice keys for inverse convolutions.

        Args:
            model_cfg: backbone section of the model config.
            input_channels: number of features per input voxel.
            grid_size: voxel grid dims (x, y, z); reversed to spconv's (z, y, x).
            voxel_size / point_cloud_range: kept for voxel-center recovery in forward.
        """
        super().__init__()
        self.model_cfg = model_cfg
        # +1 z-slot gives the stride-(2,1,1) output conv room at the top
        self.sparse_shape = grid_size[::-1] + [1, 0, 0]
        self.voxel_size = voxel_size
        self.point_cloud_range = point_cloud_range

        norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01)

        self.conv_input = spconv.SparseSequential(
            spconv.SubMConv3d(input_channels, 16, 3, padding=1, bias=False, indice_key='subm1'),
            norm_fn(16),
            nn.ReLU(),
        )
        block = post_act_block

        self.conv1 = spconv.SparseSequential(
            block(16, 16, 3, norm_fn=norm_fn, padding=1, indice_key='subm1'),
        )

        self.conv2 = spconv.SparseSequential(
            # [1600, 1408, 41] <- [800, 704, 21]
            block(16, 32, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'),
            block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'),
            block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'),
        )

        self.conv3 = spconv.SparseSequential(
            # [800, 704, 21] <- [400, 352, 11]
            block(32, 64, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'),
        )

        self.conv4 = spconv.SparseSequential(
            # [400, 352, 11] <- [200, 176, 5]
            block(64, 64, 3, norm_fn=norm_fn, stride=2, padding=(0, 1, 1), indice_key='spconv4', conv_type='spconv'),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'),
        )

        if self.model_cfg.get('RETURN_ENCODED_TENSOR', True):
            last_pad = self.model_cfg.get('last_pad', 0)

            self.conv_out = spconv.SparseSequential(
                # [200, 150, 5] -> [200, 150, 2]
                spconv.SparseConv3d(64, 128, (3, 1, 1), stride=(2, 1, 1), padding=last_pad,
                                    bias=False, indice_key='spconv_down2'),
                norm_fn(128),
                nn.ReLU(),
            )
        else:
            self.conv_out = None

        # decoder: each level transforms the lateral skip, fuses it with the
        # bottom-up features, then upsamples via an inverse conv on the
        # matching encoder indice key
        # [400, 352, 11] <- [200, 176, 5]
        self.conv_up_t4 = SparseBasicBlock(64, 64, indice_key='subm4', norm_fn=norm_fn)
        self.conv_up_m4 = block(128, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4')
        self.inv_conv4 = block(64, 64, 3, norm_fn=norm_fn, indice_key='spconv4', conv_type='inverseconv')

        # [800, 704, 21] <- [400, 352, 11]
        self.conv_up_t3 = SparseBasicBlock(64, 64, indice_key='subm3', norm_fn=norm_fn)
        self.conv_up_m3 = block(128, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3')
        self.inv_conv3 = block(64, 32, 3, norm_fn=norm_fn, indice_key='spconv3', conv_type='inverseconv')

        # [1600, 1408, 41] <- [800, 704, 21]
        self.conv_up_t2 = SparseBasicBlock(32, 32, indice_key='subm2', norm_fn=norm_fn)
        self.conv_up_m2 = block(64, 32, 3, norm_fn=norm_fn, indice_key='subm2')
        self.inv_conv2 = block(32, 16, 3, norm_fn=norm_fn, indice_key='spconv2', conv_type='inverseconv')

        # [1600, 1408, 41] <- [1600, 1408, 41]
        self.conv_up_t1 = SparseBasicBlock(16, 16, indice_key='subm1', norm_fn=norm_fn)
        self.conv_up_m1 = block(32, 16, 3, norm_fn=norm_fn, indice_key='subm1')
        self.conv5 = spconv.SparseSequential(
            block(16, 16, 3, norm_fn=norm_fn, padding=1, indice_key='subm1')
        )
        # width of the per-point features produced by forward()
        self.num_point_features = 16
def UR_block_forward(self, x_lateral, x_bottom, conv_t, conv_m, conv_inv):
x_trans = conv_t(x_lateral)
x = x_trans
x = replace_feature(x, torch.cat((x_bottom.features, x_trans.features), dim=1))
x_m = conv_m(x)
x = self.channel_reduction(x, x_m.features.shape[1])
x = replace_feature(x, x_m.features + x.features)
x = conv_inv(x)
return x
@staticmethod
def channel_reduction(x, out_channels):
"""
Args:
x: x.features (N, C1)
out_channels: C2
Returns:
"""
features = x.features
n, in_channels = features.shape
assert (in_channels % out_channels == 0) and (in_channels >= out_channels)
x = replace_feature(x, features.view(n, out_channels, -1).sum(dim=2))
return x
    def forward(self, batch_dict):
        """Encode voxels with the sparse UNet and decode per-voxel features.

        Args:
            batch_dict:
                batch_size: int
                vfe_features: (num_voxels, C)
                voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx]
        Returns:
            batch_dict:
                encoded_spconv_tensor: sparse tensor (stride 8) for the detection head
                point_features: (N, C) decoder output, one row per input voxel
                point_coords: (N, 4) [batch_idx, x, y, z] voxel centers
        """
        voxel_features, voxel_coords = batch_dict['voxel_features'], batch_dict['voxel_coords']
        batch_size = batch_dict['batch_size']
        input_sp_tensor = spconv.SparseConvTensor(
            features=voxel_features,
            indices=voxel_coords.int(),
            spatial_shape=self.sparse_shape,
            batch_size=batch_size
        )
        x = self.conv_input(input_sp_tensor)

        x_conv1 = self.conv1(x)
        x_conv2 = self.conv2(x_conv1)
        x_conv3 = self.conv3(x_conv2)
        x_conv4 = self.conv4(x_conv3)

        if self.conv_out is not None:
            # for detection head
            # [200, 176, 5] -> [200, 176, 2]
            out = self.conv_out(x_conv4)
            batch_dict['encoded_spconv_tensor'] = out
            batch_dict['encoded_spconv_tensor_stride'] = 8

        # for segmentation head
        # [400, 352, 11] <- [200, 176, 5]
        x_up4 = self.UR_block_forward(x_conv4, x_conv4, self.conv_up_t4, self.conv_up_m4, self.inv_conv4)
        # [800, 704, 21] <- [400, 352, 11]
        x_up3 = self.UR_block_forward(x_conv3, x_up4, self.conv_up_t3, self.conv_up_m3, self.inv_conv3)
        # [1600, 1408, 41] <- [800, 704, 21]
        x_up2 = self.UR_block_forward(x_conv2, x_up3, self.conv_up_t2, self.conv_up_m2, self.inv_conv2)
        # [1600, 1408, 41] <- [1600, 1408, 41]
        x_up1 = self.UR_block_forward(x_conv1, x_up2, self.conv_up_t1, self.conv_up_m1, self.conv5)

        batch_dict['point_features'] = x_up1.features
        point_coords = common_utils.get_voxel_centers(
            x_up1.indices[:, 1:], downsample_times=1, voxel_size=self.voxel_size,
            point_cloud_range=self.point_cloud_range
        )
        batch_dict['point_coords'] = torch.cat((x_up1.indices[:, 0:1].float(), point_coords), dim=1)
        return batch_dict
| 8,602 | 39.389671 | 117 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/backbones_3d/spconv_backbone.py | from functools import partial
import torch.nn as nn
from ...utils.spconv_utils import replace_feature, spconv
def post_act_block(in_channels, out_channels, kernel_size, indice_key=None, stride=1, padding=0,
                   conv_type='subm', norm_fn=None):
    """
    Build a sparse conv -> norm -> ReLU block of the requested convolution flavour.

    Args:
        in_channels, out_channels, kernel_size: conv parameters
        indice_key: spconv indice-sharing key
        stride, padding: used only by the 'spconv' flavour
        conv_type: one of 'subm' (submanifold), 'spconv' (strided sparse conv),
            'inverseconv' (sparse inverse conv)
        norm_fn: callable producing a normalization layer for out_channels
    Returns:
        spconv.SparseSequential(conv, norm, ReLU)
    """
    if conv_type == 'subm':
        conv_layer = spconv.SubMConv3d(in_channels, out_channels, kernel_size, bias=False, indice_key=indice_key)
    elif conv_type == 'spconv':
        # The strided variant is the only one that consumes stride/padding.
        conv_layer = spconv.SparseConv3d(
            in_channels, out_channels, kernel_size, stride=stride, padding=padding,
            bias=False, indice_key=indice_key
        )
    elif conv_type == 'inverseconv':
        conv_layer = spconv.SparseInverseConv3d(in_channels, out_channels, kernel_size, indice_key=indice_key, bias=False)
    else:
        raise NotImplementedError
    return spconv.SparseSequential(
        conv_layer,
        norm_fn(out_channels),
        nn.ReLU(),
    )
class SparseBasicBlock(spconv.SparseModule):
    """
    ResNet-style basic block on sparse tensors: two 3x3 submanifold convs with
    batch norm, a residual shortcut, and ReLU activations. Attribute names
    (conv1/bn1/conv2/bn2/downsample) are fixed by existing checkpoints.
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, norm_fn=None, downsample=None, indice_key=None):
        super(SparseBasicBlock, self).__init__()
        assert norm_fn is not None
        bias = norm_fn is not None
        # First 3x3 submanifold conv (in -> out channels).
        self.conv1 = spconv.SubMConv3d(
            inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=bias, indice_key=indice_key
        )
        self.bn1 = norm_fn(planes)
        self.relu = nn.ReLU()
        # Second 3x3 submanifold conv (out -> out channels).
        self.conv2 = spconv.SubMConv3d(
            planes, planes, kernel_size=3, stride=stride, padding=1, bias=bias, indice_key=indice_key
        )
        self.bn2 = norm_fn(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Main branch: conv -> bn -> relu -> conv -> bn.
        branch = self.conv1(x)
        branch = replace_feature(branch, self.bn1(branch.features))
        branch = replace_feature(branch, self.relu(branch.features))
        branch = self.conv2(branch)
        branch = replace_feature(branch, self.bn2(branch.features))
        # Shortcut: identity, or projected when a downsample module is given.
        shortcut = x if self.downsample is None else self.downsample(x)
        branch = replace_feature(branch, branch.features + shortcut.features)
        return replace_feature(branch, self.relu(branch.features))
class VoxelBackBone8x(nn.Module):
    """
    Sparse-conv voxel backbone with 8x spatial downsampling.

    Four conv stages (strides 1/2/4/8) produce multi-scale sparse features;
    a final conv compresses the height axis for the BEV detection head.
    """
    def __init__(self, model_cfg, input_channels, grid_size, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01)
        # grid_size is [x, y, z]; sparse shape is [z, y, x] with the z-dim
        # padded by 1 (element-wise add; grid_size is presumably an ndarray).
        self.sparse_shape = grid_size[::-1] + [1, 0, 0]
        self.conv_input = spconv.SparseSequential(
            spconv.SubMConv3d(input_channels, 16, 3, padding=1, bias=False, indice_key='subm1'),
            norm_fn(16),
            nn.ReLU(),
        )
        block = post_act_block
        # Stage 1: stride 1, 16 channels.
        self.conv1 = spconv.SparseSequential(
            block(16, 16, 3, norm_fn=norm_fn, padding=1, indice_key='subm1'),
        )
        # Stage 2: stride 2, 32 channels.
        self.conv2 = spconv.SparseSequential(
            # [1600, 1408, 41] <- [800, 704, 21]
            block(16, 32, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'),
            block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'),
            block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'),
        )
        # Stage 3: stride 4, 64 channels.
        self.conv3 = spconv.SparseSequential(
            # [800, 704, 21] <- [400, 352, 11]
            block(32, 64, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'),
        )
        # Stage 4: stride 8, 64 channels; no z-padding on the strided conv.
        self.conv4 = spconv.SparseSequential(
            # [400, 352, 11] <- [200, 176, 5]
            block(64, 64, 3, norm_fn=norm_fn, stride=2, padding=(0, 1, 1), indice_key='spconv4', conv_type='spconv'),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'),
        )
        last_pad = 0
        last_pad = self.model_cfg.get('last_pad', last_pad)
        # Output conv strides only the height (z) axis to flatten toward BEV.
        self.conv_out = spconv.SparseSequential(
            # [200, 150, 5] -> [200, 150, 2]
            spconv.SparseConv3d(64, 128, (3, 1, 1), stride=(2, 1, 1), padding=last_pad,
                                bias=False, indice_key='spconv_down2'),
            norm_fn(128),
            nn.ReLU(),
        )
        self.num_point_features = 128
        # Per-stage channel counts, consumed by downstream feature aggregators.
        self.backbone_channels = {
            'x_conv1': 16,
            'x_conv2': 32,
            'x_conv3': 64,
            'x_conv4': 64
        }
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                vfe_features: (num_voxels, C)
                voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx]
        Returns:
            batch_dict:
                encoded_spconv_tensor: sparse tensor
                multi_scale_3d_features: sparse tensors of all 4 stages
                multi_scale_3d_strides: downsample factor of each stage
        """
        voxel_features, voxel_coords = batch_dict['voxel_features'], batch_dict['voxel_coords']
        batch_size = batch_dict['batch_size']
        input_sp_tensor = spconv.SparseConvTensor(
            features=voxel_features,
            indices=voxel_coords.int(),
            spatial_shape=self.sparse_shape,
            batch_size=batch_size
        )
        x = self.conv_input(input_sp_tensor)
        x_conv1 = self.conv1(x)
        x_conv2 = self.conv2(x_conv1)
        x_conv3 = self.conv3(x_conv2)
        x_conv4 = self.conv4(x_conv3)
        # for detection head
        # [200, 176, 5] -> [200, 176, 2]
        out = self.conv_out(x_conv4)
        batch_dict.update({
            'encoded_spconv_tensor': out,
            'encoded_spconv_tensor_stride': 8
        })
        batch_dict.update({
            'multi_scale_3d_features': {
                'x_conv1': x_conv1,
                'x_conv2': x_conv2,
                'x_conv3': x_conv3,
                'x_conv4': x_conv4,
            }
        })
        batch_dict.update({
            'multi_scale_3d_strides': {
                'x_conv1': 1,
                'x_conv2': 2,
                'x_conv3': 4,
                'x_conv4': 8,
            }
        })
        return batch_dict
class VoxelResBackBone8x(nn.Module):
    """
    Residual variant of the 8x sparse voxel backbone: each stage uses two
    SparseBasicBlock residual units instead of plain conv blocks, and stage 4
    widens to 128 channels.
    """
    def __init__(self, model_cfg, input_channels, grid_size, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01)
        # grid_size is [x, y, z]; sparse shape is [z, y, x] with z padded by 1.
        self.sparse_shape = grid_size[::-1] + [1, 0, 0]
        self.conv_input = spconv.SparseSequential(
            spconv.SubMConv3d(input_channels, 16, 3, padding=1, bias=False, indice_key='subm1'),
            norm_fn(16),
            nn.ReLU(),
        )
        block = post_act_block
        # Stage 1: stride 1, 16 channels.
        self.conv1 = spconv.SparseSequential(
            SparseBasicBlock(16, 16, norm_fn=norm_fn, indice_key='res1'),
            SparseBasicBlock(16, 16, norm_fn=norm_fn, indice_key='res1'),
        )
        # Stage 2: stride 2, 32 channels.
        self.conv2 = spconv.SparseSequential(
            # [1600, 1408, 41] <- [800, 704, 21]
            block(16, 32, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'),
            SparseBasicBlock(32, 32, norm_fn=norm_fn, indice_key='res2'),
            SparseBasicBlock(32, 32, norm_fn=norm_fn, indice_key='res2'),
        )
        # Stage 3: stride 4, 64 channels.
        self.conv3 = spconv.SparseSequential(
            # [800, 704, 21] <- [400, 352, 11]
            block(32, 64, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'),
            SparseBasicBlock(64, 64, norm_fn=norm_fn, indice_key='res3'),
            SparseBasicBlock(64, 64, norm_fn=norm_fn, indice_key='res3'),
        )
        # Stage 4: stride 8, 128 channels; no z-padding on the strided conv.
        self.conv4 = spconv.SparseSequential(
            # [400, 352, 11] <- [200, 176, 5]
            block(64, 128, 3, norm_fn=norm_fn, stride=2, padding=(0, 1, 1), indice_key='spconv4', conv_type='spconv'),
            SparseBasicBlock(128, 128, norm_fn=norm_fn, indice_key='res4'),
            SparseBasicBlock(128, 128, norm_fn=norm_fn, indice_key='res4'),
        )
        last_pad = 0
        last_pad = self.model_cfg.get('last_pad', last_pad)
        # Output conv strides only the height (z) axis to flatten toward BEV.
        self.conv_out = spconv.SparseSequential(
            # [200, 150, 5] -> [200, 150, 2]
            spconv.SparseConv3d(128, 128, (3, 1, 1), stride=(2, 1, 1), padding=last_pad,
                                bias=False, indice_key='spconv_down2'),
            norm_fn(128),
            nn.ReLU(),
        )
        self.num_point_features = 128
        # Per-stage channel counts, consumed by downstream feature aggregators.
        self.backbone_channels = {
            'x_conv1': 16,
            'x_conv2': 32,
            'x_conv3': 64,
            'x_conv4': 128
        }
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                vfe_features: (num_voxels, C)
                voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx]
        Returns:
            batch_dict:
                encoded_spconv_tensor: sparse tensor
                multi_scale_3d_features: sparse tensors of all 4 stages
                multi_scale_3d_strides: downsample factor of each stage
        """
        voxel_features, voxel_coords = batch_dict['voxel_features'], batch_dict['voxel_coords']
        batch_size = batch_dict['batch_size']
        input_sp_tensor = spconv.SparseConvTensor(
            features=voxel_features,
            indices=voxel_coords.int(),
            spatial_shape=self.sparse_shape,
            batch_size=batch_size
        )
        x = self.conv_input(input_sp_tensor)
        x_conv1 = self.conv1(x)
        x_conv2 = self.conv2(x_conv1)
        x_conv3 = self.conv3(x_conv2)
        x_conv4 = self.conv4(x_conv3)
        # for detection head
        # [200, 176, 5] -> [200, 176, 2]
        out = self.conv_out(x_conv4)
        batch_dict.update({
            'encoded_spconv_tensor': out,
            'encoded_spconv_tensor_stride': 8
        })
        batch_dict.update({
            'multi_scale_3d_features': {
                'x_conv1': x_conv1,
                'x_conv2': x_conv2,
                'x_conv3': x_conv3,
                'x_conv4': x_conv4,
            }
        })
        batch_dict.update({
            'multi_scale_3d_strides': {
                'x_conv1': 1,
                'x_conv2': 2,
                'x_conv3': 4,
                'x_conv4': 8,
            }
        })
        return batch_dict
| 10,242 | 33.840136 | 118 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/backbones_3d/pointnet2_backbone.py | import torch
import torch.nn as nn
from ...ops.pointnet2.pointnet2_batch import pointnet2_modules
from ...ops.pointnet2.pointnet2_stack import pointnet2_modules as pointnet2_modules_stack
from ...ops.pointnet2.pointnet2_stack import pointnet2_utils as pointnet2_utils_stack
class PointNet2MSG(nn.Module):
    """
    PointNet++ multi-scale-grouping backbone (batched layout).

    Set-abstraction (SA) layers downsample and encode the raw points; feature
    propagation (FP) layers interpolate features back up to every input point.
    Layer counts, radii, and MLP widths come from model_cfg.SA_CONFIG/FP_MLPS.
    Assumes every sample in the batch has the same number of points.
    """
    def __init__(self, model_cfg, input_channels, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        self.SA_modules = nn.ModuleList()
        # First 3 input channels are xyz; the rest are per-point features.
        channel_in = input_channels - 3
        self.num_points_each_layer = []
        skip_channel_list = [input_channels - 3]
        for k in range(self.model_cfg.SA_CONFIG.NPOINTS.__len__()):
            mlps = self.model_cfg.SA_CONFIG.MLPS[k].copy()
            channel_out = 0
            for idx in range(mlps.__len__()):
                # Prepend this layer's input width; output width is the sum of
                # the last MLP channel over all grouping scales.
                mlps[idx] = [channel_in] + mlps[idx]
                channel_out += mlps[idx][-1]
            self.SA_modules.append(
                pointnet2_modules.PointnetSAModuleMSG(
                    npoint=self.model_cfg.SA_CONFIG.NPOINTS[k],
                    radii=self.model_cfg.SA_CONFIG.RADIUS[k],
                    nsamples=self.model_cfg.SA_CONFIG.NSAMPLE[k],
                    mlps=mlps,
                    use_xyz=self.model_cfg.SA_CONFIG.get('USE_XYZ', True),
                )
            )
            skip_channel_list.append(channel_out)
            channel_in = channel_out
        self.FP_modules = nn.ModuleList()
        for k in range(self.model_cfg.FP_MLPS.__len__()):
            # FP layer k consumes the output of FP layer k+1 (or the deepest SA
            # layer) concatenated with the skip features of SA level k.
            pre_channel = self.model_cfg.FP_MLPS[k + 1][-1] if k + 1 < len(self.model_cfg.FP_MLPS) else channel_out
            self.FP_modules.append(
                pointnet2_modules.PointnetFPModule(
                    mlp=[pre_channel + skip_channel_list[k]] + self.model_cfg.FP_MLPS[k]
                )
            )
        self.num_point_features = self.model_cfg.FP_MLPS[0][-1]
    def break_up_pc(self, pc):
        # Split stacked points (N, 1 + 3 + C) into batch index, xyz, features.
        batch_idx = pc[:, 0]
        xyz = pc[:, 1:4].contiguous()
        features = (pc[:, 4:].contiguous() if pc.size(-1) > 4 else None)
        return batch_idx, xyz, features
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                vfe_features: (num_voxels, C)
                points: (num_points, 4 + C), [batch_idx, x, y, z, ...]
        Returns:
            batch_dict:
                point_features: (N, C) per-point backbone features
                point_coords: (N, 4), [batch_idx, x, y, z]
        """
        batch_size = batch_dict['batch_size']
        points = batch_dict['points']
        batch_idx, xyz, features = self.break_up_pc(points)
        xyz_batch_cnt = xyz.new_zeros(batch_size).int()
        for bs_idx in range(batch_size):
            xyz_batch_cnt[bs_idx] = (batch_idx == bs_idx).sum()
        # Batched layout requires equal point counts across the batch.
        assert xyz_batch_cnt.min() == xyz_batch_cnt.max()
        xyz = xyz.view(batch_size, -1, 3)
        features = features.view(batch_size, -1, features.shape[-1]).permute(0, 2, 1).contiguous() if features is not None else None
        l_xyz, l_features = [xyz], [features]
        # Encoder: successive set abstraction.
        for i in range(len(self.SA_modules)):
            li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
            l_xyz.append(li_xyz)
            l_features.append(li_features)
        # Decoder: propagate features back from coarse to fine levels.
        for i in range(-1, -(len(self.FP_modules) + 1), -1):
            l_features[i - 1] = self.FP_modules[i](
                l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i]
            )  # (B, C, N)
        point_features = l_features[0].permute(0, 2, 1).contiguous()  # (B, N, C)
        batch_dict['point_features'] = point_features.view(-1, point_features.shape[-1])
        batch_dict['point_coords'] = torch.cat((batch_idx[:, None].float(), l_xyz[0].view(-1, 3)), dim=1)
        return batch_dict
class PointNet2Backbone(nn.Module):
    """
    PointNet++ backbone on the stacked (non-batched) point layout.

    DO NOT USE THIS CURRENTLY SINCE IT MAY HAVE POTENTIAL BUGS, 20200723

    Note: the class is intentionally disabled via the assert in __init__.
    This revision fixes two latent defects in the FPS padding path of
    forward(): `empty_num` was computed from cur_xyz.shape[1] (=3, the
    coordinate dim) instead of shape[0] (the point count), and the padding
    indexed the 1-D `cur_pt_idxs` tensor with 2-D subscripts, which would
    raise an IndexError.
    """
    def __init__(self, model_cfg, input_channels, **kwargs):
        # Deliberately disabled upstream; kept so accidental use fails loudly.
        assert False, 'DO NOT USE THIS CURRENTLY SINCE IT MAY HAVE POTENTIAL BUGS, 20200723'
        super().__init__()
        self.model_cfg = model_cfg
        self.SA_modules = nn.ModuleList()
        # First 3 input channels are xyz; the rest are per-point features.
        channel_in = input_channels - 3
        self.num_points_each_layer = []
        skip_channel_list = [input_channels]
        for k in range(self.model_cfg.SA_CONFIG.NPOINTS.__len__()):
            self.num_points_each_layer.append(self.model_cfg.SA_CONFIG.NPOINTS[k])
            mlps = self.model_cfg.SA_CONFIG.MLPS[k].copy()
            channel_out = 0
            for idx in range(mlps.__len__()):
                mlps[idx] = [channel_in] + mlps[idx]
                channel_out += mlps[idx][-1]
            self.SA_modules.append(
                pointnet2_modules_stack.StackSAModuleMSG(
                    radii=self.model_cfg.SA_CONFIG.RADIUS[k],
                    nsamples=self.model_cfg.SA_CONFIG.NSAMPLE[k],
                    mlps=mlps,
                    use_xyz=self.model_cfg.SA_CONFIG.get('USE_XYZ', True),
                )
            )
            skip_channel_list.append(channel_out)
            channel_in = channel_out
        self.FP_modules = nn.ModuleList()
        for k in range(self.model_cfg.FP_MLPS.__len__()):
            pre_channel = self.model_cfg.FP_MLPS[k + 1][-1] if k + 1 < len(self.model_cfg.FP_MLPS) else channel_out
            self.FP_modules.append(
                pointnet2_modules_stack.StackPointnetFPModule(
                    mlp=[pre_channel + skip_channel_list[k]] + self.model_cfg.FP_MLPS[k]
                )
            )
        self.num_point_features = self.model_cfg.FP_MLPS[0][-1]
    def break_up_pc(self, pc):
        # Split stacked points (N, 1 + 3 + C) into batch index, xyz, features.
        batch_idx = pc[:, 0]
        xyz = pc[:, 1:4].contiguous()
        features = (pc[:, 4:].contiguous() if pc.size(-1) > 4 else None)
        return batch_idx, xyz, features
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                vfe_features: (num_voxels, C)
                points: (num_points, 4 + C), [batch_idx, x, y, z, ...]
        Returns:
            batch_dict:
                point_features: (N, C)
                point_coords: (N, 4), [batch_idx, x, y, z]
        """
        batch_size = batch_dict['batch_size']
        points = batch_dict['points']
        batch_idx, xyz, features = self.break_up_pc(points)
        xyz_batch_cnt = xyz.new_zeros(batch_size).int()
        for bs_idx in range(batch_size):
            xyz_batch_cnt[bs_idx] = (batch_idx == bs_idx).sum()
        l_xyz, l_features, l_batch_cnt = [xyz], [features], [xyz_batch_cnt]
        for i in range(len(self.SA_modules)):
            new_xyz_list = []
            # Per-sample farthest point sampling to pick this layer's centers.
            for k in range(batch_size):
                if len(l_xyz) == 1:
                    cur_xyz = l_xyz[0][batch_idx == k]
                else:
                    last_num_points = self.num_points_each_layer[i - 1]
                    cur_xyz = l_xyz[-1][k * last_num_points: (k + 1) * last_num_points]
                cur_pt_idxs = pointnet2_utils_stack.farthest_point_sample(
                    cur_xyz[None, :, :].contiguous(), self.num_points_each_layer[i]
                ).long()[0]
                if cur_xyz.shape[0] < self.num_points_each_layer[i]:
                    # Fewer points than requested: pad the tail of the index
                    # list by repeating the leading (valid) indices.
                    # Fixed: count from shape[0] (points), and index the 1-D
                    # cur_pt_idxs directly (it was already squeezed by [0]).
                    empty_num = self.num_points_each_layer[i] - cur_xyz.shape[0]
                    cur_pt_idxs[-empty_num:] = cur_pt_idxs[:empty_num]
                new_xyz_list.append(cur_xyz[cur_pt_idxs])
            new_xyz = torch.cat(new_xyz_list, dim=0)
            new_xyz_batch_cnt = xyz.new_zeros(batch_size).int().fill_(self.num_points_each_layer[i])
            li_xyz, li_features = self.SA_modules[i](
                xyz=l_xyz[i], features=l_features[i], xyz_batch_cnt=l_batch_cnt[i],
                new_xyz=new_xyz, new_xyz_batch_cnt=new_xyz_batch_cnt
            )
            l_xyz.append(li_xyz)
            l_features.append(li_features)
            l_batch_cnt.append(new_xyz_batch_cnt)
        # Level-0 "features" for propagation are the raw xyz + point features.
        l_features[0] = points[:, 1:]
        for i in range(-1, -(len(self.FP_modules) + 1), -1):
            l_features[i - 1] = self.FP_modules[i](
                unknown=l_xyz[i - 1], unknown_batch_cnt=l_batch_cnt[i - 1],
                known=l_xyz[i], known_batch_cnt=l_batch_cnt[i],
                unknown_feats=l_features[i - 1], known_feats=l_features[i]
            )
        batch_dict['point_features'] = l_features[0]
        batch_dict['point_coords'] = torch.cat((batch_idx[:, None].float(), l_xyz[0]), dim=1)
        return batch_dict
| 8,540 | 40.26087 | 132 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/backbones_3d/pfe/voxel_set_abstraction.py | import math
import numpy as np
import torch
import torch.nn as nn
from ....ops.pointnet2.pointnet2_stack import pointnet2_modules as pointnet2_stack_modules
from ....ops.pointnet2.pointnet2_stack import pointnet2_utils as pointnet2_stack_utils
from ....utils import common_utils
def bilinear_interpolate_torch(im, x, y):
    """
    Sample a 2-D feature grid at fractional locations with bilinear weights.

    Args:
        im: (H, W, C) grid, indexed as [y, x]
        x: (N) fractional x coordinates
        y: (N) fractional y coordinates
    Returns:
        (N, C) interpolated features
    """
    h, w = im.shape[0], im.shape[1]
    x_floor = torch.floor(x).long()
    y_floor = torch.floor(y).long()
    # Corner indices are clamped to the grid *before* the weights are
    # computed, which reproduces the original border behaviour exactly
    # (queries on the far border collapse to zero weight).
    left = torch.clamp(x_floor, 0, w - 1)
    right = torch.clamp(x_floor + 1, 0, w - 1)
    top = torch.clamp(y_floor, 0, h - 1)
    bottom = torch.clamp(y_floor + 1, 0, h - 1)
    # Bilinear weights derived from the (clamped) corner coordinates.
    w_tl = (right.type_as(x) - x) * (bottom.type_as(y) - y)
    w_bl = (right.type_as(x) - x) * (y - top.type_as(y))
    w_tr = (x - left.type_as(x)) * (bottom.type_as(y) - y)
    w_br = (x - left.type_as(x)) * (y - top.type_as(y))
    corners = (im[top, left], im[bottom, left], im[top, right], im[bottom, right])
    weights = (w_tl, w_bl, w_tr, w_br)
    ans = 0
    for corner, weight in zip(corners, weights):
        # Transpose trick broadcasts the (N,) weight over the C channels.
        ans = ans + torch.t(torch.t(corner) * weight)
    return ans
def sample_points_with_roi(rois, points, sample_radius_with_roi, num_max_points_of_part=200000):
    """
    Keep the points lying near any ROI box (within half its diagonal plus a
    radius margin of the nearest ROI center).

    Args:
        rois: (M, 7 + C) boxes as [cx, cy, cz, dx, dy, dz, ...]
        points: (N, 3)
        sample_radius_with_roi: extra margin added to each box's half-diagonal
        num_max_points_of_part: chunk size to bound the (N, M) distance matrix
    Returns:
        sampled_points: (N_out, 3) — points[:1] when no point qualifies
        point_mask: (N) boolean mask over the input points
    """
    def _near_roi_mask(chunk):
        # Distance from every point in the chunk to every ROI center.
        dist = (chunk[:, None, :] - rois[None, :, 0:3]).norm(dim=-1)
        nearest_dist, nearest_idx = dist.min(dim=-1)
        half_diag = (rois[nearest_idx, 3:6] / 2).norm(dim=-1)
        return nearest_dist < half_diag + sample_radius_with_roi

    num_points = points.shape[0]
    if num_points < num_max_points_of_part:
        point_mask = _near_roi_mask(points)
    else:
        # Chunk the points so the pairwise distance matrix stays bounded.
        point_mask = torch.cat(
            [_near_roi_mask(points[start:start + num_max_points_of_part])
             for start in range(0, num_points, num_max_points_of_part)],
            dim=0
        )
    sampled_points = points[:1] if point_mask.sum() == 0 else points[point_mask, :]
    return sampled_points, point_mask
def sector_fps(points, num_sampled_points, num_sectors):
    """
    Farthest point sampling applied independently inside angular sectors
    around the origin, so every direction keeps a proportional share of the
    sampled keypoints.

    Args:
        points: (N, 3)
        num_sampled_points: int, total sampling budget
        num_sectors: int, number of equal angular bins over [0, 2*pi)
    Returns:
        sampled_points: (N_out, 3)
    """
    sector_size = np.pi * 2 / num_sectors
    # atan2 in (-pi, pi]; shift to (0, 2*pi].
    point_angles = torch.atan2(points[:, 1], points[:, 0]) + np.pi
    # Fix: clamp to num_sectors - 1. atan2 can return exactly +pi, giving an
    # angle of exactly 2*pi and a sector index == num_sectors, which the
    # range(num_sectors) loop below would never visit — silently dropping
    # those points (off-by-one in the original clamp upper bound).
    sector_idx = (point_angles / sector_size).floor().clamp(min=0, max=num_sectors - 1)
    xyz_points_list = []
    xyz_batch_cnt = []
    num_sampled_points_list = []
    for k in range(num_sectors):
        mask = (sector_idx == k)
        cur_num_points = mask.sum().item()
        if cur_num_points > 0:
            xyz_points_list.append(points[mask])
            xyz_batch_cnt.append(cur_num_points)
            # Sector budget proportional to its share of the points.
            ratio = cur_num_points / points.shape[0]
            num_sampled_points_list.append(
                min(cur_num_points, math.ceil(ratio * num_sampled_points))
            )
    if len(xyz_batch_cnt) == 0:
        # Degenerate fallback: treat all points as one sector.
        xyz_points_list.append(points)
        xyz_batch_cnt.append(len(points))
        num_sampled_points_list.append(num_sampled_points)
        print(f'Warning: empty sector points detected in SectorFPS: points.shape={points.shape}')
    xyz = torch.cat(xyz_points_list, dim=0)
    xyz_batch_cnt = torch.tensor(xyz_batch_cnt, device=points.device).int()
    sampled_points_batch_cnt = torch.tensor(num_sampled_points_list, device=points.device).int()
    # Run FPS per sector in one stacked call (CUDA op).
    sampled_pt_idxs = pointnet2_stack_utils.stack_farthest_point_sample(
        xyz.contiguous(), xyz_batch_cnt, sampled_points_batch_cnt
    ).long()
    sampled_points = xyz[sampled_pt_idxs]
    return sampled_points
class VoxelSetAbstraction(nn.Module):
    """
    PV-RCNN voxel set abstraction: samples a set of keypoints from the scene
    and aggregates features for each keypoint from the configured sources
    (BEV map, raw points, and/or multi-scale sparse-conv feature volumes),
    then fuses them with a linear layer.
    """
    def __init__(self, model_cfg, voxel_size, point_cloud_range, num_bev_features=None,
                 num_rawpoint_features=None, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        self.voxel_size = voxel_size
        self.point_cloud_range = point_cloud_range
        SA_cfg = self.model_cfg.SA_LAYER
        self.SA_layers = nn.ModuleList()
        self.SA_layer_names = []
        self.downsample_times_map = {}
        # c_in accumulates the concatenated channel count over all sources.
        c_in = 0
        for src_name in self.model_cfg.FEATURES_SOURCE:
            # 'bev' and 'raw_points' get dedicated handling below.
            if src_name in ['bev', 'raw_points']:
                continue
            self.downsample_times_map[src_name] = SA_cfg[src_name].DOWNSAMPLE_FACTOR
            if SA_cfg[src_name].get('INPUT_CHANNELS', None) is None:
                input_channels = SA_cfg[src_name].MLPS[0][0] \
                    if isinstance(SA_cfg[src_name].MLPS[0], list) else SA_cfg[src_name].MLPS[0]
            else:
                input_channels = SA_cfg[src_name]['INPUT_CHANNELS']
            cur_layer, cur_num_c_out = pointnet2_stack_modules.build_local_aggregation_module(
                input_channels=input_channels, config=SA_cfg[src_name]
            )
            self.SA_layers.append(cur_layer)
            self.SA_layer_names.append(src_name)
            c_in += cur_num_c_out
        if 'bev' in self.model_cfg.FEATURES_SOURCE:
            c_bev = num_bev_features
            c_in += c_bev
        if 'raw_points' in self.model_cfg.FEATURES_SOURCE:
            # Raw-point features exclude the 3 xyz columns.
            self.SA_rawpoints, cur_num_c_out = pointnet2_stack_modules.build_local_aggregation_module(
                input_channels=num_rawpoint_features - 3, config=SA_cfg['raw_points']
            )
            c_in += cur_num_c_out
        # Fuse all concatenated source features to NUM_OUTPUT_FEATURES dims.
        self.vsa_point_feature_fusion = nn.Sequential(
            nn.Linear(c_in, self.model_cfg.NUM_OUTPUT_FEATURES, bias=False),
            nn.BatchNorm1d(self.model_cfg.NUM_OUTPUT_FEATURES),
            nn.ReLU(),
        )
        self.num_point_features = self.model_cfg.NUM_OUTPUT_FEATURES
        self.num_point_features_before_fusion = c_in
    def interpolate_from_bev_features(self, keypoints, bev_features, batch_size, bev_stride):
        """
        Bilinearly sample BEV map features at each keypoint's (x, y) location.

        Args:
            keypoints: (N1 + N2 + ..., 4), [bs_idx, x, y, z]
            bev_features: (B, C, H, W)
            batch_size:
            bev_stride: downsample factor of the BEV map w.r.t. the voxel grid
        Returns:
            point_bev_features: (N1 + N2 + ..., C)
        """
        # Metric xy -> voxel-grid indices -> BEV-map indices.
        x_idxs = (keypoints[:, 1] - self.point_cloud_range[0]) / self.voxel_size[0]
        y_idxs = (keypoints[:, 2] - self.point_cloud_range[1]) / self.voxel_size[1]
        x_idxs = x_idxs / bev_stride
        y_idxs = y_idxs / bev_stride
        point_bev_features_list = []
        for k in range(batch_size):
            bs_mask = (keypoints[:, 0] == k)
            cur_x_idxs = x_idxs[bs_mask]
            cur_y_idxs = y_idxs[bs_mask]
            cur_bev_features = bev_features[k].permute(1, 2, 0)  # (H, W, C)
            point_bev_features = bilinear_interpolate_torch(cur_bev_features, cur_x_idxs, cur_y_idxs)
            point_bev_features_list.append(point_bev_features)
        point_bev_features = torch.cat(point_bev_features_list, dim=0)  # (N1 + N2 + ..., C)
        return point_bev_features
    def sectorized_proposal_centric_sampling(self, roi_boxes, points):
        """
        SPC keypoint sampling: keep points near ROIs, then sector-wise FPS.

        Args:
            roi_boxes: (M, 7 + C)
            points: (N, 3)
        Returns:
            sampled_points: (N_out, 3)
        """
        sampled_points, _ = sample_points_with_roi(
            rois=roi_boxes, points=points,
            sample_radius_with_roi=self.model_cfg.SPC_SAMPLING.SAMPLE_RADIUS_WITH_ROI,
            num_max_points_of_part=self.model_cfg.SPC_SAMPLING.get('NUM_POINTS_OF_EACH_SAMPLE_PART', 200000)
        )
        sampled_points = sector_fps(
            points=sampled_points, num_sampled_points=self.model_cfg.NUM_KEYPOINTS,
            num_sectors=self.model_cfg.SPC_SAMPLING.NUM_SECTORS
        )
        return sampled_points
    def get_sampled_points(self, batch_dict):
        """
        Sample NUM_KEYPOINTS keypoints per batch sample from raw points or
        voxel centers, using FPS or SPC per model_cfg.SAMPLE_METHOD.

        Args:
            batch_dict:
        Returns:
            keypoints: (N1 + N2 + ..., 4), where 4 indicates [bs_idx, x, y, z]
        """
        batch_size = batch_dict['batch_size']
        if self.model_cfg.POINT_SOURCE == 'raw_points':
            src_points = batch_dict['points'][:, 1:4]
            batch_indices = batch_dict['points'][:, 0].long()
        elif self.model_cfg.POINT_SOURCE == 'voxel_centers':
            src_points = common_utils.get_voxel_centers(
                batch_dict['voxel_coords'][:, 1:4],
                downsample_times=1,
                voxel_size=self.voxel_size,
                point_cloud_range=self.point_cloud_range
            )
            batch_indices = batch_dict['voxel_coords'][:, 0].long()
        else:
            raise NotImplementedError
        keypoints_list = []
        for bs_idx in range(batch_size):
            bs_mask = (batch_indices == bs_idx)
            sampled_points = src_points[bs_mask].unsqueeze(dim=0)  # (1, N, 3)
            if self.model_cfg.SAMPLE_METHOD == 'FPS':
                cur_pt_idxs = pointnet2_stack_utils.farthest_point_sample(
                    sampled_points[:, :, 0:3].contiguous(), self.model_cfg.NUM_KEYPOINTS
                ).long()
                if sampled_points.shape[1] < self.model_cfg.NUM_KEYPOINTS:
                    # Fewer source points than the budget: tile the valid
                    # indices until NUM_KEYPOINTS is reached.
                    times = int(self.model_cfg.NUM_KEYPOINTS / sampled_points.shape[1]) + 1
                    non_empty = cur_pt_idxs[0, :sampled_points.shape[1]]
                    cur_pt_idxs[0] = non_empty.repeat(times)[:self.model_cfg.NUM_KEYPOINTS]
                keypoints = sampled_points[0][cur_pt_idxs[0]].unsqueeze(dim=0)
            elif self.model_cfg.SAMPLE_METHOD == 'SPC':
                cur_keypoints = self.sectorized_proposal_centric_sampling(
                    roi_boxes=batch_dict['rois'][bs_idx], points=sampled_points[0]
                )
                bs_idxs = cur_keypoints.new_ones(cur_keypoints.shape[0]) * bs_idx
                keypoints = torch.cat((bs_idxs[:, None], cur_keypoints), dim=1)
            else:
                raise NotImplementedError
            keypoints_list.append(keypoints)
        keypoints = torch.cat(keypoints_list, dim=0)  # (B, M, 3) or (N1 + N2 + ..., 4)
        if len(keypoints.shape) == 3:
            # FPS path produced (B, M, 3); flatten and prepend the batch index.
            batch_idx = torch.arange(batch_size, device=keypoints.device).view(-1, 1).repeat(1, keypoints.shape[1]).view(-1, 1)
            keypoints = torch.cat((batch_idx.float(), keypoints.view(-1, 3)), dim=1)
        return keypoints
    @staticmethod
    def aggregate_keypoint_features_from_one_source(
            batch_size, aggregate_func, xyz, xyz_features, xyz_bs_idxs, new_xyz, new_xyz_batch_cnt,
            filter_neighbors_with_roi=False, radius_of_neighbor=None, num_max_points_of_part=200000, rois=None
    ):
        """
        Pool features from one source onto the keypoints with the given
        stack-layout aggregation module, optionally pre-filtering source
        points to the neighborhoods of the ROIs.

        Args:
            aggregate_func: stack SA-style module
            xyz: (N, 3)
            xyz_features: (N, C)
            xyz_bs_idxs: (N)
            new_xyz: (M, 3)
            new_xyz_batch_cnt: (batch_size), [N1, N2, ...]
            filter_neighbors_with_roi: True/False
            radius_of_neighbor: float
            num_max_points_of_part: int
            rois: (batch_size, num_rois, 7 + C)
        Returns:
            pooled_features: (M, C_out)
        """
        xyz_batch_cnt = xyz.new_zeros(batch_size).int()
        if filter_neighbors_with_roi:
            point_features = torch.cat((xyz, xyz_features), dim=-1) if xyz_features is not None else xyz
            point_features_list = []
            for bs_idx in range(batch_size):
                bs_mask = (xyz_bs_idxs == bs_idx)
                _, valid_mask = sample_points_with_roi(
                    rois=rois[bs_idx], points=xyz[bs_mask],
                    sample_radius_with_roi=radius_of_neighbor, num_max_points_of_part=num_max_points_of_part,
                )
                point_features_list.append(point_features[bs_mask][valid_mask])
                xyz_batch_cnt[bs_idx] = valid_mask.sum()
            valid_point_features = torch.cat(point_features_list, dim=0)
            xyz = valid_point_features[:, 0:3]
            xyz_features = valid_point_features[:, 3:] if xyz_features is not None else None
        else:
            for bs_idx in range(batch_size):
                xyz_batch_cnt[bs_idx] = (xyz_bs_idxs == bs_idx).sum()
        # NOTE(review): xyz_features.contiguous() below would fail if
        # xyz_features is None (e.g. raw points with no extra channels) —
        # presumably the configured sources always carry features; verify.
        pooled_points, pooled_features = aggregate_func(
            xyz=xyz.contiguous(),
            xyz_batch_cnt=xyz_batch_cnt,
            new_xyz=new_xyz,
            new_xyz_batch_cnt=new_xyz_batch_cnt,
            features=xyz_features.contiguous(),
        )
        return pooled_features
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size:
                keypoints: (B, num_keypoints, 3)
                multi_scale_3d_features: {
                        'x_conv4': ...
                    }
                points: optional (N, 1 + 3 + C) [bs_idx, x, y, z, ...]
                spatial_features: optional
                spatial_features_stride: optional
        Returns:
            point_features: (N, C)
            point_coords: (N, 4)
        """
        keypoints = self.get_sampled_points(batch_dict)
        point_features_list = []
        # Source 1: bilinear samples from the BEV feature map.
        if 'bev' in self.model_cfg.FEATURES_SOURCE:
            point_bev_features = self.interpolate_from_bev_features(
                keypoints, batch_dict['spatial_features'], batch_dict['batch_size'],
                bev_stride=batch_dict['spatial_features_stride']
            )
            point_features_list.append(point_bev_features)
        batch_size = batch_dict['batch_size']
        new_xyz = keypoints[:, 1:4].contiguous()
        new_xyz_batch_cnt = new_xyz.new_zeros(batch_size).int()
        for k in range(batch_size):
            new_xyz_batch_cnt[k] = (keypoints[:, 0] == k).sum()
        # Source 2: local aggregation over the raw points.
        if 'raw_points' in self.model_cfg.FEATURES_SOURCE:
            raw_points = batch_dict['points']
            pooled_features = self.aggregate_keypoint_features_from_one_source(
                batch_size=batch_size, aggregate_func=self.SA_rawpoints,
                xyz=raw_points[:, 1:4],
                xyz_features=raw_points[:, 4:].contiguous() if raw_points.shape[1] > 4 else None,
                xyz_bs_idxs=raw_points[:, 0],
                new_xyz=new_xyz, new_xyz_batch_cnt=new_xyz_batch_cnt,
                filter_neighbors_with_roi=self.model_cfg.SA_LAYER['raw_points'].get('FILTER_NEIGHBOR_WITH_ROI', False),
                radius_of_neighbor=self.model_cfg.SA_LAYER['raw_points'].get('RADIUS_OF_NEIGHBOR_WITH_ROI', None),
                rois=batch_dict.get('rois', None)
            )
            point_features_list.append(pooled_features)
        # Source 3..k: multi-scale sparse-conv feature volumes.
        for k, src_name in enumerate(self.SA_layer_names):
            cur_coords = batch_dict['multi_scale_3d_features'][src_name].indices
            cur_features = batch_dict['multi_scale_3d_features'][src_name].features.contiguous()
            xyz = common_utils.get_voxel_centers(
                cur_coords[:, 1:4], downsample_times=self.downsample_times_map[src_name],
                voxel_size=self.voxel_size, point_cloud_range=self.point_cloud_range
            )
            pooled_features = self.aggregate_keypoint_features_from_one_source(
                batch_size=batch_size, aggregate_func=self.SA_layers[k],
                xyz=xyz.contiguous(), xyz_features=cur_features, xyz_bs_idxs=cur_coords[:, 0],
                new_xyz=new_xyz, new_xyz_batch_cnt=new_xyz_batch_cnt,
                filter_neighbors_with_roi=self.model_cfg.SA_LAYER[src_name].get('FILTER_NEIGHBOR_WITH_ROI', False),
                radius_of_neighbor=self.model_cfg.SA_LAYER[src_name].get('RADIUS_OF_NEIGHBOR_WITH_ROI', None),
                rois=batch_dict.get('rois', None)
            )
            point_features_list.append(pooled_features)
        # Concatenate all sources, keep the pre-fusion features, then fuse.
        point_features = torch.cat(point_features_list, dim=-1)
        batch_dict['point_features_before_fusion'] = point_features.view(-1, point_features.shape[-1])
        point_features = self.vsa_point_feature_fusion(point_features.view(-1, point_features.shape[-1]))
        batch_dict['point_features'] = point_features  # (BxN, C)
        batch_dict['point_coords'] = keypoints  # (BxN, 4)
        return batch_dict
| 16,404 | 38.817961 | 127 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/backbones_3d/vfe/vfe_template.py | import torch.nn as nn
class VFETemplate(nn.Module):
    """
    Abstract base class for voxel feature encoders (VFEs).

    Subclasses must implement get_output_feature_dim() and forward(); this
    template only stores the encoder config.
    """

    def __init__(self, model_cfg, **kwargs):
        super().__init__()
        # Encoder-specific configuration node, kept for subclasses.
        self.model_cfg = model_cfg

    def get_output_feature_dim(self):
        """Return the number of output feature channels. Must be overridden."""
        raise NotImplementedError

    def forward(self, **kwargs):
        """
        Encode voxels into features. Must be overridden.

        Args:
            **kwargs:
        Returns:
            batch_dict:
                ...
                vfe_features: (num_voxels, C)
        """
        raise NotImplementedError
| 470 | 19.478261 | 45 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/backbones_3d/vfe/dynamic_mean_vfe.py | import torch
from .vfe_template import VFETemplate
try:
import torch_scatter
except Exception as e:
# Incase someone doesn't want to use dynamic pillar vfe and hasn't installed torch_scatter
pass
from .vfe_template import VFETemplate
class DynamicMeanVFE(VFETemplate):
    """
    Dynamic (voxelization-free) mean VFE: voxelizes raw points on the fly by
    hashing each point to a linearized voxel key and averaging point features
    per key with torch_scatter. Requires CUDA (buffers are created on GPU).
    """
    def __init__(self, model_cfg, num_point_features, voxel_size, grid_size, point_cloud_range, **kwargs):
        super().__init__(model_cfg=model_cfg)
        self.num_point_features = num_point_features
        self.grid_size = torch.tensor(grid_size).cuda()
        self.voxel_size = torch.tensor(voxel_size).cuda()
        self.point_cloud_range = torch.tensor(point_cloud_range).cuda()
        self.voxel_x = voxel_size[0]
        self.voxel_y = voxel_size[1]
        self.voxel_z = voxel_size[2]
        # Voxel-center offsets (currently unused in forward).
        self.x_offset = self.voxel_x / 2 + point_cloud_range[0]
        self.y_offset = self.voxel_y / 2 + point_cloud_range[1]
        self.z_offset = self.voxel_z / 2 + point_cloud_range[2]
        # Strides for encoding (batch, x, y, z) into a single integer key:
        # key = b*scale_xyz + x*scale_yz + y*scale_z + z.
        self.scale_xyz = grid_size[0] * grid_size[1] * grid_size[2]
        self.scale_yz = grid_size[1] * grid_size[2]
        self.scale_z = grid_size[2]
    def get_output_feature_dim(self):
        # Mean pooling preserves the input channel count.
        return self.num_point_features
    @torch.no_grad()
    def forward(self, batch_dict, **kwargs):
        """
        Args:
            batch_dict:
                voxels: (num_voxels, max_points_per_voxel, C)
                voxel_num_points: optional (num_voxels)
            **kwargs:
        Returns:
            vfe_features: (num_voxels, C)
        """
        batch_size = batch_dict['batch_size']
        points = batch_dict['points'] # (batch_idx, x, y, z, i, e)
        # # debug
        # Quantize points to integer voxel coordinates (x, y, z order).
        point_coords = torch.floor((points[:, 1:4] - self.point_cloud_range[0:3]) / self.voxel_size).int()
        # Drop points falling outside the grid.
        mask = ((point_coords >= 0) & (point_coords < self.grid_size)).all(dim=1)
        points = points[mask]
        point_coords = point_coords[mask]
        # Linearized per-point voxel key: b*XYZ + x*YZ + y*Z + z.
        merge_coords = points[:, 0].int() * self.scale_xyz + \
                       point_coords[:, 0] * self.scale_yz + \
                       point_coords[:, 1] * self.scale_z + \
                       point_coords[:, 2]
        points_data = points[:, 1:].contiguous()
        # Group points by voxel key and average their features per voxel.
        unq_coords, unq_inv, unq_cnt = torch.unique(merge_coords, return_inverse=True, return_counts=True)
        points_mean = torch_scatter.scatter_mean(points_data, unq_inv, dim=0)
        unq_coords = unq_coords.int()
        # Decode keys back to (batch, x, y, z) ...
        voxel_coords = torch.stack((unq_coords // self.scale_xyz,
                                    (unq_coords % self.scale_xyz) // self.scale_yz,
                                    (unq_coords % self.scale_yz) // self.scale_z,
                                    unq_coords % self.scale_z), dim=1)
        # ... then reorder to the expected (batch_idx, z, y, x) layout.
        voxel_coords = voxel_coords[:, [0, 3, 2, 1]]
        batch_dict['voxel_features'] = points_mean.contiguous()
        batch_dict['voxel_coords'] = voxel_coords.contiguous()
        return batch_dict
| 2,980 | 37.714286 | 106 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/backbones_3d/vfe/mean_vfe.py | import torch
from .vfe_template import VFETemplate
class MeanVFE(VFETemplate):
    """Voxel feature encoder that averages the raw points inside each voxel."""

    def __init__(self, model_cfg, num_point_features, **kwargs):
        super().__init__(model_cfg=model_cfg)
        self.num_point_features = num_point_features

    def get_output_feature_dim(self):
        # Mean pooling preserves the input channel count.
        return self.num_point_features

    def forward(self, batch_dict, **kwargs):
        """
        Average the (zero-padded) points of each voxel into one feature vector.

        Args:
            batch_dict:
                voxels: (num_voxels, max_points_per_voxel, C)
                voxel_num_points: optional (num_voxels)
            **kwargs:
        Returns:
            vfe_features: (num_voxels, C)
        """
        voxels = batch_dict['voxels']
        num_points_per_voxel = batch_dict['voxel_num_points']
        summed = voxels.sum(dim=1, keepdim=False)
        # Clamp the divisor so empty voxels do not divide by zero.
        denominator = torch.clamp_min(num_points_per_voxel.view(-1, 1), min=1.0).type_as(voxels)
        batch_dict['voxel_features'] = (summed / denominator).contiguous()
        return batch_dict
| 1,038 | 31.46875 | 99 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/backbones_3d/vfe/dynamic_pillar_vfe.py | import torch
import torch.nn as nn
import torch.nn.functional as F
try:
import torch_scatter
except Exception as e:
# Incase someone doesn't want to use dynamic pillar vfe and hasn't installed torch_scatter
pass
from .vfe_template import VFETemplate
class PFNLayerV2(nn.Module):
    """
    One pillar-feature-net layer: linear -> (optional BN) -> ReLU, followed by
    a per-pillar scatter-max. Intermediate layers halve their output width and
    concatenate the pillar max back onto every point; the last layer returns
    only the per-pillar max.
    """

    def __init__(self, in_channels, out_channels, use_norm=True, last_layer=False):
        super().__init__()
        self.last_vfe = last_layer
        self.use_norm = use_norm
        # Non-final layers emit half the channels; the other half is the
        # broadcast pillar maximum concatenated in forward().
        if not self.last_vfe:
            out_channels = out_channels // 2
        if self.use_norm:
            self.linear = nn.Linear(in_channels, out_channels, bias=False)
            self.norm = nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01)
        else:
            self.linear = nn.Linear(in_channels, out_channels, bias=True)
        self.relu = nn.ReLU()

    def forward(self, inputs, unq_inv):
        projected = self.linear(inputs)
        if self.use_norm:
            projected = self.norm(projected)
        activated = self.relu(projected)
        # Max-pool point features within each pillar (unq_inv maps points
        # to their pillar index).
        pillar_max = torch_scatter.scatter_max(activated, unq_inv, dim=0)[0]
        if self.last_vfe:
            return pillar_max
        # Broadcast each pillar's max back to its points and concatenate.
        return torch.cat([activated, pillar_max[unq_inv, :]], dim=1)
class DynamicPillarVFE(VFETemplate):
    """Pillar feature encoder using dynamic voxelization.

    Points are assigned to pillars by folding (batch_idx, x_idx, y_idx) into a
    single scalar id; per-pillar pooling is then done with torch_scatter, so
    no point is dropped and no fixed max-points padding is required.
    """

    def __init__(self, model_cfg, num_point_features, voxel_size, grid_size, point_cloud_range, **kwargs):
        super().__init__(model_cfg=model_cfg)

        self.use_norm = self.model_cfg.USE_NORM
        self.with_distance = self.model_cfg.WITH_DISTANCE
        self.use_absolute_xyz = self.model_cfg.USE_ABSLOTE_XYZ
        # Input channels grow by the cluster offset (3) and center offset (3);
        # the raw point features are kept as well only when USE_ABSLOTE_XYZ.
        num_point_features += 6 if self.use_absolute_xyz else 3
        if self.with_distance:
            num_point_features += 1

        self.num_filters = self.model_cfg.NUM_FILTERS
        assert len(self.num_filters) > 0
        num_filters = [num_point_features] + list(self.num_filters)

        pfn_layers = []
        for i in range(len(num_filters) - 1):
            in_filters = num_filters[i]
            out_filters = num_filters[i + 1]
            pfn_layers.append(
                PFNLayerV2(in_filters, out_filters, self.use_norm, last_layer=(i >= len(num_filters) - 2))
            )
        self.pfn_layers = nn.ModuleList(pfn_layers)

        self.voxel_x = voxel_size[0]
        self.voxel_y = voxel_size[1]
        self.voxel_z = voxel_size[2]
        # Offsets convert a voxel index into the metric center of that voxel.
        self.x_offset = self.voxel_x / 2 + point_cloud_range[0]
        self.y_offset = self.voxel_y / 2 + point_cloud_range[1]
        self.z_offset = self.voxel_z / 2 + point_cloud_range[2]

        # Multipliers used to fold (batch_idx, x_idx, y_idx) into one scalar id.
        self.scale_xy = grid_size[0] * grid_size[1]
        self.scale_y = grid_size[1]

        # NOTE(review): tensors are created directly on the GPU, so this module
        # assumes CUDA is available — confirm before using on CPU-only setups.
        self.grid_size = torch.tensor(grid_size).cuda()
        self.voxel_size = torch.tensor(voxel_size).cuda()
        self.point_cloud_range = torch.tensor(point_cloud_range).cuda()

    def get_output_feature_dim(self):
        return self.num_filters[-1]

    def forward(self, batch_dict, **kwargs):
        points = batch_dict['points'] # (batch_idx, x, y, z, i, e)

        # Pillar (x, y) indices of each point; drop points outside the grid.
        points_coords = torch.floor((points[:, [1,2]] - self.point_cloud_range[[0,1]]) / self.voxel_size[[0,1]]).int()
        mask = ((points_coords >= 0) & (points_coords < self.grid_size[[0,1]])).all(dim=1)
        points = points[mask]
        points_coords = points_coords[mask]
        points_xyz = points[:, [1, 2, 3]].contiguous()

        # Scalar pillar id: batch_idx * (nx*ny) + x_idx * ny + y_idx.
        merge_coords = points[:, 0].int() * self.scale_xy + \
                       points_coords[:, 0] * self.scale_y + \
                       points_coords[:, 1]

        unq_coords, unq_inv, unq_cnt = torch.unique(merge_coords, return_inverse=True, return_counts=True, dim=0)

        # f_cluster: offset of each point from the mean of its pillar's points.
        points_mean = torch_scatter.scatter_mean(points_xyz, unq_inv, dim=0)
        f_cluster = points_xyz - points_mean[unq_inv, :]

        # f_center: offset of each point from its pillar's geometric center
        # (z uses a single fixed center since pillars span the full height).
        f_center = torch.zeros_like(points_xyz)
        f_center[:, 0] = points_xyz[:, 0] - (points_coords[:, 0].to(points_xyz.dtype) * self.voxel_x + self.x_offset)
        f_center[:, 1] = points_xyz[:, 1] - (points_coords[:, 1].to(points_xyz.dtype) * self.voxel_y + self.y_offset)
        f_center[:, 2] = points_xyz[:, 2] - self.z_offset

        if self.use_absolute_xyz:
            features = [points[:, 1:], f_cluster, f_center]
        else:
            features = [points[:, 4:], f_cluster, f_center]

        if self.with_distance:
            points_dist = torch.norm(points[:, 1:4], 2, dim=1, keepdim=True)
            features.append(points_dist)
        features = torch.cat(features, dim=-1)

        for pfn in self.pfn_layers:
            features = pfn(features, unq_inv)
        # features = self.linear1(features)
        # features_max = torch_scatter.scatter_max(features, unq_inv, dim=0)[0]
        # features = torch.cat([features, features_max[unq_inv, :]], dim=1)
        # features = self.linear2(features)
        # features = torch_scatter.scatter_max(features, unq_inv, dim=0)[0]

        # generate voxel coordinates
        # Decode the scalar pillar id back into (batch_idx, x, y), append a
        # zero z index, then reorder to the (batch_idx, z, y, x) convention.
        unq_coords = unq_coords.int()
        voxel_coords = torch.stack((unq_coords // self.scale_xy,
                                    (unq_coords % self.scale_xy) // self.scale_y,
                                    unq_coords % self.scale_y,
                                    torch.zeros(unq_coords.shape[0]).to(unq_coords.device).int()
                                    ), dim=1)
        voxel_coords = voxel_coords[:, [0, 3, 2, 1]]

        batch_dict['pillar_features'] = features
        batch_dict['voxel_coords'] = voxel_coords
        return batch_dict
| 5,614 | 38.265734 | 118 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/backbones_3d/vfe/pillar_vfe.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from .vfe_template import VFETemplate
class PFNLayer(nn.Module):
    """Single Pillar Feature Net layer: per-point linear (+BN) + ReLU followed
    by a max-pool over the points dimension of each pillar."""

    def __init__(self,
                 in_channels,
                 out_channels,
                 use_norm=True,
                 last_layer=False):
        super().__init__()

        self.last_vfe = last_layer
        self.use_norm = use_norm
        # Intermediate layers output half the channels because their result is
        # concatenated with the per-pillar max of the same width.
        if not self.last_vfe:
            out_channels = out_channels // 2

        if self.use_norm:
            # BatchNorm supplies the affine bias, so the linear layer omits it.
            self.linear = nn.Linear(in_channels, out_channels, bias=False)
            self.norm = nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01)
        else:
            self.linear = nn.Linear(in_channels, out_channels, bias=True)

        # nn.Linear behaves non-deterministically on very large batches, so
        # inputs beyond this size are processed in chunks.
        self.part = 50000

    def forward(self, inputs):
        total = inputs.shape[0]
        if total > self.part:
            # Apply the linear layer chunk by chunk and stitch the results.
            num_parts = total // self.part
            chunks = []
            for start in range(0, (num_parts + 1) * self.part, self.part):
                chunks.append(self.linear(inputs[start:start + self.part]))
            x = torch.cat(chunks, dim=0)
        else:
            x = self.linear(inputs)

        # cudnn is disabled around BatchNorm to keep the result deterministic;
        # BatchNorm1d expects (N, C, L), hence the permutes.
        torch.backends.cudnn.enabled = False
        if self.use_norm:
            x = self.norm(x.permute(0, 2, 1)).permute(0, 2, 1)
        torch.backends.cudnn.enabled = True

        x = F.relu(x)
        pooled = torch.max(x, dim=1, keepdim=True)[0]

        if self.last_vfe:
            return pooled
        # Broadcast the pillar-wise max back over the points dimension and
        # concatenate it with the per-point features.
        tiled = pooled.repeat(1, inputs.shape[1], 1)
        return torch.cat([x, tiled], dim=2)
class PillarVFE(VFETemplate):
    """PointPillars-style voxel feature encoder for fixed-size pillars.

    Each pillar holds up to max_points points (zero padded); point features
    are augmented with cluster/center offsets and max-pooled by PFN layers.
    """

    def __init__(self, model_cfg, num_point_features, voxel_size, point_cloud_range, **kwargs):
        super().__init__(model_cfg=model_cfg)

        self.use_norm = self.model_cfg.USE_NORM
        self.with_distance = self.model_cfg.WITH_DISTANCE
        self.use_absolute_xyz = self.model_cfg.USE_ABSLOTE_XYZ
        # Input channels grow by the cluster offset (3) and center offset (3);
        # the raw point features are kept as well only when USE_ABSLOTE_XYZ.
        num_point_features += 6 if self.use_absolute_xyz else 3
        if self.with_distance:
            num_point_features += 1

        self.num_filters = self.model_cfg.NUM_FILTERS
        assert len(self.num_filters) > 0
        num_filters = [num_point_features] + list(self.num_filters)

        pfn_layers = []
        for i in range(len(num_filters) - 1):
            in_filters = num_filters[i]
            out_filters = num_filters[i + 1]
            pfn_layers.append(
                PFNLayer(in_filters, out_filters, self.use_norm, last_layer=(i >= len(num_filters) - 2))
            )
        self.pfn_layers = nn.ModuleList(pfn_layers)

        self.voxel_x = voxel_size[0]
        self.voxel_y = voxel_size[1]
        self.voxel_z = voxel_size[2]
        # Offsets convert a voxel index into the metric center of that voxel.
        self.x_offset = self.voxel_x / 2 + point_cloud_range[0]
        self.y_offset = self.voxel_y / 2 + point_cloud_range[1]
        self.z_offset = self.voxel_z / 2 + point_cloud_range[2]

    def get_output_feature_dim(self):
        return self.num_filters[-1]

    def get_paddings_indicator(self, actual_num, max_num, axis=0):
        """Boolean mask marking the real (non padded) point slots per voxel.

        Args:
            actual_num: (num_voxels), number of valid points per voxel
            max_num: int, padded slot count per voxel
        Returns:
            paddings_indicator: (num_voxels, max_num), True where the slot
                holds a real point
        """
        actual_num = torch.unsqueeze(actual_num, axis + 1)
        max_num_shape = [1] * len(actual_num.shape)
        max_num_shape[axis + 1] = -1
        max_num = torch.arange(max_num, dtype=torch.int, device=actual_num.device).view(max_num_shape)
        paddings_indicator = actual_num.int() > max_num
        return paddings_indicator

    def forward(self, batch_dict, **kwargs):
        voxel_features, voxel_num_points, coords = batch_dict['voxels'], batch_dict['voxel_num_points'], batch_dict['voxel_coords']
        # Mean xyz of each voxel's points; padding slots are zero so they do
        # not affect the sum, and the divisor is the true point count.
        points_mean = voxel_features[:, :, :3].sum(dim=1, keepdim=True) / voxel_num_points.type_as(voxel_features).view(-1, 1, 1)
        f_cluster = voxel_features[:, :, :3] - points_mean

        # Offset of every point from its voxel's geometric center; coords are
        # laid out as (batch_idx, z, y, x).
        f_center = torch.zeros_like(voxel_features[:, :, :3])
        f_center[:, :, 0] = voxel_features[:, :, 0] - (coords[:, 3].to(voxel_features.dtype).unsqueeze(1) * self.voxel_x + self.x_offset)
        f_center[:, :, 1] = voxel_features[:, :, 1] - (coords[:, 2].to(voxel_features.dtype).unsqueeze(1) * self.voxel_y + self.y_offset)
        f_center[:, :, 2] = voxel_features[:, :, 2] - (coords[:, 1].to(voxel_features.dtype).unsqueeze(1) * self.voxel_z + self.z_offset)

        if self.use_absolute_xyz:
            features = [voxel_features, f_cluster, f_center]
        else:
            features = [voxel_features[..., 3:], f_cluster, f_center]

        if self.with_distance:
            points_dist = torch.norm(voxel_features[:, :, :3], 2, 2, keepdim=True)
            features.append(points_dist)
        features = torch.cat(features, dim=-1)

        # Zero out padded slots so they cannot influence subsequent pooling.
        voxel_count = features.shape[1]
        mask = self.get_paddings_indicator(voxel_num_points, voxel_count, axis=0)
        mask = torch.unsqueeze(mask, -1).type_as(voxel_features)
        features *= mask

        for pfn in self.pfn_layers:
            features = pfn(features)
        # NOTE(review): squeeze() drops the pooled points dim, but would also
        # drop the voxel dim if there were exactly one voxel — presumably
        # upstream always yields more than one; confirm.
        features = features.squeeze()
        batch_dict['pillar_features'] = features
        return batch_dict
| 5,099 | 40.129032 | 137 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/backbones_3d/vfe/image_vfe.py | import torch
from .vfe_template import VFETemplate
from .image_vfe_modules import ffn, f2v
class ImageVFE(VFETemplate):
    """Image-based voxel feature encoder.

    Two stages: an FFN lifts image features into a depth-frustum volume, and
    an F2V module resamples that frustum into the LiDAR voxel grid.
    """

    def __init__(self, model_cfg, grid_size, point_cloud_range, depth_downsample_factor, **kwargs):
        super().__init__(model_cfg=model_cfg)
        self.grid_size = grid_size
        self.pc_range = point_cloud_range
        self.downsample_factor = depth_downsample_factor
        # Build order matters: build_ffn() publishes disc_cfg used by build_f2v().
        self.module_topology = ['ffn', 'f2v']
        self.build_modules()

    def build_modules(self):
        """Instantiate and register each sub-module named in module_topology."""
        for module_name in self.module_topology:
            build_fn = getattr(self, 'build_%s' % module_name)
            self.add_module(module_name, build_fn())

    def build_ffn(self):
        """
        Builds frustum feature network
        Returns:
            ffn_module: nn.Module, Frustum feature network
        """
        ffn_module = ffn.__all__[self.model_cfg.FFN.NAME](
            model_cfg=self.model_cfg.FFN,
            downsample_factor=self.downsample_factor
        )
        # Expose the depth discretization so build_f2v() can reuse it.
        self.disc_cfg = ffn_module.disc_cfg
        return ffn_module

    def build_f2v(self):
        """
        Builds frustum to voxel transformation
        Returns:
            f2v_module: nn.Module, Frustum to voxel transformation
        """
        return f2v.__all__[self.model_cfg.F2V.NAME](
            model_cfg=self.model_cfg.F2V,
            grid_size=self.grid_size,
            pc_range=self.pc_range,
            disc_cfg=self.disc_cfg
        )

    def get_output_feature_dim(self):
        """
        Gets number of output channels
        Returns:
            out_feature_dim: int, Number of output channels
        """
        return self.ffn.get_output_feature_dim()

    def forward(self, batch_dict, **kwargs):
        """
        Args:
            batch_dict:
                images: (N, 3, H_in, W_in), Input images
            **kwargs:
        Returns:
            batch_dict:
                voxel_features: (B, C, Z, Y, X), Image voxel features
        """
        # FFN fills 'frustum_features'; F2V converts them to 'voxel_features'.
        batch_dict = self.ffn(batch_dict)
        batch_dict = self.f2v(batch_dict)
        return batch_dict

    def get_loss(self):
        """
        Gets DDN loss
        Returns:
            loss: (1), Depth distribution network loss
            tb_dict: dict[float], All losses to log in tensorboard
        """
        return self.ffn.get_loss()
| 2,526 | 28.383721 | 99 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/depth_ffn.py | import torch.nn as nn
import torch.nn.functional as F
from . import ddn, ddn_loss
from pcdet.models.model_utils.basic_block_2d import BasicBlock2D
class DepthFFN(nn.Module):
    """Frustum feature network driven by categorical depth estimation.

    A depth distribution network (DDN) predicts per-pixel depth-bin logits;
    image features are weighted by the resulting depth probabilities to form
    a plane-sweep frustum volume.
    """

    def __init__(self, model_cfg, downsample_factor):
        """
        Initialize frustum feature network via depth distribution estimation
        Args:
            model_cfg: EasyDict, Depth classification network config
            downsample_factor: int, Depth map downsample factor
        """
        super().__init__()
        self.model_cfg = model_cfg
        self.disc_cfg = model_cfg.DISCRETIZE
        self.downsample_factor = downsample_factor

        # Sub-modules: depth distribution network, channel reduction, and the
        # depth loss (creation order kept stable for parameter registration).
        self.ddn = ddn.__all__[model_cfg.DDN.NAME](
            num_classes=self.disc_cfg["num_bins"] + 1,
            backbone_name=model_cfg.DDN.BACKBONE_NAME,
            **model_cfg.DDN.ARGS
        )
        self.channel_reduce = BasicBlock2D(**model_cfg.CHANNEL_REDUCE)
        self.ddn_loss = ddn_loss.__all__[model_cfg.LOSS.NAME](
            disc_cfg=self.disc_cfg,
            downsample_factor=downsample_factor,
            **model_cfg.LOSS.ARGS
        )
        self.forward_ret_dict = {}

    def get_output_feature_dim(self):
        return self.channel_reduce.out_channels

    def forward(self, batch_dict):
        """
        Predicts depths and creates image depth feature volume using depth distributions
        Args:
            batch_dict:
                images: (N, 3, H_in, W_in), Input images
        Returns:
            batch_dict:
                frustum_features: (N, C, D, H_out, W_out), Image depth features
        """
        # Pixel-wise depth classification.
        ddn_result = self.ddn(batch_dict["images"])
        image_features = ddn_result["features"]
        depth_logits = ddn_result["logits"]

        if self.channel_reduce is not None:
            image_features = self.channel_reduce(image_features)

        # Create the image feature plane-sweep volume.
        batch_dict["frustum_features"] = self.create_frustum_features(
            image_features=image_features, depth_logits=depth_logits)

        # Stash everything the depth loss needs during training.
        if self.training:
            self.forward_ret_dict["depth_maps"] = batch_dict["depth_maps"]
            self.forward_ret_dict["gt_boxes2d"] = batch_dict["gt_boxes2d"]
            self.forward_ret_dict["depth_logits"] = depth_logits
        return batch_dict

    def create_frustum_features(self, image_features, depth_logits):
        """
        Create image depth feature volume by multiplying image features with depth distributions
        Args:
            image_features: (N, C, H, W), Image features
            depth_logits: (N, D+1, H, W), Depth classification logits
        Returns:
            frustum_features: (N, C, D, H, W), Image features
        """
        # Insert singleton dims so features broadcast against depth bins:
        # (N, C, 1, H, W) * (N, 1, D, H, W).
        features = image_features.unsqueeze(2)
        logits = depth_logits.unsqueeze(1)

        # Softmax along the depth axis; the final bin means "beyond max range"
        # and is dropped.
        depth_probs = F.softmax(logits, dim=2)[:, :, :-1]
        return depth_probs * features

    def get_loss(self):
        """
        Gets DDN loss
        Returns:
            loss: (1), Depth distribution network loss
            tb_dict: dict[float], All losses to log in tensorboard
        """
        return self.ddn_loss(**self.forward_ret_dict)
| 3,778 | 35.336538 | 96 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn/ddn_deeplabv3.py | from .ddn_template import DDNTemplate
try:
import torchvision
except:
pass
class DDNDeepLabV3(DDNTemplate):
    """Depth distribution network built on torchvision's DeepLabV3."""

    def __init__(self, backbone_name, **kwargs):
        """
        Initializes DDNDeepLabV3 model
        Args:
            backbone_name: string, ResNet Backbone Name [ResNet50/ResNet101]
        """
        # torchvision is resolved here (not at import time) so the module can
        # still be imported when torchvision is unavailable.
        if backbone_name == "ResNet50":
            builder = torchvision.models.segmentation.deeplabv3_resnet50
        elif backbone_name == "ResNet101":
            builder = torchvision.models.segmentation.deeplabv3_resnet101
        else:
            raise NotImplementedError

        super().__init__(constructor=builder, **kwargs)
| 674 | 26 | 77 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn/ddn_template.py | from collections import OrderedDict
from pathlib import Path
from torch import hub
import torch
import torch.nn as nn
import torch.nn.functional as F
try:
from kornia.enhance.normalize import normalize
except:
pass
# print('Warning: kornia is not installed. This package is only required by CaDDN')
class DDNTemplate(nn.Module):
    """Base class for depth distribution networks (DDN).

    Wraps a torchvision segmentation model so that, in addition to the depth
    classification logits, the raw activations of a chosen backbone layer are
    returned for downstream feature extraction.
    """

    def __init__(self, constructor, feat_extract_layer, num_classes, pretrained_path=None, aux_loss=None):
        """
        Initializes depth distribution network.
        Args:
            constructor: function, Model constructor
            feat_extract_layer: string, Layer to extract features from
            num_classes: int, Number of classes
            pretrained_path: string, (Optional) Path of the model to load weights from
            aux_loss: bool, Flag to include auxillary loss
        """
        super().__init__()
        self.num_classes = num_classes
        self.pretrained_path = pretrained_path
        self.pretrained = pretrained_path is not None
        self.aux_loss = aux_loss

        if self.pretrained:
            # ImageNet statistics used by torchvision's pretrained backbones;
            # applied in preprocess().
            self.norm_mean = torch.Tensor([0.485, 0.456, 0.406])
            self.norm_std = torch.Tensor([0.229, 0.224, 0.225])

        # Model
        self.model = self.get_model(constructor=constructor)
        self.feat_extract_layer = feat_extract_layer
        # Ask the backbone's layer getter to also return the chosen layer's
        # activations under the key 'features' (on top of its default outputs).
        self.model.backbone.return_layers = {
            feat_extract_layer: 'features',
            **self.model.backbone.return_layers
        }

    def get_model(self, constructor):
        """
        Get model
        Args:
            constructor: function, Model constructor
        Returns:
            model: nn.Module, Model
        """
        # Get model
        model = constructor(pretrained=False,
                            pretrained_backbone=False,
                            num_classes=self.num_classes,
                            aux_loss=self.aux_loss)

        # Update weights
        if self.pretrained_path is not None:
            model_dict = model.state_dict()

            # Download pretrained model if not available yet
            checkpoint_path = Path(self.pretrained_path)
            if not checkpoint_path.exists():
                checkpoint = checkpoint_path.name
                save_dir = checkpoint_path.parent
                # exist_ok=True: the directory may already exist (e.g. from an
                # earlier interrupted download) even though the file does not;
                # without it mkdir raises FileExistsError.
                save_dir.mkdir(parents=True, exist_ok=True)
                url = f'https://download.pytorch.org/models/{checkpoint}'
                hub.load_state_dict_from_url(url, save_dir)

            # Get pretrained state dict
            pretrained_dict = torch.load(self.pretrained_path)
            pretrained_dict = self.filter_pretrained_dict(model_dict=model_dict,
                                                          pretrained_dict=pretrained_dict)

            # Update current model state dict
            model_dict.update(pretrained_dict)
            model.load_state_dict(model_dict)

        return model

    def filter_pretrained_dict(self, model_dict, pretrained_dict):
        """
        Removes layers from pretrained state dict that are not used or changed in model
        Args:
            model_dict: dict, Default model state dictionary
            pretrained_dict: dict, Pretrained model state dictionary
        Returns:
            pretrained_dict: dict, Pretrained model state dictionary with removed weights
        """
        # Removes aux classifier weights if not used
        if "aux_classifier.0.weight" in pretrained_dict and "aux_classifier.0.weight" not in model_dict:
            pretrained_dict = {key: value for key, value in pretrained_dict.items()
                               if "aux_classifier" not in key}

        # Removes final conv layer from weights if number of classes are different
        model_num_classes = model_dict["classifier.4.weight"].shape[0]
        pretrained_num_classes = pretrained_dict["classifier.4.weight"].shape[0]
        if model_num_classes != pretrained_num_classes:
            pretrained_dict.pop("classifier.4.weight")
            pretrained_dict.pop("classifier.4.bias")

        return pretrained_dict

    def forward(self, images):
        """
        Forward pass
        Args:
            images: (N, 3, H_in, W_in), Input images
        Returns
            result: dict[torch.Tensor], Depth distribution result
                features: (N, C, H_out, W_out), Image features
                logits: (N, num_classes, H_out, W_out), Classification logits
                aux: (N, num_classes, H_out, W_out), Auxillary classification logits
        """
        # Preprocess images
        x = self.preprocess(images)

        # Extract features
        result = OrderedDict()
        features = self.model.backbone(x)
        result['features'] = features['features']
        feat_shape = features['features'].shape[-2:]

        # Prediction classification logits; upsampled back to feature size.
        x = features["out"]
        x = self.model.classifier(x)
        x = F.interpolate(x, size=feat_shape, mode='bilinear', align_corners=False)
        result["logits"] = x

        # Prediction auxillary classification logits
        if self.model.aux_classifier is not None:
            x = features["aux"]
            x = self.model.aux_classifier(x)
            x = F.interpolate(x, size=feat_shape, mode='bilinear', align_corners=False)
            result["aux"] = x

        return result

    def preprocess(self, images):
        """
        Preprocess images
        Args:
            images: (N, 3, H, W), Input images
        Return
            x: (N, 3, H, W), Preprocessed images
        """
        x = images
        if self.pretrained:
            # Create a mask for padded pixels (NaN marks padding upstream).
            mask = torch.isnan(x)

            # Match ResNet pretrained preprocessing
            x = normalize(x, mean=self.norm_mean, std=self.norm_std)

            # Make padded pixels = 0
            x[mask] = 0

        return x
| 5,941 | 35.453988 | 106 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn_loss/balancer.py | import torch
import torch.nn as nn
from pcdet.utils import loss_utils
class Balancer(nn.Module):
    """Reweights a pixel-wise loss with fixed foreground/background weights."""

    def __init__(self, fg_weight, bg_weight, downsample_factor=1):
        """
        Initialize fixed foreground/background loss balancer
        Args:
            fg_weight: float, Foreground loss weight
            bg_weight: float, Background loss weight
            downsample_factor: int, Depth map downsample factor
        """
        super().__init__()
        self.fg_weight = fg_weight
        self.bg_weight = bg_weight
        self.downsample_factor = downsample_factor

    def forward(self, loss, gt_boxes2d):
        """
        Forward pass
        Args:
            loss: (B, H, W), Pixel-wise loss
            gt_boxes2d: (B, N, 4), 2D box labels for foreground/background balancing
        Returns:
            loss: (1), Total loss after foreground/background balancing
            tb_dict: dict[float], All losses to log in tensorboard
        """
        # Foreground = pixels inside any (downsampled) 2D box.
        fg_mask = loss_utils.compute_fg_mask(gt_boxes2d=gt_boxes2d,
                                             shape=loss.shape,
                                             downsample_factor=self.downsample_factor,
                                             device=loss.device)
        bg_mask = ~fg_mask

        # Per-pixel weights and total pixel count used as the normalizer.
        pixel_weights = self.fg_weight * fg_mask + self.bg_weight * bg_mask
        total_pixels = fg_mask.sum() + bg_mask.sum()

        # Weighted per-region sums, normalized by the full pixel count.
        loss *= pixel_weights
        fg_loss = loss[fg_mask].sum() / total_pixels
        bg_loss = loss[bg_mask].sum() / total_pixels

        balanced = fg_loss + bg_loss
        tb_dict = {"balancer_loss": balanced.item(), "fg_loss": fg_loss.item(), "bg_loss": bg_loss.item()}
        return balanced, tb_dict
| 1,806 | 34.431373 | 102 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn_loss/ddn_loss.py | import torch
import torch.nn as nn
from .balancer import Balancer
from pcdet.utils import transform_utils
try:
from kornia.losses.focal import FocalLoss
except:
pass
# print('Warning: kornia is not installed. This package is only required by CaDDN')
class DDNLoss(nn.Module):
    """Focal-loss objective for the depth distribution network with fixed
    foreground/background balancing."""

    def __init__(self,
                 weight,
                 alpha,
                 gamma,
                 disc_cfg,
                 fg_weight,
                 bg_weight,
                 downsample_factor):
        """
        Initializes DDNLoss module
        Args:
            weight: float, Loss function weight
            alpha: float, Alpha value for Focal Loss
            gamma: float, Gamma value for Focal Loss
            disc_cfg: dict, Depth discretiziation configuration
            fg_weight: float, Foreground loss weight
            bg_weight: float, Background loss weight
            downsample_factor: int, Depth map downsample factor
        """
        super().__init__()
        self.device = torch.cuda.current_device()
        self.disc_cfg = disc_cfg
        self.balancer = Balancer(downsample_factor=downsample_factor,
                                 fg_weight=fg_weight,
                                 bg_weight=bg_weight)

        # Per-pixel focal loss; reduction is deferred to the balancer.
        self.alpha = alpha
        self.gamma = gamma
        self.loss_func = FocalLoss(alpha=self.alpha, gamma=self.gamma, reduction="none")
        self.weight = weight

    def forward(self, depth_logits, depth_maps, gt_boxes2d):
        """
        Gets DDN loss
        Args:
            depth_logits: (B, D+1, H, W), Predicted depth logits
            depth_maps: (B, H, W), Depth map [m]
            gt_boxes2d: torch.Tensor (B, N, 4), 2D box labels for foreground/background balancing
        Returns:
            loss: (1), Depth distribution network loss
            tb_dict: dict[float], All losses to log in tensorboard
        """
        # Discretize continuous depths into bin indices to form the target.
        depth_target = transform_utils.bin_depths(depth_maps, **self.disc_cfg, target=True)

        # Per-pixel focal loss, then foreground/background reweighting.
        pixel_loss = self.loss_func(depth_logits, depth_target)
        loss, tb_dict = self.balancer(loss=pixel_loss, gt_boxes2d=gt_boxes2d)

        # Final loss
        loss *= self.weight
        tb_dict.update({"ddn_loss": loss.item()})
        return loss, tb_dict
| 2,428 | 30.960526 | 97 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/backbones_3d/vfe/image_vfe_modules/f2v/frustum_to_voxel.py | import torch
import torch.nn as nn
from .frustum_grid_generator import FrustumGridGenerator
from .sampler import Sampler
class FrustumToVoxel(nn.Module):
    """Resamples frustum (image-depth) features into the LiDAR voxel grid."""

    def __init__(self, model_cfg, grid_size, pc_range, disc_cfg):
        """
        Initializes module to transform frustum features to voxel features via 3D transformation and sampling
        Args:
            model_cfg: EasyDict, Module configuration
            grid_size: [X, Y, Z], Voxel grid size
            pc_range: [x_min, y_min, z_min, x_max, y_max, z_max], Voxelization point cloud range (m)
            disc_cfg: EasyDict, Depth discretiziation configuration
        """
        super().__init__()
        self.model_cfg = model_cfg
        self.grid_size = grid_size
        self.pc_range = pc_range
        self.disc_cfg = disc_cfg
        # Computes, for every voxel, where to sample in the frustum volume.
        self.grid_generator = FrustumGridGenerator(grid_size=grid_size,
                                                   pc_range=pc_range,
                                                   disc_cfg=disc_cfg)
        self.sampler = Sampler(**model_cfg.SAMPLER)

    def forward(self, batch_dict):
        """
        Generates voxel features via 3D transformation and sampling
        Args:
            batch_dict:
                frustum_features: (B, C, D, H_image, W_image), Image frustum features
                lidar_to_cam: (B, 4, 4), LiDAR to camera frame transformation
                cam_to_img: (B, 3, 4), Camera projection matrix
                image_shape: (B, 2), Image shape [H, W]
        Returns:
            batch_dict:
                voxel_features: (B, C, Z, Y, X), Image voxel features
        """
        # Sampling grid for the frustum volume.
        sampling_grid = self.grid_generator(lidar_to_cam=batch_dict["trans_lidar_to_cam"],
                                            cam_to_img=batch_dict["trans_cam_to_img"],
                                            image_shape=batch_dict["image_shape"])  # (B, X, Y, Z, 3)

        # Sample the frustum volume to obtain voxel-aligned features.
        voxel_features = self.sampler(input_features=batch_dict["frustum_features"],
                                      grid=sampling_grid)  # (B, C, X, Y, Z)

        # Reorder to the (B, C, Z, Y, X) layout expected downstream.
        batch_dict["voxel_features"] = voxel_features.permute(0, 1, 4, 3, 2)
        return batch_dict
| 2,338 | 41.527273 | 109 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/backbones_3d/vfe/image_vfe_modules/f2v/frustum_grid_generator.py | import torch
import torch.nn as nn
try:
from kornia.utils.grid import create_meshgrid3d
from kornia.geometry.linalg import transform_points
except Exception as e:
# Note: Kornia team will fix this import issue to try to allow the usage of lower torch versions.
# print('Warning: kornia is not installed correctly, please ignore this warning if you do not use CaDDN. Otherwise, it is recommended to use torch version greater than 1.2 to use kornia properly.')
pass
from pcdet.utils import transform_utils
class FrustumGridGenerator(nn.Module):
    """Builds, for each voxel center, normalized (u, v, depth-bin) sampling
    coordinates into the camera frustum volume."""

    def __init__(self, grid_size, pc_range, disc_cfg):
        """
        Initializes Grid Generator for frustum features
        Args:
            grid_size: [X, Y, Z], Voxel grid size
            pc_range: [x_min, y_min, z_min, x_max, y_max, z_max], Voxelization point cloud range (m)
            disc_cfg: EasyDict, Depth discretiziation configuration
        """
        super().__init__()
        try:
            import kornia
        except Exception as e:
            # Note: Kornia team will fix this import issue to try to allow the usage of lower torch versions.
            print('Error: kornia is not installed correctly, please ignore this warning if you do not use CaDDN. '
                  'Otherwise, it is recommended to use torch version greater than 1.2 to use kornia properly.')
            exit(-1)

        self.dtype = torch.float32
        self.grid_size = torch.as_tensor(grid_size, dtype=self.dtype)
        self.pc_range = pc_range
        # grid_sample coordinate assigned to invalid samples; any value
        # outside [-1, 1] falls into the sampler's padding region.
        self.out_of_bounds_val = -2
        self.disc_cfg = disc_cfg

        # Calculate voxel size
        pc_range = torch.as_tensor(pc_range).reshape(2, 3)
        self.pc_min = pc_range[0]
        self.pc_max = pc_range[1]
        self.voxel_size = (self.pc_max - self.pc_min) / self.grid_size

        # Create voxel grid
        self.depth, self.width, self.height = self.grid_size.int()
        self.voxel_grid = create_meshgrid3d(depth=self.depth,
                                            height=self.height,
                                            width=self.width,
                                            normalized_coordinates=False)

        self.voxel_grid = self.voxel_grid.permute(0, 1, 3, 2, 4)  # XZY-> XYZ

        # Add offsets to center of voxel
        self.voxel_grid += 0.5
        self.grid_to_lidar = self.grid_to_lidar_unproject(pc_min=self.pc_min,
                                                          voxel_size=self.voxel_size)

    def grid_to_lidar_unproject(self, pc_min, voxel_size):
        """
        Calculate grid to LiDAR unprojection for each plane
        Args:
            pc_min: [x_min, y_min, z_min], Minimum of point cloud range (m)
            voxel_size: [x, y, z], Size of each voxel (m)
        Returns:
            unproject: (4, 4), Voxel grid to LiDAR unprojection matrix
        """
        # Homogeneous scale + translate: voxel index -> metric LiDAR coords.
        x_size, y_size, z_size = voxel_size
        x_min, y_min, z_min = pc_min
        unproject = torch.tensor([[x_size, 0, 0, x_min],
                                  [0, y_size, 0, y_min],
                                  [0, 0, z_size, z_min],
                                  [0, 0, 0, 1]],
                                 dtype=self.dtype)  # (4, 4)

        return unproject

    def transform_grid(self, voxel_grid, grid_to_lidar, lidar_to_cam, cam_to_img):
        """
        Transforms voxel sampling grid into frustum sampling grid
        Args:
            grid: (B, X, Y, Z, 3), Voxel sampling grid
            grid_to_lidar: (4, 4), Voxel grid to LiDAR unprojection matrix
            lidar_to_cam: (B, 4, 4), LiDAR to camera frame transformation
            cam_to_img: (B, 3, 4), Camera projection matrix
        Returns:
            frustum_grid: (B, X, Y, Z, 3), Frustum sampling grid
        """
        B = lidar_to_cam.shape[0]

        # Create transformation matricies
        V_G = grid_to_lidar  # Voxel Grid -> LiDAR (4, 4)
        C_V = lidar_to_cam  # LiDAR -> Camera (B, 4, 4)
        I_C = cam_to_img  # Camera -> Image (B, 3, 4)
        trans = C_V @ V_G

        # Reshape to match dimensions
        trans = trans.reshape(B, 1, 1, 4, 4)
        voxel_grid = voxel_grid.repeat_interleave(repeats=B, dim=0)

        # Transform to camera frame
        camera_grid = transform_points(trans_01=trans, points_1=voxel_grid)

        # Project to image
        I_C = I_C.reshape(B, 1, 1, 3, 4)
        image_grid, image_depths = transform_utils.project_to_image(project=I_C, points=camera_grid)

        # Convert depths to depth bins
        image_depths = transform_utils.bin_depths(depth_map=image_depths, **self.disc_cfg)

        # Stack to form frustum grid
        image_depths = image_depths.unsqueeze(-1)
        frustum_grid = torch.cat((image_grid, image_depths), dim=-1)
        return frustum_grid

    def forward(self, lidar_to_cam, cam_to_img, image_shape):
        """
        Generates sampling grid for frustum features
        Args:
            lidar_to_cam: (B, 4, 4), LiDAR to camera frame transformation
            cam_to_img: (B, 3, 4), Camera projection matrix
            image_shape: (B, 2), Image shape [H, W]
        Returns:
            frustum_grid (B, X, Y, Z, 3), Sampling grids for frustum features
        """
        frustum_grid = self.transform_grid(voxel_grid=self.voxel_grid.to(lidar_to_cam.device),
                                           grid_to_lidar=self.grid_to_lidar.to(lidar_to_cam.device),
                                           lidar_to_cam=lidar_to_cam,
                                           cam_to_img=cam_to_img)

        # Normalize grid to [-1, 1] for grid_sample, using the largest image
        # size in the batch plus the depth-bin count as the frustum extent.
        image_shape, _ = torch.max(image_shape, dim=0)
        image_depth = torch.tensor([self.disc_cfg["num_bins"]],
                                   device=image_shape.device,
                                   dtype=image_shape.dtype)
        frustum_shape = torch.cat((image_depth, image_shape))
        frustum_grid = transform_utils.normalize_coords(coords=frustum_grid, shape=frustum_shape)

        # Replace any NaNs or infinites with out of bounds
        mask = ~torch.isfinite(frustum_grid)
        frustum_grid[mask] = self.out_of_bounds_val

        return frustum_grid
| 6,249 | 41.808219 | 201 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/backbones_3d/vfe/image_vfe_modules/f2v/sampler.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class Sampler(nn.Module):
    """Thin, configurable wrapper around F.grid_sample for 3D resampling."""

    def __init__(self, mode="bilinear", padding_mode="zeros"):
        """
        Initializes module
        Args:
            mode: string, Sampling mode [bilinear/nearest]
            padding_mode: string, Padding mode for outside grid values [zeros/border/reflection]
        """
        super().__init__()
        self.mode = mode
        self.padding_mode = padding_mode

    def forward(self, input_features, grid):
        """
        Samples input using sampling grid
        Args:
            input_features: (B, C, D, H, W), Input frustum features
            grid: (B, X, Y, Z, 3), Sampling grids for input features
        Returns
            output_features: (B, C, X, Y, Z) Output voxel features
        """
        # Grid coordinates are expected in grid_sample's normalized [-1, 1]
        # convention; out-of-range samples are handled per padding_mode.
        return F.grid_sample(input=input_features, grid=grid,
                             mode=self.mode, padding_mode=self.padding_mode)
| 980 | 30.645161 | 111 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/dense_heads/anchor_head_single.py | import numpy as np
import torch.nn as nn
from .anchor_head_template import AnchorHeadTemplate
class AnchorHeadSingle(AnchorHeadTemplate):
    """Single-scale dense anchor head.

    1x1 convolutions over the BEV feature map predict per-anchor class
    scores, box residuals and (optionally) direction bins.
    """

    def __init__(self, model_cfg, input_channels, num_class, class_names, grid_size, point_cloud_range,
                 predict_boxes_when_training=True, **kwargs):
        super().__init__(
            model_cfg=model_cfg, num_class=num_class, class_names=class_names, grid_size=grid_size,
            point_cloud_range=point_cloud_range,
            predict_boxes_when_training=predict_boxes_when_training
        )

        # Collapse the per-location anchor counts into a single total.
        self.num_anchors_per_location = sum(self.num_anchors_per_location)

        # 1x1 conv heads; creation order kept stable (cls, box, dir).
        self.conv_cls = nn.Conv2d(
            input_channels, self.num_anchors_per_location * self.num_class,
            kernel_size=1
        )
        self.conv_box = nn.Conv2d(
            input_channels, self.num_anchors_per_location * self.box_coder.code_size,
            kernel_size=1
        )

        if self.model_cfg.get('USE_DIRECTION_CLASSIFIER', None) is not None:
            self.conv_dir_cls = nn.Conv2d(
                input_channels,
                self.num_anchors_per_location * self.model_cfg.NUM_DIR_BINS,
                kernel_size=1
            )
        else:
            self.conv_dir_cls = None
        self.init_weights()

    def init_weights(self):
        # Focal-loss style prior: bias the cls head so the initial foreground
        # probability is ~pi; small-std init for the regression head.
        pi = 0.01
        nn.init.constant_(self.conv_cls.bias, -np.log((1 - pi) / pi))
        nn.init.normal_(self.conv_box.weight, mean=0, std=0.001)

    def forward(self, data_dict):
        feats_2d = data_dict['spatial_features_2d']

        # Dense predictions, moved to channels-last layout.
        cls_preds = self.conv_cls(feats_2d).permute(0, 2, 3, 1).contiguous()  # [N, H, W, C]
        box_preds = self.conv_box(feats_2d).permute(0, 2, 3, 1).contiguous()  # [N, H, W, C]

        self.forward_ret_dict['cls_preds'] = cls_preds
        self.forward_ret_dict['box_preds'] = box_preds

        if self.conv_dir_cls is None:
            dir_cls_preds = None
        else:
            dir_cls_preds = self.conv_dir_cls(feats_2d).permute(0, 2, 3, 1).contiguous()
            self.forward_ret_dict['dir_cls_preds'] = dir_cls_preds

        if self.training:
            # Anchor/GT matching provides the training targets.
            self.forward_ret_dict.update(self.assign_targets(gt_boxes=data_dict['gt_boxes']))

        if not self.training or self.predict_boxes_when_training:
            batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
                batch_size=data_dict['batch_size'],
                cls_preds=cls_preds, box_preds=box_preds, dir_cls_preds=dir_cls_preds
            )
            data_dict['rpn_preds'] = cls_preds
            data_dict['batch_cls_preds'] = batch_cls_preds
            data_dict['batch_box_preds'] = batch_box_preds
            data_dict['cls_preds_normalized'] = False

        return data_dict
| 2,975 | 37.649351 | 136 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/dense_heads/point_head_template.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from ...utils import common_utils, loss_utils
class PointHeadTemplate(nn.Module):
    """
    Base class for point-wise prediction heads (per-point classification,
    box regression and intra-object part location). Provides loss registration,
    per-point target assignment and box decoding; subclasses implement forward().
    """
    def __init__(self, model_cfg, num_class):
        super().__init__()
        self.model_cfg = model_cfg
        self.num_class = num_class
        self.build_losses(self.model_cfg.LOSS_CONFIG)
        # Populated by the subclass forward(); consumed by the get_*_loss() methods.
        self.forward_ret_dict = None
    def build_losses(self, losses_cfg):
        """Register the classification / regression loss functions selected by the config."""
        self.add_module(
            'cls_loss_func',
            loss_utils.SigmoidFocalClassificationLoss(alpha=0.25, gamma=2.0)
        )
        reg_loss_type = losses_cfg.get('LOSS_REG', None)
        if reg_loss_type == 'smooth-l1':
            self.reg_loss_func = F.smooth_l1_loss
        elif reg_loss_type == 'l1':
            self.reg_loss_func = F.l1_loss
        elif reg_loss_type == 'WeightedSmoothL1Loss':
            self.reg_loss_func = loss_utils.WeightedSmoothL1Loss(
                code_weights=losses_cfg.LOSS_WEIGHTS.get('code_weights', None)
            )
        else:
            # Fallback when LOSS_REG is unset or unrecognized.
            self.reg_loss_func = F.smooth_l1_loss
    @staticmethod
    def make_fc_layers(fc_cfg, input_channels, output_channels):
        """Build an MLP of [Linear -> BN -> ReLU] blocks (one per fc_cfg entry)
        followed by a final Linear projecting to output_channels."""
        fc_layers = []
        c_in = input_channels
        for k in range(0, fc_cfg.__len__()):
            fc_layers.extend([
                nn.Linear(c_in, fc_cfg[k], bias=False),
                nn.BatchNorm1d(fc_cfg[k]),
                nn.ReLU(),
            ])
            c_in = fc_cfg[k]
        fc_layers.append(nn.Linear(c_in, output_channels, bias=True))
        return nn.Sequential(*fc_layers)
    def assign_stack_targets(self, points, gt_boxes, extend_gt_boxes=None,
                             ret_box_labels=False, ret_part_labels=False,
                             set_ignore_flag=True, use_ball_constraint=False, central_radius=2.0):
        """
        Assign per-point labels (and optional box / part targets) from gt boxes.

        Args:
            points: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
            gt_boxes: (B, M, 8)
            extend_gt_boxes: [B, M, 8] enlarged boxes; required when set_ignore_flag
            ret_box_labels: also return encoded box regression targets
            ret_part_labels: also return normalized intra-object part locations
            set_ignore_flag: mark points in (extended - original) boxes as ignored
            use_ball_constraint: keep only fg points within central_radius of box center
            central_radius: radius (m) for the ball constraint
        Returns:
            point_cls_labels: (N1 + N2 + N3 + ...), long type, 0:background, -1:ignored
            point_box_labels: (N1 + N2 + N3 + ..., code_size)
        """
        assert len(points.shape) == 2 and points.shape[1] == 4, 'points.shape=%s' % str(points.shape)
        assert len(gt_boxes.shape) == 3 and gt_boxes.shape[2] == 8, 'gt_boxes.shape=%s' % str(gt_boxes.shape)
        assert extend_gt_boxes is None or len(extend_gt_boxes.shape) == 3 and extend_gt_boxes.shape[2] == 8, \
            'extend_gt_boxes.shape=%s' % str(extend_gt_boxes.shape)
        assert set_ignore_flag != use_ball_constraint, 'Choose one only!'
        batch_size = gt_boxes.shape[0]
        bs_idx = points[:, 0]
        point_cls_labels = points.new_zeros(points.shape[0]).long()
        point_box_labels = gt_boxes.new_zeros((points.shape[0], 8)) if ret_box_labels else None
        point_part_labels = gt_boxes.new_zeros((points.shape[0], 3)) if ret_part_labels else None
        for k in range(batch_size):
            # Process one sample of the stacked point cloud at a time.
            bs_mask = (bs_idx == k)
            points_single = points[bs_mask][:, 1:4]
            point_cls_labels_single = point_cls_labels.new_zeros(bs_mask.sum())
            # Index of the gt box containing each point (-1 when in no box).
            box_idxs_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu(
                points_single.unsqueeze(dim=0), gt_boxes[k:k + 1, :, 0:7].contiguous()
            ).long().squeeze(dim=0)
            box_fg_flag = (box_idxs_of_pts >= 0)
            if set_ignore_flag:
                # Points inside the enlarged boxes but outside the originals get label -1.
                extend_box_idxs_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu(
                    points_single.unsqueeze(dim=0), extend_gt_boxes[k:k+1, :, 0:7].contiguous()
                ).long().squeeze(dim=0)
                fg_flag = box_fg_flag
                ignore_flag = fg_flag ^ (extend_box_idxs_of_pts >= 0)
                point_cls_labels_single[ignore_flag] = -1
            elif use_ball_constraint:
                # Keep only foreground points close to the (z-shifted) box center.
                box_centers = gt_boxes[k][box_idxs_of_pts][:, 0:3].clone()
                box_centers[:, 2] += gt_boxes[k][box_idxs_of_pts][:, 5] / 2
                ball_flag = ((box_centers - points_single).norm(dim=1) < central_radius)
                fg_flag = box_fg_flag & ball_flag
            else:
                raise NotImplementedError
            gt_box_of_fg_points = gt_boxes[k][box_idxs_of_pts[fg_flag]]
            # Class-agnostic heads use label 1; otherwise the gt class id (last column).
            point_cls_labels_single[fg_flag] = 1 if self.num_class == 1 else gt_box_of_fg_points[:, -1].long()
            point_cls_labels[bs_mask] = point_cls_labels_single
            if ret_box_labels and gt_box_of_fg_points.shape[0] > 0:
                # Encode regression residuals for foreground points only.
                point_box_labels_single = point_box_labels.new_zeros((bs_mask.sum(), 8))
                fg_point_box_labels = self.box_coder.encode_torch(
                    gt_boxes=gt_box_of_fg_points[:, :-1], points=points_single[fg_flag],
                    gt_classes=gt_box_of_fg_points[:, -1].long()
                )
                point_box_labels_single[fg_flag] = fg_point_box_labels
                point_box_labels[bs_mask] = point_box_labels_single
            if ret_part_labels:
                # Intra-object part location in the canonical box frame, shifted to [0, 1].
                point_part_labels_single = point_part_labels.new_zeros((bs_mask.sum(), 3))
                transformed_points = points_single[fg_flag] - gt_box_of_fg_points[:, 0:3]
                transformed_points = common_utils.rotate_points_along_z(
                    transformed_points.view(-1, 1, 3), -gt_box_of_fg_points[:, 6]
                ).view(-1, 3)
                offset = torch.tensor([0.5, 0.5, 0.5]).view(1, 3).type_as(transformed_points)
                point_part_labels_single[fg_flag] = (transformed_points / gt_box_of_fg_points[:, 3:6]) + offset
                point_part_labels[bs_mask] = point_part_labels_single
        targets_dict = {
            'point_cls_labels': point_cls_labels,
            'point_box_labels': point_box_labels,
            'point_part_labels': point_part_labels
        }
        return targets_dict
    def get_cls_layer_loss(self, tb_dict=None, reduce=True):
        """Focal classification loss over all points.

        With reduce=False a per-sample loss is returned; this reshape assumes a
        fixed NUM_KEYPOINTS points per sample — TODO confirm against the sampler.
        """
        point_cls_labels = self.forward_ret_dict['point_cls_labels'].view(-1)
        point_cls_preds = self.forward_ret_dict['point_cls_preds'].view(-1, self.num_class)
        positives = (point_cls_labels > 0)
        negative_cls_weights = (point_cls_labels == 0) * 1.0
        cls_weights = (negative_cls_weights + 1.0 * positives).float()
        # Normalize by the number of foreground points (ignored points get weight 0).
        pos_normalizer = positives.sum(dim=0).float()
        cls_weights /= torch.clamp(pos_normalizer, min=1.0)
        one_hot_targets = point_cls_preds.new_zeros(*list(point_cls_labels.shape), self.num_class + 1)
        # Clamp ignored labels (-1) to 0 before scatter; background column is dropped below.
        one_hot_targets.scatter_(-1, (point_cls_labels * (point_cls_labels >= 0).long()).unsqueeze(dim=-1).long(), 1.0)
        one_hot_targets = one_hot_targets[..., 1:]
        cls_loss_src = self.cls_loss_func(point_cls_preds, one_hot_targets, weights=cls_weights)
        if reduce:
            point_loss_cls = cls_loss_src.sum()
        else:
            point_loss_cls = cls_loss_src.view(-1, self.model_cfg.NUM_KEYPOINTS).sum(-1)
        loss_weights_dict = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS
        point_loss_cls = point_loss_cls * loss_weights_dict['point_cls_weight']
        if tb_dict is None:
            tb_dict = {}
        tb_dict.update({
            'point_loss_cls': point_loss_cls.item() if reduce else point_loss_cls[0].item(),
            'point_pos_num': pos_normalizer.item()
        })
        return point_loss_cls, tb_dict
    def get_part_layer_loss(self, tb_dict=None):
        """BCE loss on predicted intra-object part locations, foreground points only."""
        pos_mask = self.forward_ret_dict['point_cls_labels'] > 0
        pos_normalizer = max(1, (pos_mask > 0).sum().item())
        point_part_labels = self.forward_ret_dict['point_part_labels']
        point_part_preds = self.forward_ret_dict['point_part_preds']
        point_loss_part = F.binary_cross_entropy(torch.sigmoid(point_part_preds), point_part_labels, reduction='none')
        # Average over the 3 part coordinates and the number of foreground points.
        point_loss_part = (point_loss_part.sum(dim=-1) * pos_mask.float()).sum() / (3 * pos_normalizer)
        loss_weights_dict = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS
        point_loss_part = point_loss_part * loss_weights_dict['point_part_weight']
        if tb_dict is None:
            tb_dict = {}
        tb_dict.update({'point_loss_part': point_loss_part.item()})
        return point_loss_part, tb_dict
    def get_box_layer_loss(self, tb_dict=None):
        """Regression loss on encoded box residuals, foreground points only.

        NOTE(review): this passes a ``weights`` kwarg to reg_loss_func, which only
        WeightedSmoothL1Loss accepts — the functional fallbacks in build_losses do
        not; confirm the config always selects a compatible LOSS_REG.
        """
        pos_mask = self.forward_ret_dict['point_cls_labels'] > 0
        point_box_labels = self.forward_ret_dict['point_box_labels']
        point_box_preds = self.forward_ret_dict['point_box_preds']
        reg_weights = pos_mask.float()
        pos_normalizer = pos_mask.sum().float()
        reg_weights /= torch.clamp(pos_normalizer, min=1.0)
        point_loss_box_src = self.reg_loss_func(
            point_box_preds[None, ...], point_box_labels[None, ...], weights=reg_weights[None, ...]
        )
        point_loss_box = point_loss_box_src.sum()
        loss_weights_dict = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS
        point_loss_box = point_loss_box * loss_weights_dict['point_box_weight']
        if tb_dict is None:
            tb_dict = {}
        tb_dict.update({'point_loss_box': point_loss_box.item()})
        return point_loss_box, tb_dict
    def generate_predicted_boxes(self, points, point_cls_preds, point_box_preds):
        """
        Args:
            points: (N, 3)
            point_cls_preds: (N, num_class)
            point_box_preds: (N, box_code_size)
        Returns:
            point_cls_preds: (N, num_class)
            point_box_preds: (N, box_code_size)
        """
        _, pred_classes = point_cls_preds.max(dim=-1)
        # Decode residuals into absolute boxes using the argmax class (1-indexed).
        point_box_preds = self.box_coder.decode_torch(point_box_preds, points, pred_classes + 1)
        return point_cls_preds, point_box_preds
    def forward(self, **kwargs):
        # Subclasses must implement the actual head computation.
        raise NotImplementedError
| 9,955 | 45.523364 | 119 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/dense_heads/anchor_head_template.py | import numpy as np
import torch
import torch.nn as nn
from ...utils import box_coder_utils, common_utils, loss_utils
from .target_assigner.anchor_generator import AnchorGenerator
from .target_assigner.atss_target_assigner import ATSSTargetAssigner
from .target_assigner.axis_aligned_target_assigner import AxisAlignedTargetAssigner
class AnchorHeadTemplate(nn.Module):
    """
    Base class for anchor-based dense (RPN) heads.

    Responsibilities:
        * generate per-class anchors over the BEV feature map,
        * assign classification / regression targets to anchors,
        * compute classification, box-regression and direction losses,
        * decode network outputs into absolute boxes.

    Subclasses implement forward() and fill self.forward_ret_dict.
    """
    def __init__(self, model_cfg, num_class, class_names, grid_size, point_cloud_range, predict_boxes_when_training):
        super().__init__()
        self.model_cfg = model_cfg
        self.num_class = num_class
        self.class_names = class_names
        self.predict_boxes_when_training = predict_boxes_when_training
        self.use_multihead = self.model_cfg.get('USE_MULTIHEAD', False)
        anchor_target_cfg = self.model_cfg.TARGET_ASSIGNER_CONFIG
        # Box coder is selected by name from box_coder_utils.
        self.box_coder = getattr(box_coder_utils, anchor_target_cfg.BOX_CODER)(
            num_dir_bins=anchor_target_cfg.get('NUM_DIR_BINS', 6),
            **anchor_target_cfg.get('BOX_CODER_CONFIG', {})
        )
        anchor_generator_cfg = self.model_cfg.ANCHOR_GENERATOR_CONFIG
        anchors, self.num_anchors_per_location = self.generate_anchors(
            anchor_generator_cfg, grid_size=grid_size, point_cloud_range=point_cloud_range,
            anchor_ndim=self.box_coder.code_size
        )
        self.anchors = [x.cuda() for x in anchors]
        self.target_assigner = self.get_target_assigner(anchor_target_cfg)
        # Populated by the subclass forward(); consumed by the loss methods below.
        self.forward_ret_dict = {}
        self.build_losses(self.model_cfg.LOSS_CONFIG)

    @staticmethod
    def generate_anchors(anchor_generator_cfg, grid_size, point_cloud_range, anchor_ndim=7):
        """Create anchors for every class config; zero-pad anchor vectors when the
        box code is longer than the plain 7-DoF box (x, y, z, dx, dy, dz, heading)."""
        anchor_generator = AnchorGenerator(
            anchor_range=point_cloud_range,
            anchor_generator_config=anchor_generator_cfg
        )
        feature_map_size = [grid_size[:2] // config['feature_map_stride'] for config in anchor_generator_cfg]
        anchors_list, num_anchors_per_location_list = anchor_generator.generate_anchors(feature_map_size)
        if anchor_ndim != 7:
            for idx, anchors in enumerate(anchors_list):
                pad_zeros = anchors.new_zeros([*anchors.shape[0:-1], anchor_ndim - 7])
                new_anchors = torch.cat((anchors, pad_zeros), dim=-1)
                anchors_list[idx] = new_anchors
        return anchors_list, num_anchors_per_location_list

    def get_target_assigner(self, anchor_target_cfg):
        """Select the anchor-to-gt target assigner by config NAME."""
        if anchor_target_cfg.NAME == 'ATSS':
            target_assigner = ATSSTargetAssigner(
                topk=anchor_target_cfg.TOPK,
                box_coder=self.box_coder,
                use_multihead=self.use_multihead,
                match_height=anchor_target_cfg.MATCH_HEIGHT
            )
        elif anchor_target_cfg.NAME == 'AxisAlignedTargetAssigner':
            target_assigner = AxisAlignedTargetAssigner(
                model_cfg=self.model_cfg,
                class_names=self.class_names,
                box_coder=self.box_coder,
                match_height=anchor_target_cfg.MATCH_HEIGHT
            )
        else:
            raise NotImplementedError
        return target_assigner

    def build_losses(self, losses_cfg):
        """Register classification / regression / direction loss functions."""
        self.add_module(
            'cls_loss_func',
            loss_utils.SigmoidFocalClassificationLoss(alpha=0.25, gamma=2.0)
        )
        reg_loss_name = 'WeightedSmoothL1Loss' if losses_cfg.get('REG_LOSS_TYPE', None) is None \
            else losses_cfg.REG_LOSS_TYPE
        self.add_module(
            'reg_loss_func',
            getattr(loss_utils, reg_loss_name)(code_weights=losses_cfg.LOSS_WEIGHTS['code_weights'])
        )
        self.add_module(
            'dir_loss_func',
            loss_utils.WeightedCrossEntropyLoss()
        )

    def assign_targets(self, gt_boxes):
        """
        Args:
            gt_boxes: (B, M, 8)
        Returns:
            targets_dict from the target assigner (cls labels, reg targets, ...)
        """
        targets_dict = self.target_assigner.assign_targets(
            self.anchors, gt_boxes
        )
        return targets_dict

    def get_cls_layer_loss(self, new_data=None, reduce=True):
        """
        Focal classification loss over all anchors.

        Args:
            new_data: optional dict with 'cls_preds' / 'box_cls_labels' to use
                instead of self.forward_ret_dict
            reduce: if True return a batch-averaged scalar, otherwise a
                per-sample loss tensor of shape (B,)
        Returns:
            (cls_loss, tb_dict)
        """
        if new_data is None:
            cls_preds = self.forward_ret_dict['cls_preds']
            box_cls_labels = self.forward_ret_dict['box_cls_labels']
        else:
            cls_preds = new_data['cls_preds']
            box_cls_labels = new_data['box_cls_labels']
        batch_size = int(cls_preds.shape[0])
        cared = box_cls_labels >= 0  # [N, num_anchors]; label -1 means "don't care"
        positives = box_cls_labels > 0
        negatives = box_cls_labels == 0
        negative_cls_weights = negatives * 1.0
        cls_weights = (negative_cls_weights + 1.0 * positives).float()
        reg_weights = positives.float()
        if self.num_class == 1:
            # class agnostic
            box_cls_labels[positives] = 1
        # Normalize per sample by the number of positive anchors.
        pos_normalizer = positives.sum(1, keepdim=True).float()
        reg_weights /= torch.clamp(pos_normalizer, min=1.0)
        cls_weights /= torch.clamp(pos_normalizer, min=1.0)
        cls_targets = box_cls_labels * cared.type_as(box_cls_labels)
        one_hot_targets = torch.zeros(
            *list(cls_targets.shape), self.num_class + 1, dtype=cls_preds.dtype, device=cls_targets.device
        )
        one_hot_targets.scatter_(-1, cls_targets.unsqueeze(dim=-1).long(), 1.0)
        cls_preds = cls_preds.view(batch_size, -1, self.num_class)
        one_hot_targets = one_hot_targets[..., 1:]  # drop the background column
        cls_loss_src = self.cls_loss_func(cls_preds, one_hot_targets, weights=cls_weights)  # [N, M]
        if reduce:
            cls_loss = cls_loss_src.sum() / batch_size
        else:
            cls_loss = cls_loss_src.sum(-1).sum(-1)
        cls_loss = cls_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['cls_weight']
        tb_dict = {
            'rpn_loss_cls': cls_loss.item() if reduce else cls_loss[0].item()
        }
        return cls_loss, tb_dict

    @staticmethod
    def add_sin_difference(boxes1, boxes2, dim=6):
        """Replace the heading column (index ``dim``) of both boxes with
        sin(a)cos(b) / cos(a)sin(b) so that a smooth-L1 on the difference
        equals a loss on sin(a - b)."""
        assert dim != -1
        rad_pred_encoding = torch.sin(boxes1[..., dim:dim + 1]) * torch.cos(boxes2[..., dim:dim + 1])
        rad_tg_encoding = torch.cos(boxes1[..., dim:dim + 1]) * torch.sin(boxes2[..., dim:dim + 1])
        boxes1 = torch.cat([boxes1[..., :dim], rad_pred_encoding, boxes1[..., dim + 1:]], dim=-1)
        boxes2 = torch.cat([boxes2[..., :dim], rad_tg_encoding, boxes2[..., dim + 1:]], dim=-1)
        return boxes1, boxes2

    @staticmethod
    def get_direction_target(anchors, reg_targets, one_hot=True, dir_offset=0, num_bins=2):
        """Bin the ground-truth heading (anchor heading + residual) into
        ``num_bins`` direction classes, optionally one-hot encoded."""
        batch_size = reg_targets.shape[0]
        anchors = anchors.view(batch_size, -1, anchors.shape[-1])
        rot_gt = reg_targets[..., 6] + anchors[..., 6]
        offset_rot = common_utils.limit_period(rot_gt - dir_offset, 0, 2 * np.pi)
        dir_cls_targets = torch.floor(offset_rot / (2 * np.pi / num_bins)).long()
        dir_cls_targets = torch.clamp(dir_cls_targets, min=0, max=num_bins - 1)
        if one_hot:
            dir_targets = torch.zeros(*list(dir_cls_targets.shape), num_bins, dtype=anchors.dtype,
                                      device=dir_cls_targets.device)
            dir_targets.scatter_(-1, dir_cls_targets.unsqueeze(dim=-1).long(), 1.0)
            dir_cls_targets = dir_targets
        return dir_cls_targets

    def get_box_reg_layer_loss(self, reduce=True):
        """Box regression (+ optional direction classification) loss over positive anchors.

        Args:
            reduce: if True return batch-averaged scalars, otherwise per-sample tensors
        Returns:
            (box_loss, tb_dict)
        """
        box_preds = self.forward_ret_dict['box_preds']
        box_dir_cls_preds = self.forward_ret_dict.get('dir_cls_preds', None)
        box_reg_targets = self.forward_ret_dict['box_reg_targets']
        box_cls_labels = self.forward_ret_dict['box_cls_labels']
        batch_size = int(box_preds.shape[0])
        positives = box_cls_labels > 0
        reg_weights = positives.float()
        pos_normalizer = positives.sum(1, keepdim=True).float()
        reg_weights /= torch.clamp(pos_normalizer, min=1.0)
        if isinstance(self.anchors, list):
            if self.use_multihead:
                anchors = torch.cat(
                    [anchor.permute(3, 4, 0, 1, 2, 5).contiguous().view(-1, anchor.shape[-1]) for anchor in
                     self.anchors], dim=0)
            else:
                anchors = torch.cat(self.anchors, dim=-3)
        else:
            anchors = self.anchors
        anchors = anchors.view(1, -1, anchors.shape[-1]).repeat(batch_size, 1, 1)
        box_preds = box_preds.view(batch_size, -1,
                                   box_preds.shape[-1] // self.num_anchors_per_location if not self.use_multihead else
                                   box_preds.shape[-1])
        # sin(a - b) = sinacosb-cosasinb
        box_preds_sin, reg_targets_sin = self.add_sin_difference(box_preds, box_reg_targets)
        loc_loss_src = self.reg_loss_func(box_preds_sin, reg_targets_sin, weights=reg_weights)  # [N, M]
        if reduce:
            loc_loss = loc_loss_src.sum() / batch_size
        else:
            loc_loss = loc_loss_src.sum(-1).sum(-1)
        loc_loss = loc_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['loc_weight']
        box_loss = loc_loss
        tb_dict = {
            'rpn_loss_loc': loc_loss.item() if reduce else loc_loss[0].item()
        }
        if box_dir_cls_preds is not None:
            dir_targets = self.get_direction_target(
                anchors, box_reg_targets,
                dir_offset=self.model_cfg.DIR_OFFSET,
                num_bins=self.model_cfg.NUM_DIR_BINS
            )
            dir_logits = box_dir_cls_preds.view(batch_size, -1, self.model_cfg.NUM_DIR_BINS)
            weights = positives.type_as(dir_logits)
            weights /= torch.clamp(weights.sum(-1, keepdim=True), min=1.0)
            dir_loss = self.dir_loss_func(dir_logits, dir_targets, weights=weights)
            if reduce:
                dir_loss = dir_loss.sum() / batch_size
            else:
                dir_loss = dir_loss.sum(-1).sum(-1)
            dir_loss = dir_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['dir_weight']
            box_loss += dir_loss
            # Fix: non-reduced dir_loss is a per-sample tensor; log the first sample
            # like the other rpn_loss_* entries (.item() on the whole tensor would
            # raise for batch_size > 1).
            tb_dict['rpn_loss_dir'] = dir_loss.item() if reduce else dir_loss[0].item()
        return box_loss, tb_dict

    def get_loss(self, reduce=True):
        """Total RPN loss = classification + box regression (+ direction)."""
        cls_loss, tb_dict = self.get_cls_layer_loss(reduce=reduce)
        box_loss, tb_dict_box = self.get_box_reg_layer_loss(reduce=reduce)
        tb_dict.update(tb_dict_box)
        rpn_loss = cls_loss + box_loss
        tb_dict['rpn_loss'] = rpn_loss.item() if reduce else rpn_loss[0].item()
        return rpn_loss, tb_dict

    def generate_predicted_boxes(self, batch_size, cls_preds, box_preds, dir_cls_preds=None):
        """
        Decode raw head outputs into absolute boxes against the anchors.

        Args:
            batch_size:
            cls_preds: (N, H, W, C1)
            box_preds: (N, H, W, C2)
            dir_cls_preds: (N, H, W, C3)
        Returns:
            batch_cls_preds: (B, num_boxes, num_classes)
            batch_box_preds: (B, num_boxes, 7+C)
        """
        if isinstance(self.anchors, list):
            if self.use_multihead:
                anchors = torch.cat([anchor.permute(3, 4, 0, 1, 2, 5).contiguous().view(-1, anchor.shape[-1])
                                     for anchor in self.anchors], dim=0)
            else:
                anchors = torch.cat(self.anchors, dim=-3)
        else:
            anchors = self.anchors
        num_anchors = anchors.view(-1, anchors.shape[-1]).shape[0]
        batch_anchors = anchors.view(1, -1, anchors.shape[-1]).repeat(batch_size, 1, 1)
        batch_cls_preds = cls_preds.view(batch_size, num_anchors, -1).float() \
            if not isinstance(cls_preds, list) else cls_preds
        batch_box_preds = box_preds.view(batch_size, num_anchors, -1) if not isinstance(box_preds, list) \
            else torch.cat(box_preds, dim=1).view(batch_size, num_anchors, -1)
        batch_box_preds = self.box_coder.decode_torch(batch_box_preds, batch_anchors)
        if dir_cls_preds is not None:
            # Snap the decoded heading to the predicted direction bin.
            dir_offset = self.model_cfg.DIR_OFFSET
            dir_limit_offset = self.model_cfg.DIR_LIMIT_OFFSET
            dir_cls_preds = dir_cls_preds.view(batch_size, num_anchors, -1) if not isinstance(dir_cls_preds, list) \
                else torch.cat(dir_cls_preds, dim=1).view(batch_size, num_anchors, -1)
            dir_labels = torch.max(dir_cls_preds, dim=-1)[1]
            period = (2 * np.pi / self.model_cfg.NUM_DIR_BINS)
            dir_rot = common_utils.limit_period(
                batch_box_preds[..., 6] - dir_offset, dir_limit_offset, period
            )
            batch_box_preds[..., 6] = dir_rot + dir_offset + period * dir_labels.to(batch_box_preds.dtype)
        if isinstance(self.box_coder, box_coder_utils.PreviousResidualDecoder):
            # Legacy coder uses a different heading convention; convert back.
            batch_box_preds[..., 6] = common_utils.limit_period(
                -(batch_box_preds[..., 6] + np.pi / 2), offset=0.5, period=np.pi * 2
            )
        return batch_cls_preds, batch_box_preds

    def forward(self, **kwargs):
        # Subclasses must implement the actual head computation.
        raise NotImplementedError
| 13,005 | 44.00346 | 118 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/dense_heads/anchor_head_multi.py | import numpy as np
import torch
import torch.nn as nn
from ..backbones_2d import BaseBEVBackbone
from .anchor_head_template import AnchorHeadTemplate
class SingleHead(BaseBEVBackbone):
    """
    One detection head of AnchorHeadMulti: an optional per-head BEV backbone
    (via the BaseBEVBackbone parent) followed by classification, box-regression
    and optional direction branches for a subset of the classes.
    """
    def __init__(self, model_cfg, input_channels, num_class, num_anchors_per_location, code_size, rpn_head_cfg=None,
                 head_label_indices=None, separate_reg_config=None):
        super().__init__(rpn_head_cfg, input_channels)
        self.num_anchors_per_location = num_anchors_per_location
        self.num_class = num_class
        self.code_size = code_size
        self.model_cfg = model_cfg
        self.separate_reg_config = separate_reg_config
        # 1-based class ids (w.r.t. the full class list) this head handles;
        # registered as a buffer so it follows the module's device and is saved.
        self.register_buffer('head_label_indices', head_label_indices)
        if self.separate_reg_config is not None:
            # Separate small conv stacks: one per regression target group from REG_LIST.
            code_size_cnt = 0
            self.conv_box = nn.ModuleDict()
            self.conv_box_names = []
            num_middle_conv = self.separate_reg_config.NUM_MIDDLE_CONV
            num_middle_filter = self.separate_reg_config.NUM_MIDDLE_FILTER
            conv_cls_list = []
            c_in = input_channels
            for k in range(num_middle_conv):
                conv_cls_list.extend([
                    nn.Conv2d(
                        c_in, num_middle_filter,
                        kernel_size=3, stride=1, padding=1, bias=False
                    ),
                    nn.BatchNorm2d(num_middle_filter),
                    nn.ReLU()
                ])
                c_in = num_middle_filter
            conv_cls_list.append(nn.Conv2d(
                c_in, self.num_anchors_per_location * self.num_class,
                kernel_size=3, stride=1, padding=1
            ))
            self.conv_cls = nn.Sequential(*conv_cls_list)
            for reg_config in self.separate_reg_config.REG_LIST:
                # Each REG_LIST entry is a 'name:channels' string.
                reg_name, reg_channel = reg_config.split(':')
                reg_channel = int(reg_channel)
                cur_conv_list = []
                c_in = input_channels
                for k in range(num_middle_conv):
                    cur_conv_list.extend([
                        nn.Conv2d(
                            c_in, num_middle_filter,
                            kernel_size=3, stride=1, padding=1, bias=False
                        ),
                        nn.BatchNorm2d(num_middle_filter),
                        nn.ReLU()
                    ])
                    c_in = num_middle_filter
                cur_conv_list.append(nn.Conv2d(
                    c_in, self.num_anchors_per_location * int(reg_channel),
                    kernel_size=3, stride=1, padding=1, bias=True
                ))
                code_size_cnt += reg_channel
                self.conv_box[f'conv_{reg_name}'] = nn.Sequential(*cur_conv_list)
                self.conv_box_names.append(f'conv_{reg_name}')
            for m in self.conv_box.modules():
                if isinstance(m, nn.Conv2d):
                    nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                    if m.bias is not None:
                        nn.init.constant_(m.bias, 0)
            # The per-group channels must add up to the box coder's code size.
            assert code_size_cnt == code_size, f'Code size does not match: {code_size_cnt}:{code_size}'
        else:
            # Single 1x1 conv branches for classification and all regression targets.
            self.conv_cls = nn.Conv2d(
                input_channels, self.num_anchors_per_location * self.num_class,
                kernel_size=1
            )
            self.conv_box = nn.Conv2d(
                input_channels, self.num_anchors_per_location * self.code_size,
                kernel_size=1
            )
        if self.model_cfg.get('USE_DIRECTION_CLASSIFIER', None) is not None:
            self.conv_dir_cls = nn.Conv2d(
                input_channels,
                self.num_anchors_per_location * self.model_cfg.NUM_DIR_BINS,
                kernel_size=1
            )
        else:
            self.conv_dir_cls = None
        self.use_multihead = self.model_cfg.get('USE_MULTIHEAD', False)
        self.init_weights()
    def init_weights(self):
        # Focal-loss style bias init so the initial foreground probability ~= pi.
        pi = 0.01
        if isinstance(self.conv_cls, nn.Conv2d):
            nn.init.constant_(self.conv_cls.bias, -np.log((1 - pi) / pi))
        else:
            nn.init.constant_(self.conv_cls[-1].bias, -np.log((1 - pi) / pi))
    def forward(self, spatial_features_2d):
        """
        Args:
            spatial_features_2d: (B, C, H, W) BEV features
        Returns:
            ret_dict with 'cls_preds', 'box_preds', 'dir_cls_preds' — flattened to
            (B, num_anchors, C) when use_multihead, channels-last maps otherwise.
        """
        ret_dict = {}
        # Optional extra per-head backbone configured by rpn_head_cfg.
        spatial_features_2d = super().forward({'spatial_features': spatial_features_2d})['spatial_features_2d']
        cls_preds = self.conv_cls(spatial_features_2d)
        if self.separate_reg_config is None:
            box_preds = self.conv_box(spatial_features_2d)
        else:
            # Concatenate the per-group regression outputs along the channel dim.
            box_preds_list = []
            for reg_name in self.conv_box_names:
                box_preds_list.append(self.conv_box[reg_name](spatial_features_2d))
            box_preds = torch.cat(box_preds_list, dim=1)
        if not self.use_multihead:
            box_preds = box_preds.permute(0, 2, 3, 1).contiguous()
            cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous()
        else:
            H, W = box_preds.shape[2:]
            batch_size = box_preds.shape[0]
            box_preds = box_preds.view(-1, self.num_anchors_per_location,
                                       self.code_size, H, W).permute(0, 1, 3, 4, 2).contiguous()
            cls_preds = cls_preds.view(-1, self.num_anchors_per_location,
                                       self.num_class, H, W).permute(0, 1, 3, 4, 2).contiguous()
            box_preds = box_preds.view(batch_size, -1, self.code_size)
            cls_preds = cls_preds.view(batch_size, -1, self.num_class)
        if self.conv_dir_cls is not None:
            dir_cls_preds = self.conv_dir_cls(spatial_features_2d)
            if self.use_multihead:
                # NOTE(review): relies on H, W and batch_size assigned in the
                # multihead branch above — direction classification with
                # use_multihead=False never reaches this path.
                dir_cls_preds = dir_cls_preds.view(
                    -1, self.num_anchors_per_location, self.model_cfg.NUM_DIR_BINS, H, W).permute(0, 1, 3, 4,
                                                                                                 2).contiguous()
                dir_cls_preds = dir_cls_preds.view(batch_size, -1, self.model_cfg.NUM_DIR_BINS)
            else:
                dir_cls_preds = dir_cls_preds.permute(0, 2, 3, 1).contiguous()
        else:
            dir_cls_preds = None
        ret_dict['cls_preds'] = cls_preds
        ret_dict['box_preds'] = box_preds
        ret_dict['dir_cls_preds'] = dir_cls_preds
        return ret_dict
class AnchorHeadMulti(AnchorHeadTemplate):
    """
    Multi-head anchor RPN: groups the classes into several SingleHead modules
    (optionally after a shared conv) and aggregates their predictions and losses.
    """
    def __init__(self, model_cfg, input_channels, num_class, class_names, grid_size, point_cloud_range,
                 predict_boxes_when_training=True, **kwargs):
        super().__init__(
            model_cfg=model_cfg, num_class=num_class, class_names=class_names, grid_size=grid_size,
            point_cloud_range=point_cloud_range, predict_boxes_when_training=predict_boxes_when_training
        )
        self.model_cfg = model_cfg
        # When True, each head predicts only its own classes (per-head label space).
        self.separate_multihead = self.model_cfg.get('SEPARATE_MULTIHEAD', False)
        if self.model_cfg.get('SHARED_CONV_NUM_FILTER', None) is not None:
            # Shared 3x3 conv applied to the BEV features before every head.
            shared_conv_num_filter = self.model_cfg.SHARED_CONV_NUM_FILTER
            self.shared_conv = nn.Sequential(
                nn.Conv2d(input_channels, shared_conv_num_filter, 3, stride=1, padding=1, bias=False),
                nn.BatchNorm2d(shared_conv_num_filter, eps=1e-3, momentum=0.01),
                nn.ReLU(),
            )
        else:
            self.shared_conv = None
            shared_conv_num_filter = input_channels
        self.rpn_heads = None
        self.make_multihead(shared_conv_num_filter)
    def make_multihead(self, input_channels):
        """Instantiate one SingleHead per RPN_HEAD_CFGS entry."""
        rpn_head_cfgs = self.model_cfg.RPN_HEAD_CFGS
        rpn_heads = []
        class_names = []
        for rpn_head_cfg in rpn_head_cfgs:
            class_names.extend(rpn_head_cfg['HEAD_CLS_NAME'])
        for rpn_head_cfg in rpn_head_cfgs:
            num_anchors_per_location = sum([self.num_anchors_per_location[class_names.index(head_cls)]
                                            for head_cls in rpn_head_cfg['HEAD_CLS_NAME']])
            # 1-based global label ids this head is responsible for.
            head_label_indices = torch.from_numpy(np.array([
                self.class_names.index(cur_name) + 1 for cur_name in rpn_head_cfg['HEAD_CLS_NAME']
            ]))
            rpn_head = SingleHead(
                self.model_cfg, input_channels,
                len(rpn_head_cfg['HEAD_CLS_NAME']) if self.separate_multihead else self.num_class,
                num_anchors_per_location, self.box_coder.code_size, rpn_head_cfg,
                head_label_indices=head_label_indices,
                separate_reg_config=self.model_cfg.get('SEPARATE_REG_CONFIG', None)
            )
            rpn_heads.append(rpn_head)
        self.rpn_heads = nn.ModuleList(rpn_heads)
    def forward(self, data_dict):
        """Run every head on the (optionally shared-conv) BEV features, aggregate
        their outputs, assign targets during training, and decode boxes."""
        spatial_features_2d = data_dict['spatial_features_2d']
        if self.shared_conv is not None:
            spatial_features_2d = self.shared_conv(spatial_features_2d)
        ret_dicts = []
        for rpn_head in self.rpn_heads:
            ret_dicts.append(rpn_head(spatial_features_2d))
        cls_preds = [ret_dict['cls_preds'] for ret_dict in ret_dicts]
        box_preds = [ret_dict['box_preds'] for ret_dict in ret_dicts]
        # Keep per-head lists when heads use separate label spaces; otherwise
        # concatenate over the anchor dimension.
        ret = {
            'cls_preds': cls_preds if self.separate_multihead else torch.cat(cls_preds, dim=1),
            'box_preds': box_preds if self.separate_multihead else torch.cat(box_preds, dim=1),
        }
        if self.model_cfg.get('USE_DIRECTION_CLASSIFIER', False):
            dir_cls_preds = [ret_dict['dir_cls_preds'] for ret_dict in ret_dicts]
            ret['dir_cls_preds'] = dir_cls_preds if self.separate_multihead else torch.cat(dir_cls_preds, dim=1)
        self.forward_ret_dict.update(ret)
        if self.training:
            targets_dict = self.assign_targets(
                gt_boxes=data_dict['gt_boxes']
            )
            self.forward_ret_dict.update(targets_dict)
        if not self.training or self.predict_boxes_when_training:
            batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
                batch_size=data_dict['batch_size'],
                cls_preds=ret['cls_preds'], box_preds=ret['box_preds'], dir_cls_preds=ret.get('dir_cls_preds', None)
            )
            if isinstance(batch_cls_preds, list):
                # Per-head class columns need mapping back to global label ids downstream.
                multihead_label_mapping = []
                for idx in range(len(batch_cls_preds)):
                    multihead_label_mapping.append(self.rpn_heads[idx].head_label_indices)
                data_dict['multihead_label_mapping'] = multihead_label_mapping
            data_dict['batch_cls_preds'] = batch_cls_preds
            data_dict['batch_box_preds'] = batch_box_preds
            data_dict['cls_preds_normalized'] = False
        return data_dict
    def get_cls_layer_loss(self):
        """Focal classification loss summed over all heads, each head scoring
        its own anchor slice (and its own class columns when separate_multihead)."""
        loss_weights = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS
        if 'pos_cls_weight' in loss_weights:
            pos_cls_weight = loss_weights['pos_cls_weight']
            neg_cls_weight = loss_weights['neg_cls_weight']
        else:
            pos_cls_weight = neg_cls_weight = 1.0
        cls_preds = self.forward_ret_dict['cls_preds']
        box_cls_labels = self.forward_ret_dict['box_cls_labels']
        if not isinstance(cls_preds, list):
            cls_preds = [cls_preds]
        batch_size = int(cls_preds[0].shape[0])
        cared = box_cls_labels >= 0  # [N, num_anchors]; label -1 means "don't care"
        positives = box_cls_labels > 0
        negatives = box_cls_labels == 0
        negative_cls_weights = negatives * 1.0 * neg_cls_weight
        cls_weights = (negative_cls_weights + pos_cls_weight * positives).float()
        reg_weights = positives.float()
        if self.num_class == 1:
            # class agnostic
            box_cls_labels[positives] = 1
        # Normalize per sample by the number of positive anchors.
        pos_normalizer = positives.sum(1, keepdim=True).float()
        reg_weights /= torch.clamp(pos_normalizer, min=1.0)
        cls_weights /= torch.clamp(pos_normalizer, min=1.0)
        cls_targets = box_cls_labels * cared.type_as(box_cls_labels)
        one_hot_targets = torch.zeros(
            *list(cls_targets.shape), self.num_class + 1, dtype=cls_preds[0].dtype, device=cls_targets.device
        )
        one_hot_targets.scatter_(-1, cls_targets.unsqueeze(dim=-1).long(), 1.0)
        one_hot_targets = one_hot_targets[..., 1:]  # drop the background column
        start_idx = c_idx = 0
        cls_losses = 0
        for idx, cls_pred in enumerate(cls_preds):
            cur_num_class = self.rpn_heads[idx].num_class
            cls_pred = cls_pred.view(batch_size, -1, cur_num_class)
            if self.separate_multihead:
                # Slice both the anchor range and the class columns owned by this head.
                one_hot_target = one_hot_targets[:, start_idx:start_idx + cls_pred.shape[1],
                                 c_idx:c_idx + cur_num_class]
                c_idx += cur_num_class
            else:
                one_hot_target = one_hot_targets[:, start_idx:start_idx + cls_pred.shape[1]]
            cls_weight = cls_weights[:, start_idx:start_idx + cls_pred.shape[1]]
            cls_loss_src = self.cls_loss_func(cls_pred, one_hot_target, weights=cls_weight)  # [N, M]
            cls_loss = cls_loss_src.sum() / batch_size
            cls_loss = cls_loss * loss_weights['cls_weight']
            cls_losses += cls_loss
            start_idx += cls_pred.shape[1]
        assert start_idx == one_hot_targets.shape[1]
        tb_dict = {
            'rpn_loss_cls': cls_losses.item()
        }
        return cls_losses, tb_dict
    def get_box_reg_layer_loss(self):
        """Box regression (+ optional direction) loss summed over all heads,
        each head scoring its own anchor slice."""
        box_preds = self.forward_ret_dict['box_preds']
        box_dir_cls_preds = self.forward_ret_dict.get('dir_cls_preds', None)
        box_reg_targets = self.forward_ret_dict['box_reg_targets']
        box_cls_labels = self.forward_ret_dict['box_cls_labels']
        positives = box_cls_labels > 0
        reg_weights = positives.float()
        pos_normalizer = positives.sum(1, keepdim=True).float()
        reg_weights /= torch.clamp(pos_normalizer, min=1.0)
        if not isinstance(box_preds, list):
            box_preds = [box_preds]
        batch_size = int(box_preds[0].shape[0])
        if isinstance(self.anchors, list):
            if self.use_multihead:
                anchors = torch.cat(
                    [anchor.permute(3, 4, 0, 1, 2, 5).contiguous().view(-1, anchor.shape[-1])
                     for anchor in self.anchors], dim=0
                )
            else:
                anchors = torch.cat(self.anchors, dim=-3)
        else:
            anchors = self.anchors
        anchors = anchors.view(1, -1, anchors.shape[-1]).repeat(batch_size, 1, 1)
        start_idx = 0
        box_losses = 0
        tb_dict = {}
        for idx, box_pred in enumerate(box_preds):
            box_pred = box_pred.view(
                batch_size, -1,
                box_pred.shape[-1] // self.num_anchors_per_location if not self.use_multihead else box_pred.shape[-1]
            )
            box_reg_target = box_reg_targets[:, start_idx:start_idx + box_pred.shape[1]]
            reg_weight = reg_weights[:, start_idx:start_idx + box_pred.shape[1]]
            # sin(a - b) = sinacosb-cosasinb
            if box_dir_cls_preds is not None:
                box_pred_sin, reg_target_sin = self.add_sin_difference(box_pred, box_reg_target)
                loc_loss_src = self.reg_loss_func(box_pred_sin, reg_target_sin, weights=reg_weight)  # [N, M]
            else:
                loc_loss_src = self.reg_loss_func(box_pred, box_reg_target, weights=reg_weight)  # [N, M]
            loc_loss = loc_loss_src.sum() / batch_size
            loc_loss = loc_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['loc_weight']
            box_losses += loc_loss
            tb_dict['rpn_loss_loc'] = tb_dict.get('rpn_loss_loc', 0) + loc_loss.item()
            if box_dir_cls_preds is not None:
                if not isinstance(box_dir_cls_preds, list):
                    box_dir_cls_preds = [box_dir_cls_preds]
                # NOTE(review): dir_targets / weights are loop-invariant but recomputed
                # for every head; harmless, only the per-head slice below is used.
                dir_targets = self.get_direction_target(
                    anchors, box_reg_targets,
                    dir_offset=self.model_cfg.DIR_OFFSET,
                    num_bins=self.model_cfg.NUM_DIR_BINS
                )
                box_dir_cls_pred = box_dir_cls_preds[idx]
                dir_logit = box_dir_cls_pred.view(batch_size, -1, self.model_cfg.NUM_DIR_BINS)
                weights = positives.type_as(dir_logit)
                weights /= torch.clamp(weights.sum(-1, keepdim=True), min=1.0)
                weight = weights[:, start_idx:start_idx + box_pred.shape[1]]
                dir_target = dir_targets[:, start_idx:start_idx + box_pred.shape[1]]
                dir_loss = self.dir_loss_func(dir_logit, dir_target, weights=weight)
                dir_loss = dir_loss.sum() / batch_size
                dir_loss = dir_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['dir_weight']
                box_losses += dir_loss
                tb_dict['rpn_loss_dir'] = tb_dict.get('rpn_loss_dir', 0) + dir_loss.item()
            start_idx += box_pred.shape[1]
        return box_losses, tb_dict
| 17,041 | 44.566845 | 117 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/dense_heads/center_head.py | import copy
import numpy as np
import torch
import torch.nn as nn
from torch.nn.init import kaiming_normal_
from ..model_utils import model_nms_utils
from ..model_utils import centernet_utils
from ...utils import loss_utils
class SeparateHead(nn.Module):
    """One small convolutional branch per entry of ``sep_head_dict``.

    Each entry maps a head name (e.g. ``'hm'``, ``'center'``, ``'dim'``) to a
    dict with ``out_channels`` and ``num_conv``; the corresponding branch is a
    stack of ``num_conv - 1`` Conv-BN-ReLU layers followed by a plain 3x3 conv.
    The final bias of any head whose name contains ``'hm'`` is filled with
    ``init_bias`` (focal-loss style background prior); all other conv layers
    get Kaiming-initialized weights and zero biases.
    """

    def __init__(self, input_channels, sep_head_dict, init_bias=-2.19, use_bias=False):
        super().__init__()
        self.sep_head_dict = sep_head_dict

        for head_name, head_cfg in self.sep_head_dict.items():
            branch = self._make_branch(
                input_channels, head_cfg['out_channels'], head_cfg['num_conv'], use_bias
            )
            if 'hm' in head_name:
                # Heatmap branch: bias the final logits towards "background".
                branch[-1].bias.data.fill_(init_bias)
            else:
                for module in branch.modules():
                    if isinstance(module, nn.Conv2d):
                        kaiming_normal_(module.weight.data)
                        if hasattr(module, "bias") and module.bias is not None:
                            nn.init.constant_(module.bias, 0)
            # Register the branch under the head's name so checkpoints keep
            # the same parameter keys (e.g. ``hm.0.weight``).
            setattr(self, head_name, branch)

    @staticmethod
    def _make_branch(in_channels, out_channels, num_conv, use_bias):
        """Build ``num_conv - 1`` Conv-BN-ReLU blocks plus a final plain conv."""
        layers = [
            nn.Sequential(
                nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1, bias=use_bias),
                nn.BatchNorm2d(in_channels),
                nn.ReLU(),
            )
            for _ in range(num_conv - 1)
        ]
        layers.append(nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=True))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Run every branch on ``x`` and return ``{head_name: prediction}``."""
        return {head_name: getattr(self, head_name)(x) for head_name in self.sep_head_dict}
class CenterHead(nn.Module):
    """CenterPoint-style anchor-free detection head.

    A shared 3x3 conv processes the BEV feature map, then one ``SeparateHead``
    per class group predicts a per-class heatmap ('hm') plus the regression
    maps listed in ``SEPARATE_HEAD_CFG.HEAD_ORDER`` (center offset, height,
    log-dims, sin/cos of yaw, optionally velocity).
    """

    def __init__(self, model_cfg, input_channels, num_class, class_names, grid_size, point_cloud_range, voxel_size,
                 predict_boxes_when_training=True):
        super().__init__()
        self.model_cfg = model_cfg
        self.num_class = num_class
        self.grid_size = grid_size
        self.point_cloud_range = point_cloud_range
        self.voxel_size = voxel_size
        self.feature_map_stride = self.model_cfg.TARGET_ASSIGNER_CONFIG.get('FEATURE_MAP_STRIDE', None)

        self.class_names = class_names
        self.class_names_each_head = []
        self.class_id_mapping_each_head = []

        # Partition the active classes into per-head groups and remember, for
        # each head, how its local class indices map back to global class ids.
        for cur_class_names in self.model_cfg.CLASS_NAMES_EACH_HEAD:
            self.class_names_each_head.append([x for x in cur_class_names if x in class_names])
            cur_class_id_mapping = torch.from_numpy(np.array(
                [self.class_names.index(x) for x in cur_class_names if x in class_names]
            )).cuda()
            self.class_id_mapping_each_head.append(cur_class_id_mapping)

        total_classes = sum([len(x) for x in self.class_names_each_head])
        assert total_classes == len(self.class_names), f'class_names_each_head={self.class_names_each_head}'

        self.shared_conv = nn.Sequential(
            nn.Conv2d(
                input_channels, self.model_cfg.SHARED_CONV_CHANNEL, 3, stride=1, padding=1,
                bias=self.model_cfg.get('USE_BIAS_BEFORE_NORM', False)
            ),
            nn.BatchNorm2d(self.model_cfg.SHARED_CONV_CHANNEL),
            nn.ReLU(),
        )

        self.heads_list = nn.ModuleList()
        self.separate_head_cfg = self.model_cfg.SEPARATE_HEAD_CFG
        for idx, cur_class_names in enumerate(self.class_names_each_head):
            cur_head_dict = copy.deepcopy(self.separate_head_cfg.HEAD_DICT)
            # The heatmap branch width depends on the class count of this head.
            cur_head_dict['hm'] = dict(out_channels=len(cur_class_names), num_conv=self.model_cfg.NUM_HM_CONV)
            self.heads_list.append(
                SeparateHead(
                    input_channels=self.model_cfg.SHARED_CONV_CHANNEL,
                    sep_head_dict=cur_head_dict,
                    init_bias=-2.19,
                    use_bias=self.model_cfg.get('USE_BIAS_BEFORE_NORM', False)
                )
            )
        self.predict_boxes_when_training = predict_boxes_when_training
        self.forward_ret_dict = {}
        self.build_losses()

    def build_losses(self):
        """Register the focal heatmap loss and the L1-style regression loss."""
        self.add_module('hm_loss_func', loss_utils.FocalLossCenterNet())
        self.add_module('reg_loss_func', loss_utils.RegLossCenterNet())

    def assign_target_of_single_head(
            self, num_classes, gt_boxes, feature_map_size, feature_map_stride, num_max_objs=500,
            gaussian_overlap=0.1, min_radius=2
    ):
        """
        Build CenterNet training targets for one head on one sample.

        Args:
            gt_boxes: (N, 8)
        feature_map_size: (2), [x, y]
        Returns:
            heatmap: (num_classes, size_y, size_x) gaussian class heatmaps
            ret_boxes: (num_max_objs, C) regression targets (sub-pixel offset,
                z, log-dims, cos/sin yaw, extra attrs)
            inds: (num_max_objs,) flattened pixel index of each object center
            mask: (num_max_objs,) 1 for valid object slots
        """
        heatmap = gt_boxes.new_zeros(num_classes, feature_map_size[1], feature_map_size[0])
        ret_boxes = gt_boxes.new_zeros((num_max_objs, gt_boxes.shape[-1] - 1 + 1))
        inds = gt_boxes.new_zeros(num_max_objs).long()
        mask = gt_boxes.new_zeros(num_max_objs).long()

        x, y, z = gt_boxes[:, 0], gt_boxes[:, 1], gt_boxes[:, 2]
        # World coordinates -> (downsampled) feature-map pixel coordinates.
        coord_x = (x - self.point_cloud_range[0]) / self.voxel_size[0] / feature_map_stride
        coord_y = (y - self.point_cloud_range[1]) / self.voxel_size[1] / feature_map_stride
        coord_x = torch.clamp(coord_x, min=0, max=feature_map_size[0] - 0.5)  # bugfixed: 1e-6 does not work for center.int()
        coord_y = torch.clamp(coord_y, min=0, max=feature_map_size[1] - 0.5)  #
        center = torch.cat((coord_x[:, None], coord_y[:, None]), dim=-1)
        center_int = center.int()
        center_int_float = center_int.float()

        dx, dy, dz = gt_boxes[:, 3], gt_boxes[:, 4], gt_boxes[:, 5]
        dx = dx / self.voxel_size[0] / feature_map_stride
        dy = dy / self.voxel_size[1] / feature_map_stride

        # Gaussian radius from the box footprint, lower-bounded by min_radius.
        radius = centernet_utils.gaussian_radius(dx, dy, min_overlap=gaussian_overlap)
        radius = torch.clamp_min(radius.int(), min=min_radius)

        for k in range(min(num_max_objs, gt_boxes.shape[0])):
            if dx[k] <= 0 or dy[k] <= 0:
                continue

            if not (0 <= center_int[k][0] <= feature_map_size[0] and 0 <= center_int[k][1] <= feature_map_size[1]):
                continue

            cur_class_id = (gt_boxes[k, -1] - 1).long()
            centernet_utils.draw_gaussian_to_heatmap(heatmap[cur_class_id], center[k], radius[k].item())

            # Flattened (row-major) pixel index of this object's center.
            inds[k] = center_int[k, 1] * feature_map_size[0] + center_int[k, 0]
            mask[k] = 1

            ret_boxes[k, 0:2] = center[k] - center_int_float[k].float()  # sub-pixel offset
            ret_boxes[k, 2] = z[k]
            ret_boxes[k, 3:6] = gt_boxes[k, 3:6].log()  # regress log-dimensions
            ret_boxes[k, 6] = torch.cos(gt_boxes[k, 6])
            ret_boxes[k, 7] = torch.sin(gt_boxes[k, 6])
            if gt_boxes.shape[1] > 8:
                # Extra attributes (e.g. velocity) sit between yaw and class id.
                ret_boxes[k, 8:] = gt_boxes[k, 7:-1]

        return heatmap, ret_boxes, inds, mask

    def assign_targets(self, gt_boxes, feature_map_size=None, **kwargs):
        """
        Build per-head training targets for the whole batch.

        Args:
            gt_boxes: (B, M, 8)
            range_image_polar: (B, 3, H, W)
            feature_map_size: (2) [H, W]
            spatial_cartesian: (B, 4, H, W)
        Returns:
            ret_dict: lists (one entry per head) of stacked heatmaps,
                target boxes, center indices and validity masks
        """
        feature_map_size = feature_map_size[::-1]  # [H, W] ==> [x, y]
        target_assigner_cfg = self.model_cfg.TARGET_ASSIGNER_CONFIG
        # feature_map_size = self.grid_size[:2] // target_assigner_cfg.FEATURE_MAP_STRIDE

        batch_size = gt_boxes.shape[0]
        ret_dict = {
            'heatmaps': [],
            'target_boxes': [],
            'inds': [],
            'masks': [],
            'heatmap_masks': []
        }

        all_names = np.array(['bg', *self.class_names])
        for idx, cur_class_names in enumerate(self.class_names_each_head):
            heatmap_list, target_boxes_list, inds_list, masks_list = [], [], [], []
            for bs_idx in range(batch_size):
                cur_gt_boxes = gt_boxes[bs_idx]
                gt_class_names = all_names[cur_gt_boxes[:, -1].cpu().long().numpy()]

                # Keep only boxes whose class belongs to this head and remap
                # their class id to the head-local 1-based index.
                # NOTE(review): this writes through to gt_boxes in place —
                # presumably safe because each box belongs to one head only.
                gt_boxes_single_head = []

                for idx, name in enumerate(gt_class_names):
                    if name not in cur_class_names:
                        continue
                    temp_box = cur_gt_boxes[idx]
                    temp_box[-1] = cur_class_names.index(name) + 1
                    gt_boxes_single_head.append(temp_box[None, :])

                if len(gt_boxes_single_head) == 0:
                    gt_boxes_single_head = cur_gt_boxes[:0, :]
                else:
                    gt_boxes_single_head = torch.cat(gt_boxes_single_head, dim=0)

                heatmap, ret_boxes, inds, mask = self.assign_target_of_single_head(
                    num_classes=len(cur_class_names), gt_boxes=gt_boxes_single_head.cpu(),
                    feature_map_size=feature_map_size, feature_map_stride=target_assigner_cfg.FEATURE_MAP_STRIDE,
                    num_max_objs=target_assigner_cfg.NUM_MAX_OBJS,
                    gaussian_overlap=target_assigner_cfg.GAUSSIAN_OVERLAP,
                    min_radius=target_assigner_cfg.MIN_RADIUS,
                )
                heatmap_list.append(heatmap.to(gt_boxes_single_head.device))
                target_boxes_list.append(ret_boxes.to(gt_boxes_single_head.device))
                inds_list.append(inds.to(gt_boxes_single_head.device))
                masks_list.append(mask.to(gt_boxes_single_head.device))

            ret_dict['heatmaps'].append(torch.stack(heatmap_list, dim=0))
            ret_dict['target_boxes'].append(torch.stack(target_boxes_list, dim=0))
            ret_dict['inds'].append(torch.stack(inds_list, dim=0))
            ret_dict['masks'].append(torch.stack(masks_list, dim=0))
        return ret_dict

    def sigmoid(self, x):
        """Sigmoid clamped away from 0/1 for numerically stable focal loss."""
        y = torch.clamp(x.sigmoid(), min=1e-4, max=1 - 1e-4)
        return y

    def get_loss(self):
        """Sum focal heatmap loss and weighted regression loss over all heads."""
        pred_dicts = self.forward_ret_dict['pred_dicts']
        target_dicts = self.forward_ret_dict['target_dicts']

        tb_dict = {}
        loss = 0

        for idx, pred_dict in enumerate(pred_dicts):
            pred_dict['hm'] = self.sigmoid(pred_dict['hm'])
            hm_loss = self.hm_loss_func(pred_dict['hm'], target_dicts['heatmaps'][idx])
            hm_loss *= self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['cls_weight']

            target_boxes = target_dicts['target_boxes'][idx]
            # Concatenate the regression maps in the configured channel order
            # so they align with the target-box layout.
            pred_boxes = torch.cat([pred_dict[head_name] for head_name in self.separate_head_cfg.HEAD_ORDER], dim=1)

            reg_loss = self.reg_loss_func(
                pred_boxes, target_dicts['masks'][idx], target_dicts['inds'][idx], target_boxes
            )
            loc_loss = (reg_loss * reg_loss.new_tensor(self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['code_weights'])).sum()
            loc_loss = loc_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['loc_weight']

            loss += hm_loss + loc_loss
            tb_dict['hm_loss_head_%d' % idx] = hm_loss.item()
            tb_dict['loc_loss_head_%d' % idx] = loc_loss.item()

        tb_dict['rpn_loss'] = loss.item()
        return loss, tb_dict

    def generate_predicted_boxes(self, batch_size, pred_dicts):
        """Decode per-head heatmaps/regressions into boxes and run NMS.

        Returns one dict per sample with 'pred_boxes', 'pred_scores' and
        1-based 'pred_labels' merged across all heads.
        """
        post_process_cfg = self.model_cfg.POST_PROCESSING
        post_center_limit_range = torch.tensor(post_process_cfg.POST_CENTER_LIMIT_RANGE).cuda().float()

        ret_dict = [{
            'pred_boxes': [],
            'pred_scores': [],
            'pred_labels': [],
        } for k in range(batch_size)]
        for idx, pred_dict in enumerate(pred_dicts):
            batch_hm = pred_dict['hm'].sigmoid()
            batch_center = pred_dict['center']
            batch_center_z = pred_dict['center_z']
            batch_dim = pred_dict['dim'].exp()  # invert log-dim regression
            batch_rot_cos = pred_dict['rot'][:, 0].unsqueeze(dim=1)
            batch_rot_sin = pred_dict['rot'][:, 1].unsqueeze(dim=1)
            batch_vel = pred_dict['vel'] if 'vel' in self.separate_head_cfg.HEAD_ORDER else None

            final_pred_dicts = centernet_utils.decode_bbox_from_heatmap(
                heatmap=batch_hm, rot_cos=batch_rot_cos, rot_sin=batch_rot_sin,
                center=batch_center, center_z=batch_center_z, dim=batch_dim, vel=batch_vel,
                point_cloud_range=self.point_cloud_range, voxel_size=self.voxel_size,
                feature_map_stride=self.feature_map_stride,
                K=post_process_cfg.MAX_OBJ_PER_SAMPLE,
                circle_nms=(post_process_cfg.NMS_CONFIG.NMS_TYPE == 'circle_nms'),
                score_thresh=post_process_cfg.SCORE_THRESH,
                post_center_limit_range=post_center_limit_range
            )

            for k, final_dict in enumerate(final_pred_dicts):
                # Remap head-local class indices to global class ids.
                final_dict['pred_labels'] = self.class_id_mapping_each_head[idx][final_dict['pred_labels'].long()]
                if post_process_cfg.NMS_CONFIG.NMS_TYPE != 'circle_nms':
                    selected, selected_scores = model_nms_utils.class_agnostic_nms(
                        box_scores=final_dict['pred_scores'], box_preds=final_dict['pred_boxes'],
                        nms_config=post_process_cfg.NMS_CONFIG,
                        score_thresh=None
                    )

                    final_dict['pred_boxes'] = final_dict['pred_boxes'][selected]
                    final_dict['pred_scores'] = selected_scores
                    final_dict['pred_labels'] = final_dict['pred_labels'][selected]

                ret_dict[k]['pred_boxes'].append(final_dict['pred_boxes'])
                ret_dict[k]['pred_scores'].append(final_dict['pred_scores'])
                ret_dict[k]['pred_labels'].append(final_dict['pred_labels'])

        for k in range(batch_size):
            ret_dict[k]['pred_boxes'] = torch.cat(ret_dict[k]['pred_boxes'], dim=0)
            ret_dict[k]['pred_scores'] = torch.cat(ret_dict[k]['pred_scores'], dim=0)
            ret_dict[k]['pred_labels'] = torch.cat(ret_dict[k]['pred_labels'], dim=0) + 1  # labels are 1-based downstream

        return ret_dict

    @staticmethod
    def reorder_rois_for_refining(batch_size, pred_dicts):
        """Pad variable-length per-sample predictions into fixed-size ROI tensors
        (rois, roi_scores, roi_labels) for a second-stage refinement head."""
        num_max_rois = max([len(cur_dict['pred_boxes']) for cur_dict in pred_dicts])
        num_max_rois = max(1, num_max_rois)  # at least one faked rois to avoid error
        pred_boxes = pred_dicts[0]['pred_boxes']

        rois = pred_boxes.new_zeros((batch_size, num_max_rois, pred_boxes.shape[-1]))
        roi_scores = pred_boxes.new_zeros((batch_size, num_max_rois))
        roi_labels = pred_boxes.new_zeros((batch_size, num_max_rois)).long()

        for bs_idx in range(batch_size):
            num_boxes = len(pred_dicts[bs_idx]['pred_boxes'])

            rois[bs_idx, :num_boxes, :] = pred_dicts[bs_idx]['pred_boxes']
            roi_scores[bs_idx, :num_boxes] = pred_dicts[bs_idx]['pred_scores']
            roi_labels[bs_idx, :num_boxes] = pred_dicts[bs_idx]['pred_labels']
        return rois, roi_scores, roi_labels

    def forward(self, data_dict):
        """Run shared conv + all heads; assign targets when training, decode
        boxes for inference or for a downstream refinement stage."""
        spatial_features_2d = data_dict['spatial_features_2d']
        x = self.shared_conv(spatial_features_2d)

        pred_dicts = []
        for head in self.heads_list:
            pred_dicts.append(head(x))

        if self.training:
            target_dict = self.assign_targets(
                data_dict['gt_boxes'], feature_map_size=spatial_features_2d.size()[2:],
                feature_map_stride=data_dict.get('spatial_features_2d_strides', None)
            )
            self.forward_ret_dict['target_dicts'] = target_dict

        self.forward_ret_dict['pred_dicts'] = pred_dicts

        if not self.training or self.predict_boxes_when_training:
            pred_dicts = self.generate_predicted_boxes(
                data_dict['batch_size'], pred_dicts
            )

            if self.predict_boxes_when_training:
                rois, roi_scores, roi_labels = self.reorder_rois_for_refining(data_dict['batch_size'], pred_dicts)
                data_dict['rois'] = rois
                data_dict['roi_scores'] = roi_scores
                data_dict['roi_labels'] = roi_labels
                data_dict['has_class_labels'] = True
            else:
                data_dict['final_box_dicts'] = pred_dicts

        return data_dict
| 16,051 | 44.089888 | 125 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/dense_heads/point_head_box.py | import torch
from ...utils import box_coder_utils, box_utils
from .point_head_template import PointHeadTemplate
class PointHeadBox(PointHeadTemplate):
    """
    A simple point-based head predicting per-point class scores and boxes,
    used by PointRCNN.
    Reference Paper: https://arxiv.org/abs/1812.04244
    PointRCNN: 3D Object Proposal Generation and Detection from Point Cloud
    """
    def __init__(self, num_class, input_channels, model_cfg, predict_boxes_when_training=False, **kwargs):
        super().__init__(model_cfg=model_cfg, num_class=num_class)
        self.predict_boxes_when_training = predict_boxes_when_training
        # Per-point classification branch: one logit per class.
        self.cls_layers = self.make_fc_layers(
            fc_cfg=self.model_cfg.CLS_FC,
            input_channels=input_channels,
            output_channels=num_class
        )

        # Box coder defines the regression target encoding and its code size.
        target_cfg = self.model_cfg.TARGET_CONFIG
        self.box_coder = getattr(box_coder_utils, target_cfg.BOX_CODER)(
            **target_cfg.BOX_CODER_CONFIG
        )
        # Per-point box regression branch.
        self.box_layers = self.make_fc_layers(
            fc_cfg=self.model_cfg.REG_FC,
            input_channels=input_channels,
            output_channels=self.box_coder.code_size
        )

    def assign_targets(self, input_dict):
        """
        Args:
            input_dict:
                point_features: (N1 + N2 + N3 + ..., C)
                batch_size:
                point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
                gt_boxes (optional): (B, M, 8)
        Returns:
            point_cls_labels: (N1 + N2 + N3 + ...), long type, 0:background, -1:ignored
            point_part_labels: (N1 + N2 + N3 + ..., 3)
        """
        point_coords = input_dict['point_coords']
        gt_boxes = input_dict['gt_boxes']
        assert gt_boxes.shape.__len__() == 3, 'gt_boxes.shape=%s' % str(gt_boxes.shape)
        assert point_coords.shape.__len__() in [2], 'points.shape=%s' % str(point_coords.shape)

        batch_size = gt_boxes.shape[0]
        # Enlarged boxes define an "ignore" ring: points inside the enlarged
        # box but outside the original box get label -1.
        extend_gt_boxes = box_utils.enlarge_box3d(
            gt_boxes.view(-1, gt_boxes.shape[-1]), extra_width=self.model_cfg.TARGET_CONFIG.GT_EXTRA_WIDTH
        ).view(batch_size, -1, gt_boxes.shape[-1])
        targets_dict = self.assign_stack_targets(
            points=point_coords, gt_boxes=gt_boxes, extend_gt_boxes=extend_gt_boxes,
            set_ignore_flag=True, use_ball_constraint=False,
            ret_part_labels=False, ret_box_labels=True
        )

        return targets_dict

    def get_loss(self, tb_dict=None):
        """Sum the point classification and box regression losses.

        Returns:
            point_loss: scalar total loss
            tb_dict: merged tensorboard scalars from both loss terms
        """
        tb_dict = {} if tb_dict is None else tb_dict
        point_loss_cls, tb_dict_1 = self.get_cls_layer_loss()
        point_loss_box, tb_dict_2 = self.get_box_layer_loss()

        point_loss = point_loss_cls + point_loss_box
        tb_dict.update(tb_dict_1)
        tb_dict.update(tb_dict_2)
        return point_loss, tb_dict

    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size:
                point_features: (N1 + N2 + N3 + ..., C) or (B, N, C)
                point_features_before_fusion: (N1 + N2 + N3 + ..., C)
                point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
                point_labels (optional): (N1 + N2 + N3 + ...)
                gt_boxes (optional): (B, M, 8)
        Returns:
            batch_dict:
                point_cls_scores: (N1 + N2 + N3 + ..., 1)
                point_part_offset: (N1 + N2 + N3 + ..., 3)
        """
        if self.model_cfg.get('USE_POINT_FEATURES_BEFORE_FUSION', False):
            point_features = batch_dict['point_features_before_fusion']
        else:
            point_features = batch_dict['point_features']
        point_cls_preds = self.cls_layers(point_features)  # (total_points, num_class)
        point_box_preds = self.box_layers(point_features)  # (total_points, box_code_size)

        # Foreground confidence = max class probability per point.
        point_cls_preds_max, _ = point_cls_preds.max(dim=-1)
        batch_dict['point_cls_scores'] = torch.sigmoid(point_cls_preds_max)

        ret_dict = {'point_cls_preds': point_cls_preds,
                    'point_box_preds': point_box_preds}
        if self.training:
            targets_dict = self.assign_targets(batch_dict)
            ret_dict['point_cls_labels'] = targets_dict['point_cls_labels']
            ret_dict['point_box_labels'] = targets_dict['point_box_labels']

        if not self.training or self.predict_boxes_when_training:
            # Decode per-point regressions into full boxes for proposals.
            point_cls_preds, point_box_preds = self.generate_predicted_boxes(
                points=batch_dict['point_coords'][:, 1:4],
                point_cls_preds=point_cls_preds, point_box_preds=point_box_preds
            )
            batch_dict['batch_cls_preds'] = point_cls_preds
            batch_dict['batch_box_preds'] = point_box_preds
            batch_dict['batch_index'] = batch_dict['point_coords'][:, 0]
            batch_dict['cls_preds_normalized'] = False

        self.forward_ret_dict = ret_dict

        return batch_dict
| 4,930 | 41.508621 | 106 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/dense_heads/point_head_simple.py | import torch
from ...utils import box_utils
from .point_head_template import PointHeadTemplate
class PointHeadSimple(PointHeadTemplate):
    """
    A simple point-based segmentation head, used for PV-RCNN keypoint segmentation.
    Reference Paper: https://arxiv.org/abs/1912.13192
    PV-RCNN: Point-Voxel Feature Set Abstraction for 3D Object Detection
    """
    def __init__(self, num_class, input_channels, model_cfg, **kwargs):
        super().__init__(model_cfg=model_cfg, num_class=num_class)
        # Classification-only head: no box regression branch.
        self.cls_layers = self.make_fc_layers(
            fc_cfg=self.model_cfg.CLS_FC,
            input_channels=input_channels,
            output_channels=num_class
        )

    def assign_targets(self, input_dict):
        """
        Args:
            input_dict:
                point_features: (N1 + N2 + N3 + ..., C)
                batch_size:
                point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
                gt_boxes (optional): (B, M, 8)
        Returns:
            point_cls_labels: (N1 + N2 + N3 + ...), long type, 0:background, -1:ignored
            point_part_labels: (N1 + N2 + N3 + ..., 3)
        """
        point_coords = input_dict['point_coords']
        gt_boxes = input_dict['gt_boxes']
        assert gt_boxes.shape.__len__() == 3, 'gt_boxes.shape=%s' % str(gt_boxes.shape)
        assert point_coords.shape.__len__() in [2], 'points.shape=%s' % str(point_coords.shape)

        batch_size = gt_boxes.shape[0]
        # Enlarged boxes define an "ignore" margin around each gt box.
        extend_gt_boxes = box_utils.enlarge_box3d(
            gt_boxes.view(-1, gt_boxes.shape[-1]), extra_width=self.model_cfg.TARGET_CONFIG.GT_EXTRA_WIDTH
        ).view(batch_size, -1, gt_boxes.shape[-1])
        targets_dict = self.assign_stack_targets(
            points=point_coords, gt_boxes=gt_boxes, extend_gt_boxes=extend_gt_boxes,
            set_ignore_flag=True, use_ball_constraint=False,
            ret_part_labels=False
        )

        return targets_dict

    def get_loss(self, tb_dict=None, reduce=True):
        """Return the point classification loss (the only loss of this head)."""
        tb_dict = {} if tb_dict is None else tb_dict
        point_loss_cls, tb_dict_1 = self.get_cls_layer_loss(reduce=reduce)

        point_loss = point_loss_cls
        tb_dict.update(tb_dict_1)
        return point_loss, tb_dict

    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size:
                point_features: (N1 + N2 + N3 + ..., C) or (B, N, C)
                point_features_before_fusion: (N1 + N2 + N3 + ..., C)
                point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
                point_labels (optional): (N1 + N2 + N3 + ...)
                gt_boxes (optional): (B, M, 8)
        Returns:
            batch_dict:
                point_cls_scores: (N1 + N2 + N3 + ..., 1)
                point_part_offset: (N1 + N2 + N3 + ..., 3)
        """
        if self.model_cfg.get('USE_POINT_FEATURES_BEFORE_FUSION', False):
            point_features = batch_dict['point_features_before_fusion']
        else:
            point_features = batch_dict['point_features']
        point_cls_preds = self.cls_layers(point_features)  # (total_points, num_class)

        ret_dict = {
            'point_cls_preds': point_cls_preds,
        }

        # Per-point foreground confidence = max class probability.
        point_cls_scores = torch.sigmoid(point_cls_preds)
        batch_dict['point_cls_scores'], _ = point_cls_scores.max(dim=-1)

        if self.training:
            targets_dict = self.assign_targets(batch_dict)
            ret_dict['point_cls_labels'] = targets_dict['point_cls_labels']
        self.forward_ret_dict = ret_dict

        return batch_dict
| 3,594 | 38.076087 | 106 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/dense_heads/point_intra_part_head.py | import torch
from ...utils import box_coder_utils, box_utils
from .point_head_template import PointHeadTemplate
class PointIntraPartOffsetHead(PointHeadTemplate):
    """
    Point-based head for predicting the intra-object part locations.
    Reference Paper: https://arxiv.org/abs/1907.03670
    From Points to Parts: 3D Object Detection from Point Cloud with Part-aware and Part-aggregation Network
    """
    def __init__(self, num_class, input_channels, model_cfg, predict_boxes_when_training=False, **kwargs):
        super().__init__(model_cfg=model_cfg, num_class=num_class)
        self.predict_boxes_when_training = predict_boxes_when_training
        # Per-point classification branch.
        self.cls_layers = self.make_fc_layers(
            fc_cfg=self.model_cfg.CLS_FC,
            input_channels=input_channels,
            output_channels=num_class
        )
        # Intra-object part-location branch: 3 normalized coordinates.
        self.part_reg_layers = self.make_fc_layers(
            fc_cfg=self.model_cfg.PART_FC,
            input_channels=input_channels,
            output_channels=3
        )
        # Box regression branch is optional: only built when a box coder
        # is configured in TARGET_CONFIG.
        target_cfg = self.model_cfg.TARGET_CONFIG
        if target_cfg.get('BOX_CODER', None) is not None:
            self.box_coder = getattr(box_coder_utils, target_cfg.BOX_CODER)(
                **target_cfg.BOX_CODER_CONFIG
            )
            self.box_layers = self.make_fc_layers(
                fc_cfg=self.model_cfg.REG_FC,
                input_channels=input_channels,
                output_channels=self.box_coder.code_size
            )
        else:
            self.box_layers = None

    def assign_targets(self, input_dict):
        """
        Args:
            input_dict:
                point_features: (N1 + N2 + N3 + ..., C)
                batch_size:
                point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
                gt_boxes (optional): (B, M, 8)
        Returns:
            point_cls_labels: (N1 + N2 + N3 + ...), long type, 0:background, -1:ignored
            point_part_labels: (N1 + N2 + N3 + ..., 3)
        """
        point_coords = input_dict['point_coords']
        gt_boxes = input_dict['gt_boxes']
        assert gt_boxes.shape.__len__() == 3, 'gt_boxes.shape=%s' % str(gt_boxes.shape)
        assert point_coords.shape.__len__() in [2], 'points.shape=%s' % str(point_coords.shape)

        batch_size = gt_boxes.shape[0]
        # Enlarged boxes define an "ignore" margin around each gt box.
        extend_gt_boxes = box_utils.enlarge_box3d(
            gt_boxes.view(-1, gt_boxes.shape[-1]), extra_width=self.model_cfg.TARGET_CONFIG.GT_EXTRA_WIDTH
        ).view(batch_size, -1, gt_boxes.shape[-1])
        targets_dict = self.assign_stack_targets(
            points=point_coords, gt_boxes=gt_boxes, extend_gt_boxes=extend_gt_boxes,
            set_ignore_flag=True, use_ball_constraint=False,
            ret_part_labels=True, ret_box_labels=(self.box_layers is not None)
        )

        return targets_dict

    def get_loss(self, tb_dict=None):
        """Sum classification, part-location and (if enabled) box losses."""
        tb_dict = {} if tb_dict is None else tb_dict
        point_loss_cls, tb_dict = self.get_cls_layer_loss(tb_dict)
        point_loss_part, tb_dict = self.get_part_layer_loss(tb_dict)
        point_loss = point_loss_cls + point_loss_part

        if self.box_layers is not None:
            point_loss_box, tb_dict = self.get_box_layer_loss(tb_dict)
            point_loss += point_loss_box
        return point_loss, tb_dict

    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size:
                point_features: (N1 + N2 + N3 + ..., C) or (B, N, C)
                point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
                point_labels (optional): (N1 + N2 + N3 + ...)
                gt_boxes (optional): (B, M, 8)
        Returns:
            batch_dict:
                point_cls_scores: (N1 + N2 + N3 + ..., 1)
                point_part_offset: (N1 + N2 + N3 + ..., 3)
        """
        point_features = batch_dict['point_features']
        point_cls_preds = self.cls_layers(point_features)  # (total_points, num_class)
        point_part_preds = self.part_reg_layers(point_features)

        ret_dict = {
            'point_cls_preds': point_cls_preds,
            'point_part_preds': point_part_preds,
        }
        if self.box_layers is not None:
            point_box_preds = self.box_layers(point_features)
            ret_dict['point_box_preds'] = point_box_preds

        # Sigmoid maps part predictions into [0, 1] intra-object coordinates.
        point_cls_scores = torch.sigmoid(point_cls_preds)
        point_part_offset = torch.sigmoid(point_part_preds)
        batch_dict['point_cls_scores'], _ = point_cls_scores.max(dim=-1)
        batch_dict['point_part_offset'] = point_part_offset

        if self.training:
            targets_dict = self.assign_targets(batch_dict)
            ret_dict['point_cls_labels'] = targets_dict['point_cls_labels']
            ret_dict['point_part_labels'] = targets_dict.get('point_part_labels')
            ret_dict['point_box_labels'] = targets_dict.get('point_box_labels')

        if self.box_layers is not None and (not self.training or self.predict_boxes_when_training):
            # Decode per-point box regressions into full boxes.
            point_cls_preds, point_box_preds = self.generate_predicted_boxes(
                points=batch_dict['point_coords'][:, 1:4],
                point_cls_preds=point_cls_preds, point_box_preds=ret_dict['point_box_preds']
            )
            batch_dict['batch_cls_preds'] = point_cls_preds
            batch_dict['batch_box_preds'] = point_box_preds
            batch_dict['batch_index'] = batch_dict['point_coords'][:, 0]
            batch_dict['cls_preds_normalized'] = False

        self.forward_ret_dict = ret_dict

        return batch_dict
| 5,568 | 42.507813 | 107 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/dense_heads/target_assigner/anchor_generator.py | import torch
class AnchorGenerator(object):
    """Generates dense 3D anchor grids over a BEV feature map.

    One anchor set is produced per entry of ``anchor_generator_config``;
    each set covers every grid cell with all combinations of the configured
    sizes, bottom heights and rotations.
    """

    def __init__(self, anchor_range, anchor_generator_config):
        """
        Args:
            anchor_range: [x_min, y_min, z_min, x_max, y_max, z_max]
            anchor_generator_config: list of per-class dicts with keys
                'anchor_sizes', 'anchor_rotations', 'anchor_bottom_heights'
                and optional 'align_center'
        """
        super().__init__()
        self.anchor_generator_cfg = anchor_generator_config
        self.anchor_range = anchor_range
        self.anchor_sizes = [config['anchor_sizes'] for config in anchor_generator_config]
        self.anchor_rotations = [config['anchor_rotations'] for config in anchor_generator_config]
        self.anchor_heights = [config['anchor_bottom_heights'] for config in anchor_generator_config]
        self.align_center = [config.get('align_center', False) for config in anchor_generator_config]

        assert len(self.anchor_sizes) == len(self.anchor_rotations) == len(self.anchor_heights)
        self.num_of_anchor_sets = len(self.anchor_sizes)

    def generate_anchors(self, grid_sizes):
        """Build one anchor tensor per anchor set.

        Args:
            grid_sizes: list of [nx, ny] feature-map sizes, one per anchor set
        Returns:
            all_anchors: list of (nz*ny*nx... shaped) tensors of form
                (y, x, z, num_sizes, num_rotations, 7) with [x, y, z, dx, dy, dz, rot]
            num_anchors_per_location: list of ints (sizes * rotations * heights)
        """
        assert len(grid_sizes) == self.num_of_anchor_sets
        all_anchors = []
        num_anchors_per_location = []
        for grid_size, anchor_size, anchor_rotation, anchor_height, align_center in zip(
                grid_sizes, self.anchor_sizes, self.anchor_rotations, self.anchor_heights, self.align_center):

            num_anchors_per_location.append(len(anchor_rotation) * len(anchor_size) * len(anchor_height))
            if align_center:
                # Anchors sit at cell centers.
                x_stride = (self.anchor_range[3] - self.anchor_range[0]) / grid_size[0]
                y_stride = (self.anchor_range[4] - self.anchor_range[1]) / grid_size[1]
                x_offset, y_offset = x_stride / 2, y_stride / 2
            else:
                # Anchors sit at cell corners, spanning the full range inclusively.
                x_stride = (self.anchor_range[3] - self.anchor_range[0]) / (grid_size[0] - 1)
                y_stride = (self.anchor_range[4] - self.anchor_range[1]) / (grid_size[1] - 1)
                x_offset, y_offset = 0, 0

            # 1e-5 keeps the upper bound inclusive despite float rounding.
            x_shifts = torch.arange(
                self.anchor_range[0] + x_offset, self.anchor_range[3] + 1e-5, step=x_stride, dtype=torch.float32,
            ).cuda()
            y_shifts = torch.arange(
                self.anchor_range[1] + y_offset, self.anchor_range[4] + 1e-5, step=y_stride, dtype=torch.float32,
            ).cuda()
            z_shifts = x_shifts.new_tensor(anchor_height)

            num_anchor_size, num_anchor_rotation = anchor_size.__len__(), anchor_rotation.__len__()
            anchor_rotation = x_shifts.new_tensor(anchor_rotation)
            anchor_size = x_shifts.new_tensor(anchor_size)
            x_shifts, y_shifts, z_shifts = torch.meshgrid([
                x_shifts, y_shifts, z_shifts
            ])  # [x_grid, y_grid, z_grid]

            # Expand position grid with all size and rotation combinations.
            anchors = torch.stack((x_shifts, y_shifts, z_shifts), dim=-1)  # [x, y, z, 3]
            anchors = anchors[:, :, :, None, :].repeat(1, 1, 1, anchor_size.shape[0], 1)
            anchor_size = anchor_size.view(1, 1, 1, -1, 3).repeat([*anchors.shape[0:3], 1, 1])
            anchors = torch.cat((anchors, anchor_size), dim=-1)
            anchors = anchors[:, :, :, :, None, :].repeat(1, 1, 1, 1, num_anchor_rotation, 1)
            anchor_rotation = anchor_rotation.view(1, 1, 1, 1, -1, 1).repeat([*anchors.shape[0:3], num_anchor_size, 1, 1])
            anchors = torch.cat((anchors, anchor_rotation), dim=-1)  # [x, y, z, num_size, num_rot, 7]

            anchors = anchors.permute(2, 1, 0, 3, 4, 5).contiguous()
            #anchors = anchors.view(-1, anchors.shape[-1])
            anchors[..., 2] += anchors[..., 5] / 2  # shift to box centers
            all_anchors.append(anchors)
        return all_anchors, num_anchors_per_location
if __name__ == '__main__':
    # Quick smoke test: build one anchor configuration and generate anchors
    # for a 188 x 188 BEV feature map.
    from easydict import EasyDict

    config = [
        EasyDict({
            'anchor_sizes': [[2.1, 4.7, 1.7], [0.86, 0.91, 1.73], [0.84, 1.78, 1.78]],
            'anchor_rotations': [0, 1.57],
            # __init__ reads 'anchor_bottom_heights'; the previous demo passed
            # 'anchor_heights', which raised a KeyError before anything ran.
            'anchor_bottom_heights': [0, 0.5]
        })
    ]

    A = AnchorGenerator(
        anchor_range=[-75.2, -75.2, -2, 75.2, 75.2, 4],
        anchor_generator_config=config
    )
    # Leftover pdb.set_trace() removed: it blocked the script at this point.
    all_anchors, num_anchors_per_location = A.generate_anchors([[188, 188]])
    print([x.shape for x in all_anchors], num_anchors_per_location)
| 3,990 | 48.8875 | 122 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/dense_heads/target_assigner/axis_aligned_target_assigner.py | import numpy as np
import torch
from ....ops.iou3d_nms import iou3d_nms_utils
from ....utils import box_utils
class AxisAlignedTargetAssigner(object):
    def __init__(self, model_cfg, class_names, box_coder, match_height=False):
        """
        Args:
            model_cfg: head config providing ANCHOR_GENERATOR_CONFIG and
                TARGET_ASSIGNER_CONFIG
            class_names: ordered list of class names (1-based ids elsewhere)
            box_coder: encoder/decoder for box regression targets
            match_height: if True match anchors with full 3D IoU, otherwise
                with nearest-BEV IoU
        """
        super().__init__()

        anchor_generator_cfg = model_cfg.ANCHOR_GENERATOR_CONFIG
        anchor_target_cfg = model_cfg.TARGET_ASSIGNER_CONFIG
        self.box_coder = box_coder
        self.match_height = match_height
        self.class_names = np.array(class_names)
        self.anchor_class_names = [config['class_name'] for config in anchor_generator_cfg]
        # POS_FRACTION < 0 in the config disables positive/negative subsampling.
        self.pos_fraction = anchor_target_cfg.POS_FRACTION if anchor_target_cfg.POS_FRACTION >= 0 else None
        self.sample_size = anchor_target_cfg.SAMPLE_SIZE
        self.norm_by_num_examples = anchor_target_cfg.NORM_BY_NUM_EXAMPLES
        # Per-class IoU thresholds for assigning foreground / background anchors.
        self.matched_thresholds = {}
        self.unmatched_thresholds = {}
        for config in anchor_generator_cfg:
            self.matched_thresholds[config['class_name']] = config['matched_threshold']
            self.unmatched_thresholds[config['class_name']] = config['unmatched_threshold']

        self.use_multihead = model_cfg.get('USE_MULTIHEAD', False)
        # self.separate_multihead = model_cfg.get('SEPARATE_MULTIHEAD', False)
        # if self.seperate_multihead:
        #     rpn_head_cfgs = model_cfg.RPN_HEAD_CFGS
        #     self.gt_remapping = {}
        #     for rpn_head_cfg in rpn_head_cfgs:
        #         for idx, name in enumerate(rpn_head_cfg['HEAD_CLS_NAME']):
        #             self.gt_remapping[name] = idx + 1
    def assign_targets(self, all_anchors, gt_boxes_with_classes):
        """
        Assign classification labels and regression targets to every anchor.

        Args:
            all_anchors: [(N, 7), ...]
            gt_boxes: (B, M, 8)
        Returns:
            all_targets_dict with batched 'box_cls_labels', 'box_reg_targets'
            and 'reg_weights', flattened over all anchor sets
        """

        bbox_targets = []
        cls_labels = []
        reg_weights = []

        batch_size = gt_boxes_with_classes.shape[0]
        gt_classes = gt_boxes_with_classes[:, :, -1]
        gt_boxes = gt_boxes_with_classes[:, :, :-1]
        for k in range(batch_size):
            cur_gt = gt_boxes[k]
            # Trim the zero-padded tail of the gt tensor for this sample.
            cnt = cur_gt.__len__() - 1
            while cnt > 0 and cur_gt[cnt].sum() == 0:
                cnt -= 1
            cur_gt = cur_gt[:cnt + 1]
            cur_gt_classes = gt_classes[k][:cnt + 1].int()

            target_list = []
            # Match each per-class anchor set only against gts of that class.
            for anchor_class_name, anchors in zip(self.anchor_class_names, all_anchors):
                if cur_gt_classes.shape[0] > 1:
                    mask = torch.from_numpy(self.class_names[cur_gt_classes.cpu() - 1] == anchor_class_name)
                else:
                    mask = torch.tensor([self.class_names[c - 1] == anchor_class_name
                                         for c in cur_gt_classes], dtype=torch.bool)

                if self.use_multihead:
                    anchors = anchors.permute(3, 4, 0, 1, 2, 5).contiguous().view(-1, anchors.shape[-1])
                    # if self.seperate_multihead:
                    #     selected_classes = cur_gt_classes[mask].clone()
                    #     if len(selected_classes) > 0:
                    #         new_cls_id = self.gt_remapping[anchor_class_name]
                    #         selected_classes[:] = new_cls_id
                    # else:
                    #     selected_classes = cur_gt_classes[mask]
                    selected_classes = cur_gt_classes[mask]
                else:
                    feature_map_size = anchors.shape[:3]
                    anchors = anchors.view(-1, anchors.shape[-1])
                    selected_classes = cur_gt_classes[mask]

                single_target = self.assign_targets_single(
                    anchors,
                    cur_gt[mask],
                    gt_classes=selected_classes,
                    matched_threshold=self.matched_thresholds[anchor_class_name],
                    unmatched_threshold=self.unmatched_thresholds[anchor_class_name]
                )
                target_list.append(single_target)

            # Merge the per-class target dicts into flat per-sample tensors;
            # layouts differ between multihead and single-head anchors.
            if self.use_multihead:
                target_dict = {
                    'box_cls_labels': [t['box_cls_labels'].view(-1) for t in target_list],
                    'box_reg_targets': [t['box_reg_targets'].view(-1, self.box_coder.code_size) for t in target_list],
                    'reg_weights': [t['reg_weights'].view(-1) for t in target_list]
                }

                target_dict['box_reg_targets'] = torch.cat(target_dict['box_reg_targets'], dim=0)
                target_dict['box_cls_labels'] = torch.cat(target_dict['box_cls_labels'], dim=0).view(-1)
                target_dict['reg_weights'] = torch.cat(target_dict['reg_weights'], dim=0).view(-1)
            else:
                target_dict = {
                    'box_cls_labels': [t['box_cls_labels'].view(*feature_map_size, -1) for t in target_list],
                    'box_reg_targets': [t['box_reg_targets'].view(*feature_map_size, -1, self.box_coder.code_size)
                                        for t in target_list],
                    'reg_weights': [t['reg_weights'].view(*feature_map_size, -1) for t in target_list]
                }
                target_dict['box_reg_targets'] = torch.cat(
                    target_dict['box_reg_targets'], dim=-2
                ).view(-1, self.box_coder.code_size)

                target_dict['box_cls_labels'] = torch.cat(target_dict['box_cls_labels'], dim=-1).view(-1)
                target_dict['reg_weights'] = torch.cat(target_dict['reg_weights'], dim=-1).view(-1)

            bbox_targets.append(target_dict['box_reg_targets'])
            cls_labels.append(target_dict['box_cls_labels'])
            reg_weights.append(target_dict['reg_weights'])

        bbox_targets = torch.stack(bbox_targets, dim=0)

        cls_labels = torch.stack(cls_labels, dim=0)
        reg_weights = torch.stack(reg_weights, dim=0)
        all_targets_dict = {
            'box_cls_labels': cls_labels,
            'box_reg_targets': bbox_targets,
            'reg_weights': reg_weights

        }
        return all_targets_dict
def assign_targets_single(self, anchors, gt_boxes, gt_classes, matched_threshold=0.6, unmatched_threshold=0.45):
    """Assign cls labels / box regression targets to one set of anchors for one sample.

    Args:
        anchors: (N, 7+C) anchor boxes [x, y, z, dx, dy, dz, heading, ...]
        gt_boxes: (M, 7+C) ground-truth boxes for this sample (padding already stripped)
        gt_classes: (M,) 1-based class index of each GT box
        matched_threshold: IoU above which an anchor is foreground
        unmatched_threshold: IoU below which an anchor is background

    Returns:
        dict with:
            box_cls_labels: (N,) int32; -1 = ignore, 0 = background, >0 = class id
            box_reg_targets: (N, code_size) encoded regression targets (non-zero only for fg)
            reg_weights: (N,) per-anchor regression weight
    """
    num_anchors = anchors.shape[0]
    num_gt = gt_boxes.shape[0]
    # Start everything as -1 ("ignore"); anchors become fg/bg below.
    labels = torch.ones((num_anchors,), dtype=torch.int32, device=anchors.device) * -1
    gt_ids = torch.ones((num_anchors,), dtype=torch.int32, device=anchors.device) * -1
    if len(gt_boxes) > 0 and anchors.shape[0] > 0:
        # (N, M) overlap matrix: full 3D IoU when match_height, else nearest-BEV IoU.
        anchor_by_gt_overlap = iou3d_nms_utils.boxes_iou3d_gpu(anchors[:, 0:7], gt_boxes[:, 0:7]) \
            if self.match_height else box_utils.boxes3d_nearest_bev_iou(anchors[:, 0:7], gt_boxes[:, 0:7])
        # NOTE: The speed of these two versions depends on the environment and the number of anchors
        # anchor_to_gt_argmax = torch.from_numpy(anchor_by_gt_overlap.cpu().numpy().argmax(axis=1)).cuda()
        anchor_to_gt_argmax = anchor_by_gt_overlap.argmax(dim=1)
        # Best-matching GT (index + IoU value) for every anchor.
        anchor_to_gt_max = anchor_by_gt_overlap[torch.arange(num_anchors, device=anchors.device), anchor_to_gt_argmax]
        # gt_to_anchor_argmax = torch.from_numpy(anchor_by_gt_overlap.cpu().numpy().argmax(axis=0)).cuda()
        gt_to_anchor_argmax = anchor_by_gt_overlap.argmax(dim=0)
        # Best-matching anchor (IoU value) for every GT.
        gt_to_anchor_max = anchor_by_gt_overlap[gt_to_anchor_argmax, torch.arange(num_gt, device=anchors.device)]
        # GTs that overlap no anchor at all: set their max to -1 so the equality
        # test below cannot accidentally match zero-overlap anchors.
        empty_gt_mask = gt_to_anchor_max == 0
        gt_to_anchor_max[empty_gt_mask] = -1
        # Force-match: every GT claims the anchor(s) achieving its max IoU,
        # even if that IoU is below matched_threshold.
        anchors_with_max_overlap = (anchor_by_gt_overlap == gt_to_anchor_max).nonzero()[:, 0]
        gt_inds_force = anchor_to_gt_argmax[anchors_with_max_overlap]
        labels[anchors_with_max_overlap] = gt_classes[gt_inds_force]
        gt_ids[anchors_with_max_overlap] = gt_inds_force.int()
        # Threshold-match: anchors whose best IoU clears matched_threshold are fg.
        pos_inds = anchor_to_gt_max >= matched_threshold
        gt_inds_over_thresh = anchor_to_gt_argmax[pos_inds]
        labels[pos_inds] = gt_classes[gt_inds_over_thresh]
        gt_ids[pos_inds] = gt_inds_over_thresh.int()
        bg_inds = (anchor_to_gt_max < unmatched_threshold).nonzero()[:, 0]
    else:
        # No GT (or no anchors): everything is background.
        bg_inds = torch.arange(num_anchors, device=anchors.device)
    fg_inds = (labels > 0).nonzero()[:, 0]
    if self.pos_fraction is not None:
        # Optional fg/bg subsampling to a fixed sample_size budget.
        num_fg = int(self.pos_fraction * self.sample_size)
        if len(fg_inds) > num_fg:
            num_disabled = len(fg_inds) - num_fg
            # NOTE(review): this indexes `labels` with positions into fg_inds rather
            # than with fg_inds[...] themselves — presumably a latent bug that is
            # inert because pos_fraction is None in the shipped configs; confirm
            # before enabling POS_FRACTION.
            disable_inds = torch.randperm(len(fg_inds))[:num_disabled]
            labels[disable_inds] = -1
            fg_inds = (labels > 0).nonzero()[:, 0]
        num_bg = self.sample_size - (labels > 0).sum()
        if len(bg_inds) > num_bg:
            # Sample (with replacement) which background anchors stay labeled 0.
            enable_inds = bg_inds[torch.randint(0, len(bg_inds), size=(num_bg,))]
            labels[enable_inds] = 0
        # bg_inds = torch.nonzero(labels == 0)[:, 0]
    else:
        if len(gt_boxes) == 0 or anchors.shape[0] == 0:
            labels[:] = 0
        else:
            labels[bg_inds] = 0
            # Re-assert the force-matched anchors: a force-matched anchor may also
            # sit below unmatched_threshold and have just been zeroed above.
            labels[anchors_with_max_overlap] = gt_classes[gt_inds_force]
    bbox_targets = anchors.new_zeros((num_anchors, self.box_coder.code_size))
    if len(gt_boxes) > 0 and anchors.shape[0] > 0:
        # Encode regression targets only for foreground anchors.
        fg_gt_boxes = gt_boxes[anchor_to_gt_argmax[fg_inds], :]
        fg_anchors = anchors[fg_inds, :]
        bbox_targets[fg_inds, :] = self.box_coder.encode_torch(fg_gt_boxes, fg_anchors)
    reg_weights = anchors.new_zeros((num_anchors,))
    if self.norm_by_num_examples:
        # Normalize fg regression weights by the number of non-ignored anchors.
        num_examples = (labels >= 0).sum()
        num_examples = num_examples if num_examples > 1.0 else 1.0
        reg_weights[labels > 0] = 1.0 / num_examples
    else:
        reg_weights[labels > 0] = 1.0
    ret_dict = {
        'box_cls_labels': labels,
        'box_reg_targets': bbox_targets,
        'reg_weights': reg_weights,
    }
    return ret_dict
| 10,042 | 46.597156 | 122 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/dense_heads/target_assigner/atss_target_assigner.py | import torch
from ....ops.iou3d_nms import iou3d_nms_utils
from ....utils import common_utils
class ATSSTargetAssigner(object):
    """
    Adaptive Training Sample Selection (ATSS) target assigner for 3D anchors.
    Positives are chosen per GT from its top-k closest anchors whose IoU exceeds
    mean(IoU) + std(IoU) of those candidates and whose center lies inside the GT box.
    Reference: https://arxiv.org/abs/1912.02424
    """
    def __init__(self, topk, box_coder, match_height=False):
        # topk: number of closest anchors (by center distance) considered per GT box
        # box_coder: encoder used to produce regression targets
        # match_height: if True use full 3D IoU, otherwise BEV IoU
        self.topk = topk
        self.box_coder = box_coder
        self.match_height = match_height
    def assign_targets(self, anchors_list, gt_boxes_with_classes, use_multihead=False):
        """
        Args:
            anchors_list: [(N, 7), ...] one anchor tensor per anchor set (or a single tensor)
            gt_boxes_with_classes: (B, M, 8) GT boxes with the class id in the last column
        Returns:
            dict with 'box_cls_labels' (B, N), 'box_reg_targets' (B, N, code_size)
            and 'reg_weights' (B, N); multiple anchor sets are concatenated on dim 1.
        """
        if not isinstance(anchors_list, list):
            anchors_list = [anchors_list]
            single_set_of_anchor = True
        else:
            single_set_of_anchor = len(anchors_list) == 1
        cls_labels_list, reg_targets_list, reg_weights_list = [], [], []
        for anchors in anchors_list:
            batch_size = gt_boxes_with_classes.shape[0]
            gt_classes = gt_boxes_with_classes[:, :, -1]
            gt_boxes = gt_boxes_with_classes[:, :, :-1]
            if use_multihead:
                # Multi-head layout stores anchors as (H, W, Z, num_size, num_rot, C);
                # bring size/rotation axes first before flattening.
                anchors = anchors.permute(3, 4, 0, 1, 2, 5).contiguous().view(-1, anchors.shape[-1])
            else:
                anchors = anchors.view(-1, anchors.shape[-1])
            cls_labels, reg_targets, reg_weights = [], [], []
            for k in range(batch_size):
                cur_gt = gt_boxes[k]
                # Strip zero-padded (empty) GT rows from the tail of the batch tensor.
                cnt = cur_gt.__len__() - 1
                while cnt > 0 and cur_gt[cnt].sum() == 0:
                    cnt -= 1
                cur_gt = cur_gt[:cnt + 1]
                cur_gt_classes = gt_classes[k][:cnt + 1]
                cur_cls_labels, cur_reg_targets, cur_reg_weights = self.assign_targets_single(
                    anchors, cur_gt, cur_gt_classes
                )
                cls_labels.append(cur_cls_labels)
                reg_targets.append(cur_reg_targets)
                reg_weights.append(cur_reg_weights)
            cls_labels = torch.stack(cls_labels, dim=0)
            reg_targets = torch.stack(reg_targets, dim=0)
            reg_weights = torch.stack(reg_weights, dim=0)
            cls_labels_list.append(cls_labels)
            reg_targets_list.append(reg_targets)
            reg_weights_list.append(reg_weights)
        if single_set_of_anchor:
            ret_dict = {
                'box_cls_labels': cls_labels_list[0],
                'box_reg_targets': reg_targets_list[0],
                'reg_weights': reg_weights_list[0]
            }
        else:
            ret_dict = {
                'box_cls_labels': torch.cat(cls_labels_list, dim=1),
                'box_reg_targets': torch.cat(reg_targets_list, dim=1),
                'reg_weights': torch.cat(reg_weights_list, dim=1)
            }
        return ret_dict
    def assign_targets_single(self, anchors, gt_boxes, gt_classes):
        """
        Assign targets for a single sample using the ATSS rule.

        Args:
            anchors: (N, 7) [x, y, z, dx, dy, dz, heading]
            gt_boxes: (M, 7) [x, y, z, dx, dy, dz, heading]
            gt_classes: (M)
        Returns:
            cls_labels: (N,) 0 = background, >0 = class id
            reg_targets: (N, code_size) encoded targets (non-zero only for positives)
            reg_weights: (N,) 1.0 for positives, else 0.0
        """
        num_anchor = anchors.shape[0]
        num_gt = gt_boxes.shape[0]
        # select topk anchors for each gt_boxes
        if self.match_height:
            ious = iou3d_nms_utils.boxes_iou3d_gpu(anchors[:, 0:7], gt_boxes[:, 0:7])  # (N, M)
        else:
            ious = iou3d_nms_utils.boxes_iou_bev(anchors[:, 0:7], gt_boxes[:, 0:7])
        # Candidate selection is by center distance, not IoU.
        distance = (anchors[:, None, 0:3] - gt_boxes[None, :, 0:3]).norm(dim=-1)  # (N, M)
        _, topk_idxs = distance.topk(self.topk, dim=0, largest=False)  # (K, M)
        candidate_ious = ious[topk_idxs, torch.arange(num_gt)]  # (K, M)
        # Per-GT adaptive IoU threshold: mean + std over its K candidates.
        iou_mean_per_gt = candidate_ious.mean(dim=0)
        iou_std_per_gt = candidate_ious.std(dim=0)
        iou_thresh_per_gt = iou_mean_per_gt + iou_std_per_gt + 1e-6
        is_pos = candidate_ious >= iou_thresh_per_gt[None, :]  # (K, M)
        # check whether anchor_center in gt_boxes, only check BEV x-y axes
        candidate_anchors = anchors[topk_idxs.view(-1)]  # (KxM, 7)
        gt_boxes_of_each_anchor = gt_boxes[:, :].repeat(self.topk, 1)  # (KxM, 7)
        # Rotate anchor centers into each GT's local frame to test containment.
        xyz_local = candidate_anchors[:, 0:3] - gt_boxes_of_each_anchor[:, 0:3]
        xyz_local = common_utils.rotate_points_along_z(
            xyz_local[:, None, :], -gt_boxes_of_each_anchor[:, 6]
        ).squeeze(dim=1)
        xy_local = xyz_local[:, 0:2]
        lw = gt_boxes_of_each_anchor[:, 3:5][:, [1, 0]]  # bugfixed: w ==> y, l ==> x in local coords
        is_in_gt = ((xy_local <= lw / 2) & (xy_local >= -lw / 2)).all(dim=-1).view(-1, num_gt)  # (K, M)
        is_pos = is_pos & is_in_gt  # (K, M)
        # Offset candidate indices so each GT's column maps into a flat (M*N,) space.
        for ng in range(num_gt):
            topk_idxs[:, ng] += ng * num_anchor
        # select the highest IoU if an anchor box is assigned with multiple gt_boxes
        INF = -0x7FFFFFFF
        ious_inf = torch.full_like(ious, INF).t().contiguous().view(-1)  # (MxN)
        index = topk_idxs.view(-1)[is_pos.view(-1)]
        ious_inf[index] = ious.t().contiguous().view(-1)[index]
        ious_inf = ious_inf.view(num_gt, -1).t()  # (N, M)
        anchors_to_gt_values, anchors_to_gt_indexs = ious_inf.max(dim=1)
        # match the gt_boxes to the anchors which have maximum iou with them
        max_iou_of_each_gt, argmax_iou_of_each_gt = ious.max(dim=0)
        anchors_to_gt_indexs[argmax_iou_of_each_gt] = torch.arange(0, num_gt, device=ious.device)
        anchors_to_gt_values[argmax_iou_of_each_gt] = max_iou_of_each_gt
        cls_labels = gt_classes[anchors_to_gt_indexs]
        # Anchors whose best value is still INF were never selected: background.
        cls_labels[anchors_to_gt_values == INF] = 0
        matched_gts = gt_boxes[anchors_to_gt_indexs]
        pos_mask = cls_labels > 0
        reg_targets = matched_gts.new_zeros((num_anchor, self.box_coder.code_size))
        reg_weights = matched_gts.new_zeros(num_anchor)
        if pos_mask.sum() > 0:
            reg_targets[pos_mask > 0] = self.box_coder.encode_torch(matched_gts[pos_mask > 0], anchors[pos_mask > 0])
            reg_weights[pos_mask] = 1.0
        return cls_labels, reg_targets, reg_weights
| 6,050 | 41.612676 | 117 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/roi_heads/roi_head_template.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.config import cfg
from ...utils import box_coder_utils, common_utils, loss_utils
from ..model_utils.model_nms_utils import class_agnostic_nms
from .target_assigner.proposal_target_layer import ProposalTargetLayer
class RoIHeadTemplate(nn.Module):
    """Base class for second-stage RoI refinement heads.

    Provides proposal generation (per-sample NMS over first-stage predictions),
    proposal-to-GT target assignment with canonical-coordinate transformation,
    and the shared classification / regression / corner losses, plus the
    loss-prediction loss used for active-learning sample selection.
    """

    def __init__(self, num_class, model_cfg, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        self.num_class = num_class
        # Box coder class is selected by name from the config.
        self.box_coder = getattr(box_coder_utils, self.model_cfg.TARGET_CONFIG.BOX_CODER)(
            **self.model_cfg.TARGET_CONFIG.get('BOX_CODER_CONFIG', {})
        )
        self.proposal_target_layer = ProposalTargetLayer(roi_sampler_cfg=self.model_cfg.TARGET_CONFIG)
        self.build_losses(self.model_cfg.LOSS_CONFIG)
        # Populated by the concrete head's forward(); consumed by get_loss().
        self.forward_ret_dict = None

    def build_losses(self, losses_cfg):
        """Register the weighted smooth-L1 box regression loss as a submodule."""
        self.add_module(
            'reg_loss_func',
            loss_utils.WeightedSmoothL1Loss(code_weights=losses_cfg.LOSS_WEIGHTS['code_weights'])
        )

    def make_fc_layers(self, input_channels, output_channels, fc_list):
        """Build a 1x1-Conv1d MLP (Conv1d + BN + ReLU per hidden layer, Dropout
        after the first layer when configured) ending in a linear Conv1d head.

        Args:
            input_channels: channels of the input feature
            output_channels: channels of the final prediction layer
            fc_list: hidden channel sizes
        Returns:
            nn.Sequential implementing the stack
        """
        fc_layers = []
        pre_channel = input_channels
        for k in range(0, fc_list.__len__()):
            fc_layers.extend([
                nn.Conv1d(pre_channel, fc_list[k], kernel_size=1, bias=False),
                nn.BatchNorm1d(fc_list[k]),
                nn.ReLU()
            ])
            pre_channel = fc_list[k]
            # Dropout only after the first hidden layer, when DP_RATIO is set.
            if self.model_cfg.DP_RATIO >= 0 and k == 0:
                fc_layers.append(nn.Dropout(self.model_cfg.DP_RATIO))
        fc_layers.append(nn.Conv1d(pre_channel, output_channels, kernel_size=1, bias=True))
        fc_layers = nn.Sequential(*fc_layers)
        return fc_layers

    @torch.no_grad()
    def proposal_layer(self, batch_dict, nms_config):
        """
        Args:
            batch_dict:
                batch_size:
                batch_cls_preds: (B, num_boxes, num_classes | 1) or (N1+N2+..., num_classes | 1)
                batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C)
                cls_preds_normalized: indicate whether batch_cls_preds is normalized
                batch_index: optional (N1+N2+...)
            nms_config:
        Returns:
            batch_dict:
                rois: (B, num_rois, 7+C)
                roi_scores: (B, num_rois)
                roi_labels: (B, num_rois), 1-based class ids
                full_cls_scores: (B, num_rois, num_classes) pre-NMS logits of kept boxes
        """
        # Proposals may already have been generated (e.g. during target assignment).
        if batch_dict.get('rois', None) is not None:
            return batch_dict

        batch_size = batch_dict['batch_size']
        batch_box_preds = batch_dict['batch_box_preds']
        batch_cls_preds = batch_dict['batch_cls_preds']
        rois = batch_box_preds.new_zeros((batch_size, nms_config.NMS_POST_MAXSIZE, batch_box_preds.shape[-1]))
        roi_scores = batch_box_preds.new_zeros((batch_size, nms_config.NMS_POST_MAXSIZE))
        roi_labels = batch_box_preds.new_zeros((batch_size, nms_config.NMS_POST_MAXSIZE), dtype=torch.long)
        # Export the full per-class logits of the kept proposals (used by the
        # active-learning selection strategies downstream).
        full_cls_scores = batch_box_preds.new_zeros((batch_size, nms_config.NMS_POST_MAXSIZE, len(cfg.CLASS_NAMES)))

        for index in range(batch_size):
            if batch_dict.get('batch_index', None) is not None:
                # Flat (N1+N2+..., C) layout: select this sample's rows by mask.
                assert batch_cls_preds.shape.__len__() == 2
                batch_mask = (batch_dict['batch_index'] == index)
            else:
                # Dense (B, N, C) layout: index the batch dimension directly.
                assert batch_dict['batch_cls_preds'].shape.__len__() == 3
                batch_mask = index
            box_preds = batch_box_preds[batch_mask]
            cls_preds = batch_cls_preds[batch_mask]

            cur_roi_scores, cur_roi_labels = torch.max(cls_preds, dim=1)

            if nms_config.MULTI_CLASSES_NMS:
                raise NotImplementedError
            else:
                selected, selected_scores = class_agnostic_nms(
                    box_scores=cur_roi_scores, box_preds=box_preds, nms_config=nms_config
                )
                rois[index, :len(selected), :] = box_preds[selected]
                roi_scores[index, :len(selected)] = cur_roi_scores[selected]
                roi_labels[index, :len(selected)] = cur_roi_labels[selected]
                # Keep the full logits of the selected proposals as well.
                full_cls_scores[index, :len(selected), :] = cls_preds[selected]

        batch_dict['rois'] = rois
        batch_dict['roi_scores'] = roi_scores
        # torch.max returns 0-based argmax; labels are 1-based class ids.
        batch_dict['roi_labels'] = roi_labels + 1
        batch_dict['full_cls_scores'] = full_cls_scores
        batch_dict['has_class_labels'] = True if batch_cls_preds.shape[-1] > 1 else False
        batch_dict.pop('batch_index', None)
        return batch_dict

    def assign_targets(self, batch_dict):
        """Sample RoIs, match them to GT boxes, and express the matched GT boxes
        in each RoI's canonical (RoI-centered, heading-aligned) frame.

        Returns:
            targets_dict with 'rois', 'gt_of_rois' (canonical), 'gt_of_rois_src'
            (original LiDAR frame) and the sampling masks/labels.
        """
        batch_size = batch_dict['batch_size']
        with torch.no_grad():
            targets_dict = self.proposal_target_layer.forward(batch_dict)

        rois = targets_dict['rois']  # (B, N, 7 + C)
        gt_of_rois = targets_dict['gt_of_rois']  # (B, N, 7 + C + 1)
        targets_dict['gt_of_rois_src'] = gt_of_rois.clone().detach()

        # Canonical transformation: translate GT to RoI center, rotate by -RoI heading.
        roi_center = rois[:, :, 0:3]
        roi_ry = rois[:, :, 6] % (2 * np.pi)
        gt_of_rois[:, :, 0:3] = gt_of_rois[:, :, 0:3] - roi_center
        gt_of_rois[:, :, 6] = gt_of_rois[:, :, 6] - roi_ry

        # Transfer LiDAR coords to local coords.
        gt_of_rois = common_utils.rotate_points_along_z(
            points=gt_of_rois.view(-1, 1, gt_of_rois.shape[-1]), angle=-roi_ry.view(-1)
        ).view(batch_size, -1, gt_of_rois.shape[-1])

        # Flip orientation if RoIs have opposite orientation, then clamp the
        # residual heading to (-pi/2, pi/2) so the regression target is small.
        heading_label = gt_of_rois[:, :, 6] % (2 * np.pi)  # 0 ~ 2pi
        opposite_flag = (heading_label > np.pi * 0.5) & (heading_label < np.pi * 1.5)
        heading_label[opposite_flag] = (heading_label[opposite_flag] + np.pi) % (2 * np.pi)  # (0 ~ pi/2, 3pi/2 ~ 2pi)
        flag = heading_label > np.pi
        heading_label[flag] = heading_label[flag] - np.pi * 2  # (-pi/2, pi/2)
        heading_label = torch.clamp(heading_label, min=-np.pi / 2, max=np.pi / 2)
        gt_of_rois[:, :, 6] = heading_label

        targets_dict['gt_of_rois'] = gt_of_rois
        return targets_dict

    def get_box_reg_layer_loss(self, forward_ret_dict, reduce=True):
        """Smooth-L1 box regression loss (plus optional corner-loss regularization).

        Args:
            forward_ret_dict: dict produced by the head's forward pass
            reduce: if True return a scalar; otherwise per-sample losses

        Returns:
            (rcnn_loss_reg, tb_dict) — except on the BADGE-selection path below,
            which returns only the unreduced per-element loss tensor (its caller
            invokes this method directly and does not unpack a tuple).
        """
        loss_cfgs = self.model_cfg.LOSS_CONFIG
        rcnn_reg = forward_ret_dict['rcnn_reg']  # (rcnn_batch_size, C)

        # BADGE active-learning selection supplies pre-encoded sample targets and
        # needs the raw element-wise loss (no fg masking, no reduction).
        if 'reg_sample_targets' in forward_ret_dict.keys():
            reg_targets = forward_ret_dict['reg_sample_targets']
            if loss_cfgs.REG_LOSS == 'smooth-l1':
                rcnn_loss_reg = self.reg_loss_func(
                    rcnn_reg.unsqueeze(dim=0),
                    reg_targets.unsqueeze(dim=0),
                )  # [B, M, 7]
                rcnn_loss_reg = rcnn_loss_reg * loss_cfgs.LOSS_WEIGHTS['rcnn_reg_weight']
                return rcnn_loss_reg

        code_size = self.box_coder.code_size
        reg_valid_mask = forward_ret_dict['reg_valid_mask'].view(-1)
        gt_boxes3d_ct = forward_ret_dict['gt_of_rois'][..., 0:code_size]
        gt_of_rois_src = forward_ret_dict['gt_of_rois_src'][..., 0:code_size].view(-1, code_size)
        roi_boxes3d = forward_ret_dict['rois']
        batch_size = gt_boxes3d_ct.shape[0]
        rcnn_batch_size = gt_boxes3d_ct.view(-1, code_size).shape[0]
        fg_mask = (reg_valid_mask > 0)
        fg_sum = fg_mask.long().sum().item()
        tb_dict = {}

        if loss_cfgs.REG_LOSS == 'smooth-l1':
            # Encode targets against zero-centered, zero-heading anchors since
            # gt_of_rois is already in each RoI's canonical frame.
            rois_anchor = roi_boxes3d.clone().detach().view(-1, code_size)
            rois_anchor[:, 0:3] = 0
            rois_anchor[:, 6] = 0
            reg_targets = self.box_coder.encode_torch(
                gt_boxes3d_ct.view(rcnn_batch_size, code_size), rois_anchor
            )
            forward_ret_dict['rcnn_reg_gt'] = reg_targets

            rcnn_loss_reg = self.reg_loss_func(
                rcnn_reg.view(rcnn_batch_size, -1).unsqueeze(dim=0),
                reg_targets.unsqueeze(dim=0),
            )  # [B, M, 7]
            if reduce:
                rcnn_loss_reg = (rcnn_loss_reg.view(rcnn_batch_size, -1) * fg_mask.unsqueeze(dim=-1).float()).sum() / max(fg_sum, 1)
            else:
                rcnn_loss_reg = (rcnn_loss_reg.view(rcnn_batch_size, -1) * fg_mask.unsqueeze(dim=-1).float()) / max(fg_sum, 1)
                rcnn_loss_reg = rcnn_loss_reg.view(batch_size, -1).sum(-1)
            rcnn_loss_reg = rcnn_loss_reg * loss_cfgs.LOSS_WEIGHTS['rcnn_reg_weight']
            tb_dict['rcnn_loss_reg'] = rcnn_loss_reg.item() if reduce else rcnn_loss_reg[0].item()

            if loss_cfgs.CORNER_LOSS_REGULARIZATION and fg_sum > 0:
                if reduce:
                    # Decode fg predictions back to LiDAR frame and penalize the
                    # corner distance to the original (un-transformed) GT boxes.
                    fg_rcnn_reg = rcnn_reg.view(rcnn_batch_size, -1)[fg_mask]
                    fg_roi_boxes3d = roi_boxes3d.view(-1, code_size)[fg_mask]

                    fg_roi_boxes3d = fg_roi_boxes3d.view(1, -1, code_size)
                    batch_anchors = fg_roi_boxes3d.clone().detach()
                    roi_ry = fg_roi_boxes3d[:, :, 6].view(-1)
                    roi_xyz = fg_roi_boxes3d[:, :, 0:3].view(-1, 3)
                    batch_anchors[:, :, 0:3] = 0
                    rcnn_boxes3d = self.box_coder.decode_torch(
                        fg_rcnn_reg.view(batch_anchors.shape[0], -1, code_size), batch_anchors
                    ).view(-1, code_size)

                    rcnn_boxes3d = common_utils.rotate_points_along_z(
                        rcnn_boxes3d.unsqueeze(dim=1), roi_ry
                    ).squeeze(dim=1)
                    rcnn_boxes3d[:, 0:3] += roi_xyz

                    loss_corner = loss_utils.get_corner_loss_lidar(
                        rcnn_boxes3d[:, 0:7],
                        gt_of_rois_src[fg_mask][:, 0:7]
                    )
                    loss_corner = loss_corner.mean()
                else:
                    # Per-sample corner loss: process each batch element separately.
                    fg_mask_background = torch.zeros_like(fg_mask).bool().view(batch_size, -1)
                    fg_mask = fg_mask.view(batch_size, -1)
                    loss_corner_list = []
                    for batch_idx in range(batch_size):
                        fg_mask_temp = fg_mask_background.clone()
                        fg_mask_temp[batch_idx, :] = fg_mask[batch_idx, :]
                        fg_mask_temp = fg_mask_temp.view(-1)
                        fg_rcnn_reg = rcnn_reg.view(rcnn_batch_size, -1)[fg_mask_temp]
                        fg_roi_boxes3d = roi_boxes3d.view(-1, code_size)[fg_mask_temp]

                        fg_roi_boxes3d = fg_roi_boxes3d.view(1, -1, code_size)
                        batch_anchors = fg_roi_boxes3d.clone().detach()
                        roi_ry = fg_roi_boxes3d[:, :, 6].view(-1)
                        roi_xyz = fg_roi_boxes3d[:, :, 0:3].view(-1, 3)
                        batch_anchors[:, :, 0:3] = 0
                        rcnn_boxes3d = self.box_coder.decode_torch(
                            fg_rcnn_reg.view(batch_anchors.shape[0], -1, code_size), batch_anchors
                        ).view(-1, code_size)

                        rcnn_boxes3d = common_utils.rotate_points_along_z(
                            rcnn_boxes3d.unsqueeze(dim=1), roi_ry
                        ).squeeze(dim=1)
                        rcnn_boxes3d[:, 0:3] += roi_xyz

                        loss_corner = loss_utils.get_corner_loss_lidar(
                            rcnn_boxes3d[:, 0:7],
                            gt_of_rois_src[fg_mask_temp][:, 0:7]
                        )
                        loss_corner = loss_corner.mean()
                        loss_corner_list.append(loss_corner)
                    loss_corner = torch.stack(loss_corner_list)

                loss_corner = loss_corner * loss_cfgs.LOSS_WEIGHTS['rcnn_corner_weight']
                rcnn_loss_reg += loss_corner
                tb_dict['rcnn_loss_corner'] = loss_corner.item() if reduce else loss_corner[0].item()
        else:
            raise NotImplementedError

        return rcnn_loss_reg, tb_dict

    def get_box_cls_layer_loss(self, forward_ret_dict, reduce=True):
        """RoI classification loss (binary or multi-class cross-entropy).

        RoIs with label -1 are ignored via the validity mask.
        Returns (rcnn_loss_cls, tb_dict); scalar when reduce, else per-sample.
        """
        loss_cfgs = self.model_cfg.LOSS_CONFIG
        rcnn_cls = forward_ret_dict['rcnn_cls']
        batch_size = forward_ret_dict['rcnn_cls_labels'].shape[0]
        rcnn_cls_labels = forward_ret_dict['rcnn_cls_labels'].view(-1)
        if loss_cfgs.CLS_LOSS == 'BinaryCrossEntropy':
            rcnn_cls_flat = rcnn_cls.view(-1)
            batch_loss_cls = F.binary_cross_entropy(torch.sigmoid(rcnn_cls_flat), rcnn_cls_labels.float(), reduction='none')
            cls_valid_mask = (rcnn_cls_labels >= 0).float()
            if reduce:
                rcnn_loss_cls = (batch_loss_cls * cls_valid_mask).sum() / torch.clamp(cls_valid_mask.sum(), min=1.0)
            else:
                rcnn_loss_cls = (batch_loss_cls * cls_valid_mask) / torch.clamp(cls_valid_mask.sum(), min=1.0)
                rcnn_loss_cls = rcnn_loss_cls.view(batch_size, -1).sum(-1)
        elif loss_cfgs.CLS_LOSS == 'CrossEntropy':
            batch_loss_cls = F.cross_entropy(rcnn_cls, rcnn_cls_labels, reduction='none', ignore_index=-1)
            cls_valid_mask = (rcnn_cls_labels >= 0).float()
            if reduce:
                rcnn_loss_cls = (batch_loss_cls * cls_valid_mask).sum() / torch.clamp(cls_valid_mask.sum(), min=1.0)
            else:
                raise NotImplementedError
        else:
            raise NotImplementedError

        rcnn_loss_cls = rcnn_loss_cls * loss_cfgs.LOSS_WEIGHTS['rcnn_cls_weight']
        tb_dict = {'rcnn_loss_cls': rcnn_loss_cls.item() if reduce else rcnn_loss_cls[0].item()}
        return rcnn_loss_cls, tb_dict

    def LossPredLoss(self, input, target, margin=1.0, reduction='mean'):
        '''
        Pairwise ranking loss for the loss-prediction module (LLAL).
        Source: https://github.com/Mephisto405/Learning-Loss-for-Active-Learning/blob/3c11ff7cf96d8bb4596209fe56265630fda19da6/main.py#L61

        Args:
            input: (2B,) predicted losses; samples are compared pairwise (i vs 2B-1-i)
            target: (2B,) true losses; only the sign of each pair's difference is used
            margin: margin of the hinge ranking loss
            reduction: 'mean' for a scalar, 'none' for per-pair losses

        Raises:
            NotImplementedError: for any other reduction mode.
        '''
        assert len(input) % 2 == 0, 'the batch size is not even.'
        assert input.shape == input.flip(0).shape

        input = (input - input.flip(0))[:len(input)//2]  # [l_1 - l_2B, l_2 - l_2B-1, ... , l_B - l_B+1], where batch_size = 2B
        target = (target - target.flip(0))[:len(target)//2]
        # Only the ordering of the true losses matters, not their magnitude.
        target = target.detach()

        one = 2 * torch.sign(torch.clamp(target, min=0)) - 1  # 1 operation which is defined by the authors

        if reduction == 'mean':
            loss = torch.sum(torch.clamp(margin - one * input, min=0))
            loss = loss / input.size(0)  # Note that the size of input is already halved
        elif reduction == 'none':
            loss = torch.clamp(margin - one * input, min=0)
        else:
            # Bugfix: the original built NotImplementedError() without raising it,
            # which then crashed on the unbound `loss` below.
            raise NotImplementedError()

        return loss

    def get_loss(self, tb_dict=None, reduce=True):
        """Total RoI-head loss = classification loss + regression loss."""
        tb_dict = {} if tb_dict is None else tb_dict
        rcnn_loss = 0
        rcnn_loss_cls, cls_tb_dict = self.get_box_cls_layer_loss(self.forward_ret_dict, reduce=reduce)
        rcnn_loss += rcnn_loss_cls
        tb_dict.update(cls_tb_dict)

        rcnn_loss_reg, reg_tb_dict = self.get_box_reg_layer_loss(self.forward_ret_dict, reduce=reduce)
        rcnn_loss += rcnn_loss_reg
        tb_dict.update(reg_tb_dict)
        tb_dict['rcnn_loss'] = rcnn_loss.item() if reduce else rcnn_loss[0].item()

        # calculate loss for loss_net [method: llal]
        return rcnn_loss, tb_dict

    def get_loss_loss_net(self, tb_dict=None, loss=None):
        """Ranking loss supervising the loss-prediction network (LLAL strategy)."""
        tb_dict = {} if tb_dict is None else tb_dict
        loss_predictions = self.forward_ret_dict['loss_predictions']
        loss_loss_net = self.LossPredLoss(loss_predictions, loss)
        tb_dict['loss_loss_net'] = loss_loss_net.item()
        return loss_loss_net

    def generate_predicted_boxes(self, batch_size, rois, cls_preds, box_preds):
        """
        Decode the head's residual predictions back to full 3D boxes in LiDAR frame.

        Args:
            batch_size:
            rois: (B, N, 7)
            cls_preds: (BN, num_class)
            box_preds: (BN, code_size)

        Returns:
            batch_cls_preds: (B, N, num_class or 1)
            batch_box_preds: (B, N, code_size) decoded boxes
        """
        code_size = self.box_coder.code_size
        # batch_cls_preds: (B, N, num_class or 1)
        batch_cls_preds = cls_preds.view(batch_size, -1, cls_preds.shape[-1])
        batch_box_preds = box_preds.view(batch_size, -1, code_size)

        roi_ry = rois[:, :, 6].view(-1)
        roi_xyz = rois[:, :, 0:3].view(-1, 3)
        local_rois = rois.clone().detach()
        # Decode against RoIs centered at the origin (canonical frame), then
        # rotate/translate the result back to the original RoI pose.
        local_rois[:, :, 0:3] = 0

        batch_box_preds = self.box_coder.decode_torch(batch_box_preds, local_rois).view(-1, code_size)

        batch_box_preds = common_utils.rotate_points_along_z(
            batch_box_preds.unsqueeze(dim=1), roi_ry
        ).squeeze(dim=1)
        batch_box_preds[:, 0:3] += roi_xyz
        batch_box_preds = batch_box_preds.view(batch_size, -1, code_size)
        return batch_cls_preds, batch_box_preds
| 16,806 | 45.173077 | 138 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/roi_heads/voxelrcnn_head.py | import torch
import torch.nn as nn
from ...ops.pointnet2.pointnet2_stack import voxel_pool_modules as voxelpool_stack_modules
from ...utils import common_utils
from .roi_head_template import RoIHeadTemplate
class VoxelRCNNHead(RoIHeadTemplate):
    """Voxel R-CNN second-stage head: pools multi-scale sparse voxel features
    around a regular grid inside each RoI, then refines class score and box."""
    def __init__(self, backbone_channels, model_cfg, point_cloud_range, voxel_size, num_class=1, **kwargs):
        super().__init__(num_class=num_class, model_cfg=model_cfg)
        self.model_cfg = model_cfg
        self.pool_cfg = model_cfg.ROI_GRID_POOL
        LAYER_cfg = self.pool_cfg.POOL_LAYERS
        self.point_cloud_range = point_cloud_range
        self.voxel_size = voxel_size

        c_out = 0
        # One neighbor-voxel SA pooling layer per backbone feature source.
        self.roi_grid_pool_layers = nn.ModuleList()
        for src_name in self.pool_cfg.FEATURES_SOURCE:
            mlps = LAYER_cfg[src_name].MLPS
            for k in range(len(mlps)):
                # Prepend the backbone channel count as the MLP input dim.
                mlps[k] = [backbone_channels[src_name]] + mlps[k]
            pool_layer = voxelpool_stack_modules.NeighborVoxelSAModuleMSG(
                query_ranges=LAYER_cfg[src_name].QUERY_RANGES,
                nsamples=LAYER_cfg[src_name].NSAMPLE,
                radii=LAYER_cfg[src_name].POOL_RADIUS,
                mlps=mlps,
                pool_method=LAYER_cfg[src_name].POOL_METHOD,
            )

            self.roi_grid_pool_layers.append(pool_layer)

            c_out += sum([x[-1] for x in mlps])


        GRID_SIZE = self.model_cfg.ROI_GRID_POOL.GRID_SIZE
        # c_out = sum([x[-1] for x in mlps])
        pre_channel = GRID_SIZE * GRID_SIZE * GRID_SIZE * c_out

        # Shared MLP over the flattened per-RoI grid features.
        shared_fc_list = []
        for k in range(0, self.model_cfg.SHARED_FC.__len__()):
            shared_fc_list.extend([
                nn.Linear(pre_channel, self.model_cfg.SHARED_FC[k], bias=False),
                nn.BatchNorm1d(self.model_cfg.SHARED_FC[k]),
                nn.ReLU(inplace=True)
            ])
            pre_channel = self.model_cfg.SHARED_FC[k]

            if k != self.model_cfg.SHARED_FC.__len__() - 1 and self.model_cfg.DP_RATIO > 0:
                shared_fc_list.append(nn.Dropout(self.model_cfg.DP_RATIO))
        self.shared_fc_layer = nn.Sequential(*shared_fc_list)

        # Classification branch.
        cls_fc_list = []
        for k in range(0, self.model_cfg.CLS_FC.__len__()):
            cls_fc_list.extend([
                nn.Linear(pre_channel, self.model_cfg.CLS_FC[k], bias=False),
                nn.BatchNorm1d(self.model_cfg.CLS_FC[k]),
                nn.ReLU()
            ])
            pre_channel = self.model_cfg.CLS_FC[k]

            if k != self.model_cfg.CLS_FC.__len__() - 1 and self.model_cfg.DP_RATIO > 0:
                cls_fc_list.append(nn.Dropout(self.model_cfg.DP_RATIO))
        self.cls_fc_layers = nn.Sequential(*cls_fc_list)
        self.cls_pred_layer = nn.Linear(pre_channel, self.num_class, bias=True)

        # Box regression branch.
        reg_fc_list = []
        for k in range(0, self.model_cfg.REG_FC.__len__()):
            reg_fc_list.extend([
                nn.Linear(pre_channel, self.model_cfg.REG_FC[k], bias=False),
                nn.BatchNorm1d(self.model_cfg.REG_FC[k]),
                nn.ReLU()
            ])
            pre_channel = self.model_cfg.REG_FC[k]

            if k != self.model_cfg.REG_FC.__len__() - 1 and self.model_cfg.DP_RATIO > 0:
                reg_fc_list.append(nn.Dropout(self.model_cfg.DP_RATIO))
        self.reg_fc_layers = nn.Sequential(*reg_fc_list)
        self.reg_pred_layer = nn.Linear(pre_channel, self.box_coder.code_size * self.num_class, bias=True)

        self.init_weights()

    def init_weights(self):
        """Xavier-init the MLP stacks; small-normal init for the prediction heads."""
        init_func = nn.init.xavier_normal_
        for module_list in [self.shared_fc_layer, self.cls_fc_layers, self.reg_fc_layers]:
            for m in module_list.modules():
                if isinstance(m, nn.Linear):
                    init_func(m.weight)
                    if m.bias is not None:
                        nn.init.constant_(m.bias, 0)
        nn.init.normal_(self.cls_pred_layer.weight, 0, 0.01)
        nn.init.constant_(self.cls_pred_layer.bias, 0)
        nn.init.normal_(self.reg_pred_layer.weight, mean=0, std=0.001)
        nn.init.constant_(self.reg_pred_layer.bias, 0)

    # def _init_weights(self):
    #     init_func = nn.init.xavier_normal_
    #     for m in self.modules():
    #         if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear):
    #             init_func(m.weight)
    #             if m.bias is not None:
    #                 nn.init.constant_(m.bias, 0)
    #     nn.init.normal_(self.reg_layers[-1].weight, mean=0, std=0.001)

    def roi_grid_pool(self, batch_dict):
        """
        Pool multi-scale sparse voxel features at a dense grid inside each RoI.

        Args:
            batch_dict:
                batch_size:
                rois: (B, num_rois, 7 + C)
                point_coords: (num_points, 4) [bs_idx, x, y, z]
                point_features: (num_points, C)
                point_cls_scores: (N1 + N2 + N3 + ..., 1)
                point_part_offset: (N1 + N2 + N3 + ..., 3)
        Returns:
            (BxN, GRID_SIZE^3, C_out) pooled grid features, feature sources concatenated.
        """
        rois = batch_dict['rois']
        batch_size = batch_dict['batch_size']
        with_vf_transform = batch_dict.get('with_voxel_feature_transform', False)

        roi_grid_xyz, _ = self.get_global_grid_points_of_roi(
            rois, grid_size=self.pool_cfg.GRID_SIZE
        )  # (BxN, 6x6x6, 3)
        # roi_grid_xyz: (B, Nx6x6x6, 3)
        roi_grid_xyz = roi_grid_xyz.view(batch_size, -1, 3)

        # compute the voxel coordinates of grid points
        roi_grid_coords_x = (roi_grid_xyz[:, :, 0:1] - self.point_cloud_range[0]) // self.voxel_size[0]
        roi_grid_coords_y = (roi_grid_xyz[:, :, 1:2] - self.point_cloud_range[1]) // self.voxel_size[1]
        roi_grid_coords_z = (roi_grid_xyz[:, :, 2:3] - self.point_cloud_range[2]) // self.voxel_size[2]
        # roi_grid_coords: (B, Nx6x6x6, 3)
        roi_grid_coords = torch.cat([roi_grid_coords_x, roi_grid_coords_y, roi_grid_coords_z], dim=-1)

        batch_idx = rois.new_zeros(batch_size, roi_grid_coords.shape[1], 1)
        for bs_idx in range(batch_size):
            batch_idx[bs_idx, :, 0] = bs_idx
        # roi_grid_coords: (B, Nx6x6x6, 4)
        # roi_grid_coords = torch.cat([batch_idx, roi_grid_coords], dim=-1)
        # roi_grid_coords = roi_grid_coords.int()
        roi_grid_batch_cnt = rois.new_zeros(batch_size).int().fill_(roi_grid_coords.shape[1])

        pooled_features_list = []
        for k, src_name in enumerate(self.pool_cfg.FEATURES_SOURCE):
            pool_layer = self.roi_grid_pool_layers[k]
            cur_stride = batch_dict['multi_scale_3d_strides'][src_name]
            # NOTE(review): this assignment is redundant — it is unconditionally
            # overwritten by the if/else directly below (same expression in the
            # else branch); kept byte-identical here.
            cur_sp_tensors = batch_dict['multi_scale_3d_features'][src_name]

            if with_vf_transform:
                cur_sp_tensors = batch_dict['multi_scale_3d_features_post'][src_name]
            else:
                cur_sp_tensors = batch_dict['multi_scale_3d_features'][src_name]

            # compute voxel center xyz and batch_cnt
            cur_coords = cur_sp_tensors.indices
            cur_voxel_xyz = common_utils.get_voxel_centers(
                cur_coords[:, 1:4],
                downsample_times=cur_stride,
                voxel_size=self.voxel_size,
                point_cloud_range=self.point_cloud_range
            )
            cur_voxel_xyz_batch_cnt = cur_voxel_xyz.new_zeros(batch_size).int()
            for bs_idx in range(batch_size):
                cur_voxel_xyz_batch_cnt[bs_idx] = (cur_coords[:, 0] == bs_idx).sum()
            # get voxel2point tensor
            v2p_ind_tensor = common_utils.generate_voxel2pinds(cur_sp_tensors)
            # compute the grid coordinates in this scale, in [batch_idx, x y z] order
            cur_roi_grid_coords = roi_grid_coords // cur_stride
            cur_roi_grid_coords = torch.cat([batch_idx, cur_roi_grid_coords], dim=-1)
            cur_roi_grid_coords = cur_roi_grid_coords.int()
            # voxel neighbor aggregation
            pooled_features = pool_layer(
                xyz=cur_voxel_xyz.contiguous(),
                xyz_batch_cnt=cur_voxel_xyz_batch_cnt,
                new_xyz=roi_grid_xyz.contiguous().view(-1, 3),
                new_xyz_batch_cnt=roi_grid_batch_cnt,
                new_coords=cur_roi_grid_coords.contiguous().view(-1, 4),
                features=cur_sp_tensors.features.contiguous(),
                voxel2point_indices=v2p_ind_tensor
            )

            pooled_features = pooled_features.view(
                -1, self.pool_cfg.GRID_SIZE ** 3,
                pooled_features.shape[-1]
            )  # (BxN, 6x6x6, C)
            pooled_features_list.append(pooled_features)

        ms_pooled_features = torch.cat(pooled_features_list, dim=-1)

        return ms_pooled_features


    def get_global_grid_points_of_roi(self, rois, grid_size):
        """Return each RoI's dense grid points in global (LiDAR) and local frames."""
        rois = rois.view(-1, rois.shape[-1])
        batch_size_rcnn = rois.shape[0]

        local_roi_grid_points = self.get_dense_grid_points(rois, batch_size_rcnn, grid_size)  # (B, 6x6x6, 3)
        # Rotate the local grid by the RoI heading, then translate to the RoI center.
        global_roi_grid_points = common_utils.rotate_points_along_z(
            local_roi_grid_points.clone(), rois[:, 6]
        ).squeeze(dim=1)
        global_center = rois[:, 0:3].clone()
        global_roi_grid_points += global_center.unsqueeze(dim=1)
        return global_roi_grid_points, local_roi_grid_points

    @staticmethod
    def get_dense_grid_points(rois, batch_size_rcnn, grid_size):
        """Cell-center grid points of a (grid_size^3) lattice inside each RoI,
        expressed in the RoI's local frame (centered at the RoI center)."""
        faked_features = rois.new_ones((grid_size, grid_size, grid_size))
        dense_idx = faked_features.nonzero()  # (N, 3) [x_idx, y_idx, z_idx]
        dense_idx = dense_idx.repeat(batch_size_rcnn, 1, 1).float()  # (B, 6x6x6, 3)

        local_roi_size = rois.view(batch_size_rcnn, -1)[:, 3:6]
        roi_grid_points = (dense_idx + 0.5) / grid_size * local_roi_size.unsqueeze(dim=1) \
                          - (local_roi_size.unsqueeze(dim=1) / 2)  # (B, 6x6x6, 3)
        return roi_grid_points

    def forward(self, batch_dict):
        """
        :param input_data: input dict
        :return:
        """
        targets_dict = self.proposal_layer(
            batch_dict, nms_config=self.model_cfg.NMS_CONFIG['TRAIN' if self.training else 'TEST']
        )
        if self.training:
            # During training, replace NMS proposals with sampled, GT-matched RoIs.
            targets_dict = self.assign_targets(batch_dict)
            batch_dict['rois'] = targets_dict['rois']
            batch_dict['roi_labels'] = targets_dict['roi_labels']

        # RoI aware pooling
        pooled_features = self.roi_grid_pool(batch_dict)  # (BxN, 6x6x6, C)

        # Box Refinement
        pooled_features = pooled_features.view(pooled_features.size(0), -1)
        shared_features = self.shared_fc_layer(pooled_features)
        rcnn_cls = self.cls_pred_layer(self.cls_fc_layers(shared_features))
        rcnn_reg = self.reg_pred_layer(self.reg_fc_layers(shared_features))

        # grid_size = self.model_cfg.ROI_GRID_POOL.GRID_SIZE
        # batch_size_rcnn = pooled_features.shape[0]
        # pooled_features = pooled_features.permute(0, 2, 1).\
        #     contiguous().view(batch_size_rcnn, -1, grid_size, grid_size, grid_size)  # (BxN, C, 6, 6, 6)

        # shared_features = self.shared_fc_layer(pooled_features.view(batch_size_rcnn, -1, 1))
        # rcnn_cls = self.cls_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1)  # (B, 1 or 2)
        # rcnn_reg = self.reg_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1)  # (B, C)

        if not self.training:
            # Decode refined residuals back to full boxes for post-processing.
            batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
                batch_size=batch_dict['batch_size'], rois=batch_dict['rois'], cls_preds=rcnn_cls, box_preds=rcnn_reg
            )
            batch_dict['batch_cls_preds'] = batch_cls_preds
            batch_dict['batch_box_preds'] = batch_box_preds
            batch_dict['cls_preds_normalized'] = False
        else:
            targets_dict['rcnn_cls'] = rcnn_cls
            targets_dict['rcnn_reg'] = rcnn_reg

            self.forward_ret_dict = targets_dict

        return batch_dict
| 11,993 | 44.604563 | 116 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/roi_heads/loss_net.py | import torch.nn as nn
import torch
class LossNet(nn.Module):
    """Loss-prediction module: maps a list of intermediate feature maps to one
    scalar predicted loss per sample (Learning-Loss active-learning strategy).

    Each of the ``num_layer`` inputs goes through its own 1x1 Conv1d -> BN ->
    ReLU reduction to a single channel; the flattened results are concatenated
    and projected to a scalar by a final linear layer.
    """

    def __init__(self, model_cfg, **kwargs):
        """Build one conv/bn/relu triple per configured feature source.

        Args:
            model_cfg: config providing LOSS_NET.SHARED_FC (input channel sizes)
                and TARGET_CONFIG.ROI_PER_IMAGE (RoIs per sample).
            **kwargs: unused extra arguments.
        """
        super().__init__()
        self.model_cfg = model_cfg
        self.num_layer = self.model_cfg.LOSS_NET.SHARED_FC.__len__()
        for layer_idx in range(self.num_layer):
            in_channels = self.model_cfg.LOSS_NET.SHARED_FC[layer_idx]
            # Reduce each feature map to a single channel per RoI.
            setattr(self, 'conv_' + str(layer_idx),
                    nn.Conv1d(in_channels, 1, kernel_size=1, bias=False))
            setattr(self, 'bn_' + str(layer_idx), nn.BatchNorm1d(1))
            setattr(self, 'relu_' + str(layer_idx), nn.ReLU())
        # One value per (RoI, layer) feeds the final scalar projection.
        self.linear = nn.Linear(
            model_cfg.TARGET_CONFIG.ROI_PER_IMAGE * self.num_layer, 1)
        self.init_weights(weight_init='xavier')

    def init_weights(self, weight_init='xavier'):
        """Initialize conv weights with the chosen scheme; zero the biases."""
        schemes = {
            'kaiming': nn.init.kaiming_normal_,
            'xavier': nn.init.xavier_normal_,
            'normal': nn.init.normal_,
        }
        if weight_init not in schemes:
            raise NotImplementedError
        init_func = schemes[weight_init]
        for module in self.modules():
            if isinstance(module, (nn.Conv2d, nn.Conv1d)):
                if weight_init == 'normal':
                    init_func(module.weight, mean=0, std=0.001)
                else:
                    init_func(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)

    def forward(self, features, batch_size=None):
        """Predict one loss value per sample.

        Args:
            features: list of num_layer tensors, features[i] of shape
                (B, SHARED_FC[i], ROI_PER_IMAGE)
            batch_size: B, used to flatten the per-layer outputs
        Returns:
            (B, 1) predicted loss per sample
        """
        per_layer_outputs = []
        for layer_idx in range(self.num_layer):
            reduced = getattr(self, 'conv_' + str(layer_idx))(features[layer_idx])
            reduced = getattr(self, 'bn_' + str(layer_idx))(reduced)
            reduced = getattr(self, 'relu_' + str(layer_idx))(reduced)
            per_layer_outputs.append(reduced.view(batch_size, -1))
        return self.linear(torch.cat(per_layer_outputs, 1))
| 2,413 | 32.527778 | 106 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/roi_heads/partA2_head.py | import numpy as np
import torch
import torch.nn as nn
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from ...utils.spconv_utils import spconv
from .roi_head_template import RoIHeadTemplate
class PartA2FCHead(RoIHeadTemplate):
    """Part-A2 RoI head.

    Pools point-wise part-location features and RPN features into RoI-aware
    voxel grids, fuses them with two sparse-conv streams, and predicts per-RoI
    classification and box refinement through shared FC layers.
    """
    def __init__(self, input_channels, model_cfg, num_class=1, **kwargs):
        super().__init__(num_class=num_class, model_cfg=model_cfg)
        self.model_cfg = model_cfg
        self.SA_modules = nn.ModuleList()
        block = self.post_act_block
        # Each stream outputs half of NUM_FEATURES so their concat matches it.
        c0 = self.model_cfg.ROI_AWARE_POOL.NUM_FEATURES // 2
        # Stream 1: part-location features (3 offsets + 1 score = 4 channels in).
        self.conv_part = spconv.SparseSequential(
            block(4, 64, 3, padding=1, indice_key='rcnn_subm1'),
            block(64, c0, 3, padding=1, indice_key='rcnn_subm1_1'),
        )
        # Stream 2: RPN point features.
        self.conv_rpn = spconv.SparseSequential(
            block(input_channels, 64, 3, padding=1, indice_key='rcnn_subm2'),
            block(64, c0, 3, padding=1, indice_key='rcnn_subm1_2'),
        )
        shared_fc_list = []
        pool_size = self.model_cfg.ROI_AWARE_POOL.POOL_SIZE
        # Flattened size of the fused RoI voxel grid fed to the shared FCs.
        pre_channel = self.model_cfg.ROI_AWARE_POOL.NUM_FEATURES * pool_size * pool_size * pool_size
        for k in range(0, self.model_cfg.SHARED_FC.__len__()):
            shared_fc_list.extend([
                nn.Conv1d(pre_channel, self.model_cfg.SHARED_FC[k], kernel_size=1, bias=False),
                nn.BatchNorm1d(self.model_cfg.SHARED_FC[k]),
                nn.ReLU()
            ])
            pre_channel = self.model_cfg.SHARED_FC[k]
            # Dropout between (but not after) the shared FC layers.
            if k != self.model_cfg.SHARED_FC.__len__() - 1 and self.model_cfg.DP_RATIO > 0:
                shared_fc_list.append(nn.Dropout(self.model_cfg.DP_RATIO))
        self.shared_fc_layer = nn.Sequential(*shared_fc_list)
        self.cls_layers = self.make_fc_layers(
            input_channels=pre_channel, output_channels=self.num_class, fc_list=self.model_cfg.CLS_FC
        )
        self.reg_layers = self.make_fc_layers(
            input_channels=pre_channel,
            output_channels=self.box_coder.code_size * self.num_class,
            fc_list=self.model_cfg.REG_FC
        )
        self.roiaware_pool3d_layer = roiaware_pool3d_utils.RoIAwarePool3d(
            out_size=self.model_cfg.ROI_AWARE_POOL.POOL_SIZE,
            max_pts_each_voxel=self.model_cfg.ROI_AWARE_POOL.MAX_POINTS_PER_VOXEL
        )
        self.init_weights(weight_init='xavier')
    def init_weights(self, weight_init='xavier'):
        """Initialize conv weights per scheme; last reg layer gets small normal init."""
        if weight_init == 'kaiming':
            init_func = nn.init.kaiming_normal_
        elif weight_init == 'xavier':
            init_func = nn.init.xavier_normal_
        elif weight_init == 'normal':
            init_func = nn.init.normal_
        else:
            raise NotImplementedError
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
                if weight_init == 'normal':
                    init_func(m.weight, mean=0, std=0.001)
                else:
                    init_func(m.weight)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
        nn.init.normal_(self.reg_layers[-1].weight, mean=0, std=0.001)
    def post_act_block(self, in_channels, out_channels, kernel_size, indice_key, stride=1, padding=0, conv_type='subm'):
        """Build a sparse conv -> BN -> ReLU block of the requested conv_type
        ('subm' submanifold, 'spconv' strided sparse, or 'inverseconv').
        """
        if conv_type == 'subm':
            m = spconv.SparseSequential(
                spconv.SubMConv3d(in_channels, out_channels, kernel_size, bias=False, indice_key=indice_key),
                nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01),
                nn.ReLU(),
            )
        elif conv_type == 'spconv':
            m = spconv.SparseSequential(
                spconv.SparseConv3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding,
                                    bias=False, indice_key=indice_key),
                nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01),
                nn.ReLU(),
            )
        elif conv_type == 'inverseconv':
            m = spconv.SparseSequential(
                spconv.SparseInverseConv3d(in_channels, out_channels, kernel_size,
                                           indice_key=indice_key, bias=False),
                nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01),
                nn.ReLU(),
            )
        else:
            raise NotImplementedError
        return m
    def roiaware_pool(self, batch_dict):
        """
        Pool per-point part features (avg) and RPN features (max) into RoI-aware
        voxel grids, one grid per RoI.

        Args:
            batch_dict:
                batch_size:
                rois: (B, num_rois, 7 + C)
                point_coords: (num_points, 4) [bs_idx, x, y, z]
                point_features: (num_points, C)
                point_cls_scores: (N1 + N2 + N3 + ..., 1)
                point_part_offset: (N1 + N2 + N3 + ..., 3)
        Returns:
            pooled_part_features: (B * N, out_x, out_y, out_z, 4)
            pooled_rpn_features: (B * N, out_x, out_y, out_z, C)
        """
        batch_size = batch_dict['batch_size']
        batch_idx = batch_dict['point_coords'][:, 0]
        point_coords = batch_dict['point_coords'][:, 1:4]
        point_features = batch_dict['point_features']
        part_features = torch.cat((
            batch_dict['point_part_offset'] if not self.model_cfg.get('DISABLE_PART', False) else point_coords,
            batch_dict['point_cls_scores'].view(-1, 1).detach()
        ), dim=1)
        # Zero out part offsets of low-confidence foreground points (last column is the score).
        part_features[part_features[:, -1] < self.model_cfg.SEG_MASK_SCORE_THRESH, 0:3] = 0
        rois = batch_dict['rois']
        pooled_part_features_list, pooled_rpn_features_list = [], []
        for bs_idx in range(batch_size):
            bs_mask = (batch_idx == bs_idx)
            cur_point_coords = point_coords[bs_mask]
            cur_part_features = part_features[bs_mask]
            cur_rpn_features = point_features[bs_mask]
            cur_roi = rois[bs_idx][:, 0:7].contiguous()  # (N, 7)
            pooled_part_features = self.roiaware_pool3d_layer.forward(
                cur_roi, cur_point_coords, cur_part_features, pool_method='avg'
            )  # (N, out_x, out_y, out_z, 4)
            pooled_rpn_features = self.roiaware_pool3d_layer.forward(
                cur_roi, cur_point_coords, cur_rpn_features, pool_method='max'
            )  # (N, out_x, out_y, out_z, C)
            pooled_part_features_list.append(pooled_part_features)
            pooled_rpn_features_list.append(pooled_rpn_features)
        pooled_part_features = torch.cat(pooled_part_features_list, dim=0)  # (B * N, out_x, out_y, out_z, 4)
        pooled_rpn_features = torch.cat(pooled_rpn_features_list, dim=0)  # (B * N, out_x, out_y, out_z, C)
        return pooled_part_features, pooled_rpn_features
    @staticmethod
    def fake_sparse_idx(sparse_idx, batch_size_rcnn):
        """Fabricate one non-empty voxel per RoI when (almost) all grids are
        empty, so BatchNorm still sees at least batch_size_rcnn entries.
        """
        print('Warning: Sparse_Idx_Shape(%s) \r' % (str(sparse_idx.shape)), end='', flush=True)
        # at most one sample is non-empty, then fake the first voxels of each sample(BN needs at least
        # two values each channel) as non-empty for the below calculation
        sparse_idx = sparse_idx.new_zeros((batch_size_rcnn, 3))
        bs_idxs = torch.arange(batch_size_rcnn).type_as(sparse_idx).view(-1, 1)
        sparse_idx = torch.cat((bs_idxs, sparse_idx), dim=1)
        return sparse_idx
    def forward(self, batch_dict):
        """
        Run proposal generation / target assignment, RoI-aware pooling, the two
        sparse-conv streams, and the cls/reg heads.

        Args:
            batch_dict: see roiaware_pool(); also consumed by proposal_layer.
        Returns:
            batch_dict augmented with batch_cls_preds/batch_box_preds (eval) or
            with self.forward_ret_dict populated for the loss (train).
        """
        targets_dict = self.proposal_layer(
            batch_dict, nms_config=self.model_cfg.NMS_CONFIG['TRAIN' if self.training else 'TEST']
        )
        if self.training:
            targets_dict = self.assign_targets(batch_dict)
            batch_dict['rois'] = targets_dict['rois']
            batch_dict['roi_labels'] = targets_dict['roi_labels']
        # RoI aware pooling
        pooled_part_features, pooled_rpn_features = self.roiaware_pool(batch_dict)
        batch_size_rcnn = pooled_part_features.shape[0]  # (B * N, out_x, out_y, out_z, 4)
        # transform to sparse tensors
        sparse_shape = np.array(pooled_part_features.shape[1:4], dtype=np.int32)
        sparse_idx = pooled_part_features.sum(dim=-1).nonzero()  # (non_empty_num, 4) ==> [bs_idx, x_idx, y_idx, z_idx]
        if sparse_idx.shape[0] < 3:
            sparse_idx = self.fake_sparse_idx(sparse_idx, batch_size_rcnn)
            if self.training:
                # these are invalid samples
                targets_dict['rcnn_cls_labels'].fill_(-1)
                targets_dict['reg_valid_mask'].fill_(-1)
        part_features = pooled_part_features[sparse_idx[:, 0], sparse_idx[:, 1], sparse_idx[:, 2], sparse_idx[:, 3]]
        rpn_features = pooled_rpn_features[sparse_idx[:, 0], sparse_idx[:, 1], sparse_idx[:, 2], sparse_idx[:, 3]]
        coords = sparse_idx.int().contiguous()
        part_features = spconv.SparseConvTensor(part_features, coords, sparse_shape, batch_size_rcnn)
        rpn_features = spconv.SparseConvTensor(rpn_features, coords, sparse_shape, batch_size_rcnn)
        # forward rcnn network
        x_part = self.conv_part(part_features)
        x_rpn = self.conv_rpn(rpn_features)
        merged_feature = torch.cat((x_rpn.features, x_part.features), dim=1)  # (N, C)
        shared_feature = spconv.SparseConvTensor(merged_feature, coords, sparse_shape, batch_size_rcnn)
        # Densify the fused grid and flatten per RoI before the shared FCs.
        shared_feature = shared_feature.dense().view(batch_size_rcnn, -1, 1)
        shared_feature = self.shared_fc_layer(shared_feature)
        rcnn_cls = self.cls_layers(shared_feature).transpose(1, 2).contiguous().squeeze(dim=1)  # (B, 1 or 2)
        rcnn_reg = self.reg_layers(shared_feature).transpose(1, 2).contiguous().squeeze(dim=1)  # (B, C)
        if not self.training:
            batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
                batch_size=batch_dict['batch_size'], rois=batch_dict['rois'], cls_preds=rcnn_cls, box_preds=rcnn_reg
            )
            batch_dict['batch_cls_preds'] = batch_cls_preds
            batch_dict['batch_box_preds'] = batch_box_preds
            batch_dict['cls_preds_normalized'] = False
        else:
            targets_dict['rcnn_cls'] = rcnn_cls
            targets_dict['rcnn_reg'] = rcnn_reg
            self.forward_ret_dict = targets_dict
        return batch_dict
| 10,089 | 43.844444 | 120 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/roi_heads/pvrcnn_head.py | import torch.nn as nn
from ...ops.pointnet2.pointnet2_stack import pointnet2_modules as pointnet2_stack_modules
from ...utils import common_utils
from .roi_head_template import RoIHeadTemplate
from .loss_net import LossNet
import torch
class PVRCNNHead(RoIHeadTemplate):
    """PV-RCNN RoI head.

    Pools keypoint features onto a regular grid inside each RoI, flattens the
    grid through shared FC layers, and predicts per-RoI confidence and box
    refinement. Optionally hosts a LossNet (learning-loss active learning)
    and supports multi-round stochastic (dropout) sampling at test time.
    """
    def __init__(self, input_channels, model_cfg, num_class=1, **kwargs):
        """
        Args:
            input_channels: channel width of the keypoint features to pool.
            model_cfg: head config (ROI_GRID_POOL, SHARED_FC, CLS_FC, REG_FC,
                DP_RATIO, optional LOSS_NET / SAMPLING_ROUND / EMBEDDING_REQUIRED).
            num_class: number of foreground classes.
            **kwargs: unused; accepted for interface compatibility.
        """
        super().__init__(num_class=num_class, model_cfg=model_cfg)
        self.model_cfg = model_cfg
        self.roi_grid_pool_layer, num_c_out = pointnet2_stack_modules.build_local_aggregation_module(
            input_channels=input_channels, config=self.model_cfg.ROI_GRID_POOL
        )
        GRID_SIZE = self.model_cfg.ROI_GRID_POOL.GRID_SIZE
        # Flattened size of one RoI grid fed to the shared FCs.
        pre_channel = GRID_SIZE * GRID_SIZE * GRID_SIZE * num_c_out
        shared_fc_list = []
        for k in range(0, len(self.model_cfg.SHARED_FC)):
            shared_fc_list.extend([
                nn.Conv1d(pre_channel, self.model_cfg.SHARED_FC[k], kernel_size=1, bias=False),
                nn.BatchNorm1d(self.model_cfg.SHARED_FC[k]),
                nn.ReLU()
            ])
            pre_channel = self.model_cfg.SHARED_FC[k]
            # Dropout between (but not after) the shared FC layers.
            if k != len(self.model_cfg.SHARED_FC) - 1 and self.model_cfg.DP_RATIO > 0:
                shared_fc_list.append(nn.Dropout(self.model_cfg.DP_RATIO))
        self.shared_fc_layer = nn.Sequential(*shared_fc_list)
        self.cls_layers = self.make_fc_layers(
            input_channels=pre_channel, output_channels=self.num_class, fc_list=self.model_cfg.CLS_FC
        )
        self.reg_layers = self.make_fc_layers(
            input_channels=pre_channel,
            output_channels=self.box_coder.code_size * self.num_class,
            fc_list=self.model_cfg.REG_FC
        )
        if model_cfg.get('LOSS_NET', None):
            # Auxiliary module that predicts the detection loss from latent features.
            self.loss_net = LossNet(model_cfg=model_cfg)
        self.init_weights(weight_init='xavier')
    def init_weights(self, weight_init='xavier'):
        """Initialize conv weights per scheme; last reg layer gets small normal init.

        Raises:
            NotImplementedError: if weight_init is not 'kaiming'/'xavier'/'normal'.
        """
        if weight_init == 'kaiming':
            init_func = nn.init.kaiming_normal_
        elif weight_init == 'xavier':
            init_func = nn.init.xavier_normal_
        elif weight_init == 'normal':
            init_func = nn.init.normal_
        else:
            raise NotImplementedError
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
                if weight_init == 'normal':
                    init_func(m.weight, mean=0, std=0.001)
                else:
                    init_func(m.weight)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
        nn.init.normal_(self.reg_layers[-1].weight, mean=0, std=0.001)
    def roi_grid_pool(self, batch_dict):
        """
        Aggregate keypoint features at dense grid points inside each RoI.

        Args:
            batch_dict:
                batch_size:
                rois: (B, num_rois, 7 + C)
                point_coords: (num_points, 4) [bs_idx, x, y, z]
                point_features: (num_points, C)
                point_cls_scores: (N1 + N2 + N3 + ..., 1)
        Returns:
            pooled_features: (BxN, grid_size**3, C)
        """
        batch_size = batch_dict['batch_size']
        rois = batch_dict['rois']
        point_coords = batch_dict['point_coords']
        point_features = batch_dict['point_features']
        # Weight keypoint features by their foreground confidence.
        point_features = point_features * batch_dict['point_cls_scores'].view(-1, 1)
        global_roi_grid_points, local_roi_grid_points = self.get_global_grid_points_of_roi(
            rois, grid_size=self.model_cfg.ROI_GRID_POOL.GRID_SIZE
        )  # (BxN, 6x6x6, 3)
        global_roi_grid_points = global_roi_grid_points.view(batch_size, -1, 3)  # (B, Nx6x6x6, 3)
        xyz = point_coords[:, 1:4]
        # Stacked-batch bookkeeping: number of keypoints per sample.
        xyz_batch_cnt = xyz.new_zeros(batch_size).int()
        batch_idx = point_coords[:, 0]
        for k in range(batch_size):
            xyz_batch_cnt[k] = (batch_idx == k).sum()
        new_xyz = global_roi_grid_points.view(-1, 3)
        new_xyz_batch_cnt = xyz.new_zeros(batch_size).int().fill_(global_roi_grid_points.shape[1])
        pooled_points, pooled_features = self.roi_grid_pool_layer(
            xyz=xyz.contiguous(),
            xyz_batch_cnt=xyz_batch_cnt,
            new_xyz=new_xyz,
            new_xyz_batch_cnt=new_xyz_batch_cnt,
            features=point_features.contiguous(),
        )  # (M1 + M2 ..., C)
        pooled_features = pooled_features.view(
            -1, self.model_cfg.ROI_GRID_POOL.GRID_SIZE ** 3,
            pooled_features.shape[-1]
        )  # (BxN, 6x6x6, C)
        return pooled_features
    def get_global_grid_points_of_roi(self, rois, grid_size):
        """Return RoI grid points in world frame and in the RoI's local frame."""
        rois = rois.view(-1, rois.shape[-1])
        batch_size_rcnn = rois.shape[0]
        local_roi_grid_points = self.get_dense_grid_points(rois, batch_size_rcnn, grid_size)  # (B, 6x6x6, 3)
        # Rotate local grid by the RoI heading, then translate to the RoI center.
        global_roi_grid_points = common_utils.rotate_points_along_z(
            local_roi_grid_points.clone(), rois[:, 6]
        ).squeeze(dim=1)
        global_center = rois[:, 0:3].clone()
        global_roi_grid_points += global_center.unsqueeze(dim=1)
        return global_roi_grid_points, local_roi_grid_points
    @staticmethod
    def get_dense_grid_points(rois, batch_size_rcnn, grid_size):
        """Voxel-center grid points in the RoI local frame, centered at the origin."""
        faked_features = rois.new_ones((grid_size, grid_size, grid_size))
        dense_idx = faked_features.nonzero()  # (N, 3) [x_idx, y_idx, z_idx]
        dense_idx = dense_idx.repeat(batch_size_rcnn, 1, 1).float()  # (B, 6x6x6, 3)
        local_roi_size = rois.view(batch_size_rcnn, -1)[:, 3:6]
        roi_grid_points = (dense_idx + 0.5) / grid_size * local_roi_size.unsqueeze(dim=1) \
                          - (local_roi_size.unsqueeze(dim=1) / 2)  # (B, 6x6x6, 3)
        return roi_grid_points
    def forward(self, batch_dict):
        """
        Run proposal generation / target assignment, RoI grid pooling, the
        shared FCs (optionally tapping latents for the LossNet), and the
        cls/reg heads.

        Args:
            batch_dict: input dict (see roi_grid_pool); also consumed by
                proposal_layer / assign_targets.
        Returns:
            batch_dict augmented with predictions (eval) or with
            self.forward_ret_dict populated for the loss (train).
        """
        targets_dict = self.proposal_layer(
            batch_dict, nms_config=self.model_cfg.NMS_CONFIG['TRAIN' if self.training else 'TEST']
        )
        if self.training:
            targets_dict = batch_dict.get('roi_targets_dict', None)
            if targets_dict is None:
                targets_dict = self.assign_targets(batch_dict)
                batch_dict['rois'] = targets_dict['rois']
                batch_dict['roi_labels'] = targets_dict['roi_labels']
        # RoI aware pooling
        pooled_features = self.roi_grid_pool(batch_dict)  # (BxN, 6x6x6, C)
        grid_size = self.model_cfg.ROI_GRID_POOL.GRID_SIZE
        batch_size_rcnn = pooled_features.shape[0]
        pooled_features = pooled_features.permute(0, 2, 1).\
            contiguous().view(batch_size_rcnn, -1, grid_size, grid_size, grid_size)  # (BxN, C, 6, 6, 6)
        if hasattr(self, 'loss_net'):
            batch_size = batch_dict['batch_size']
            # Run the shared FCs module-by-module so the post-ReLU latent
            # features can be tapped for the loss-prediction network (llal).
            latent_features = []
            fc_in = pooled_features.view(batch_size_rcnn, -1, 1)
            for module in self.shared_fc_layer:
                fc_out = module(fc_in)
                if isinstance(module, nn.ReLU):
                    latent_features.append(fc_out)  # B * N * D
                fc_in = fc_out
            loss_predictions = self.loss_net(latent_features, batch_size=batch_size)
            shared_features = fc_out
            if not self.training:
                batch_dict['loss_predictions'] = loss_predictions
            else:
                targets_dict['loss_predictions'] = loss_predictions
        else:
            shared_features = self.shared_fc_layer(pooled_features.view(batch_size_rcnn, -1, 1))
        rcnn_cls = self.cls_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1)  # (B, 1 or 2)
        rcnn_reg = self.reg_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1)  # (B, C)
        if not self.training:
            if self.model_cfg.get('SAMPLING_ROUND', None):
                # Monte-Carlo style repeated forward passes through the shared
                # FCs (stochastic via Dropout) for uncertainty estimation.
                rcnn_cls_list = [rcnn_cls]
                rcnn_reg_list = [rcnn_reg]
                for i in range(self.model_cfg.SAMPLING_ROUND - 1):
                    shared_features = self.shared_fc_layer(pooled_features.view(batch_size_rcnn, -1, 1))
                    rcnn_cls = self.cls_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1)  # (B, 1 or 2)
                    rcnn_reg = self.reg_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1)  # (B, C)
                    rcnn_cls_list.append(rcnn_cls)
                    rcnn_reg_list.append(rcnn_reg)
                batch_dict['rcnn_cls'] = torch.stack(rcnn_cls_list, 0)
                batch_dict['rcnn_reg'] = torch.stack(rcnn_reg_list, 0)
            elif self.model_cfg.get('EMBEDDING_REQUIRED', None):
                batch_dict['shared_features'] = shared_features
            # inference at the test time
            # NOTE(review): this branch uses rcnn_cls_list/rcnn_reg_list and is
            # only reachable when SAMPLING_ROUND is configured — TODO confirm
            # the configs always pair SAMPLING_FEAT 'box_label' with SAMPLING_ROUND.
            if 'box_label' in self.model_cfg.get('SAMPLING_FEAT', []) and 'test' in batch_dict.keys():
                batch_cls_preds_list = []
                batch_box_preds_list = []
                for i in range(self.model_cfg.SAMPLING_ROUND):
                    batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
                        batch_size=batch_dict['batch_size'], rois=batch_dict['rois'], cls_preds=rcnn_cls_list[i], box_preds=rcnn_reg_list[i]
                    )
                    batch_cls_preds_list.append(batch_cls_preds)
                    batch_box_preds_list.append(batch_box_preds)
                batch_dict['batch_cls_preds'] = batch_cls_preds_list
                batch_dict['batch_box_preds'] = torch.stack(batch_box_preds_list, 0)
            else:
                batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
                    batch_size=batch_dict['batch_size'], rois=batch_dict['rois'], cls_preds=rcnn_cls, box_preds=rcnn_reg
                )
                batch_dict['batch_cls_preds'] = batch_cls_preds
                batch_dict['batch_box_preds'] = batch_box_preds
            batch_dict['cls_preds_normalized'] = False
        else:
            targets_dict['rcnn_cls'] = rcnn_cls
            targets_dict['rcnn_reg'] = rcnn_reg
            self.forward_ret_dict = targets_dict
            # save the preds at the train mode
            batch_dict['rcnn_cls'] = rcnn_cls
            batch_dict['rcnn_reg'] = rcnn_reg
        return batch_dict
| 10,603 | 42.63786 | 136 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/roi_heads/second_head.py | import torch
import torch.nn as nn
from .roi_head_template import RoIHeadTemplate
from ...utils import common_utils, loss_utils
class SECONDHead(RoIHeadTemplate):
    """SECOND-style IoU refinement head.

    Crops rotated RoI patches from the BEV feature map via affine_grid /
    grid_sample, runs shared FC layers, and regresses a single IoU-quality
    score per RoI (boxes themselves are not refined).
    """
    def __init__(self, input_channels, model_cfg, num_class=1, **kwargs):
        super().__init__(num_class=num_class, model_cfg=model_cfg)
        self.model_cfg = model_cfg
        GRID_SIZE = self.model_cfg.ROI_GRID_POOL.GRID_SIZE
        # Flattened size of one pooled 2D RoI patch fed to the shared FCs.
        pre_channel = self.model_cfg.ROI_GRID_POOL.IN_CHANNEL * GRID_SIZE * GRID_SIZE
        shared_fc_list = []
        for k in range(0, self.model_cfg.SHARED_FC.__len__()):
            shared_fc_list.extend([
                nn.Conv1d(pre_channel, self.model_cfg.SHARED_FC[k], kernel_size=1, bias=False),
                nn.BatchNorm1d(self.model_cfg.SHARED_FC[k]),
                nn.ReLU()
            ])
            pre_channel = self.model_cfg.SHARED_FC[k]
            # Dropout between (but not after) the shared FC layers.
            if k != self.model_cfg.SHARED_FC.__len__() - 1 and self.model_cfg.DP_RATIO > 0:
                shared_fc_list.append(nn.Dropout(self.model_cfg.DP_RATIO))
        self.shared_fc_layer = nn.Sequential(*shared_fc_list)
        self.iou_layers = self.make_fc_layers(
            input_channels=pre_channel, output_channels=1, fc_list=self.model_cfg.IOU_FC
        )
        self.init_weights(weight_init='xavier')
    def init_weights(self, weight_init='xavier'):
        """Initialize Conv1d/Conv2d weights with the chosen scheme; zero any biases.

        Raises:
            NotImplementedError: if weight_init is not 'kaiming'/'xavier'/'normal'.
        """
        if weight_init == 'kaiming':
            init_func = nn.init.kaiming_normal_
        elif weight_init == 'xavier':
            init_func = nn.init.xavier_normal_
        elif weight_init == 'normal':
            init_func = nn.init.normal_
        else:
            raise NotImplementedError
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
                if weight_init == 'normal':
                    init_func(m.weight, mean=0, std=0.001)
                else:
                    init_func(m.weight)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
    def roi_grid_pool(self, batch_dict):
        """
        Crop a rotated GRID_SIZE x GRID_SIZE patch of BEV features for each RoI.

        Args:
            batch_dict:
                batch_size:
                rois: (B, num_rois, 7 + C)
                spatial_features_2d: (B, C, H, W)
        Returns:
            pooled_features: (B * num_rois, C, GRID_SIZE, GRID_SIZE)
        """
        batch_size = batch_dict['batch_size']
        rois = batch_dict['rois'].detach()
        spatial_features_2d = batch_dict['spatial_features_2d'].detach()
        height, width = spatial_features_2d.size(2), spatial_features_2d.size(3)
        dataset_cfg = batch_dict['dataset_cfg']
        min_x = dataset_cfg.POINT_CLOUD_RANGE[0]
        min_y = dataset_cfg.POINT_CLOUD_RANGE[1]
        voxel_size_x = dataset_cfg.DATA_PROCESSOR[-1].VOXEL_SIZE[0]
        voxel_size_y = dataset_cfg.DATA_PROCESSOR[-1].VOXEL_SIZE[1]
        down_sample_ratio = self.model_cfg.ROI_GRID_POOL.DOWNSAMPLE_RATIO
        pooled_features_list = []
        # cudnn is disabled around affine_grid/grid_sample; presumably to work
        # around a cudnn issue with these ops — TODO confirm still necessary.
        torch.backends.cudnn.enabled = False
        for b_id in range(batch_size):
            # Map global boxes coordinates to feature map coordinates
            x1 = (rois[b_id, :, 0] - rois[b_id, :, 3] / 2 - min_x) / (voxel_size_x * down_sample_ratio)
            x2 = (rois[b_id, :, 0] + rois[b_id, :, 3] / 2 - min_x) / (voxel_size_x * down_sample_ratio)
            y1 = (rois[b_id, :, 1] - rois[b_id, :, 4] / 2 - min_y) / (voxel_size_y * down_sample_ratio)
            y2 = (rois[b_id, :, 1] + rois[b_id, :, 4] / 2 - min_y) / (voxel_size_y * down_sample_ratio)
            angle, _ = common_utils.check_numpy_to_torch(rois[b_id, :, 6])
            cosa = torch.cos(angle)
            sina = torch.sin(angle)
            # Affine theta maps the unit output grid to the rotated RoI box in
            # normalized feature-map coordinates.
            # NOTE(review): the (width - 1)/(height - 1) normalization matches
            # align_corners=True semantics, but align_corners is not passed to
            # affine_grid/grid_sample (newer torch defaults to False) — verify.
            theta = torch.stack((
                (x2 - x1) / (width - 1) * cosa, (x2 - x1) / (width - 1) * (-sina), (x1 + x2 - width + 1) / (width - 1),
                (y2 - y1) / (height - 1) * sina, (y2 - y1) / (height - 1) * cosa, (y1 + y2 - height + 1) / (height - 1)
            ), dim=1).view(-1, 2, 3).float()
            grid_size = self.model_cfg.ROI_GRID_POOL.GRID_SIZE
            grid = nn.functional.affine_grid(
                theta,
                torch.Size((rois.size(1), spatial_features_2d.size(1), grid_size, grid_size))
            )
            pooled_features = nn.functional.grid_sample(
                spatial_features_2d[b_id].unsqueeze(0).expand(rois.size(1), spatial_features_2d.size(1), height, width),
                grid
            )
            pooled_features_list.append(pooled_features)
        torch.backends.cudnn.enabled = True
        pooled_features = torch.cat(pooled_features_list, dim=0)
        return pooled_features
    def forward(self, batch_dict):
        """
        Run proposal generation / target assignment, BEV RoI pooling, and the
        IoU-score head.

        Args:
            batch_dict: input dict (see roi_grid_pool).
        Returns:
            batch_dict with batch_cls_preds (IoU scores) and batch_box_preds
            (the unrefined rois) in eval mode; populates forward_ret_dict in train.
        """
        targets_dict = self.proposal_layer(
            batch_dict, nms_config=self.model_cfg.NMS_CONFIG['TRAIN' if self.training else 'TEST']
        )
        if self.training:
            targets_dict = self.assign_targets(batch_dict)
            batch_dict['rois'] = targets_dict['rois']
            batch_dict['roi_labels'] = targets_dict['roi_labels']
        # RoI aware pooling
        pooled_features = self.roi_grid_pool(batch_dict)  # (BxN, C, 7, 7)
        batch_size_rcnn = pooled_features.shape[0]
        shared_features = self.shared_fc_layer(pooled_features.view(batch_size_rcnn, -1, 1))
        rcnn_iou = self.iou_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1)  # (B*N, 1)
        if not self.training:
            batch_dict['batch_cls_preds'] = rcnn_iou.view(batch_dict['batch_size'], -1, rcnn_iou.shape[-1])
            batch_dict['batch_box_preds'] = batch_dict['rois']
            batch_dict['cls_preds_normalized'] = False
        else:
            targets_dict['rcnn_iou'] = rcnn_iou
            self.forward_ret_dict = targets_dict
        return batch_dict
    def get_loss(self, tb_dict=None):
        """Total RCNN loss = IoU regression loss; also fills tb_dict for logging."""
        tb_dict = {} if tb_dict is None else tb_dict
        rcnn_loss = 0
        rcnn_loss_cls, cls_tb_dict = self.get_box_iou_layer_loss(self.forward_ret_dict)
        rcnn_loss += rcnn_loss_cls
        tb_dict.update(cls_tb_dict)
        tb_dict['rcnn_loss'] = rcnn_loss.item()
        return rcnn_loss, tb_dict
    def get_box_iou_layer_loss(self, forward_ret_dict):
        """IoU-quality regression loss; labels < 0 are ignored via the valid mask.

        Raises:
            NotImplementedError: if LOSS_CONFIG.IOU_LOSS is not one of
                BinaryCrossEntropy / L2 / smoothL1 / focalbce.
        """
        loss_cfgs = self.model_cfg.LOSS_CONFIG
        rcnn_iou = forward_ret_dict['rcnn_iou']
        rcnn_iou_labels = forward_ret_dict['rcnn_cls_labels'].view(-1)
        rcnn_iou_flat = rcnn_iou.view(-1)
        if loss_cfgs.IOU_LOSS == 'BinaryCrossEntropy':
            batch_loss_iou = nn.functional.binary_cross_entropy_with_logits(
                rcnn_iou_flat,
                rcnn_iou_labels.float(), reduction='none'
            )
        elif loss_cfgs.IOU_LOSS == 'L2':
            batch_loss_iou = nn.functional.mse_loss(rcnn_iou_flat, rcnn_iou_labels, reduction='none')
        elif loss_cfgs.IOU_LOSS == 'smoothL1':
            diff = rcnn_iou_flat - rcnn_iou_labels
            batch_loss_iou = loss_utils.WeightedSmoothL1Loss.smooth_l1_loss(diff, 1.0 / 9.0)
        elif loss_cfgs.IOU_LOSS == 'focalbce':
            batch_loss_iou = loss_utils.sigmoid_focal_cls_loss(rcnn_iou_flat, rcnn_iou_labels)
        else:
            raise NotImplementedError
        iou_valid_mask = (rcnn_iou_labels >= 0).float()
        # Average only over valid entries; clamp avoids division by zero.
        rcnn_loss_iou = (batch_loss_iou * iou_valid_mask).sum() / torch.clamp(iou_valid_mask.sum(), min=1.0)
        rcnn_loss_iou = rcnn_loss_iou * loss_cfgs.LOSS_WEIGHTS['rcnn_iou_weight']
        tb_dict = {'rcnn_loss_iou': rcnn_loss_iou.item()}
        return rcnn_loss_iou, tb_dict
| 7,527 | 41.055866 | 120 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/roi_heads/pointrcnn_head.py | import torch
import torch.nn as nn
from ...ops.pointnet2.pointnet2_batch import pointnet2_modules
from ...ops.roipoint_pool3d import roipoint_pool3d_utils
from ...utils import common_utils
from .roi_head_template import RoIHeadTemplate
class PointRCNNHead(RoIHeadTemplate):
    """PointRCNN RoI head.

    Pools raw points + point features inside each (slightly enlarged) RoI,
    canonically transforms them into the RoI frame, and runs a PointNet++
    encoder followed by cls/reg FC heads.
    """
    def __init__(self, input_channels, model_cfg, num_class=1, **kwargs):
        super().__init__(num_class=num_class, model_cfg=model_cfg)
        self.model_cfg = model_cfg
        use_bn = self.model_cfg.USE_BN
        self.SA_modules = nn.ModuleList()
        channel_in = input_channels
        self.num_prefix_channels = 3 + 2  # xyz + point_scores + point_depth
        xyz_mlps = [self.num_prefix_channels] + self.model_cfg.XYZ_UP_LAYER
        shared_mlps = []
        # MLP lifting the 5 prefix channels (xyz/score/depth) to feature width.
        for k in range(len(xyz_mlps) - 1):
            shared_mlps.append(nn.Conv2d(xyz_mlps[k], xyz_mlps[k + 1], kernel_size=1, bias=not use_bn))
            if use_bn:
                shared_mlps.append(nn.BatchNorm2d(xyz_mlps[k + 1]))
            shared_mlps.append(nn.ReLU())
        self.xyz_up_layer = nn.Sequential(*shared_mlps)
        c_out = self.model_cfg.XYZ_UP_LAYER[-1]
        # Fuses lifted-xyz features with backbone point features (concat -> c_out).
        self.merge_down_layer = nn.Sequential(
            nn.Conv2d(c_out * 2, c_out, kernel_size=1, bias=not use_bn),
            *[nn.BatchNorm2d(c_out), nn.ReLU()] if use_bn else [nn.ReLU()]
        )
        for k in range(self.model_cfg.SA_CONFIG.NPOINTS.__len__()):
            mlps = [channel_in] + self.model_cfg.SA_CONFIG.MLPS[k]
            # NPOINTS == -1 means "group all points" (global SA level).
            npoint = self.model_cfg.SA_CONFIG.NPOINTS[k] if self.model_cfg.SA_CONFIG.NPOINTS[k] != -1 else None
            self.SA_modules.append(
                pointnet2_modules.PointnetSAModule(
                    npoint=npoint,
                    radius=self.model_cfg.SA_CONFIG.RADIUS[k],
                    nsample=self.model_cfg.SA_CONFIG.NSAMPLE[k],
                    mlp=mlps,
                    use_xyz=True,
                    bn=use_bn
                )
            )
            channel_in = mlps[-1]
        self.cls_layers = self.make_fc_layers(
            input_channels=channel_in, output_channels=self.num_class, fc_list=self.model_cfg.CLS_FC
        )
        self.reg_layers = self.make_fc_layers(
            input_channels=channel_in,
            output_channels=self.box_coder.code_size * self.num_class,
            fc_list=self.model_cfg.REG_FC
        )
        self.roipoint_pool3d_layer = roipoint_pool3d_utils.RoIPointPool3d(
            num_sampled_points=self.model_cfg.ROI_POINT_POOL.NUM_SAMPLED_POINTS,
            pool_extra_width=self.model_cfg.ROI_POINT_POOL.POOL_EXTRA_WIDTH
        )
        self.init_weights(weight_init='xavier')
    def init_weights(self, weight_init='xavier'):
        """Initialize conv weights per scheme; last reg layer gets small normal init."""
        if weight_init == 'kaiming':
            init_func = nn.init.kaiming_normal_
        elif weight_init == 'xavier':
            init_func = nn.init.xavier_normal_
        elif weight_init == 'normal':
            init_func = nn.init.normal_
        else:
            raise NotImplementedError
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
                if weight_init == 'normal':
                    init_func(m.weight, mean=0, std=0.001)
                else:
                    init_func(m.weight)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
        nn.init.normal_(self.reg_layers[-1].weight, mean=0, std=0.001)
    def roipool3d_gpu(self, batch_dict):
        """
        Pool points/features inside each RoI and canonically transform them
        into the RoI-centered, heading-aligned frame.

        Args:
            batch_dict:
                batch_size:
                rois: (B, num_rois, 7 + C)
                point_coords: (num_points, 4) [bs_idx, x, y, z]
                point_features: (num_points, C)
                point_cls_scores: (N1 + N2 + N3 + ..., 1)
                point_part_offset: (N1 + N2 + N3 + ..., 3)
        Returns:
            pooled_features: (B * num_rois, num_sampled_points,
                3 + 2 + C) = canonical xyz, score, depth, backbone features;
                rows of empty RoIs are zeroed.
        """
        batch_size = batch_dict['batch_size']
        batch_idx = batch_dict['point_coords'][:, 0]
        point_coords = batch_dict['point_coords'][:, 1:4]
        point_features = batch_dict['point_features']
        rois = batch_dict['rois']  # (B, num_rois, 7 + C)
        batch_cnt = point_coords.new_zeros(batch_size).int()
        for bs_idx in range(batch_size):
            batch_cnt[bs_idx] = (batch_idx == bs_idx).sum()
        # The batched view() below requires every sample to have the same point count.
        assert batch_cnt.min() == batch_cnt.max()
        point_scores = batch_dict['point_cls_scores'].detach()
        # Normalized point depth, shifted to roughly [-0.5, 0.5].
        point_depths = point_coords.norm(dim=1) / self.model_cfg.ROI_POINT_POOL.DEPTH_NORMALIZER - 0.5
        point_features_list = [point_scores[:, None], point_depths[:, None], point_features]
        point_features_all = torch.cat(point_features_list, dim=1)
        batch_points = point_coords.view(batch_size, -1, 3)
        batch_point_features = point_features_all.view(batch_size, -1, point_features_all.shape[-1])
        with torch.no_grad():
            pooled_features, pooled_empty_flag = self.roipoint_pool3d_layer(
                batch_points, batch_point_features, rois
            )  # pooled_features: (B, num_rois, num_sampled_points, 3 + C), pooled_empty_flag: (B, num_rois)
            # canonical transformation: translate to RoI center, then rotate by -heading
            roi_center = rois[:, :, 0:3]
            pooled_features[:, :, :, 0:3] -= roi_center.unsqueeze(dim=2)
            pooled_features = pooled_features.view(-1, pooled_features.shape[-2], pooled_features.shape[-1])
            pooled_features[:, :, 0:3] = common_utils.rotate_points_along_z(
                pooled_features[:, :, 0:3], -rois.view(-1, rois.shape[-1])[:, 6]
            )
            pooled_features[pooled_empty_flag.view(-1) > 0] = 0
        return pooled_features
    def forward(self, batch_dict):
        """
        Run proposal generation / target assignment, RoI point pooling, the
        PointNet++ encoder and the cls/reg heads.

        Args:
            batch_dict: input dict (see roipool3d_gpu).
        Returns:
            batch_dict augmented with batch_cls_preds/batch_box_preds (eval) or
            with self.forward_ret_dict populated for the loss (train).
        """
        targets_dict = self.proposal_layer(
            batch_dict, nms_config=self.model_cfg.NMS_CONFIG['TRAIN' if self.training else 'TEST']
        )
        if self.training:
            targets_dict = self.assign_targets(batch_dict)
            batch_dict['rois'] = targets_dict['rois']
            batch_dict['roi_labels'] = targets_dict['roi_labels']
        pooled_features = self.roipool3d_gpu(batch_dict)  # (total_rois, num_sampled_points, 3 + C)
        # Lift the 5 prefix channels (xyz, score, depth) and fuse with features.
        xyz_input = pooled_features[..., 0:self.num_prefix_channels].transpose(1, 2).unsqueeze(dim=3).contiguous()
        xyz_features = self.xyz_up_layer(xyz_input)
        point_features = pooled_features[..., self.num_prefix_channels:].transpose(1, 2).unsqueeze(dim=3)
        merged_features = torch.cat((xyz_features, point_features), dim=1)
        merged_features = self.merge_down_layer(merged_features)
        l_xyz, l_features = [pooled_features[..., 0:3].contiguous()], [merged_features.squeeze(dim=3).contiguous()]
        for i in range(len(self.SA_modules)):
            li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
            l_xyz.append(li_xyz)
            l_features.append(li_features)
        shared_features = l_features[-1]  # (total_rois, num_features, 1)
        rcnn_cls = self.cls_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1)  # (B, 1 or 2)
        rcnn_reg = self.reg_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1)  # (B, C)
        if not self.training:
            batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
                batch_size=batch_dict['batch_size'], rois=batch_dict['rois'], cls_preds=rcnn_cls, box_preds=rcnn_reg
            )
            batch_dict['batch_cls_preds'] = batch_cls_preds
            batch_dict['batch_box_preds'] = batch_box_preds
            batch_dict['cls_preds_normalized'] = False
        else:
            targets_dict['rcnn_cls'] = rcnn_cls
            targets_dict['rcnn_reg'] = rcnn_reg
            self.forward_ret_dict = targets_dict
        return batch_dict
| 7,835 | 42.533333 | 116 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/roi_heads/target_assigner/proposal_target_layer.py | import numpy as np
import torch
import torch.nn as nn
from ....ops.iou3d_nms import iou3d_nms_utils
class ProposalTargetLayer(nn.Module):
    def __init__(self, roi_sampler_cfg):
        """
        Args:
            roi_sampler_cfg: RoI sampling config (ROI_PER_IMAGE, FG/BG IoU
                thresholds, CLS_SCORE_TYPE, ...), read by forward/sampling.
        """
        super().__init__()
        self.roi_sampler_cfg = roi_sampler_cfg
    def forward(self, batch_dict):
        """
        Sample RoIs for the RCNN stage and build their regression/classification
        targets.

        Args:
            batch_dict:
                batch_size:
                rois: (B, num_rois, 7 + C)
                roi_scores: (B, num_rois)
                gt_boxes: (B, N, 7 + C + 1)
                roi_labels: (B, num_rois)
        Returns:
            batch_dict:
                rois: (B, M, 7 + C)
                gt_of_rois: (B, M, 7 + C)
                gt_iou_of_rois: (B, M)
                roi_scores: (B, M)
                roi_labels: (B, M)
                reg_valid_mask: (B, M)
                rcnn_cls_labels: (B, M)
        """
        batch_rois, batch_gt_of_rois, batch_roi_ious, batch_roi_scores, batch_roi_labels = self.sample_rois_for_rcnn(
            batch_dict=batch_dict
        )
        # regression valid mask: only RoIs above the foreground IoU threshold regress boxes
        reg_valid_mask = (batch_roi_ious > self.roi_sampler_cfg.REG_FG_THRESH).long()
        # classification label
        if self.roi_sampler_cfg.CLS_SCORE_TYPE == 'cls':
            # Hard labels: fg=1, bg=0; IoUs in the ambiguous band are ignored (-1).
            batch_cls_labels = (batch_roi_ious > self.roi_sampler_cfg.CLS_FG_THRESH).long()
            ignore_mask = (batch_roi_ious > self.roi_sampler_cfg.CLS_BG_THRESH) & \
                          (batch_roi_ious < self.roi_sampler_cfg.CLS_FG_THRESH)
            batch_cls_labels[ignore_mask > 0] = -1
        elif self.roi_sampler_cfg.CLS_SCORE_TYPE == 'roi_iou':
            # Soft labels: linearly interpolate IoUs between bg and fg thresholds.
            iou_bg_thresh = self.roi_sampler_cfg.CLS_BG_THRESH
            iou_fg_thresh = self.roi_sampler_cfg.CLS_FG_THRESH
            fg_mask = batch_roi_ious > iou_fg_thresh
            bg_mask = batch_roi_ious < iou_bg_thresh
            interval_mask = (fg_mask == 0) & (bg_mask == 0)
            batch_cls_labels = (fg_mask > 0).float()
            batch_cls_labels[interval_mask] = \
                (batch_roi_ious[interval_mask] - iou_bg_thresh) / (iou_fg_thresh - iou_bg_thresh)
        else:
            raise NotImplementedError
        targets_dict = {'rois': batch_rois, 'gt_of_rois': batch_gt_of_rois, 'gt_iou_of_rois': batch_roi_ious,
                        'roi_scores': batch_roi_scores, 'roi_labels': batch_roi_labels,
                        'reg_valid_mask': reg_valid_mask,
                        'rcnn_cls_labels': batch_cls_labels}
        return targets_dict
def sample_rois_for_rcnn(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
rois: (B, num_rois, 7 + C)
roi_scores: (B, num_rois)
gt_boxes: (B, N, 7 + C + 1)
roi_labels: (B, num_rois)
Returns:
"""
batch_size = batch_dict['batch_size']
rois = batch_dict['rois']
roi_scores = batch_dict['roi_scores']
roi_labels = batch_dict['roi_labels']
gt_boxes = batch_dict['gt_boxes']
code_size = rois.shape[-1]
batch_rois = rois.new_zeros(batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE, code_size)
batch_gt_of_rois = rois.new_zeros(batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE, code_size + 1)
batch_roi_ious = rois.new_zeros(batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE)
batch_roi_scores = rois.new_zeros(batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE)
batch_roi_labels = rois.new_zeros((batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE), dtype=torch.long)
for index in range(batch_size):
cur_roi, cur_gt, cur_roi_labels, cur_roi_scores = \
rois[index], gt_boxes[index], roi_labels[index], roi_scores[index]
k = cur_gt.__len__() - 1
while k > 0 and cur_gt[k].sum() == 0:
k -= 1
cur_gt = cur_gt[:k + 1]
cur_gt = cur_gt.new_zeros((1, cur_gt.shape[1])) if len(cur_gt) == 0 else cur_gt
if self.roi_sampler_cfg.get('SAMPLE_ROI_BY_EACH_CLASS', False):
max_overlaps, gt_assignment = self.get_max_iou_with_same_class(
rois=cur_roi, roi_labels=cur_roi_labels,
gt_boxes=cur_gt[:, 0:7], gt_labels=cur_gt[:, -1].long()
)
else:
iou3d = iou3d_nms_utils.boxes_iou3d_gpu(cur_roi, cur_gt[:, 0:7]) # (M, N)
max_overlaps, gt_assignment = torch.max(iou3d, dim=1)
sampled_inds = self.subsample_rois(max_overlaps=max_overlaps)
batch_rois[index] = cur_roi[sampled_inds]
batch_roi_labels[index] = cur_roi_labels[sampled_inds]
batch_roi_ious[index] = max_overlaps[sampled_inds]
batch_roi_scores[index] = cur_roi_scores[sampled_inds]
batch_gt_of_rois[index] = cur_gt[gt_assignment[sampled_inds]]
return batch_rois, batch_gt_of_rois, batch_roi_ious, batch_roi_scores, batch_roi_labels
def subsample_rois(self, max_overlaps):
# sample fg, easy_bg, hard_bg
fg_rois_per_image = int(np.round(self.roi_sampler_cfg.FG_RATIO * self.roi_sampler_cfg.ROI_PER_IMAGE))
fg_thresh = min(self.roi_sampler_cfg.REG_FG_THRESH, self.roi_sampler_cfg.CLS_FG_THRESH)
fg_inds = ((max_overlaps >= fg_thresh)).nonzero().view(-1)
easy_bg_inds = ((max_overlaps < self.roi_sampler_cfg.CLS_BG_THRESH_LO)).nonzero().view(-1)
hard_bg_inds = ((max_overlaps < self.roi_sampler_cfg.REG_FG_THRESH) &
(max_overlaps >= self.roi_sampler_cfg.CLS_BG_THRESH_LO)).nonzero().view(-1)
fg_num_rois = fg_inds.numel()
bg_num_rois = hard_bg_inds.numel() + easy_bg_inds.numel()
if fg_num_rois > 0 and bg_num_rois > 0:
# sampling fg
fg_rois_per_this_image = min(fg_rois_per_image, fg_num_rois)
rand_num = torch.from_numpy(np.random.permutation(fg_num_rois)).type_as(max_overlaps).long()
fg_inds = fg_inds[rand_num[:fg_rois_per_this_image]]
# sampling bg
bg_rois_per_this_image = self.roi_sampler_cfg.ROI_PER_IMAGE - fg_rois_per_this_image
bg_inds = self.sample_bg_inds(
hard_bg_inds, easy_bg_inds, bg_rois_per_this_image, self.roi_sampler_cfg.HARD_BG_RATIO
)
elif fg_num_rois > 0 and bg_num_rois == 0:
# sampling fg
rand_num = np.floor(np.random.rand(self.roi_sampler_cfg.ROI_PER_IMAGE) * fg_num_rois)
rand_num = torch.from_numpy(rand_num).type_as(max_overlaps).long()
fg_inds = fg_inds[rand_num]
bg_inds = []
elif bg_num_rois > 0 and fg_num_rois == 0:
# sampling bg
bg_rois_per_this_image = self.roi_sampler_cfg.ROI_PER_IMAGE
bg_inds = self.sample_bg_inds(
hard_bg_inds, easy_bg_inds, bg_rois_per_this_image, self.roi_sampler_cfg.HARD_BG_RATIO
)
else:
print('maxoverlaps:(min=%f, max=%f)' % (max_overlaps.min().item(), max_overlaps.max().item()))
print('ERROR: FG=%d, BG=%d' % (fg_num_rois, bg_num_rois))
raise NotImplementedError
sampled_inds = torch.cat((fg_inds, bg_inds), dim=0)
return sampled_inds
@staticmethod
def sample_bg_inds(hard_bg_inds, easy_bg_inds, bg_rois_per_this_image, hard_bg_ratio):
if hard_bg_inds.numel() > 0 and easy_bg_inds.numel() > 0:
hard_bg_rois_num = min(int(bg_rois_per_this_image * hard_bg_ratio), len(hard_bg_inds))
easy_bg_rois_num = bg_rois_per_this_image - hard_bg_rois_num
# sampling hard bg
rand_idx = torch.randint(low=0, high=hard_bg_inds.numel(), size=(hard_bg_rois_num,)).long()
hard_bg_inds = hard_bg_inds[rand_idx]
# sampling easy bg
rand_idx = torch.randint(low=0, high=easy_bg_inds.numel(), size=(easy_bg_rois_num,)).long()
easy_bg_inds = easy_bg_inds[rand_idx]
bg_inds = torch.cat([hard_bg_inds, easy_bg_inds], dim=0)
elif hard_bg_inds.numel() > 0 and easy_bg_inds.numel() == 0:
hard_bg_rois_num = bg_rois_per_this_image
# sampling hard bg
rand_idx = torch.randint(low=0, high=hard_bg_inds.numel(), size=(hard_bg_rois_num,)).long()
bg_inds = hard_bg_inds[rand_idx]
elif hard_bg_inds.numel() == 0 and easy_bg_inds.numel() > 0:
easy_bg_rois_num = bg_rois_per_this_image
# sampling easy bg
rand_idx = torch.randint(low=0, high=easy_bg_inds.numel(), size=(easy_bg_rois_num,)).long()
bg_inds = easy_bg_inds[rand_idx]
else:
raise NotImplementedError
return bg_inds
@staticmethod
def get_max_iou_with_same_class(rois, roi_labels, gt_boxes, gt_labels):
"""
Args:
rois: (N, 7)
roi_labels: (N)
gt_boxes: (N, )
gt_labels:
Returns:
"""
"""
:param rois: (N, 7)
:param roi_labels: (N)
:param gt_boxes: (N, 8)
:return:
"""
max_overlaps = rois.new_zeros(rois.shape[0])
gt_assignment = roi_labels.new_zeros(roi_labels.shape[0])
for k in range(gt_labels.min().item(), gt_labels.max().item() + 1):
roi_mask = (roi_labels == k)
gt_mask = (gt_labels == k)
if roi_mask.sum() > 0 and gt_mask.sum() > 0:
cur_roi = rois[roi_mask]
cur_gt = gt_boxes[gt_mask]
original_gt_assignment = gt_mask.nonzero().view(-1)
iou3d = iou3d_nms_utils.boxes_iou3d_gpu(cur_roi, cur_gt) # (M, N)
cur_max_overlaps, cur_gt_assignment = torch.max(iou3d, dim=1)
max_overlaps[roi_mask] = cur_max_overlaps
gt_assignment[roi_mask] = original_gt_assignment[cur_gt_assignment]
return max_overlaps, gt_assignment
| 9,946 | 42.436681 | 117 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/model_utils/centernet_utils.py | # This file is modified from https://github.com/tianweiy/CenterPoint
import torch
import torch.nn.functional as F
import numpy as np
import numba
def gaussian_radius(height, width, min_overlap=0.5):
    """
    Per-object Gaussian radius for center-heatmap targets (CornerNet formula).
    Args:
        height: (N) tensor of box heights on the feature map
        width: (N) tensor of box widths on the feature map
        min_overlap: minimum IoU a shifted box must keep with the GT box
    Returns:
        (N) tensor: the smallest of the three candidate radii
    """
    def _root(a, b, c):
        # NOTE: keeps the original (b + sqrt(b^2 - 4ac)) / 2 form,
        # i.e. the denominator is 2 rather than 2a, matching upstream CenterPoint.
        return (b + (b ** 2 - 4 * a * c).sqrt()) / 2
    # Case 1: both corners shift in the same direction.
    r1 = _root(1, height + width, width * height * (1 - min_overlap) / (1 + min_overlap))
    # Case 2: one corner fixed, the other shifts.
    r2 = _root(4, 2 * (height + width), (1 - min_overlap) * width * height)
    # Case 3: both corners shift towards each other.
    r3 = _root(4 * min_overlap, -2 * min_overlap * (height + width),
               (min_overlap - 1) * width * height)
    return torch.min(torch.min(r1, r2), r3)
def gaussian2D(shape, sigma=1):
    """
    Dense 2D Gaussian kernel of the given (rows, cols) shape with peak value 1
    at the centre. Entries below machine epsilon (relative to the peak) are zeroed.
    """
    half_rows = (shape[0] - 1.) / 2.
    half_cols = (shape[1] - 1.) / 2.
    ys, xs = np.ogrid[-half_rows:half_rows + 1, -half_cols:half_cols + 1]
    kernel = np.exp(-(xs * xs + ys * ys) / (2 * sigma * sigma))
    # Suppress numerically negligible tails.
    kernel[kernel < np.finfo(kernel.dtype).eps * kernel.max()] = 0
    return kernel
def draw_gaussian_to_heatmap(heatmap, center, radius, k=1, valid_mask=None):
    """
    Splat a 2D Gaussian of the given radius onto `heatmap` (in place) around `center`.
    Args:
        heatmap: (H, W) tensor, updated in place via element-wise max
        center: (x, y) peak position
        radius: int Gaussian radius; kernel size is 2 * radius + 1
        k: peak scale factor
        valid_mask: optional (H, W) mask; kernel values outside the mask are dropped
    Returns:
        the same heatmap tensor
    """
    size = 2 * radius + 1
    kernel = gaussian2D((size, size), sigma=size / 6)
    cx, cy = int(center[0]), int(center[1])
    h, w = heatmap.shape[0:2]
    # Clip the kernel so it stays inside the heatmap borders.
    left, right = min(cx, radius), min(w - cx, radius + 1)
    top, bottom = min(cy, radius), min(h - cy, radius + 1)
    roi = heatmap[cy - top:cy + bottom, cx - left:cx + right]
    patch = torch.from_numpy(
        kernel[radius - top:radius + bottom, radius - left:radius + right]
    ).to(heatmap.device).float()
    if min(patch.shape) > 0 and min(roi.shape) > 0:  # TODO debug
        if valid_mask is not None:
            cur_valid = valid_mask[cy - top:cy + bottom, cx - left:cx + right]
            patch = patch * cur_valid.float()
        # Writing through `out=` updates the original heatmap via the `roi` view.
        torch.max(roi, patch * k, out=roi)
    return heatmap
def _nms(heat, kernel=3):
pad = (kernel - 1) // 2
hmax = F.max_pool2d(heat, (kernel, kernel), stride=1, padding=pad)
keep = (hmax == heat).float()
return heat * keep
@numba.jit(nopython=True)
def circle_nms(dets, thresh):
    """
    Greedy NMS by squared centre distance (numba-compiled).
    dets: (N, 3) array of [x, y, score]; thresh: squared distance threshold.
    Returns the kept indices, highest score first.
    """
    xs = dets[:, 0]
    ys = dets[:, 1]
    scores = dets[:, 2]
    order = scores.argsort()[::-1].astype(np.int32)  # highest->lowest
    n_dets = dets.shape[0]
    removed = np.zeros((n_dets), dtype=np.int32)
    keep = []
    for a in range(n_dets):
        i = order[a]  # current highest-scoring remaining box
        if removed[i] == 1:
            continue
        keep.append(i)
        # Drop every remaining lower-scored box whose centre is too close.
        for b in range(a + 1, n_dets):
            j = order[b]
            if removed[j] == 1:
                continue
            d2 = (xs[i] - xs[j]) ** 2 + (ys[i] - ys[j]) ** 2
            if d2 <= thresh:
                removed[j] = 1
    return keep
def _circle_nms(boxes, min_radius, post_max_size=83):
    """
    NMS by centre distance; wraps the numba `circle_nms` kernel and returns
    at most `post_max_size` kept indices as a long tensor on the boxes' device.
    """
    kept = np.array(circle_nms(boxes.cpu().numpy(), thresh=min_radius))[:post_max_size]
    return torch.from_numpy(kept).long().to(boxes.device)
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _transpose_and_gather_feat(feat, ind):
    """
    Gather per-location feature vectors from a (B, C, H, W) map.
    `ind` holds flattened spatial indices into H*W; returns (B, K, C).
    """
    channels_last = feat.permute(0, 2, 3, 1).contiguous()          # (B, H, W, C)
    flat = channels_last.view(channels_last.size(0), -1, channels_last.size(3))  # (B, H*W, C)
    return _gather_feat(flat, ind)
def _topk(scores, K=40):
    # Select the global top-K peaks of a (B, num_class, H, W) heatmap.
    # Returns per-peak score, flattened spatial index, class id and (y, x) position.
    batch, num_class, height, width = scores.size()
    # Top-K per class over the flattened H*W plane.
    topk_scores, topk_inds = torch.topk(scores.flatten(2, 3), K)
    topk_inds = topk_inds % (height * width)
    topk_ys = (topk_inds // width).float()
    topk_xs = (topk_inds % width).int().float()
    # Reduce the per-class candidates (B, num_class*K) to a global top-K.
    topk_score, topk_ind = torch.topk(topk_scores.view(batch, -1), K)
    # Class id is the row of the flattened (class, K) layout the winner came from.
    topk_classes = (topk_ind // K).int()
    topk_inds = _gather_feat(topk_inds.view(batch, -1, 1), topk_ind).view(batch, K)
    topk_ys = _gather_feat(topk_ys.view(batch, -1, 1), topk_ind).view(batch, K)
    topk_xs = _gather_feat(topk_xs.view(batch, -1, 1), topk_ind).view(batch, K)
    return topk_score, topk_inds, topk_classes, topk_ys, topk_xs
def decode_bbox_from_heatmap(heatmap, rot_cos, rot_sin, center, center_z, dim,
                             point_cloud_range=None, voxel_size=None, feature_map_stride=None, vel=None, K=100,
                             circle_nms=False, score_thresh=None, post_center_limit_range=None):
    # Decode final 3D boxes from CenterPoint-style prediction maps.
    # heatmap: (B, num_class, H, W); the other arguments are per-pixel regression maps.
    # Returns a list (length B) of dicts with pred_boxes / pred_scores / pred_labels.
    batch_size, num_class, _, _ = heatmap.size()
    if circle_nms:
        # TODO: not checked yet
        assert False, 'not checked yet'
        heatmap = _nms(heatmap)
    scores, inds, class_ids, ys, xs = _topk(heatmap, K=K)
    # Gather the regressed attributes at the K peak locations.
    center = _transpose_and_gather_feat(center, inds).view(batch_size, K, 2)
    rot_sin = _transpose_and_gather_feat(rot_sin, inds).view(batch_size, K, 1)
    rot_cos = _transpose_and_gather_feat(rot_cos, inds).view(batch_size, K, 1)
    center_z = _transpose_and_gather_feat(center_z, inds).view(batch_size, K, 1)
    dim = _transpose_and_gather_feat(dim, inds).view(batch_size, K, 3)
    angle = torch.atan2(rot_sin, rot_cos)
    # Sub-pixel offset refinement, then map feature-map coords back to metric coords.
    xs = xs.view(batch_size, K, 1) + center[:, :, 0:1]
    ys = ys.view(batch_size, K, 1) + center[:, :, 1:2]
    xs = xs * feature_map_stride * voxel_size[0] + point_cloud_range[0]
    ys = ys * feature_map_stride * voxel_size[1] + point_cloud_range[1]
    box_part_list = [xs, ys, center_z, dim, angle]
    if vel is not None:
        vel = _transpose_and_gather_feat(vel, inds).view(batch_size, K, 2)
        box_part_list.append(vel)
    final_box_preds = torch.cat((box_part_list), dim=-1)
    final_scores = scores.view(batch_size, K)
    final_class_ids = class_ids.view(batch_size, K)
    assert post_center_limit_range is not None
    # Drop boxes whose centres fall outside the allowed spatial range.
    mask = (final_box_preds[..., :3] >= post_center_limit_range[:3]).all(2)
    mask &= (final_box_preds[..., :3] <= post_center_limit_range[3:]).all(2)
    if score_thresh is not None:
        mask &= (final_scores > score_thresh)
    ret_pred_dicts = []
    for k in range(batch_size):
        cur_mask = mask[k]
        cur_boxes = final_box_preds[k, cur_mask]
        cur_scores = final_scores[k, cur_mask]
        cur_labels = final_class_ids[k, cur_mask]
        if circle_nms:
            # NOTE(review): dead branch (unreachable — the assert above fires first);
            # it also references undefined names `min_radius` and `nms_post_max_size`
            # and reuses `scores` instead of `cur_scores` — fix before enabling.
            assert False, 'not checked yet'
            centers = cur_boxes[:, [0, 1]]
            boxes = torch.cat((centers, scores.view(-1, 1)), dim=1)
            keep = _circle_nms(boxes, min_radius=min_radius, post_max_size=nms_post_max_size)
            cur_boxes = cur_boxes[keep]
            cur_scores = cur_scores[keep]
            cur_labels = cur_labels[keep]
        ret_pred_dicts.append({
            'pred_boxes': cur_boxes,
            'pred_scores': cur_scores,
            'pred_labels': cur_labels
        })
    return ret_pred_dicts
| 7,235 | 32.345622 | 111 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/model_utils/model_nms_utils.py | import torch
from ...ops.iou3d_nms import iou3d_nms_utils
def class_agnostic_nms(box_scores, box_preds, nms_config, score_thresh=None):
    """
    Class-agnostic NMS over predicted 3D boxes.
    Args:
        box_scores: (N) confidence per box
        box_preds: (N, 7 + C) boxes
        nms_config: cfg with NMS_TYPE / NMS_THRESH / NMS_PRE_MAXSIZE / NMS_POST_MAXSIZE
        score_thresh: optional score filter applied before NMS
    Returns:
        (kept indices into the original N boxes, their scores)
    """
    all_scores = box_scores
    scores_mask = None
    if score_thresh is not None:
        scores_mask = box_scores >= score_thresh
        box_scores = box_scores[scores_mask]
        box_preds = box_preds[scores_mask]
    selected = []
    if box_scores.shape[0] > 0:
        # Keep only the best NMS_PRE_MAXSIZE candidates before running NMS.
        topk = min(nms_config.NMS_PRE_MAXSIZE, box_scores.shape[0])
        nms_scores, order = torch.topk(box_scores, k=topk)
        nms_fn = getattr(iou3d_nms_utils, nms_config.NMS_TYPE)
        keep_idx, _ = nms_fn(
            box_preds[order][:, 0:7], nms_scores, nms_config.NMS_THRESH, **nms_config
        )
        selected = order[keep_idx[:nms_config.NMS_POST_MAXSIZE]]
    if score_thresh is not None:
        # Map the kept indices back into the unfiltered box set.
        selected = scores_mask.nonzero().view(-1)[selected]
    return selected, all_scores[selected]
def multi_classes_nms(cls_scores, box_preds, nms_config, score_thresh=None):
    """
    Per-class NMS: runs NMS independently for each class column of cls_scores.
    Args:
        cls_scores: (N, num_class)
        box_preds: (N, 7 + C)
        nms_config: cfg with NMS_TYPE / NMS_THRESH / NMS_PRE_MAXSIZE / NMS_POST_MAXSIZE
        score_thresh: optional per-class score filter
    Returns:
        (pred_scores, pred_labels, pred_boxes) concatenated over classes;
        labels are the 0-based class column indices
    """
    scores_out, labels_out, boxes_out = [], [], []
    for cls_idx in range(cls_scores.shape[1]):
        if score_thresh is None:
            cur_scores = cls_scores[:, cls_idx]
            cur_boxes = box_preds
        else:
            keep_mask = cls_scores[:, cls_idx] >= score_thresh
            cur_scores = cls_scores[keep_mask, cls_idx]
            cur_boxes = box_preds[keep_mask]
        selected = []
        if cur_scores.shape[0] > 0:
            # Pre-filter to the best NMS_PRE_MAXSIZE candidates of this class.
            topk = min(nms_config.NMS_PRE_MAXSIZE, cur_scores.shape[0])
            nms_scores, order = torch.topk(cur_scores, k=topk)
            nms_fn = getattr(iou3d_nms_utils, nms_config.NMS_TYPE)
            keep_idx, _ = nms_fn(
                cur_boxes[order][:, 0:7], nms_scores, nms_config.NMS_THRESH, **nms_config
            )
            selected = order[keep_idx[:nms_config.NMS_POST_MAXSIZE]]
        scores_out.append(cur_scores[selected])
        labels_out.append(cur_scores.new_ones(len(selected)).long() * cls_idx)
        boxes_out.append(cur_boxes[selected])
    return (torch.cat(scores_out, dim=0),
            torch.cat(labels_out, dim=0),
            torch.cat(boxes_out, dim=0))
| 2,457 | 35.686567 | 116 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/model_utils/basic_block_2d.py | import torch.nn as nn
class BasicBlock2D(nn.Module):
    """Conv2d -> BatchNorm2d -> ReLU building block."""
    def __init__(self, in_channels, out_channels, **kwargs):
        """
        Args:
            in_channels: int, number of input channels
            out_channels: int, number of output channels
            **kwargs: extra arguments forwarded to nn.Conv2d (kernel_size, stride, ...)
        """
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        # Submodule names (conv/bn/relu) are kept so state_dict keys stay stable.
        self.conv = nn.Conv2d(in_channels=in_channels,
                              out_channels=out_channels,
                              **kwargs)
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
    def forward(self, features):
        """
        Args:
            features: (B, C_in, H, W) input features
        Returns:
            (B, C_out, H, W) activated output features
        """
        return self.relu(self.bn(self.conv(features)))
| 1,038 | 28.685714 | 60 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/backbones_2d/base_bev_backbone.py | import numpy as np
import torch
import torch.nn as nn
class BaseBEVBackbone(nn.Module):
    """
    2D BEV backbone: a stack of strided conv blocks whose outputs are brought
    to a common resolution by per-level deblocks (transposed convs, or strided
    convs for fractional strides) and concatenated along channels.

    Config keys:
        LAYER_NUMS / LAYER_STRIDES / NUM_FILTERS: per-level conv block definition
        UPSAMPLE_STRIDES / NUM_UPSAMPLE_FILTERS: per-level deblock definition;
            a stride < 1 means downsampling by round(1/stride) instead
    """
    def __init__(self, model_cfg, input_channels):
        super().__init__()
        self.model_cfg = model_cfg
        if self.model_cfg.get('LAYER_NUMS', None) is not None:
            assert len(self.model_cfg.LAYER_NUMS) == len(self.model_cfg.LAYER_STRIDES) == len(self.model_cfg.NUM_FILTERS)
            layer_nums = self.model_cfg.LAYER_NUMS
            layer_strides = self.model_cfg.LAYER_STRIDES
            num_filters = self.model_cfg.NUM_FILTERS
        else:
            layer_nums = layer_strides = num_filters = []
        if self.model_cfg.get('UPSAMPLE_STRIDES', None) is not None:
            assert len(self.model_cfg.UPSAMPLE_STRIDES) == len(self.model_cfg.NUM_UPSAMPLE_FILTERS)
            num_upsample_filters = self.model_cfg.NUM_UPSAMPLE_FILTERS
            upsample_strides = self.model_cfg.UPSAMPLE_STRIDES
        else:
            upsample_strides = num_upsample_filters = []
        num_levels = len(layer_nums)
        # Input channels per level: raw input for level 0, previous level's output after.
        c_in_list = [input_channels, *num_filters[:-1]]
        self.blocks = nn.ModuleList()
        self.deblocks = nn.ModuleList()
        for idx in range(num_levels):
            cur_layers = [
                nn.ZeroPad2d(1),
                nn.Conv2d(
                    c_in_list[idx], num_filters[idx], kernel_size=3,
                    stride=layer_strides[idx], padding=0, bias=False
                ),
                nn.BatchNorm2d(num_filters[idx], eps=1e-3, momentum=0.01),
                nn.ReLU()
            ]
            for k in range(layer_nums[idx]):
                cur_layers.extend([
                    nn.Conv2d(num_filters[idx], num_filters[idx], kernel_size=3, padding=1, bias=False),
                    nn.BatchNorm2d(num_filters[idx], eps=1e-3, momentum=0.01),
                    nn.ReLU()
                ])
            self.blocks.append(nn.Sequential(*cur_layers))
            if len(upsample_strides) > 0:
                stride = upsample_strides[idx]
                if stride >= 1:
                    self.deblocks.append(nn.Sequential(
                        nn.ConvTranspose2d(
                            num_filters[idx], num_upsample_filters[idx],
                            upsample_strides[idx],
                            stride=upsample_strides[idx], bias=False
                        ),
                        nn.BatchNorm2d(num_upsample_filters[idx], eps=1e-3, momentum=0.01),
                        nn.ReLU()
                    ))
                else:
                    # Fractional "upsample" stride means downsampling with a strided conv.
                    # BUGFIX: np.int was removed in NumPy 1.24; use the builtin int instead.
                    stride = int(np.round(1 / stride))
                    self.deblocks.append(nn.Sequential(
                        nn.Conv2d(
                            num_filters[idx], num_upsample_filters[idx],
                            stride,
                            stride=stride, bias=False
                        ),
                        nn.BatchNorm2d(num_upsample_filters[idx], eps=1e-3, momentum=0.01),
                        nn.ReLU()
                    ))
        c_in = sum(num_upsample_filters)
        if len(upsample_strides) > num_levels:
            # One extra trailing deblock applied to the concatenated feature map.
            self.deblocks.append(nn.Sequential(
                nn.ConvTranspose2d(c_in, c_in, upsample_strides[-1], stride=upsample_strides[-1], bias=False),
                nn.BatchNorm2d(c_in, eps=1e-3, momentum=0.01),
                nn.ReLU(),
            ))
        self.num_bev_features = c_in
    def forward(self, data_dict):
        """
        Args:
            data_dict: must hold spatial_features (B, C, H, W)
        Returns:
            data_dict with spatial_features_2d (B, num_bev_features, H', W') added
        """
        spatial_features = data_dict['spatial_features']
        ups = []
        ret_dict = {}
        x = spatial_features
        for i in range(len(self.blocks)):
            x = self.blocks[i](x)
            stride = int(spatial_features.shape[2] / x.shape[2])
            ret_dict['spatial_features_%dx' % stride] = x
            if len(self.deblocks) > 0:
                ups.append(self.deblocks[i](x))
            else:
                ups.append(x)
        # Fuse all levels at the common resolution.
        if len(ups) > 1:
            x = torch.cat(ups, dim=1)
        elif len(ups) == 1:
            x = ups[0]
        if len(self.deblocks) > len(self.blocks):
            x = self.deblocks[-1](x)
        data_dict['spatial_features_2d'] = x
        return data_dict
| 4,318 | 37.221239 | 121 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/backbones_2d/map_to_bev/conv2d_collapse.py | import torch
import torch.nn as nn
from pcdet.models.model_utils.basic_block_2d import BasicBlock2D
class Conv2DCollapse(nn.Module):
    """Collapses a voxel feature volume to BEV by stacking the height slices
    along channels and reducing them with a single 2D conv block."""
    def __init__(self, model_cfg, grid_size):
        """
        Args:
            model_cfg: EasyDict, model configuration (NUM_BEV_FEATURES, ARGS)
            grid_size: (X, Y, Z) voxel grid size
        """
        super().__init__()
        self.model_cfg = model_cfg
        self.num_heights = grid_size[-1]
        self.num_bev_features = self.model_cfg.NUM_BEV_FEATURES
        self.block = BasicBlock2D(
            in_channels=self.num_bev_features * self.num_heights,
            out_channels=self.num_bev_features,
            **self.model_cfg.ARGS
        )
    def forward(self, batch_dict):
        """
        Args:
            batch_dict: must hold voxel_features (B, C, Z, Y, X)
        Returns:
            batch_dict with spatial_features (B, C, Y, X) added
        """
        volume = batch_dict["voxel_features"]
        stacked = volume.flatten(start_dim=1, end_dim=2)   # (B, C, Z, Y, X) -> (B, C*Z, Y, X)
        batch_dict["spatial_features"] = self.block(stacked)  # (B, C*Z, Y, X) -> (B, C, Y, X)
        return batch_dict
| 1,451 | 36.230769 | 106 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/backbones_2d/map_to_bev/pointpillar_scatter.py | import torch
import torch.nn as nn
class PointPillarScatter(nn.Module):
    """Scatters per-pillar features back onto a dense (C, ny, nx) BEV canvas."""
    def __init__(self, model_cfg, grid_size, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        self.num_bev_features = self.model_cfg.NUM_BEV_FEATURES
        self.nx, self.ny, self.nz = grid_size
        assert self.nz == 1
    def forward(self, batch_dict, **kwargs):
        """
        Args:
            batch_dict: holds pillar_features (P, C) and voxel_coords (P, 4)
                with columns [batch_idx, z, y, x]
        Returns:
            batch_dict with spatial_features (B, C*nz, ny, nx) added
        """
        pillar_features = batch_dict['pillar_features']
        coords = batch_dict['voxel_coords']
        batch_size = coords[:, 0].max().int().item() + 1
        canvases = []
        for sample_idx in range(batch_size):
            canvas = torch.zeros(
                self.num_bev_features,
                self.nz * self.nx * self.ny,
                dtype=pillar_features.dtype,
                device=pillar_features.device)
            sample_mask = coords[:, 0] == sample_idx
            sample_coords = coords[sample_mask, :]
            # Flatten (z, y, x) to one spatial index; nz == 1 so z contributes nothing.
            flat_idx = sample_coords[:, 1] + sample_coords[:, 2] * self.nx + sample_coords[:, 3]
            flat_idx = flat_idx.type(torch.long)
            canvas[:, flat_idx] = pillar_features[sample_mask, :].t()
            canvases.append(canvas)
        stacked = torch.stack(canvases, 0)
        batch_dict['spatial_features'] = stacked.view(
            batch_size, self.num_bev_features * self.nz, self.ny, self.nx)
        return batch_dict
| 1,545 | 39.684211 | 123 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/models/backbones_2d/map_to_bev/height_compression.py | import torch.nn as nn
class HeightCompression(nn.Module):
    """Flattens a sparse 3D feature volume into a 2D BEV map by folding the
    depth axis into the channel axis."""
    def __init__(self, model_cfg, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        self.num_bev_features = self.model_cfg.NUM_BEV_FEATURES
    def forward(self, batch_dict):
        """
        Args:
            batch_dict: holds encoded_spconv_tensor (sparse tensor) and
                encoded_spconv_tensor_stride
        Returns:
            batch_dict with spatial_features (N, C*D, H, W) and
            spatial_features_stride added
        """
        dense = batch_dict['encoded_spconv_tensor'].dense()
        n, c, d, h, w = dense.shape
        # Merge the depth dimension D into the channel dimension.
        batch_dict['spatial_features'] = dense.view(n, c * d, h, w)
        batch_dict['spatial_features_stride'] = batch_dict['encoded_spconv_tensor_stride']
        return batch_dict
| 870 | 31.259259 | 90 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/datasets/dataset.py | from collections import defaultdict
from pathlib import Path
import numpy as np
import torch.utils.data as torch_data
from ..utils import common_utils
from .augmentor.data_augmentor import DataAugmentor
from .processor.data_processor import DataProcessor
from .processor.point_feature_encoder import PointFeatureEncoder
class DatasetTemplate(torch_data.Dataset):
    """
    Base class for all datasets: wires up point-feature encoding, augmentation
    and data processing, and provides the shared collate function.
    """
    def __init__(self, dataset_cfg=None, class_names=None, training=True, root_path=None, logger=None):
        super().__init__()
        self.dataset_cfg = dataset_cfg
        self.training = training
        self.class_names = class_names
        # CLEANUP: the original assigned self.logger twice; once is enough.
        self.logger = logger
        self.root_path = root_path if root_path is not None else Path(self.dataset_cfg.DATA_PATH)
        if self.dataset_cfg is None or class_names is None:
            return
        self.point_cloud_range = np.array(self.dataset_cfg.POINT_CLOUD_RANGE, dtype=np.float32)
        self.point_feature_encoder = PointFeatureEncoder(
            self.dataset_cfg.POINT_FEATURE_ENCODING,
            point_cloud_range=self.point_cloud_range
        )
        # Augmentation is only applied while training.
        self.data_augmentor = DataAugmentor(
            self.root_path, self.dataset_cfg.DATA_AUGMENTOR, self.class_names, logger=self.logger
        ) if self.training else None
        self.data_processor = DataProcessor(
            self.dataset_cfg.DATA_PROCESSOR, point_cloud_range=self.point_cloud_range,
            training=self.training, num_point_features=self.point_feature_encoder.num_point_features
        )
        self.grid_size = self.data_processor.grid_size
        self.voxel_size = self.data_processor.voxel_size
        self.total_epochs = 0
        self._merge_all_iters_to_one_epoch = False
        if hasattr(self.data_processor, "depth_downsample_factor"):
            self.depth_downsample_factor = self.data_processor.depth_downsample_factor
        else:
            self.depth_downsample_factor = None
    @property
    def mode(self):
        """'train' or 'test', derived from the training flag."""
        return 'train' if self.training else 'test'
    def __getstate__(self):
        # Loggers are not picklable; drop them when the dataset is serialized
        # (e.g. by multiprocessing dataloader workers).
        d = dict(self.__dict__)
        del d['logger']
        return d
    def __setstate__(self, d):
        self.__dict__.update(d)
    @staticmethod
    def generate_prediction_dicts(batch_dict, pred_dicts, class_names, output_path=None):
        """
        To support a custom dataset, implement this function to receive the predicted results from the model, and then
        transform the unified normative coordinate to your required coordinate, and optionally save them to disk.
        Args:
            batch_dict: dict of original data from the dataloader
            pred_dicts: dict of predicted results from the model
                pred_boxes: (N, 7), Tensor
                pred_scores: (N), Tensor
                pred_labels: (N), Tensor
            class_names:
            output_path: if it is not None, save the results to this path
        Returns:
        """
    def merge_all_iters_to_one_epoch(self, merge=True, epochs=None):
        """Toggle the 'treat all epochs as one long epoch' sampling mode."""
        if merge:
            self._merge_all_iters_to_one_epoch = True
            self.total_epochs = epochs
        else:
            self._merge_all_iters_to_one_epoch = False
    def __len__(self):
        raise NotImplementedError
    def __getitem__(self, index):
        """
        To support a custom dataset, implement this function to load the raw data (and labels), then transform them to
        the unified normative coordinate and call the function self.prepare_data() to process the data and send them
        to the model.
        Args:
            index:
        Returns:
        """
        raise NotImplementedError
    def prepare_data(self, data_dict):
        """
        Args:
            data_dict:
                points: optional, (N, 3 + C_in)
                gt_boxes: optional, (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
                gt_names: optional, (N), string
                ...
        Returns:
            data_dict:
                frame_id: string
                points: (N, 3 + C_in)
                gt_boxes: optional, (N, 7 + C + 1) [x, y, z, dx, dy, dz, heading, ..., class_id]
                use_lead_xyz: bool
                voxels: optional (num_voxels, max_points_per_voxel, 3 + C)
                voxel_coords: optional (num_voxels, 3)
                voxel_num_points: optional (num_voxels)
                ...
        """
        if self.training:
            assert 'gt_boxes' in data_dict, 'gt_boxes should be provided for training'
            gt_boxes_mask = np.array([n in self.class_names for n in data_dict['gt_names']], dtype=np.bool_)
            data_dict = self.data_augmentor.forward(
                data_dict={
                    **data_dict,
                    'gt_boxes_mask': gt_boxes_mask
                }
            )
        if data_dict.get('gt_boxes', None) is not None:
            # Keep only boxes of the configured classes and append the (1-based)
            # class index as an extra column.
            selected = common_utils.keep_arrays_by_name(data_dict['gt_names'], self.class_names)
            data_dict['gt_boxes'] = data_dict['gt_boxes'][selected]
            data_dict['gt_names'] = data_dict['gt_names'][selected]
            gt_classes = np.array([self.class_names.index(n) + 1 for n in data_dict['gt_names']], dtype=np.int32)
            gt_boxes = np.concatenate((data_dict['gt_boxes'], gt_classes.reshape(-1, 1).astype(np.float32)), axis=1)
            data_dict['gt_boxes'] = gt_boxes
            if data_dict.get('gt_boxes2d', None) is not None:
                data_dict['gt_boxes2d'] = data_dict['gt_boxes2d'][selected]
        if data_dict.get('points', None) is not None:
            data_dict = self.point_feature_encoder.forward(data_dict)
        data_dict = self.data_processor.forward(
            data_dict=data_dict
        )
        if self.training and len(data_dict['gt_boxes']) == 0:
            # All GT boxes dropped (e.g. by augmentation) — resample another frame.
            new_index = np.random.randint(self.__len__())
            return self.__getitem__(new_index)
        data_dict.pop('gt_names', None)
        return data_dict
    @staticmethod
    def collate_batch(batch_list, _unused=False):
        """
        Merge a list of per-sample dicts into one batched dict.
        Variable-length entries (points / voxel coords) get a leading batch-index
        column; box arrays are zero-padded to the max count in the batch; images
        and depth maps are NaN-padded to the largest spatial size.
        """
        data_dict = defaultdict(list)
        for cur_sample in batch_list:
            for key, val in cur_sample.items():
                data_dict[key].append(val)
        batch_size = len(batch_list)
        ret = {}
        for key, val in data_dict.items():
            try:
                if key in ['voxels', 'voxel_num_points']:
                    ret[key] = np.concatenate(val, axis=0)
                elif key in ['points', 'voxel_coords']:
                    # Prepend the sample index as an extra leading column.
                    coors = []
                    for i, coor in enumerate(val):
                        coor_pad = np.pad(coor, ((0, 0), (1, 0)), mode='constant', constant_values=i)
                        coors.append(coor_pad)
                    ret[key] = np.concatenate(coors, axis=0)
                elif key in ['gt_boxes']:
                    max_gt = max([len(x) for x in val])
                    batch_gt_boxes3d = np.zeros((batch_size, max_gt, val[0].shape[-1]), dtype=np.float32)
                    for k in range(batch_size):
                        batch_gt_boxes3d[k, :val[k].__len__(), :] = val[k]
                    ret[key] = batch_gt_boxes3d
                elif key in ['gt_boxes2d']:
                    # CLEANUP: removed dead `max_boxes = 0` that was immediately overwritten.
                    max_boxes = max([len(x) for x in val])
                    batch_boxes2d = np.zeros((batch_size, max_boxes, val[0].shape[-1]), dtype=np.float32)
                    for k in range(batch_size):
                        if val[k].size > 0:
                            batch_boxes2d[k, :val[k].__len__(), :] = val[k]
                    ret[key] = batch_boxes2d
                elif key in ["images", "depth_maps"]:
                    # Get largest image size (H, W)
                    max_h = 0
                    max_w = 0
                    for image in val:
                        max_h = max(max_h, image.shape[0])
                        max_w = max(max_w, image.shape[1])
                    # Pad every image up to (max_h, max_w) with NaN,
                    # to be replaced later in the pipeline.
                    images = []
                    for image in val:
                        pad_h = common_utils.get_pad_params(desired_size=max_h, cur_size=image.shape[0])
                        pad_w = common_utils.get_pad_params(desired_size=max_w, cur_size=image.shape[1])
                        pad_value = np.nan
                        if key == "images":
                            pad_width = (pad_h, pad_w, (0, 0))
                        else:  # depth_maps
                            pad_width = (pad_h, pad_w)
                        image_pad = np.pad(image,
                                           pad_width=pad_width,
                                           mode='constant',
                                           constant_values=pad_value)
                        images.append(image_pad)
                    ret[key] = np.stack(images, axis=0)
                else:
                    ret[key] = np.stack(val, axis=0)
            # BUGFIX: was a bare `except:`, which also swallowed KeyboardInterrupt
            # and SystemExit; callers still receive the same TypeError.
            except Exception:
                print('Error in collate_batch: key=%s' % key)
                raise TypeError
        ret['batch_size'] = batch_size
        return ret
| 9,308 | 39.473913 | 118 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/datasets/__init__.py | import torch
# print(torch.__version__)
from torch.utils.data import DataLoader
from torch.utils.data import DistributedSampler as _DistributedSampler
from pcdet.utils import common_utils
from .dataset import DatasetTemplate
from .kitti.kitti_dataset import KittiDataset
from .nuscenes.nuscenes_dataset import NuScenesDataset
from .waymo.waymo_dataset import WaymoDataset
from .pandaset.pandaset_dataset import PandasetDataset
from .lyft.lyft_dataset import LyftDataset
import random
from pcdet.config import cfg
# Registry mapping the dataset name used in configs (DATA_CONFIG.DATASET)
# to the dataset class to instantiate.
__all__ = {
    'DatasetTemplate': DatasetTemplate,
    'KittiDataset': KittiDataset,
    'NuScenesDataset': NuScenesDataset,
    'WaymoDataset': WaymoDataset,
    'PandasetDataset': PandasetDataset,
    'LyftDataset': LyftDataset
}
class DistributedSampler(_DistributedSampler):
    """Distributed sampler with an explicit (optionally disabled) shuffle flag;
    the permutation is seeded by the epoch so all replicas agree."""
    def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
        super().__init__(dataset, num_replicas=num_replicas, rank=rank)
        self.shuffle = shuffle
    def __iter__(self):
        if self.shuffle:
            # Seed with the epoch so every replica draws the same permutation.
            gen = torch.Generator()
            gen.manual_seed(self.epoch)
            order = torch.randperm(len(self.dataset), generator=gen).tolist()
        else:
            order = list(range(len(self.dataset)))
        # Pad so the index list divides evenly across replicas, then take this
        # rank's strided slice.
        order += order[:(self.total_size - len(order))]
        assert len(order) == self.total_size
        order = order[self.rank:self.total_size:self.num_replicas]
        assert len(order) == self.num_samples
        return iter(order)
def build_dataloader(dataset_cfg, class_names, batch_size, dist, root_path=None, workers=4,
                     logger=None, training=True, merge_all_iters_to_one_epoch=False, total_epochs=0):
    """
    Instantiate the dataset named by dataset_cfg.DATASET plus its dataloader
    and (for distributed runs) its sampler.
    Returns:
        (dataset, dataloader, sampler) — sampler is None when dist is False
    """
    dataset_cls = __all__[dataset_cfg.DATASET]
    dataset = dataset_cls(
        dataset_cfg=dataset_cfg,
        class_names=class_names,
        root_path=root_path,
        training=training,
        logger=logger,
    )
    if merge_all_iters_to_one_epoch:
        assert hasattr(dataset, 'merge_all_iters_to_one_epoch')
        dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
    sampler = None
    if dist:
        if training:
            sampler = torch.utils.data.distributed.DistributedSampler(dataset)
        else:
            # Evaluation: deterministic, non-shuffled sharding across ranks.
            rank, world_size = common_utils.get_dist_info()
            sampler = DistributedSampler(dataset, world_size, rank, shuffle=False)
    dataloader = DataLoader(
        dataset, batch_size=batch_size, pin_memory=True, num_workers=workers,
        shuffle=(sampler is None) and training, collate_fn=dataset.collate_batch,
        drop_last=False, sampler=sampler, timeout=0
    )
    return dataset, dataloader, sampler
def build_active_dataloader(dataset_cfg, class_names, batch_size, dist, root_path=None, workers=4,
                            logger=None, training=True, merge_all_iters_to_one_epoch=False, total_epochs=0,
                            active_training=None):
    """Build the labelled/unlabelled dataset pair and loaders for active training.

    Three dataset instances are created: a throwaway `dataset` holding the full
    sample pool, a `labelled_set` (training=True, so it applies training-time
    processing/augmentation) and an `unlabelled_set` (training=False).

    If `active_training` is given, it is a 4-tuple
    (labelled_ids, labelled_infos, unlabelled_ids, unlabelled_infos) produced
    by a previous selection round and is written straight into the two sets.
    Otherwise the full pool is shuffled and split at
    cfg.ACTIVE_TRAIN.PRE_TRAIN_SAMPLE_NUMS for the pre-training stage.

    Returns:
        (labelled_set, unlabelled_set,
         dataloader_labelled, dataloader_unlabelled,
         sampler_labelled, sampler_unlabelled)
    """
    dataset = __all__[dataset_cfg.DATASET](
        dataset_cfg=dataset_cfg,
        class_names=class_names,
        root_path=root_path,
        training=training,
        logger=logger,
    )
    # Labelled pool goes through training-mode processing (augmentation etc.).
    labelled_set = __all__[dataset_cfg.DATASET](
        dataset_cfg=dataset_cfg,
        class_names=class_names,
        root_path=root_path,
        training=True,
        logger=logger,
    )
    # Unlabelled pool is only scored/queried, so it uses eval-mode processing.
    unlabelled_set = __all__[dataset_cfg.DATASET](
        dataset_cfg=dataset_cfg,
        class_names=class_names,
        root_path=root_path,
        training=False,
        logger=logger,
    )

    if active_training is not None:
        # after selecting samples,
        # build new datasets and dataloaders during active training loop
        if cfg.DATA_CONFIG.DATASET == 'WaymoDataset':
            labelled_set.frame_ids, labelled_set.infos = \
                active_training[0], active_training[1]
            unlabelled_set.frame_ids, unlabelled_set.infos = \
                active_training[2], active_training[3]
        else:  # kitti cases
            labelled_set.sample_id_list, labelled_set.kitti_infos = \
                active_training[0], active_training[1]
            unlabelled_set.sample_id_list, unlabelled_set.kitti_infos = \
                active_training[2], active_training[3]
    else:
        # Build pre-train datasets and dataloaders before active training loop.
        # NOTE(review): random.shuffle uses the global RNG; reproducibility
        # depends on the caller seeding `random` beforehand.
        if cfg.DATA_CONFIG.DATASET == 'WaymoDataset':
            infos = dataset.infos
            random.shuffle(infos)
            labelled_set.infos = infos[:cfg.ACTIVE_TRAIN.PRE_TRAIN_SAMPLE_NUMS]
            unlabelled_set.infos = infos[cfg.ACTIVE_TRAIN.PRE_TRAIN_SAMPLE_NUMS:]
            for info in labelled_set.infos:
                labelled_set.frame_ids.append(info["frame_id"])
            for info in unlabelled_set.infos:
                unlabelled_set.frame_ids.append(info["frame_id"])
        else:  # kitti case
            pairs = list(zip(dataset.sample_id_list, dataset.kitti_infos))
            random.shuffle(pairs)
            # labelled_set, unlabelled_set = copy.deepcopy(dataset), copy.deepcopy(dataset)
            # NOTE(review): zip(*...) yields tuples, not lists, for
            # sample_id_list / kitti_infos — downstream code must tolerate that.
            labelled_set.sample_id_list, labelled_set.kitti_infos = \
                zip(*pairs[:cfg.ACTIVE_TRAIN.PRE_TRAIN_SAMPLE_NUMS])
            unlabelled_set.sample_id_list, unlabelled_set.kitti_infos = \
                zip(*pairs[cfg.ACTIVE_TRAIN.PRE_TRAIN_SAMPLE_NUMS:])

    if merge_all_iters_to_one_epoch:
        assert hasattr(dataset, 'merge_all_iters_to_one_epoch')
        labelled_set.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
        unlabelled_set.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)

    if dist:
        if training:
            sampler_labelled = torch.utils.data.distributed.DistributedSampler(labelled_set)
            sampler_unlabelled = torch.utils.data.distributed.DistributedSampler(unlabelled_set)
        else:
            rank, world_size = common_utils.get_dist_info()
            sampler_labelled = DistributedSampler(labelled_set, world_size, rank, shuffle=False)
            sampler_unlabelled = DistributedSampler(unlabelled_set, world_size, rank, shuffle=False)
    else:
        sampler_labelled, sampler_unlabelled = None, None

    dataloader_labelled = DataLoader(
        labelled_set, batch_size=batch_size, pin_memory=True, num_workers=workers,
        shuffle=(sampler_labelled is None) and training, collate_fn=labelled_set.collate_batch,
        drop_last=False, sampler=sampler_labelled, timeout=0
    )
    dataloader_unlabelled = DataLoader(
        unlabelled_set, batch_size=batch_size, pin_memory=True, num_workers=workers,
        shuffle=(sampler_unlabelled is None) and training, collate_fn=unlabelled_set.collate_batch,
        drop_last=False, sampler=sampler_unlabelled, timeout=0
    )
    # The full pool is no longer needed once the split is materialized.
    del dataset

    return labelled_set, unlabelled_set, \
        dataloader_labelled, dataloader_unlabelled, \
        sampler_labelled, sampler_unlabelled
| 6,957 | 37.230769 | 107 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/datasets/waymo/waymo_dataset.py | # OpenPCDet PyTorch Dataloader and Evaluation Tools for Waymo Open Dataset
# Reference https://github.com/open-mmlab/OpenPCDet
# Written by Shaoshuai Shi, Chaoxu Guo
# All Rights Reserved 2019-2020.
import os
import pickle
import copy
import numpy as np
import torch
import multiprocessing
import SharedArray
import torch.distributed as dist
from tqdm import tqdm
from pathlib import Path
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from ...utils import box_utils, common_utils
from ..dataset import DatasetTemplate
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0"
class WaymoDataset(DatasetTemplate):
    """Waymo Open Dataset loader.

    Reads per-sequence pickled info files produced by the preprocessing step,
    loads per-frame lidar .npy files, and provides prediction formatting,
    evaluation (KITTI- or Waymo-style) and gt-database creation utilities.
    """

    def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None):
        """Initialize the dataset for the split given by DATA_SPLIT[mode]."""
        super().__init__(
            dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger
        )
        self.data_path = self.root_path / self.dataset_cfg.PROCESSED_DATA_TAG
        self.split = self.dataset_cfg.DATA_SPLIT[self.mode]
        # One sequence name per line in ImageSets/<split>.txt.
        split_dir = self.root_path / 'ImageSets' / (self.split + '.txt')
        self.sample_sequence_list = [x.strip() for x in open(split_dir).readlines()]

        self.infos = []
        self.frame_ids = []
        self.include_waymo_data(self.mode)

        # Shared memory caching is only used while training.
        self.use_shared_memory = self.dataset_cfg.get('USE_SHARED_MEMORY', False) and self.training
        if self.use_shared_memory:
            self.shared_memory_file_limit = self.dataset_cfg.get('SHARED_MEMORY_FILE_LIMIT', 0x7FFFFFFF)
            self.load_data_to_shared_memory()

    def set_split(self, split):
        """Re-initialize the dataset for a different split (used by info creation)."""
        super().__init__(
            dataset_cfg=self.dataset_cfg, class_names=self.class_names, training=self.training,
            root_path=self.root_path, logger=self.logger
        )
        self.split = split
        split_dir = self.root_path / 'ImageSets' / (self.split + '.txt')
        self.sample_sequence_list = [x.strip() for x in open(split_dir).readlines()]
        self.infos = []
        self.include_waymo_data(self.mode)

    def include_waymo_data(self, mode):
        """Load per-sequence info pickles into self.infos, optionally subsampled."""
        print('Loading Waymo dataset')
        waymo_infos = []

        num_skipped_infos = 0
        for k in range(len(self.sample_sequence_list)):
            sequence_name = os.path.splitext(self.sample_sequence_list[k])[0]
            info_path = self.data_path / sequence_name / ('%s.pkl' % sequence_name)
            info_path = self.check_sequence_name_with_all_version(info_path)
            if not info_path.exists():
                # Sequence not preprocessed yet; skip but keep count.
                num_skipped_infos += 1
                continue
            with open(info_path, 'rb') as f:
                infos = pickle.load(f)
                waymo_infos.extend(infos)

        self.infos.extend(waymo_infos[:])
        print('Total skipped info %s' % num_skipped_infos)
        print('Total samples for Waymo dataset: %d' % (len(waymo_infos)))

        # Keep every k-th frame when SAMPLED_INTERVAL > 1.
        if self.dataset_cfg.SAMPLED_INTERVAL[mode] > 1:
            sampled_waymo_infos = []
            for k in range(0, len(self.infos), self.dataset_cfg.SAMPLED_INTERVAL[mode]):
                sampled_waymo_infos.append(self.infos[k])
            self.infos = sampled_waymo_infos
            # print('Total sampled samples for Waymo dataset: %d' % len(self.infos))

    def load_data_to_shared_memory(self):
        """Cache point clouds into /dev/shm (one entry per frame), split across ranks."""
        print(f'Loading training data to shared memory (file limit={self.shared_memory_file_limit})')

        cur_rank, num_gpus = common_utils.get_dist_info()
        all_infos = self.infos[:self.shared_memory_file_limit] \
            if self.shared_memory_file_limit < len(self.infos) else self.infos
        # Each rank loads a strided subset to parallelize the copy.
        cur_infos = all_infos[cur_rank::num_gpus]
        for info in cur_infos:
            pc_info = info['point_cloud']
            sequence_name = pc_info['lidar_sequence']
            sample_idx = pc_info['sample_idx']

            sa_key = f'{sequence_name}___{sample_idx}'
            if os.path.exists(f"/dev/shm/{sa_key}"):
                continue

            points = self.get_lidar(sequence_name, sample_idx)
            common_utils.sa_create(f"shm://{sa_key}", points)

        # NOTE(review): called unconditionally — requires an initialized
        # process group even for single-GPU runs; verify against launcher.
        dist.barrier()
        print('Training data has been saved to shared memory')

    def clean_shared_memory(self):
        """Remove this rank's shared-memory entries created by load_data_to_shared_memory."""
        print(f'Clean training data from shared memory (file limit={self.shared_memory_file_limit})')

        cur_rank, num_gpus = common_utils.get_dist_info()
        all_infos = self.infos[:self.shared_memory_file_limit] \
            if self.shared_memory_file_limit < len(self.infos) else self.infos
        cur_infos = all_infos[cur_rank::num_gpus]
        for info in cur_infos:
            pc_info = info['point_cloud']
            sequence_name = pc_info['lidar_sequence']
            sample_idx = pc_info['sample_idx']

            sa_key = f'{sequence_name}___{sample_idx}'
            if not os.path.exists(f"/dev/shm/{sa_key}"):
                continue

            SharedArray.delete(f"shm://{sa_key}")

        if num_gpus > 1:
            dist.barrier()
        print('Training data has been deleted from shared memory')

    @staticmethod
    def check_sequence_name_with_all_version(sequence_file):
        """Resolve a sequence path across known file-naming variants.

        Tries '<prefix>_segment' prefixes (training/validation/testing) and a
        '_with_camera_labels' suffix removal; returns the first existing path,
        or the original path unchanged if nothing matches.
        """
        if not sequence_file.exists():
            found_sequence_file = sequence_file
            for pre_text in ['training', 'validation', 'testing']:
                if not sequence_file.exists():
                    temp_sequence_file = Path(str(sequence_file).replace('segment', pre_text + '_segment'))
                    if temp_sequence_file.exists():
                        found_sequence_file = temp_sequence_file
                        break
            if not found_sequence_file.exists():
                found_sequence_file = Path(str(sequence_file).replace('_with_camera_labels', ''))
            if found_sequence_file.exists():
                sequence_file = found_sequence_file
        return sequence_file

    def get_infos(self, raw_data_path, save_path, num_workers=multiprocessing.cpu_count(), has_label=True, sampled_interval=1):
        """Preprocess raw sequences in parallel and return the flattened info list."""
        from functools import partial
        from . import waymo_utils
        print('---------------The waymo sample interval is %d, total sequecnes is %d-----------------'
              % (sampled_interval, len(self.sample_sequence_list)))

        process_single_sequence = partial(
            waymo_utils.process_single_sequence,
            save_path=save_path, sampled_interval=sampled_interval, has_label=has_label
        )
        sample_sequence_file_list = [
            self.check_sequence_name_with_all_version(raw_data_path / sequence_file)
            for sequence_file in self.sample_sequence_list
        ]

        with multiprocessing.Pool(num_workers) as p:
            sequence_infos = list(tqdm(p.imap(process_single_sequence, sample_sequence_file_list),
                                       total=len(sample_sequence_file_list)))

        # Flatten: one list of frame infos across all sequences.
        all_sequences_infos = [item for infos in sequence_infos for item in infos]
        return all_sequences_infos

    def get_lidar(self, sequence_name, sample_idx):
        """Load one frame's points, drop no-label-zone points, squash intensity."""
        lidar_file = self.data_path / sequence_name / ('%04d.npy' % sample_idx)
        point_features = np.load(lidar_file)  # (N, 7): [x, y, z, intensity, elongation, NLZ_flag]

        points_all, NLZ_flag = point_features[:, 0:5], point_features[:, 5]
        if not self.dataset_cfg.get('DISABLE_NLZ_FLAG_ON_POINTS', False):
            points_all = points_all[NLZ_flag == -1]
        # tanh compresses raw intensity into [-1, 1].
        points_all[:, 3] = np.tanh(points_all[:, 3])
        return points_all

    def __len__(self):
        if self._merge_all_iters_to_one_epoch:
            return len(self.infos) * self.total_epochs

        return len(self.infos)

    def __getitem__(self, index):
        """Return one processed training/eval sample dict for the given index."""
        if self._merge_all_iters_to_one_epoch:
            index = index % len(self.infos)

        info = copy.deepcopy(self.infos[index])
        pc_info = info['point_cloud']
        sequence_name = pc_info['lidar_sequence']
        sample_idx = pc_info['sample_idx']

        # Prefer the shared-memory cache when available for this index.
        if self.use_shared_memory and index < self.shared_memory_file_limit:
            sa_key = f'{sequence_name}___{sample_idx}'
            points = SharedArray.attach(f"shm://{sa_key}").copy()
        else:
            points = self.get_lidar(sequence_name, sample_idx)

        input_dict = {
            'points': points,
            'frame_id': info['frame_id'],
        }

        if 'annos' in info:
            annos = info['annos']
            annos = common_utils.drop_info_with_name(annos, name='unknown')

            if self.dataset_cfg.get('INFO_WITH_FAKELIDAR', False):
                gt_boxes_lidar = box_utils.boxes3d_kitti_fakelidar_to_lidar(annos['gt_boxes_lidar'])
            else:
                gt_boxes_lidar = annos['gt_boxes_lidar']

            if self.training and self.dataset_cfg.get('FILTER_EMPTY_BOXES_FOR_TRAIN', False):
                mask = (annos['num_points_in_gt'] > 0)  # filter empty boxes
                annos['name'] = annos['name'][mask]
                gt_boxes_lidar = gt_boxes_lidar[mask]
                annos['num_points_in_gt'] = annos['num_points_in_gt'][mask]

            input_dict.update({
                'gt_names': annos['name'],
                'gt_boxes': gt_boxes_lidar,
                'num_points_in_gt': annos.get('num_points_in_gt', None)
            })

        # Expose the full frame-id list to downstream processing (active learning).
        input_dict['sample_id_list'] = self.frame_ids
        data_dict = self.prepare_data(data_dict=input_dict)
        data_dict['metadata'] = info.get('metadata', info['frame_id'])
        data_dict.pop('num_points_in_gt', None)
        return data_dict

    @staticmethod
    def generate_prediction_dicts(batch_dict, pred_dicts, class_names, output_path=None):
        """
        Convert model outputs into per-frame annotation dicts.

        Args:
            batch_dict:
                frame_id:
            pred_dicts: list of pred_dicts
                pred_boxes: (N, 7), Tensor
                pred_scores: (N), Tensor
                pred_labels: (N), Tensor
            class_names:
            output_path:

        Returns:
            list of per-frame dicts with 'name', 'score', 'boxes_lidar',
            'frame_id' and 'metadata' keys.
        """

        def get_template_prediction(num_samples):
            # Empty prediction skeleton, used for frames with no detections.
            ret_dict = {
                'name': np.zeros(num_samples), 'score': np.zeros(num_samples),
                'boxes_lidar': np.zeros([num_samples, 7])
            }
            return ret_dict

        def generate_single_sample_dict(box_dict):
            pred_scores = box_dict['pred_scores'].cpu().numpy()
            pred_boxes = box_dict['pred_boxes'].cpu().numpy()
            pred_labels = box_dict['pred_labels'].cpu().numpy()
            pred_dict = get_template_prediction(pred_scores.shape[0])
            if pred_scores.shape[0] == 0:
                return pred_dict

            # Labels are 1-based indices into class_names.
            pred_dict['name'] = np.array(class_names)[pred_labels - 1]
            pred_dict['score'] = pred_scores
            pred_dict['boxes_lidar'] = pred_boxes
            return pred_dict

        annos = []
        for index, box_dict in enumerate(pred_dicts):
            single_pred_dict = generate_single_sample_dict(box_dict)
            single_pred_dict['frame_id'] = batch_dict['frame_id'][index]
            single_pred_dict['metadata'] = batch_dict['metadata'][index]
            annos.append(single_pred_dict)

        return annos

    def evaluation(self, det_annos, class_names, **kwargs):
        """Evaluate detections with either the KITTI or official Waymo metrics.

        kwargs must contain 'eval_metric' set to 'kitti' or 'waymo'.
        """
        if 'annos' not in self.infos[0].keys():
            return 'No ground-truth boxes for evaluation', {}

        def kitti_eval(eval_det_annos, eval_gt_annos):
            from ..kitti.kitti_object_eval_python import eval as kitti_eval
            from ..kitti import kitti_utils

            # Remap Waymo class names onto their KITTI counterparts in place.
            map_name_to_kitti = {
                'Vehicle': 'Car',
                'Pedestrian': 'Pedestrian',
                'Cyclist': 'Cyclist',
                'Sign': 'Sign',
                'Car': 'Car'
            }
            kitti_utils.transform_annotations_to_kitti_format(eval_det_annos, map_name_to_kitti=map_name_to_kitti)
            kitti_utils.transform_annotations_to_kitti_format(
                eval_gt_annos, map_name_to_kitti=map_name_to_kitti,
                info_with_fakelidar=self.dataset_cfg.get('INFO_WITH_FAKELIDAR', False)
            )

            kitti_class_names = [map_name_to_kitti[x] for x in class_names]
            ap_result_str, ap_dict = kitti_eval.get_official_eval_result(
                gt_annos=eval_gt_annos, dt_annos=eval_det_annos, current_classes=kitti_class_names
            )
            return ap_result_str, ap_dict

        def waymo_eval(eval_det_annos, eval_gt_annos):
            from .waymo_eval import OpenPCDetWaymoDetectionMetricsEstimator
            # NOTE(review): `eval` shadows the builtin within this scope.
            eval = OpenPCDetWaymoDetectionMetricsEstimator()

            ap_dict = eval.waymo_evaluation(
                eval_det_annos, eval_gt_annos, class_name=class_names,
                distance_thresh=1000, fake_gt_infos=self.dataset_cfg.get('INFO_WITH_FAKELIDAR', False)
            )
            ap_result_str = '\n'
            for key in ap_dict:
                ap_dict[key] = ap_dict[key][0]
                ap_result_str += '%s: %.4f \n' % (key, ap_dict[key])

            return ap_result_str, ap_dict

        eval_det_annos = copy.deepcopy(det_annos)
        eval_gt_annos = [copy.deepcopy(info['annos']) for info in self.infos]

        if kwargs['eval_metric'] == 'kitti':
            ap_result_str, ap_dict = kitti_eval(eval_det_annos, eval_gt_annos)
        elif kwargs['eval_metric'] == 'waymo':
            ap_result_str, ap_dict = waymo_eval(eval_det_annos, eval_gt_annos)
        else:
            raise NotImplementedError

        return ap_result_str, ap_dict

    def create_groundtruth_database(self, info_path, save_path, used_classes=None, split='train', sampled_interval=10,
                                    processed_data_tag=None):
        """Crop per-object point clouds out of frames and save a gt-sampling database.

        Writes one .bin per ground-truth object plus a pickle of db infos and a
        stacked global .npy (used when gt sampling runs from shared memory).
        """
        database_save_path = save_path / ('%s_gt_database_%s_sampled_%d' % (processed_data_tag, split, sampled_interval))
        db_info_save_path = save_path / ('%s_waymo_dbinfos_%s_sampled_%d.pkl' % (processed_data_tag, split, sampled_interval))
        db_data_save_path = save_path / ('%s_gt_database_%s_sampled_%d_global.npy' % (processed_data_tag, split, sampled_interval))

        database_save_path.mkdir(parents=True, exist_ok=True)
        all_db_infos = {}
        with open(info_path, 'rb') as f:
            infos = pickle.load(f)

        point_offset_cnt = 0
        stacked_gt_points = []
        for k in range(0, len(infos), sampled_interval):
            print('gt_database sample: %d/%d' % (k + 1, len(infos)))
            info = infos[k]

            pc_info = info['point_cloud']
            sequence_name = pc_info['lidar_sequence']
            sample_idx = pc_info['sample_idx']
            points = self.get_lidar(sequence_name, sample_idx)

            annos = info['annos']
            names = annos['name']
            difficulty = annos['difficulty']
            gt_boxes = annos['gt_boxes_lidar']

            # Subsample the dominant classes: keep Vehicle crops from every
            # 4th frame and Pedestrian crops from every 2nd frame only.
            if k % 4 != 0 and len(names) > 0:
                mask = (names == 'Vehicle')
                names = names[~mask]
                difficulty = difficulty[~mask]
                gt_boxes = gt_boxes[~mask]

            if k % 2 != 0 and len(names) > 0:
                mask = (names == 'Pedestrian')
                names = names[~mask]
                difficulty = difficulty[~mask]
                gt_boxes = gt_boxes[~mask]

            num_obj = gt_boxes.shape[0]
            if num_obj == 0:
                continue

            # Assign every point to the box it falls in (-1 = none).
            box_idxs_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu(
                torch.from_numpy(points[:, 0:3]).unsqueeze(dim=0).float().cuda(),
                torch.from_numpy(gt_boxes[:, 0:7]).unsqueeze(dim=0).float().cuda()
            ).long().squeeze(dim=0).cpu().numpy()

            for i in range(num_obj):
                filename = '%s_%04d_%s_%d.bin' % (sequence_name, sample_idx, names[i], i)
                filepath = database_save_path / filename
                gt_points = points[box_idxs_of_pts == i]
                # Store points relative to the box center.
                gt_points[:, :3] -= gt_boxes[i, :3]

                if (used_classes is None) or names[i] in used_classes:
                    # NOTE(review): file opened in text mode but written with
                    # tofile() (binary payload) — confirm intended on this platform.
                    with open(filepath, 'w') as f:
                        gt_points.tofile(f)

                    db_path = str(filepath.relative_to(self.root_path))  # gt_database/xxxxx.bin
                    db_info = {'name': names[i], 'path': db_path, 'sequence_name': sequence_name,
                               'sample_idx': sample_idx, 'gt_idx': i, 'box3d_lidar': gt_boxes[i],
                               'num_points_in_gt': gt_points.shape[0], 'difficulty': difficulty[i]}

                    # it will be used if you choose to use shared memory for gt sampling
                    stacked_gt_points.append(gt_points)
                    db_info['global_data_offset'] = [point_offset_cnt, point_offset_cnt + gt_points.shape[0]]
                    point_offset_cnt += gt_points.shape[0]

                    if names[i] in all_db_infos:
                        all_db_infos[names[i]].append(db_info)
                    else:
                        all_db_infos[names[i]] = [db_info]
        for k, v in all_db_infos.items():
            print('Database %s: %d' % (k, len(v)))

        with open(db_info_save_path, 'wb') as f:
            pickle.dump(all_db_infos, f)

        # it will be used if you choose to use shared memory for gt sampling
        stacked_gt_points = np.concatenate(stacked_gt_points, axis=0)
        np.save(db_data_save_path, stacked_gt_points)
def create_waymo_infos(dataset_cfg, class_names, data_path, save_path,
                       raw_data_tag='raw_data', processed_data_tag='waymo_processed_data',
                       workers=min(16, multiprocessing.cpu_count())):
    """Generate train/val info pickles and the gt-sampling database for Waymo.

    Runs sequence preprocessing on CPU (CUDA disabled via env var) and then
    re-enables GPU 0 for the point-in-box cropping step.

    NOTE(review): the `workers` default is evaluated once at import time.
    """
    dataset = WaymoDataset(
        dataset_cfg=dataset_cfg, class_names=class_names, root_path=data_path,
        training=False, logger=common_utils.create_logger()
    )
    train_split, val_split = 'train', 'val'

    train_filename = save_path / ('%s_infos_%s.pkl' % (processed_data_tag, train_split))
    val_filename = save_path / ('%s_infos_%s.pkl' % (processed_data_tag, val_split))
    # Info generation is CPU-only: hide all GPUs from the worker pool.
    os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
    print('---------------Start to generate data infos---------------')

    dataset.set_split(train_split)
    waymo_infos_train = dataset.get_infos(
        raw_data_path=data_path / raw_data_tag,
        save_path=save_path / processed_data_tag, num_workers=workers, has_label=True,
        sampled_interval=1
    )
    with open(train_filename, 'wb') as f:
        pickle.dump(waymo_infos_train, f)
    print('----------------Waymo info train file is saved to %s----------------' % train_filename)

    dataset.set_split(val_split)
    waymo_infos_val = dataset.get_infos(
        raw_data_path=data_path / raw_data_tag,
        save_path=save_path / processed_data_tag, num_workers=workers, has_label=True,
        sampled_interval=1
    )
    with open(val_filename, 'wb') as f:
        pickle.dump(waymo_infos_val, f)
    print('----------------Waymo info val file is saved to %s----------------' % val_filename)

    print('---------------Start create groundtruth database for data augmentation---------------')
    # gt-database cropping uses CUDA kernels; restore access to GPU 0.
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    dataset.set_split(train_split)
    dataset.create_groundtruth_database(
        info_path=train_filename, save_path=save_path, split='train', sampled_interval=1,
        used_classes=['Vehicle', 'Pedestrian', 'Cyclist'], processed_data_tag=processed_data_tag
    )
    print('---------------Data preparation Done---------------')
if __name__ == '__main__':
    # CLI entry point: `--func create_waymo_infos --cfg_file <dataset yaml>`.
    import argparse

    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default=None, help='specify the config of dataset')
    parser.add_argument('--func', type=str, default='create_waymo_infos', help='')
    parser.add_argument('--processed_data_tag', type=str, default='waymo_processed_data_v0_5_0', help='')
    args = parser.parse_args()

    if args.func == 'create_waymo_infos':
        import yaml
        from easydict import EasyDict

        # BUG FIX: yaml.safe_load() selects its own loader and accepts no
        # `Loader` kwarg, so the old try/except around
        # `yaml.safe_load(open(...), Loader=yaml.FullLoader)` always raised
        # TypeError, leaked the first file handle, and only then fell back to
        # a plain safe_load. Load once, with the file properly closed.
        with open(args.cfg_file) as f:
            dataset_cfg = EasyDict(yaml.safe_load(f))

        ROOT_DIR = (Path(__file__).resolve().parent / '../../../').resolve()
        dataset_cfg.PROCESSED_DATA_TAG = args.processed_data_tag
        create_waymo_infos(
            dataset_cfg=dataset_cfg,
            class_names=['Vehicle', 'Pedestrian', 'Cyclist'],
            data_path=ROOT_DIR / 'data' / 'waymo',
            save_path=ROOT_DIR / 'data' / 'waymo',
            raw_data_tag='raw_data',
            processed_data_tag=args.processed_data_tag
        )
| 20,694 | 42.114583 | 131 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/datasets/lyft/lyft_dataset.py | import copy
import pickle
from pathlib import Path
import numpy as np
from tqdm import tqdm
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from ...utils import common_utils, box_utils
from ..dataset import DatasetTemplate
class LyftDataset(DatasetTemplate):
    """Lyft Level 5 dataset loader with multi-sweep lidar aggregation.

    Loads precomputed info pickles, merges up to MAX_SWEEPS lidar sweeps per
    sample, and supports KITTI-style and Lyft-mAP evaluation plus gt-database
    creation.
    """

    def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None):
        """Initialize the dataset rooted at <root>/<VERSION>."""
        self.root_path = (root_path if root_path is not None else Path(dataset_cfg.DATA_PATH)) / dataset_cfg.VERSION
        super().__init__(
            dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=self.root_path, logger=logger
        )
        self.infos = []
        self.include_lyft_data(self.mode)

    def include_lyft_data(self, mode):
        """Load all existing info pickles listed in INFO_PATH[mode] into self.infos."""
        self.logger.info('Loading lyft dataset')
        lyft_infos = []

        for info_path in self.dataset_cfg.INFO_PATH[mode]:
            info_path = self.root_path / info_path
            if not info_path.exists():
                continue
            with open(info_path, 'rb') as f:
                infos = pickle.load(f)
                lyft_infos.extend(infos)

        self.infos.extend(lyft_infos)
        self.logger.info('Total samples for lyft dataset: %d' % (len(lyft_infos)))

    @staticmethod
    def remove_ego_points(points, center_radius=1.0):
        """Drop points inside a small box around the ego vehicle (1.5x radius in x)."""
        mask = ~((np.abs(points[:, 0]) < center_radius*1.5) & (np.abs(points[:, 1]) < center_radius))
        return points[mask]

    def get_sweep(self, sweep_info):
        """Load one past sweep, transform it into the key-frame coordinates.

        Returns:
            (points (N, 4), time_lags (N, 1)) — constant per-sweep time lag.
        """
        lidar_path = self.root_path / sweep_info['lidar_path']
        points_sweep = np.fromfile(str(lidar_path), dtype=np.float32, count=-1)
        # Guard against truncated files: keep only complete 5-float records.
        if points_sweep.shape[0] % 5 != 0:
            points_sweep = points_sweep[: points_sweep.shape[0] - (points_sweep.shape[0] % 5)]
        points_sweep = points_sweep.reshape([-1, 5])[:, :4]
        points_sweep = self.remove_ego_points(points_sweep).T
        if sweep_info['transform_matrix'] is not None:
            num_points = points_sweep.shape[1]
            # Homogeneous transform of xyz into the reference frame.
            points_sweep[:3, :] = sweep_info['transform_matrix'].dot(
                np.vstack((points_sweep[:3, :], np.ones(num_points))))[:3, :]

        cur_times = sweep_info['time_lag'] * np.ones((1, points_sweep.shape[1]))
        return points_sweep.T, cur_times.T

    def get_lidar_with_sweeps(self, index, max_sweeps=1):
        """Load the key-frame cloud plus (max_sweeps - 1) random past sweeps.

        Returns points with an appended per-point time channel (0 for the
        key frame). NOTE(review): np.random.choice(..., replace=False)
        requires len(info['sweeps']) >= max_sweeps - 1 — confirm upstream
        info generation guarantees this.
        """
        info = self.infos[index]
        lidar_path = self.root_path / info['lidar_path']
        points = np.fromfile(str(lidar_path), dtype=np.float32, count=-1)
        if points.shape[0] % 5 != 0:
            points = points[: points.shape[0] - (points.shape[0] % 5)]
        points = points.reshape([-1, 5])[:, :4]

        sweep_points_list = [points]
        sweep_times_list = [np.zeros((points.shape[0], 1))]

        for k in np.random.choice(len(info['sweeps']), max_sweeps - 1, replace=False):
            points_sweep, times_sweep = self.get_sweep(info['sweeps'][k])
            sweep_points_list.append(points_sweep)
            sweep_times_list.append(times_sweep)

        points = np.concatenate(sweep_points_list, axis=0)
        times = np.concatenate(sweep_times_list, axis=0).astype(points.dtype)

        points = np.concatenate((points, times), axis=1)
        return points

    def __len__(self):
        if self._merge_all_iters_to_one_epoch:
            return len(self.infos) * self.total_epochs

        return len(self.infos)

    def __getitem__(self, index):
        """Return one processed sample dict with aggregated sweeps."""
        if self._merge_all_iters_to_one_epoch:
            index = index % len(self.infos)

        info = copy.deepcopy(self.infos[index])
        points = self.get_lidar_with_sweeps(index, max_sweeps=self.dataset_cfg.MAX_SWEEPS)

        input_dict = {
            'points': points,
            'frame_id': Path(info['lidar_path']).stem,
            'metadata': {'token': info['token']}
        }

        if 'gt_boxes' in info:
            input_dict.update({
                'gt_boxes': info['gt_boxes'],
                'gt_names': info['gt_names']
            })

        data_dict = self.prepare_data(data_dict=input_dict)

        return data_dict

    def generate_prediction_dicts(self, batch_dict, pred_dicts, class_names, output_path=None):
        """
        Convert model outputs into per-frame annotation dicts.

        Args:
            batch_dict:
                frame_id:
            pred_dicts: list of pred_dicts
                pred_boxes: (N, 7), Tensor
                pred_scores: (N), Tensor
                pred_labels: (N), Tensor
            class_names:
            output_path:

        Returns:
            list of per-frame dicts with 'name', 'score', 'boxes_lidar',
            'pred_labels', 'frame_id' and 'metadata' keys.
        """
        def get_template_prediction(num_samples):
            # Empty prediction skeleton, used for frames with no detections.
            ret_dict = {
                'name': np.zeros(num_samples), 'score': np.zeros(num_samples),
                'boxes_lidar': np.zeros([num_samples, 7]), 'pred_labels': np.zeros(num_samples)
            }
            return ret_dict

        def generate_single_sample_dict(box_dict):
            pred_scores = box_dict['pred_scores'].cpu().numpy()
            pred_boxes = box_dict['pred_boxes'].cpu().numpy()
            pred_labels = box_dict['pred_labels'].cpu().numpy()
            pred_dict = get_template_prediction(pred_scores.shape[0])
            if pred_scores.shape[0] == 0:
                return pred_dict

            # Labels are 1-based indices into class_names.
            pred_dict['name'] = np.array(class_names)[pred_labels - 1]
            pred_dict['score'] = pred_scores
            pred_dict['boxes_lidar'] = pred_boxes
            pred_dict['pred_labels'] = pred_labels

            return pred_dict

        annos = []
        for index, box_dict in enumerate(pred_dicts):
            single_pred_dict = generate_single_sample_dict(box_dict)
            single_pred_dict['frame_id'] = batch_dict['frame_id'][index]
            single_pred_dict['metadata'] = batch_dict['metadata'][index]
            annos.append(single_pred_dict)

        return annos

    def kitti_eval(self, eval_det_annos, eval_gt_annos, class_names):
        """Evaluate with the KITTI official metrics after remapping class names."""
        from ..kitti.kitti_object_eval_python import eval as kitti_eval
        from ..kitti import kitti_utils

        map_name_to_kitti = {
            'car': 'Car',
            'pedestrian': 'Pedestrian',
            'truck': 'Truck',
            'bicycle': 'Cyclist',
            'motorcycle': 'Cyclist'
        }
        kitti_utils.transform_to_kitti_format(eval_det_annos, map_name_to_kitti=map_name_to_kitti)
        kitti_utils.transform_to_kitti_format(
            eval_gt_annos, map_name_to_kitti=map_name_to_kitti,
            info_with_fakelidar=self.dataset_cfg.get('INFO_WITH_FAKELIDAR', False)
        )

        kitti_class_names = [map_name_to_kitti[x] for x in class_names]
        ap_result_str, ap_dict = kitti_eval.get_official_eval_result(
            gt_annos=eval_gt_annos, dt_annos=eval_det_annos, current_classes=kitti_class_names
        )
        return ap_result_str, ap_dict

    def evaluation(self, det_annos, class_names, **kwargs):
        """Dispatch to KITTI or Lyft evaluation based on kwargs['eval_metric']."""
        if kwargs['eval_metric'] == 'kitti':
            eval_det_annos = copy.deepcopy(det_annos)
            eval_gt_annos = copy.deepcopy(self.infos)
            return self.kitti_eval(eval_det_annos, eval_gt_annos, class_names)
        elif kwargs['eval_metric'] == 'lyft':
            return self.lyft_eval(det_annos, class_names,
                                  iou_thresholds=self.dataset_cfg.EVAL_LYFT_IOU_LIST)
        else:
            raise NotImplementedError

    def lyft_eval(self, det_annos, class_names, iou_thresholds=[0.5]):
        """Evaluate mAP with the Lyft SDK at the given IoU thresholds.

        NOTE(review): mutable default argument; harmless here since the list
        is never mutated, but keep it in mind if editing.
        """
        from lyft_dataset_sdk.lyftdataset import LyftDataset as Lyft
        from . import lyft_utils
        # from lyft_dataset_sdk.eval.detection.mAP_evaluation import get_average_precisions
        from .lyft_mAP_eval.lyft_eval import get_average_precisions

        lyft = Lyft(json_path=self.root_path / 'data', data_path=self.root_path, verbose=True)

        det_lyft_boxes, sample_tokens = lyft_utils.convert_det_to_lyft_format(lyft, det_annos)
        gt_lyft_boxes = lyft_utils.load_lyft_gt_by_tokens(lyft, sample_tokens)

        average_precisions = get_average_precisions(gt_lyft_boxes, det_lyft_boxes, class_names, iou_thresholds)

        ap_result_str, ap_dict = lyft_utils.format_lyft_results(average_precisions, class_names, iou_thresholds, version=self.dataset_cfg.VERSION)

        return ap_result_str, ap_dict

    def create_groundtruth_database(self, used_classes=None, max_sweeps=10):
        """Crop per-object point clouds and save the gt-sampling database + infos."""
        import torch

        database_save_path = self.root_path / f'gt_database'
        db_info_save_path = self.root_path / f'lyft_dbinfos_{max_sweeps}sweeps.pkl'

        database_save_path.mkdir(parents=True, exist_ok=True)
        all_db_infos = {}

        for idx in tqdm(range(len(self.infos))):
            sample_idx = idx
            info = self.infos[idx]
            points = self.get_lidar_with_sweeps(idx, max_sweeps=max_sweeps)
            gt_boxes = info['gt_boxes']
            gt_names = info['gt_names']

            # Assign every point to the box it falls in (-1 = none).
            box_idxs_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu(
                torch.from_numpy(points[:, 0:3]).unsqueeze(dim=0).float().cuda(),
                torch.from_numpy(gt_boxes[:, 0:7]).unsqueeze(dim=0).float().cuda()
            ).long().squeeze(dim=0).cpu().numpy()

            for i in range(gt_boxes.shape[0]):
                filename = '%s_%s_%d.bin' % (sample_idx, gt_names[i], i)
                filepath = database_save_path / filename
                gt_points = points[box_idxs_of_pts == i]

                # Store points relative to the box center.
                gt_points[:, :3] -= gt_boxes[i, :3]
                # NOTE(review): file opened in text mode but written with
                # tofile() (binary payload) — confirm intended on this platform.
                with open(filepath, 'w') as f:
                    gt_points.tofile(f)

                if (used_classes is None) or gt_names[i] in used_classes:
                    db_path = str(filepath.relative_to(self.root_path))  # gt_database/xxxxx.bin
                    db_info = {'name': gt_names[i], 'path': db_path, 'image_idx': sample_idx, 'gt_idx': i,
                               'box3d_lidar': gt_boxes[i], 'num_points_in_gt': gt_points.shape[0]}
                    if gt_names[i] in all_db_infos:
                        all_db_infos[gt_names[i]].append(db_info)
                    else:
                        all_db_infos[gt_names[i]] = [db_info]
        for k, v in all_db_infos.items():
            print('Database %s: %d' % (k, len(v)))

        with open(db_info_save_path, 'wb') as f:
            pickle.dump(all_db_infos, f)
def create_lyft_info(version, data_path, save_path, split, max_sweeps=10):
    """Generate Lyft train/val (or test) info pickles for the given version.

    Args:
        version: one of 'trainval', 'one_scene', 'test'; selects the scene
            split files under ImageSets.
        data_path: dataset root (version subdirectory is appended).
        save_path: output root (version / optional split appended).
        split: optional sub-split directory name, or None.
        max_sweeps: number of lidar sweeps to index per sample.
    """
    from lyft_dataset_sdk.lyftdataset import LyftDataset
    from . import lyft_utils
    data_path = data_path / version
    save_path = save_path / version
    split_path = data_path.parent / 'ImageSets'

    if split is not None:
        save_path = save_path / split
        split_path = split_path / split

    save_path.mkdir(exist_ok=True)

    assert version in ['trainval', 'one_scene', 'test']

    # Pick the scene-name list files for the requested version.
    if version == 'trainval':
        train_split_path = split_path / 'train.txt'
        val_split_path = split_path / 'val.txt'
    elif version == 'test':
        train_split_path = split_path / 'test.txt'
        val_split_path = None
    elif version == 'one_scene':
        train_split_path = split_path / 'one_scene.txt'
        val_split_path = split_path / 'one_scene.txt'
    else:
        raise NotImplementedError

    train_scenes = [x.strip() for x in open(train_split_path).readlines()] if train_split_path.exists() else []
    val_scenes = [x.strip() for x in open(val_split_path).readlines()] if val_split_path.exists() else []

    lyft = LyftDataset(json_path=data_path / 'data', data_path=data_path, verbose=True)

    # Keep only scenes actually present on disk, then map names -> tokens.
    available_scenes = lyft_utils.get_available_scenes(lyft)
    available_scene_names = [s['name'] for s in available_scenes]
    train_scenes = list(filter(lambda x: x in available_scene_names, train_scenes))
    val_scenes = list(filter(lambda x: x in available_scene_names, val_scenes))
    train_scenes = set([available_scenes[available_scene_names.index(s)]['token'] for s in train_scenes])
    val_scenes = set([available_scenes[available_scene_names.index(s)]['token'] for s in val_scenes])

    print('%s: train scene(%d), val scene(%d)' % (version, len(train_scenes), len(val_scenes)))

    train_lyft_infos, val_lyft_infos = lyft_utils.fill_trainval_infos(
        data_path=data_path, lyft=lyft, train_scenes=train_scenes, val_scenes=val_scenes,
        test='test' in version, max_sweeps=max_sweeps
    )

    if version == 'test':
        print('test sample: %d' % len(train_lyft_infos))
        with open(save_path / f'lyft_infos_test.pkl', 'wb') as f:
            pickle.dump(train_lyft_infos, f)
    else:
        print('train sample: %d, val sample: %d' % (len(train_lyft_infos), len(val_lyft_infos)))
        with open(save_path / f'lyft_infos_train.pkl', 'wb') as f:
            pickle.dump(train_lyft_infos, f)
        with open(save_path / f'lyft_infos_val.pkl', 'wb') as f:
            pickle.dump(val_lyft_infos, f)
if __name__ == '__main__':
    # CLI entry point: `--func create_lyft_infos --cfg_file <dataset yaml>`.
    import yaml
    import argparse
    from pathlib import Path
    from easydict import EasyDict

    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default=None, help='specify the config of dataset')
    parser.add_argument('--func', type=str, default='create_lyft_infos', help='')
    parser.add_argument('--version', type=str, default='trainval', help='')
    parser.add_argument('--split', type=str, default=None, help='')
    parser.add_argument('--max_sweeps', type=int, default=10, help='')
    args = parser.parse_args()

    if args.func == 'create_lyft_infos':
        # BUG FIX: yaml.safe_load() selects its own loader and accepts no
        # `Loader` kwarg, so the old try/except around
        # `yaml.safe_load(open(...), Loader=yaml.FullLoader)` always raised
        # TypeError, leaked the first file handle, and only then fell back to
        # a plain safe_load. Load once, with the file properly closed.
        with open(args.cfg_file) as f:
            dataset_cfg = EasyDict(yaml.safe_load(f))

        ROOT_DIR = (Path(__file__).resolve().parent / '../../../').resolve()
        dataset_cfg.VERSION = args.version
        dataset_cfg.MAX_SWEEPS = args.max_sweeps
        create_lyft_info(
            version=dataset_cfg.VERSION,
            data_path=ROOT_DIR / 'data' / 'lyft',
            save_path=ROOT_DIR / 'data' / 'lyft',
            split=args.split,
            max_sweeps=dataset_cfg.MAX_SWEEPS
        )

        # Build the dataset once so the gt-sampling database is generated too.
        lyft_dataset = LyftDataset(
            dataset_cfg=dataset_cfg, class_names=None,
            root_path=ROOT_DIR / 'data' / 'lyft',
            logger=common_utils.create_logger(), training=True
        )
        lyft_dataset.create_groundtruth_database(max_sweeps=dataset_cfg.MAX_SWEEPS)
| 14,392 | 40.598266 | 146 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/datasets/nuscenes/nuscenes_utils.py | """
The NuScenes data pre-processing and evaluation is modified from
https://github.com/traveller59/second.pytorch and https://github.com/poodarchu/Det3D
"""
import operator
from functools import reduce
from pathlib import Path
import numpy as np
import tqdm
from nuscenes.utils.data_classes import Box
from nuscenes.utils.geometry_utils import transform_matrix
from pyquaternion import Quaternion
map_name_from_general_to_detection = {
'human.pedestrian.adult': 'pedestrian',
'human.pedestrian.child': 'pedestrian',
'human.pedestrian.wheelchair': 'ignore',
'human.pedestrian.stroller': 'ignore',
'human.pedestrian.personal_mobility': 'ignore',
'human.pedestrian.police_officer': 'pedestrian',
'human.pedestrian.construction_worker': 'pedestrian',
'animal': 'ignore',
'vehicle.car': 'car',
'vehicle.motorcycle': 'motorcycle',
'vehicle.bicycle': 'bicycle',
'vehicle.bus.bendy': 'bus',
'vehicle.bus.rigid': 'bus',
'vehicle.truck': 'truck',
'vehicle.construction': 'construction_vehicle',
'vehicle.emergency.ambulance': 'ignore',
'vehicle.emergency.police': 'ignore',
'vehicle.trailer': 'trailer',
'movable_object.barrier': 'barrier',
'movable_object.trafficcone': 'traffic_cone',
'movable_object.pushable_pullable': 'ignore',
'movable_object.debris': 'ignore',
'static_object.bicycle_rack': 'ignore',
}
cls_attr_dist = {
'barrier': {
'cycle.with_rider': 0,
'cycle.without_rider': 0,
'pedestrian.moving': 0,
'pedestrian.sitting_lying_down': 0,
'pedestrian.standing': 0,
'vehicle.moving': 0,
'vehicle.parked': 0,
'vehicle.stopped': 0,
},
'bicycle': {
'cycle.with_rider': 2791,
'cycle.without_rider': 8946,
'pedestrian.moving': 0,
'pedestrian.sitting_lying_down': 0,
'pedestrian.standing': 0,
'vehicle.moving': 0,
'vehicle.parked': 0,
'vehicle.stopped': 0,
},
'bus': {
'cycle.with_rider': 0,
'cycle.without_rider': 0,
'pedestrian.moving': 0,
'pedestrian.sitting_lying_down': 0,
'pedestrian.standing': 0,
'vehicle.moving': 9092,
'vehicle.parked': 3294,
'vehicle.stopped': 3881,
},
'car': {
'cycle.with_rider': 0,
'cycle.without_rider': 0,
'pedestrian.moving': 0,
'pedestrian.sitting_lying_down': 0,
'pedestrian.standing': 0,
'vehicle.moving': 114304,
'vehicle.parked': 330133,
'vehicle.stopped': 46898,
},
'construction_vehicle': {
'cycle.with_rider': 0,
'cycle.without_rider': 0,
'pedestrian.moving': 0,
'pedestrian.sitting_lying_down': 0,
'pedestrian.standing': 0,
'vehicle.moving': 882,
'vehicle.parked': 11549,
'vehicle.stopped': 2102,
},
'ignore': {
'cycle.with_rider': 307,
'cycle.without_rider': 73,
'pedestrian.moving': 0,
'pedestrian.sitting_lying_down': 0,
'pedestrian.standing': 0,
'vehicle.moving': 165,
'vehicle.parked': 400,
'vehicle.stopped': 102,
},
'motorcycle': {
'cycle.with_rider': 4233,
'cycle.without_rider': 8326,
'pedestrian.moving': 0,
'pedestrian.sitting_lying_down': 0,
'pedestrian.standing': 0,
'vehicle.moving': 0,
'vehicle.parked': 0,
'vehicle.stopped': 0,
},
'pedestrian': {
'cycle.with_rider': 0,
'cycle.without_rider': 0,
'pedestrian.moving': 157444,
'pedestrian.sitting_lying_down': 13939,
'pedestrian.standing': 46530,
'vehicle.moving': 0,
'vehicle.parked': 0,
'vehicle.stopped': 0,
},
'traffic_cone': {
'cycle.with_rider': 0,
'cycle.without_rider': 0,
'pedestrian.moving': 0,
'pedestrian.sitting_lying_down': 0,
'pedestrian.standing': 0,
'vehicle.moving': 0,
'vehicle.parked': 0,
'vehicle.stopped': 0,
},
'trailer': {
'cycle.with_rider': 0,
'cycle.without_rider': 0,
'pedestrian.moving': 0,
'pedestrian.sitting_lying_down': 0,
'pedestrian.standing': 0,
'vehicle.moving': 3421,
'vehicle.parked': 19224,
'vehicle.stopped': 1895,
},
'truck': {
'cycle.with_rider': 0,
'cycle.without_rider': 0,
'pedestrian.moving': 0,
'pedestrian.sitting_lying_down': 0,
'pedestrian.standing': 0,
'vehicle.moving': 21339,
'vehicle.parked': 55626,
'vehicle.stopped': 11097,
},
}
def get_available_scenes(nusc):
    """
    Return the subset of nusc.scene whose first lidar frame exists on disk.

    Only the first LIDAR_TOP frame of each scene is probed: if its file is
    missing the whole scene is treated as absent (e.g. a partial download).
    """
    print('total scene num:', len(nusc.scene))
    usable_scenes = []
    for scene in nusc.scene:
        scene_rec = nusc.get('scene', scene['token'])
        first_sample = nusc.get('sample', scene_rec['first_sample_token'])
        sd_rec = nusc.get('sample_data', first_sample['data']['LIDAR_TOP'])
        lidar_path, boxes, _ = nusc.get_sample_data(sd_rec['token'])
        if Path(lidar_path).exists():
            usable_scenes.append(scene)
    print('exist scene num:', len(usable_scenes))
    return usable_scenes
def get_sample_data(nusc, sample_data_token, selected_anntokens=None):
    """
    Returns the data path as well as all annotations related to that sample_data.
    Note that the boxes are transformed into the current sensor's coordinate frame.
    Args:
        nusc: NuScenes devkit instance.
        sample_data_token: Sample_data token.
        selected_anntokens: If provided only return the selected annotation.
    Returns:
        (data_path, box_list, cam_intrinsic): sensor data file path, Box list
        in the sensor frame, and the camera intrinsic matrix (None when the
        sensor is not a camera).
    """
    # Retrieve sensor & pose records
    sd_record = nusc.get('sample_data', sample_data_token)
    cs_record = nusc.get('calibrated_sensor', sd_record['calibrated_sensor_token'])
    sensor_record = nusc.get('sensor', cs_record['sensor_token'])
    pose_record = nusc.get('ego_pose', sd_record['ego_pose_token'])

    data_path = nusc.get_sample_data_path(sample_data_token)

    if sensor_record['modality'] == 'camera':
        cam_intrinsic = np.array(cs_record['camera_intrinsic'])
        imsize = (sd_record['width'], sd_record['height'])
    else:
        cam_intrinsic = imsize = None

    # Retrieve all sample annotations and map to sensor coordinate system.
    if selected_anntokens is not None:
        boxes = list(map(nusc.get_box, selected_anntokens))
    else:
        boxes = nusc.get_boxes(sample_data_token)

    # Make list of Box objects including coord system transforms.
    box_list = []
    for box in boxes:
        box.velocity = nusc.box_velocity(box.token)
        # Move box to ego vehicle coord system (inverse of the ego pose)
        box.translate(-np.array(pose_record['translation']))
        box.rotate(Quaternion(pose_record['rotation']).inverse)

        # Move box to sensor coord system (inverse of the sensor extrinsics)
        box.translate(-np.array(cs_record['translation']))
        box.rotate(Quaternion(cs_record['rotation']).inverse)

        box_list.append(box)

    return data_path, box_list, cam_intrinsic
def quaternion_yaw(q: Quaternion) -> float:
    """
    Calculate the yaw angle from a quaternion.
    Note that this only works for a quaternion that represents a box in lidar or global coordinate frame.
    It does not work for a box in the camera frame.
    :param q: Quaternion of interest.
    :return: Yaw angle in radians.
    """
    # The rotated x axis is the first column of the rotation matrix; its
    # projection onto the xy plane gives the heading.
    rotated_x = q.rotation_matrix[:, 0]
    return np.arctan2(rotated_x[1], rotated_x[0])
def fill_trainval_infos(data_path, nusc, train_scenes, val_scenes, test=False, max_sweeps=10):
    """
    Build one info dict per nuScenes sample (key frame), including the chain
    of past lidar sweeps and (unless test=True) the GT boxes in the lidar
    frame. Samples are routed to train/val by their scene token.

    Returns:
        (train_nusc_infos, val_nusc_infos): lists of info dicts.
    """
    train_nusc_infos = []
    val_nusc_infos = []
    progress_bar = tqdm.tqdm(total=len(nusc.sample), desc='create_info', dynamic_ncols=True)

    ref_chan = 'LIDAR_TOP'  # The radar channel from which we track back n sweeps to aggregate the point cloud.
    chan = 'LIDAR_TOP'  # The reference channel of the current sample_rec that the point clouds are mapped to.

    for index, sample in enumerate(nusc.sample):
        progress_bar.update()

        # Reference (key-frame) lidar record, calibration and ego pose.
        ref_sd_token = sample['data'][ref_chan]
        ref_sd_rec = nusc.get('sample_data', ref_sd_token)
        ref_cs_rec = nusc.get('calibrated_sensor', ref_sd_rec['calibrated_sensor_token'])
        ref_pose_rec = nusc.get('ego_pose', ref_sd_rec['ego_pose_token'])
        ref_time = 1e-6 * ref_sd_rec['timestamp']  # microseconds -> seconds
        ref_lidar_path, ref_boxes, _ = get_sample_data(nusc, ref_sd_token)

        ref_cam_front_token = sample['data']['CAM_FRONT']
        ref_cam_path, _, ref_cam_intrinsic = nusc.get_sample_data(ref_cam_front_token)

        # Homogeneous transform from ego car frame to reference frame
        ref_from_car = transform_matrix(
            ref_cs_rec['translation'], Quaternion(ref_cs_rec['rotation']), inverse=True
        )

        # Homogeneous transformation matrix from global to _current_ ego car frame
        car_from_global = transform_matrix(
            ref_pose_rec['translation'], Quaternion(ref_pose_rec['rotation']), inverse=True,
        )

        info = {
            'lidar_path': Path(ref_lidar_path).relative_to(data_path).__str__(),
            'cam_front_path': Path(ref_cam_path).relative_to(data_path).__str__(),
            'cam_intrinsic': ref_cam_intrinsic,
            'token': sample['token'],
            'sweeps': [],
            'ref_from_car': ref_from_car,
            'car_from_global': car_from_global,
            'timestamp': ref_time,
        }

        sample_data_token = sample['data'][chan]
        curr_sd_rec = nusc.get('sample_data', sample_data_token)
        sweeps = []

        # Walk backwards through the 'prev' chain collecting max_sweeps - 1
        # sweeps; when the chain runs out, pad by repeating the last entry
        # (or the key frame itself with an identity/None transform).
        while len(sweeps) < max_sweeps - 1:
            if curr_sd_rec['prev'] == '':
                if len(sweeps) == 0:
                    sweep = {
                        'lidar_path': Path(ref_lidar_path).relative_to(data_path).__str__(),
                        'sample_data_token': curr_sd_rec['token'],
                        'transform_matrix': None,
                        'time_lag': curr_sd_rec['timestamp'] * 0,
                    }
                    sweeps.append(sweep)
                else:
                    sweeps.append(sweeps[-1])
            else:
                curr_sd_rec = nusc.get('sample_data', curr_sd_rec['prev'])

                # Get past pose
                current_pose_rec = nusc.get('ego_pose', curr_sd_rec['ego_pose_token'])
                global_from_car = transform_matrix(
                    current_pose_rec['translation'], Quaternion(current_pose_rec['rotation']), inverse=False,
                )

                # Homogeneous transformation matrix from sensor coordinate frame to ego car frame.
                current_cs_rec = nusc.get(
                    'calibrated_sensor', curr_sd_rec['calibrated_sensor_token']
                )
                car_from_current = transform_matrix(
                    current_cs_rec['translation'], Quaternion(current_cs_rec['rotation']), inverse=False,
                )

                # Compose sweep-sensor -> sweep-ego -> global -> ref-ego -> ref-sensor
                tm = reduce(np.dot, [ref_from_car, car_from_global, global_from_car, car_from_current])

                lidar_path = nusc.get_sample_data_path(curr_sd_rec['token'])

                time_lag = ref_time - 1e-6 * curr_sd_rec['timestamp']

                sweep = {
                    'lidar_path': Path(lidar_path).relative_to(data_path).__str__(),
                    'sample_data_token': curr_sd_rec['token'],
                    'transform_matrix': tm,
                    'global_from_car': global_from_car,
                    'car_from_current': car_from_current,
                    'time_lag': time_lag,
                }
                sweeps.append(sweep)

        info['sweeps'] = sweeps

        assert len(info['sweeps']) == max_sweeps - 1, \
            f"sweep {curr_sd_rec['token']} only has {len(info['sweeps'])} sweeps, " \
            f"you should duplicate to sweep num {max_sweeps - 1}"

        if not test:
            annotations = [nusc.get('sample_annotation', token) for token in sample['anns']]

            # the filtering gives 0.5~1 map improvement
            num_lidar_pts = np.array([anno['num_lidar_pts'] for anno in annotations])
            num_radar_pts = np.array([anno['num_radar_pts'] for anno in annotations])
            mask = (num_lidar_pts + num_radar_pts > 0)

            locs = np.array([b.center for b in ref_boxes]).reshape(-1, 3)
            dims = np.array([b.wlh for b in ref_boxes]).reshape(-1, 3)[:, [1, 0, 2]]  # wlh == > dxdydz (lwh)
            velocity = np.array([b.velocity for b in ref_boxes]).reshape(-1, 3)
            rots = np.array([quaternion_yaw(b.orientation) for b in ref_boxes]).reshape(-1, 1)
            names = np.array([b.name for b in ref_boxes])
            tokens = np.array([b.token for b in ref_boxes])
            # gt box layout: x, y, z, dx, dy, dz, yaw, vx, vy (9 columns)
            gt_boxes = np.concatenate([locs, dims, rots, velocity[:, :2]], axis=1)

            assert len(annotations) == len(gt_boxes) == len(velocity)

            info['gt_boxes'] = gt_boxes[mask, :]
            info['gt_boxes_velocity'] = velocity[mask, :]
            # raw category names -> detection classes (or 'ignore')
            info['gt_names'] = np.array([map_name_from_general_to_detection[name] for name in names])[mask]
            info['gt_boxes_token'] = tokens[mask]
            info['num_lidar_pts'] = num_lidar_pts[mask]
            info['num_radar_pts'] = num_radar_pts[mask]

        # Route by scene membership; anything not in train_scenes goes to val.
        if sample['scene_token'] in train_scenes:
            train_nusc_infos.append(info)
        else:
            val_nusc_infos.append(info)

    progress_bar.close()
    return train_nusc_infos, val_nusc_infos
def boxes_lidar_to_nusenes(det_info):
    """
    Convert one detection dict ('boxes_lidar', 'score', 'pred_labels') into a
    list of nuScenes Box objects in the lidar frame.
    """
    boxes3d = det_info['boxes_lidar']
    scores = det_info['score']
    labels = det_info['pred_labels']

    nusc_boxes = []
    for box3d, score, label in zip(boxes3d, scores, labels):
        # Heading angle -> quaternion about the z axis.
        quat = Quaternion(axis=[0, 0, 1], radians=box3d[6])
        # 9-column boxes carry (vx, vy); otherwise the velocity is unknown.
        if boxes3d.shape[1] == 9:
            velocity = (box3d[7], box3d[8], 0.0)
        else:
            velocity = (0.0, 0.0, 0.0)
        nusc_boxes.append(Box(
            box3d[:3],
            box3d[[4, 3, 5]],  # (dx, dy, dz) -> (w, l, h)
            quat, label=label, score=score, velocity=velocity,
        ))
    return nusc_boxes
def lidar_nusc_box_to_global(nusc, boxes, sample_token):
    """
    Transform Box objects from the lidar sensor frame of `sample_token`
    into the global frame (sensor -> ego -> global).
    """
    s_record = nusc.get('sample', sample_token)
    sample_data_token = s_record['data']['LIDAR_TOP']

    sd_record = nusc.get('sample_data', sample_data_token)
    cs_record = nusc.get('calibrated_sensor', sd_record['calibrated_sensor_token'])
    sensor_record = nusc.get('sensor', cs_record['sensor_token'])
    pose_record = nusc.get('ego_pose', sd_record['ego_pose_token'])

    data_path = nusc.get_sample_data_path(sample_data_token)

    transformed = []
    for box in boxes:
        # sensor frame -> ego vehicle frame
        box.rotate(Quaternion(cs_record['rotation']))
        box.translate(np.array(cs_record['translation']))
        # ego vehicle frame -> global frame
        box.rotate(Quaternion(pose_record['rotation']))
        box.translate(np.array(pose_record['translation']))
        transformed.append(box)
    return transformed
def transform_det_annos_to_nusc_annos(det_annos, nusc):
    """
    Convert per-frame detection dicts into the nuScenes submission format:
    a {'results': {sample_token: [annos]}, 'meta': None} dict. Each box is
    transformed to the global frame and assigned an attribute name.
    """
    nusc_annos = {
        'results': {},
        'meta': None,
    }

    for det in det_annos:
        annos = []
        box_list = boxes_lidar_to_nusenes(det)
        box_list = lidar_nusc_box_to_global(
            nusc=nusc, boxes=box_list, sample_token=det['metadata']['token']
        )

        for k, box in enumerate(box_list):
            name = det['name'][k]
            # Heuristic attribute assignment: boxes moving faster than
            # 0.2 m/s get a "moving" attribute for their class group.
            if np.sqrt(box.velocity[0] ** 2 + box.velocity[1] ** 2) > 0.2:
                if name in ['car', 'construction_vehicle', 'bus', 'truck', 'trailer']:
                    attr = 'vehicle.moving'
                elif name in ['bicycle', 'motorcycle']:
                    attr = 'cycle.with_rider'
                else:
                    attr = None
            else:
                if name in ['pedestrian']:
                    attr = 'pedestrian.standing'
                elif name in ['bus']:
                    attr = 'vehicle.stopped'
                else:
                    attr = None
            # Fallback: most frequent attribute for this class in the train
            # split (cls_attr_dist). NOTE(review): for classes whose counts
            # are all zero (e.g. barrier) this picks the first key.
            attr = attr if attr is not None else max(
                cls_attr_dist[name].items(), key=operator.itemgetter(1))[0]
            nusc_anno = {
                'sample_token': det['metadata']['token'],
                'translation': box.center.tolist(),
                'size': box.wlh.tolist(),
                'rotation': box.orientation.elements.tolist(),
                'velocity': box.velocity[:2].tolist(),
                'detection_name': name,
                'detection_score': box.score,
                'attribute_name': attr
            }
            annos.append(nusc_anno)

        nusc_annos['results'].update({det["metadata"]["token"]: annos})

    return nusc_annos
def format_nuscene_results(metrics, class_names, version='default'):
    """
    Render the nuScenes evaluation metrics dict as a human-readable report.

    Returns:
        result: formatted multi-line string (per-class APs/TP errors plus
            the averaged metrics).
        details: flat dict with each TP error, 'mAP' and 'NDS'.
    """
    parts = ['----------------Nuscene %s results-----------------\n' % version]
    for name in class_names:
        ap_dict = metrics['label_aps'][name]
        tp_dict = metrics['label_tp_errors'][name]

        threshs = ', '.join(list(ap_dict.keys()))
        err_name = ', '.join([key.split('_')[0] for key in tp_dict.keys()])

        parts.append(f'***{name} error@{err_name} | AP@{threshs}\n')
        parts.append(', '.join(['%.2f' % v for v in tp_dict.values()]) + ' | ')
        parts.append(', '.join(['%.2f' % (v * 100) for v in ap_dict.values()]))
        parts.append(f" | mean AP: {metrics['mean_dist_aps'][name]}")
        parts.append('\n')

    parts.append('--------------average performance-------------\n')
    details = {}
    for key, val in metrics['tp_errors'].items():
        parts.append('%s:\t %.4f\n' % (key, val))
        details[key] = val

    parts.append('mAP:\t %.4f\n' % metrics['mean_ap'])
    parts.append('NDS:\t %.4f\n' % metrics['nd_score'])
    details['mAP'] = metrics['mean_ap']
    details['NDS'] = metrics['nd_score']

    return ''.join(parts), details
| 18,474 | 35.876248 | 111 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/datasets/nuscenes/nuscenes_dataset.py | import copy
import pickle
from pathlib import Path
import numpy as np
from tqdm import tqdm
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from ...utils import common_utils
from ..dataset import DatasetTemplate
class NuScenesDataset(DatasetTemplate):
    def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None):
        """
        Args:
            dataset_cfg: EasyDict config; must provide DATA_PATH, VERSION,
                INFO_PATH and optionally BALANCED_RESAMPLING.
            class_names: detection classes of interest.
            training: whether this dataset feeds training.
            root_path: optional override of dataset_cfg.DATA_PATH.
            logger: logger used for progress messages.
        """
        # Data lives under <root>/<version> (e.g. data/nuscenes/v1.0-trainval).
        root_path = (root_path if root_path is not None else Path(dataset_cfg.DATA_PATH)) / dataset_cfg.VERSION
        super().__init__(
            dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger
        )
        self.infos = []
        self.include_nuscenes_data(self.mode)
        # Optional class-balanced resampling (CBGS), training only.
        if self.training and self.dataset_cfg.get('BALANCED_RESAMPLING', False):
            self.infos = self.balanced_infos_resampling(self.infos)
def include_nuscenes_data(self, mode):
self.logger.info('Loading NuScenes dataset')
nuscenes_infos = []
for info_path in self.dataset_cfg.INFO_PATH[mode]:
info_path = self.root_path / info_path
if not info_path.exists():
continue
with open(info_path, 'rb') as f:
infos = pickle.load(f)
nuscenes_infos.extend(infos)
self.infos.extend(nuscenes_infos)
self.logger.info('Total samples for NuScenes dataset: %d' % (len(nuscenes_infos)))
    def balanced_infos_resampling(self, infos):
        """
        Class-balanced sampling of nuScenes dataset from https://arxiv.org/abs/1908.09492
        """
        if self.class_names is None:
            return infos

        # Bucket infos by every class they contain (one info can land in
        # several buckets).
        cls_infos = {name: [] for name in self.class_names}
        for info in infos:
            for name in set(info['gt_names']):
                if name in self.class_names:
                    cls_infos[name].append(info)

        duplicated_samples = sum([len(v) for _, v in cls_infos.items()])
        cls_dist = {k: len(v) / duplicated_samples for k, v in cls_infos.items()}

        sampled_infos = []

        # Sample each class bucket (with replacement, via np.random.choice)
        # so every class contributes ~1/num_classes of the final list.
        frac = 1.0 / len(self.class_names)
        ratios = [frac / v for v in cls_dist.values()]

        for cur_cls_infos, ratio in zip(list(cls_infos.values()), ratios):
            sampled_infos += np.random.choice(
                cur_cls_infos, int(len(cur_cls_infos) * ratio)
            ).tolist()
        self.logger.info('Total samples after balanced resampling: %s' % (len(sampled_infos)))

        cls_infos_new = {name: [] for name in self.class_names}
        for info in sampled_infos:
            for name in set(info['gt_names']):
                if name in self.class_names:
                    cls_infos_new[name].append(info)

        # NOTE(review): cls_dist_new is computed but never used or returned --
        # presumably kept for inspecting the post-resampling distribution.
        cls_dist_new = {k: len(v) / len(sampled_infos) for k, v in cls_infos_new.items()}

        return sampled_infos
    def get_sweep(self, sweep_info):
        """
        Load one past lidar sweep, drop points near the ego vehicle, and
        transform it into the key frame. Returns (points (N, 4), times (N, 1)).
        """
        def remove_ego_points(points, center_radius=1.0):
            # Discard points inside a square of half-size `center_radius`
            # around the sensor origin (ego-vehicle returns).
            mask = ~((np.abs(points[:, 0]) < center_radius) & (np.abs(points[:, 1]) < center_radius))
            return points[mask]

        lidar_path = self.root_path / sweep_info['lidar_path']
        # Raw nuScenes lidar: 5 floats per point; keep x, y, z, intensity.
        points_sweep = np.fromfile(str(lidar_path), dtype=np.float32, count=-1).reshape([-1, 5])[:, :4]
        points_sweep = remove_ego_points(points_sweep).T
        if sweep_info['transform_matrix'] is not None:
            # Apply the homogeneous sweep->key-frame transform to xyz.
            num_points = points_sweep.shape[1]
            points_sweep[:3, :] = sweep_info['transform_matrix'].dot(
                np.vstack((points_sweep[:3, :], np.ones(num_points))))[:3, :]

        # One time-lag value per point (seconds behind the key frame).
        cur_times = sweep_info['time_lag'] * np.ones((1, points_sweep.shape[1]))
        return points_sweep.T, cur_times.T
    def get_lidar_with_sweeps(self, index, max_sweeps=1):
        """
        Load the key-frame cloud plus (max_sweeps - 1) randomly chosen past
        sweeps (already aligned to the key frame) and append a per-point
        time-lag channel. Returns an (N, 5) array: x, y, z, intensity, dt.
        """
        info = self.infos[index]
        lidar_path = self.root_path / info['lidar_path']
        points = np.fromfile(str(lidar_path), dtype=np.float32, count=-1).reshape([-1, 5])[:, :4]

        sweep_points_list = [points]
        sweep_times_list = [np.zeros((points.shape[0], 1))]  # key frame: dt = 0

        # NOTE(review): choice(..., replace=False) requires
        # len(info['sweeps']) >= max_sweeps - 1; fill_trainval_infos pads the
        # sweep list to exactly that length.
        for k in np.random.choice(len(info['sweeps']), max_sweeps - 1, replace=False):
            points_sweep, times_sweep = self.get_sweep(info['sweeps'][k])
            sweep_points_list.append(points_sweep)
            sweep_times_list.append(times_sweep)

        points = np.concatenate(sweep_points_list, axis=0)
        times = np.concatenate(sweep_times_list, axis=0).astype(points.dtype)

        points = np.concatenate((points, times), axis=1)
        return points
def __len__(self):
if self._merge_all_iters_to_one_epoch:
return len(self.infos) * self.total_epochs
return len(self.infos)
    def __getitem__(self, index):
        """
        Load one sample (multi-sweep point cloud plus GT boxes, when present)
        and run it through the shared prepare_data pipeline.
        """
        if self._merge_all_iters_to_one_epoch:
            # All epochs merged into one long epoch: wrap the index.
            index = index % len(self.infos)

        info = copy.deepcopy(self.infos[index])
        points = self.get_lidar_with_sweeps(index, max_sweeps=self.dataset_cfg.MAX_SWEEPS)

        input_dict = {
            'points': points,
            'frame_id': Path(info['lidar_path']).stem,
            'metadata': {'token': info['token']}
        }

        if 'gt_boxes' in info:
            if self.dataset_cfg.get('FILTER_MIN_POINTS_IN_GT', False):
                # Keep boxes containing at least FILTER_MIN_POINTS_IN_GT lidar points.
                mask = (info['num_lidar_pts'] > self.dataset_cfg.FILTER_MIN_POINTS_IN_GT - 1)
            else:
                mask = None

            input_dict.update({
                'gt_names': info['gt_names'] if mask is None else info['gt_names'][mask],
                'gt_boxes': info['gt_boxes'] if mask is None else info['gt_boxes'][mask]
            })

        data_dict = self.prepare_data(data_dict=input_dict)

        if self.dataset_cfg.get('SET_NAN_VELOCITY_TO_ZEROS', False):
            # Some GT velocities are NaN (e.g. first annotation of a track).
            gt_boxes = data_dict['gt_boxes']
            gt_boxes[np.isnan(gt_boxes)] = 0
            data_dict['gt_boxes'] = gt_boxes

        if not self.dataset_cfg.PRED_VELOCITY and 'gt_boxes' in data_dict:
            # Drop the (vx, vy) columns; keep xyz, dims, heading and label.
            data_dict['gt_boxes'] = data_dict['gt_boxes'][:, [0, 1, 2, 3, 4, 5, 6, -1]]

        return data_dict
    @staticmethod
    def generate_prediction_dicts(batch_dict, pred_dicts, class_names, output_path=None):
        """
        Args:
            batch_dict:
                frame_id:
            pred_dicts: list of pred_dicts
                pred_boxes: (N, 7), Tensor
                pred_scores: (N), Tensor
                pred_labels: (N), Tensor
            class_names:
            output_path:

        Returns:
            annos: one dict per batch element with numpy arrays 'name',
                'score', 'boxes_lidar', 'pred_labels' plus 'frame_id' and
                'metadata' copied from batch_dict.
        """
        def get_template_prediction(num_samples):
            # Empty-result skeleton so downstream code always sees the same keys.
            ret_dict = {
                'name': np.zeros(num_samples), 'score': np.zeros(num_samples),
                'boxes_lidar': np.zeros([num_samples, 7]), 'pred_labels': np.zeros(num_samples)
            }
            return ret_dict

        def generate_single_sample_dict(box_dict):
            pred_scores = box_dict['pred_scores'].cpu().numpy()
            pred_boxes = box_dict['pred_boxes'].cpu().numpy()
            pred_labels = box_dict['pred_labels'].cpu().numpy()
            pred_dict = get_template_prediction(pred_scores.shape[0])
            if pred_scores.shape[0] == 0:
                return pred_dict

            # Labels are 1-based indices into class_names.
            pred_dict['name'] = np.array(class_names)[pred_labels - 1]
            pred_dict['score'] = pred_scores
            pred_dict['boxes_lidar'] = pred_boxes
            pred_dict['pred_labels'] = pred_labels

            return pred_dict

        annos = []
        for index, box_dict in enumerate(pred_dicts):
            single_pred_dict = generate_single_sample_dict(box_dict)
            single_pred_dict['frame_id'] = batch_dict['frame_id'][index]
            single_pred_dict['metadata'] = batch_dict['metadata'][index]
            annos.append(single_pred_dict)

        return annos
    def evaluation(self, det_annos, class_names, **kwargs):
        """
        Run the official nuScenes detection evaluation on `det_annos`.

        Writes results_nusc.json under kwargs['output_path'], invokes
        NuScenesEval, and returns (result_string, result_dict).
        """
        import json
        from nuscenes.nuscenes import NuScenes
        from . import nuscenes_utils
        nusc = NuScenes(version=self.dataset_cfg.VERSION, dataroot=str(self.root_path), verbose=True)
        nusc_annos = nuscenes_utils.transform_det_annos_to_nusc_annos(det_annos, nusc)
        nusc_annos['meta'] = {
            'use_camera': False,
            'use_lidar': True,
            'use_radar': False,
            'use_map': False,
            'use_external': False,
        }

        output_path = Path(kwargs['output_path'])
        output_path.mkdir(exist_ok=True, parents=True)
        res_path = str(output_path / 'results_nusc.json')
        with open(res_path, 'w') as f:
            json.dump(nusc_annos, f)

        self.logger.info(f'The predictions of NuScenes have been saved to {res_path}')

        if self.dataset_cfg.VERSION == 'v1.0-test':
            # Test split ships without labels; nothing to score.
            return 'No ground-truth annotations for evaluation', {}

        from nuscenes.eval.detection.config import config_factory
        from nuscenes.eval.detection.evaluate import NuScenesEval

        eval_set_map = {
            'v1.0-mini': 'mini_val',
            'v1.0-trainval': 'val',
            'v1.0-test': 'test'
        }
        try:
            eval_version = 'detection_cvpr_2019'
            eval_config = config_factory(eval_version)
        except:
            # Older devkit versions register the config under a short name.
            eval_version = 'cvpr_2019'
            eval_config = config_factory(eval_version)

        nusc_eval = NuScenesEval(
            nusc,
            config=eval_config,
            result_path=res_path,
            eval_set=eval_set_map[self.dataset_cfg.VERSION],
            output_dir=str(output_path),
            verbose=True,
        )
        metrics_summary = nusc_eval.main(plot_examples=0, render_curves=False)

        # NuScenesEval dumps its summary to disk; re-read it for formatting.
        with open(output_path / 'metrics_summary.json', 'r') as f:
            metrics = json.load(f)

        result_str, result_dict = nuscenes_utils.format_nuscene_results(metrics, self.class_names, version=eval_version)
        return result_str, result_dict
def create_groundtruth_database(self, used_classes=None, max_sweeps=10):
import torch
database_save_path = self.root_path / f'gt_database_{max_sweeps}sweeps_withvelo'
db_info_save_path = self.root_path / f'nuscenes_dbinfos_{max_sweeps}sweeps_withvelo.pkl'
database_save_path.mkdir(parents=True, exist_ok=True)
all_db_infos = {}
for idx in tqdm(range(len(self.infos))):
sample_idx = idx
info = self.infos[idx]
points = self.get_lidar_with_sweeps(idx, max_sweeps=max_sweeps)
gt_boxes = info['gt_boxes']
gt_names = info['gt_names']
box_idxs_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu(
torch.from_numpy(points[:, 0:3]).unsqueeze(dim=0).float().cuda(),
torch.from_numpy(gt_boxes[:, 0:7]).unsqueeze(dim=0).float().cuda()
).long().squeeze(dim=0).cpu().numpy()
for i in range(gt_boxes.shape[0]):
filename = '%s_%s_%d.bin' % (sample_idx, gt_names[i], i)
filepath = database_save_path / filename
gt_points = points[box_idxs_of_pts == i]
gt_points[:, :3] -= gt_boxes[i, :3]
with open(filepath, 'w') as f:
gt_points.tofile(f)
if (used_classes is None) or gt_names[i] in used_classes:
db_path = str(filepath.relative_to(self.root_path)) # gt_database/xxxxx.bin
db_info = {'name': gt_names[i], 'path': db_path, 'image_idx': sample_idx, 'gt_idx': i,
'box3d_lidar': gt_boxes[i], 'num_points_in_gt': gt_points.shape[0]}
if gt_names[i] in all_db_infos:
all_db_infos[gt_names[i]].append(db_info)
else:
all_db_infos[gt_names[i]] = [db_info]
for k, v in all_db_infos.items():
print('Database %s: %d' % (k, len(v)))
with open(db_info_save_path, 'wb') as f:
pickle.dump(all_db_infos, f)
def create_nuscenes_info(version, data_path, save_path, max_sweeps=10):
    """
    Generate the train/val (or test) info pickles for one nuScenes version.

    Args:
        version: one of 'v1.0-trainval', 'v1.0-test', 'v1.0-mini'.
        data_path: dataset root (version subfolder is appended).
        save_path: output root for the pickles (version subfolder appended).
        max_sweeps: number of lidar sweeps aggregated per sample.
    """
    from nuscenes.nuscenes import NuScenes
    from nuscenes.utils import splits
    from . import nuscenes_utils
    data_path = data_path / version
    save_path = save_path / version

    assert version in ['v1.0-trainval', 'v1.0-test', 'v1.0-mini']
    # Official scene-name splits from the devkit; for the test version the
    # "train" list holds the test scenes and val is empty.
    if version == 'v1.0-trainval':
        train_scenes = splits.train
        val_scenes = splits.val
    elif version == 'v1.0-test':
        train_scenes = splits.test
        val_scenes = []
    elif version == 'v1.0-mini':
        train_scenes = splits.mini_train
        val_scenes = splits.mini_val
    else:
        raise NotImplementedError

    nusc = NuScenes(version=version, dataroot=data_path, verbose=True)
    # Keep only scenes whose lidar data actually exists on disk, then map
    # scene names to scene tokens.
    available_scenes = nuscenes_utils.get_available_scenes(nusc)
    available_scene_names = [s['name'] for s in available_scenes]
    train_scenes = list(filter(lambda x: x in available_scene_names, train_scenes))
    val_scenes = list(filter(lambda x: x in available_scene_names, val_scenes))
    train_scenes = set([available_scenes[available_scene_names.index(s)]['token'] for s in train_scenes])
    val_scenes = set([available_scenes[available_scene_names.index(s)]['token'] for s in val_scenes])

    print('%s: train scene(%d), val scene(%d)' % (version, len(train_scenes), len(val_scenes)))

    train_nusc_infos, val_nusc_infos = nuscenes_utils.fill_trainval_infos(
        data_path=data_path, nusc=nusc, train_scenes=train_scenes, val_scenes=val_scenes,
        test='test' in version, max_sweeps=max_sweeps
    )

    if version == 'v1.0-test':
        print('test sample: %d' % len(train_nusc_infos))
        with open(save_path / f'nuscenes_infos_{max_sweeps}sweeps_test.pkl', 'wb') as f:
            pickle.dump(train_nusc_infos, f)
    else:
        print('train sample: %d, val sample: %d' % (len(train_nusc_infos), len(val_nusc_infos)))
        with open(save_path / f'nuscenes_infos_{max_sweeps}sweeps_train.pkl', 'wb') as f:
            pickle.dump(train_nusc_infos, f)
        with open(save_path / f'nuscenes_infos_{max_sweeps}sweeps_val.pkl', 'wb') as f:
            pickle.dump(val_nusc_infos, f)
if __name__ == '__main__':
    # CLI entry point: build the info pickles and the GT database, e.g.
    #   python -m pcdet.datasets.nuscenes.nuscenes_dataset --func create_nuscenes_infos --cfg_file ...
    import yaml
    import argparse
    from pathlib import Path
    from easydict import EasyDict

    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default=None, help='specify the config of dataset')
    parser.add_argument('--func', type=str, default='create_nuscenes_infos', help='')
    parser.add_argument('--version', type=str, default='v1.0-trainval', help='')
    args = parser.parse_args()

    if args.func == 'create_nuscenes_infos':
        dataset_cfg = EasyDict(yaml.safe_load(open(args.cfg_file)))
        ROOT_DIR = (Path(__file__).resolve().parent / '../../../').resolve()
        dataset_cfg.VERSION = args.version
        create_nuscenes_info(
            version=dataset_cfg.VERSION,
            data_path=ROOT_DIR / 'data' / 'nuscenes',
            save_path=ROOT_DIR / 'data' / 'nuscenes',
            max_sweeps=dataset_cfg.MAX_SWEEPS,
        )

        nuscenes_dataset = NuScenesDataset(
            dataset_cfg=dataset_cfg, class_names=None,
            root_path=ROOT_DIR / 'data' / 'nuscenes',
            logger=common_utils.create_logger(), training=True
        )
        nuscenes_dataset.create_groundtruth_database(max_sweeps=dataset_cfg.MAX_SWEEPS)
| 15,327 | 39.874667 | 120 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/datasets/pandaset/pandaset_dataset.py | """
Dataset from Pandaset (Hesai)
"""
import pickle
import os
try:
import pandas as pd
import pandaset as ps
except:
pass
import numpy as np
from ..dataset import DatasetTemplate
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
import torch
def pose_dict_to_numpy(pose):
    """
    Flatten a pandaset pose dict into a 7-element list
    [x, y, z, qw, qx, qy, qz] so it can be passed through the network.
    """
    position = pose["position"]
    heading = pose["heading"]
    return [position["x"], position["y"], position["z"],
            heading["w"], heading["x"], heading["y"], heading["z"]]
def pose_numpy_to_dict(pose):
    """
    Convert a flat 7-element pose vector [x, y, z, qw, qx, qy, qz] back into
    a pandaset pose dict (inverse of pose_dict_to_numpy).

    FIX: the previous docstring was copy-pasted from pose_dict_to_numpy and
    described the opposite conversion.
    """
    pose_dict = {'position':
                 {'x': pose[0],
                  'y': pose[1],
                  'z': pose[2]},
                 'heading':
                 {'w': pose[3],
                  'x': pose[4],
                  'y': pose[5],
                  'z': pose[6]}}
    return pose_dict
class PandasetDataset(DatasetTemplate):
    def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None):
        """
        Args:
            root_path: optional dataset root; defaults to dataset_cfg.DATA_PATH.
            dataset_cfg: EasyDict config (DATA_PATH, DATA_SPLIT, INFO_PATH, ...).
            class_names: detection classes of interest.
            training: whether this dataset feeds training.
            logger: optional logger for progress messages.
        """
        super().__init__(
            dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger
        )
        # NOTE(review): the root_path default is resolved only for the local
        # ps.DataSet handle below; the base class received the original
        # (possibly None) root_path -- presumably it applies its own default.
        if root_path is None:
            root_path = self.dataset_cfg.DATA_PATH
        # pandaset devkit handle over <root>/dataset
        self.dataset = ps.DataSet(os.path.join(root_path, 'dataset'))
        self.split = self.dataset_cfg.DATA_SPLIT[self.mode]
        self.pandaset_infos = []
        self.include_pandaset_infos(self.mode)
def include_pandaset_infos(self, mode):
if self.logger is not None:
self.logger.info('Loading PandaSet dataset')
pandaset_infos = []
for info_path in self.dataset_cfg.INFO_PATH[mode]:
info_path = os.path.join(self.root_path, info_path)
if not os.path.exists(info_path):
continue
with open(info_path, 'rb') as f:
infos = pickle.load(f)
pandaset_infos.extend(infos)
self.pandaset_infos.extend(pandaset_infos)
if self.logger is not None:
self.logger.info('Total samples for PandaSet dataset ({}): {}'.format(self.mode, len(pandaset_infos)))
def set_split(self, split):
self.sequences = self.dataset_cfg.SEQUENCES[split]
self.split = split
    def __len__(self):
        # One dataset item per (sequence, frame) info entry.
        return len(self.pandaset_infos)
    def __getitem__(self, index):
        """
        To support a custom dataset, implement this function to load the raw data (and labels), then transform them to
        the unified normative coordinate (x pointing forward, z pointing upwards) and call the function self.prepare_data() to process the data and send them
        to the model.

        Args:
            index: sample index into self.pandaset_infos

        Returns:
            data_dict produced by self.prepare_data()
        """
        info = self.pandaset_infos[index]
        seq_idx = info['sequence']
        pose = self._get_pose(info)
        points = self._get_lidar_points(info, pose)
        boxes, labels, zrot_world_to_ego = self._get_annotations(info, pose)
        pose_np = pose_dict_to_numpy(pose)

        input_dict = {'points': points,
                      'gt_boxes': boxes,
                      'gt_names': labels,
                      'sequence': int(seq_idx),
                      'frame_idx': info['frame_idx'],
                      'zrot_world_to_ego': zrot_world_to_ego,
                      'pose': pose_dict_to_numpy(pose)
                      }
        # seq_idx is converted to int because strings can't be passed to
        # the gpu in pytorch
        # zrot_world_to_ego is propagated in order to be able to transform the
        # predicted yaws back to world coordinates

        data_dict = self.prepare_data(data_dict=input_dict)
        return data_dict
    def _get_pose(self, info):
        """Return the lidar pose dict for the frame described by `info`."""
        seq_idx = info['sequence']
        # get pose for world to ego frame transformation
        if self.dataset[seq_idx].lidar.poses is None:
            # poses are lazy-loaded by the pandaset devkit
            self.dataset[seq_idx].lidar._load_poses()
        pose = self.dataset[seq_idx].lidar.poses[info['frame_idx']]
        return pose
    def _get_lidar_points(self, info, pose):
        """
        Get lidar in the unified normative coordinate system for a given frame
        The intensity is normalized to fit [0-1] range (pandaset intensity is in [0-255] range)
        Returns an (N, 4) float32 array: x, y, z, intensity.
        """
        # get lidar points
        lidar_frame = pd.read_pickle(info['lidar_path'])
        # get points for the required lidar(s) only (-1 keeps both devices)
        device = self.dataset_cfg.get('LIDAR_DEVICE', 0)
        if device != -1:
            lidar_frame = lidar_frame[lidar_frame.d == device]
        world_points = lidar_frame.to_numpy()
        # There seems to be issues with the automatic deletion of pandas datasets sometimes
        del lidar_frame
        points_loc = world_points[:, :3]
        points_int = world_points[:, 3]

        # normalize intensity from [0, 255] to [0, 1]
        points_int = points_int / 255

        ego_points = ps.geometry.lidar_points_to_ego(points_loc, pose)

        # Pandaset ego coordinates are:
        # - x pointing to the right
        # - y pointing to the front
        # - z pointing up

        # Normative coordinates are:
        # - x pointing foreward
        # - y pointings to the left
        # - z pointing to the top

        # So a transformation is required to the match the normative coordinates
        ego_points = ego_points[:, [1, 0, 2]]  # switch x and y
        ego_points[:, 1] = - ego_points[:, 1]  # revert y axis

        return np.append(ego_points, np.expand_dims(points_int, axis=1), axis=1).astype(np.float32)
def _get_annotations(self,info, pose):
    """
    Get box informations in the unified normative coordinate system for a given frame

    Args:
        info: per-frame info dict; provides 'cuboids_path'
        pose: world-to-ego pose dict of this frame
    Returns:
        ego_boxes: (M, 7) float32 [x, y, z, dx, dy, dz, yaw] in the normative ego frame
        labels: (M,) class names, remapped through dataset_cfg.TRAINING_CATEGORIES
        zrot_world_to_ego: rotation (rad) of the ego frame around world z, kept so
            predicted yaws can later be mapped back to world coordinates
    """
    # get boxes
    cuboids = pd.read_pickle(info["cuboids_path"])
    device = self.dataset_cfg.get('LIDAR_DEVICE', 0)
    if device != -1:
        # keep cuboids that are seen by a given device
        cuboids = cuboids[cuboids["cuboids.sensor_id"] != 1 - device]
    xs = cuboids['position.x'].to_numpy()
    ys = cuboids['position.y'].to_numpy()
    zs = cuboids['position.z'].to_numpy()
    dxs = cuboids['dimensions.x'].to_numpy()
    dys = cuboids['dimensions.y'].to_numpy()
    dzs = cuboids['dimensions.z'].to_numpy()
    yaws = cuboids['yaw'].to_numpy()
    labels = cuboids['label'].to_numpy()
    del cuboids  # There seem to be issues with the automatic deletion of pandas datasets sometimes
    # Remap raw pandaset labels to training categories; unknown labels pass through.
    labels = np.array([self.dataset_cfg.TRAINING_CATEGORIES.get(lab, lab)
                       for lab in labels] )
    # Compute the center points coordinates in ego coordinates
    centers = np.vstack([xs, ys, zs]).T
    ego_centers = ps.geometry.lidar_points_to_ego(centers, pose)
    # Compute the yaw in ego coordinates
    # The following implementation supposes that the pitch of the car is
    # negligible compared to its yaw, in order to be able to express the
    # bbox coordinates in the ego coordinate system with an {axis aligned
    # box + yaw} only representation
    yaxis_points_from_pose = ps.geometry.lidar_points_to_ego(np.array([[0, 0, 0], [0, 1., 0]]), pose)
    yaxis_from_pose = yaxis_points_from_pose[1, :] - yaxis_points_from_pose[0, :]
    if yaxis_from_pose[-1] >= 10**-1:
        # Warn (but do not fail) when the small-pitch assumption looks violated.
        if self.logger is not None:
            self.logger.warning("The car's pitch is supposed to be negligible " +
                                "sin(pitch) is >= 10**-1 ({})".format(yaxis_from_pose[-1]))
    # rotation angle in rads of the y axis around the z axis
    zrot_world_to_ego = np.arctan2(-yaxis_from_pose[0], yaxis_from_pose[1])
    ego_yaws = yaws + zrot_world_to_ego
    # Pandaset ego coordinates are:
    # - x pointing to the right
    # - y pointing to the front
    # - z pointing up
    # Normative coordinates are:
    # - x pointing forward
    # - y pointing to the left
    # - z pointing to the top
    # So a transformation is required to match the normative coordinates
    ego_xs = ego_centers[:, 1]
    ego_ys = -ego_centers[:, 0]
    ego_zs = ego_centers[:, 2]
    ego_dxs = dys
    ego_dys = dxs  # stays >= 0
    ego_dzs = dzs
    ego_boxes = np.vstack([ego_xs, ego_ys, ego_zs, ego_dxs, ego_dys, ego_dzs, ego_yaws]).T
    return ego_boxes.astype(np.float32), labels, zrot_world_to_ego
@staticmethod
def generate_prediction_dicts(batch_dict, pred_dicts, class_names, output_path=None):
    """
    To support a custom dataset, implement this function to receive the predicted results from the model, and then
    transform the unified normative coordinate to your required coordinate, and optionally save them to disk.

    Args:
        batch_dict: dict of original data from the dataloader
        pred_dicts: dict of predicted results from the model
            pred_boxes: (N, 7), Tensor
            pred_scores: (N), Tensor
            pred_labels: (N), Tensor
        class_names:
        output_path: if it is not None, save the results to this path
    Returns:
        annos: list of per-frame dicts holding a pandas DataFrame of predictions
        converted back to pandaset world coordinates
    """
    def generate_single_sample_dataframe(batch_index, box_dict, zrot_world_to_ego, pose):
        # Convert one frame's predictions from the normative ego frame back to
        # pandaset world coordinates and pack them as a cuboid-style DataFrame.
        pred_boxes = box_dict["pred_boxes"].cpu().numpy()
        pred_scores = box_dict["pred_scores"].cpu().numpy()
        pred_labels = box_dict["pred_labels"].cpu().numpy()
        zrot = zrot_world_to_ego.cpu().numpy()
        pose_dict = pose_numpy_to_dict(pose.cpu().numpy())
        xs = pred_boxes[:, 0]
        ys = pred_boxes[:, 1]
        zs = pred_boxes[:, 2]
        dxs = pred_boxes[:, 3]
        dys = pred_boxes[:, 4]
        dzs = pred_boxes[:, 5]
        yaws = pred_boxes[:, 6]
        names = np.array(class_names)[pred_labels - 1]  # Predicted labels start on 1
        # convert from normative coordinates to pandaset ego coordinates
        # (inverse of the axis swap applied when loading points and boxes)
        ego_xs = - ys
        ego_ys = xs
        ego_zs = zs
        ego_dxs = dys
        ego_dys = dxs
        ego_dzs = dzs
        ego_yaws = yaws
        # convert from pandaset ego coordinates to world coordinates
        # for the moment, an simplified estimation of the ego yaw is computed in __getitem__
        # which sets ego_yaw = world_yaw + zrot_world_to_ego
        world_yaws = ego_yaws - zrot
        ego_centers = np.vstack([ego_xs, ego_ys, ego_zs]).T
        world_centers = ps.geometry.ego_to_lidar_points(ego_centers, pose_dict)
        world_xs = world_centers[:, 0]
        world_ys = world_centers[:, 1]
        world_zs = world_centers[:, 2]
        # dx, dy, dz remain unchanged as the bbox orientation is handled by
        # the yaw information
        data_dict = {'position.x': world_xs,
                     'position.y': world_ys,
                     'position.z': world_zs,
                     'dimensions.x': ego_dxs,
                     'dimensions.y': ego_dys,
                     'dimensions.z': ego_dzs,
                     'yaw': world_yaws % (2 * np.pi),
                     'label': names,
                     'score': pred_scores
                     }
        return pd.DataFrame(data_dict)

    annos = []
    for index, box_dict in enumerate(pred_dicts):
        frame_idx = batch_dict['frame_idx'][index]
        seq_idx = batch_dict['sequence'][index]
        zrot = batch_dict['zrot_world_to_ego'][index]
        pose = batch_dict['pose'][index]
        single_pred_df = generate_single_sample_dataframe(index, box_dict, zrot, pose)
        single_pred_dict = {'preds' : single_pred_df,
                            # 'name 'ensures testing the number of detections in a compatible format as kitti
                            'name' : single_pred_df['label'].tolist(),
                            'frame_idx': frame_idx,
                            'sequence': str(seq_idx).zfill(3)}
        # seq_idx was converted to int in self.__getitem__` because strings
        # can't be passed to the gpu in pytorch.
        # To convert it back to a string, we assume that the sequence is
        # provided in pandaset format with 3 digits
        if output_path is not None:
            # Mirror the pandaset on-disk layout: <seq>/predictions/cuboids/<frame>.pkl.gz
            frame_id = str(int(frame_idx)).zfill(2)
            seq_id = str(int(seq_idx)).zfill(3)
            cur_det_file = os.path.join(output_path, seq_id, 'predictions',
                                        'cuboids', ("{}.pkl.gz".format(frame_id)))
            os.makedirs(os.path.dirname(cur_det_file), exist_ok=True)
            single_pred_df.to_pickle(cur_det_file)
        annos.append(single_pred_dict)
    return annos
def get_infos(self):
    """
    Generate the dataset infos dict for each sample of the dataset.
    For each sample, this dict contains:
        - the sequence index
        - the frame index
        - the path to the lidar data
        - the path to the bounding box annotations
    """
    all_infos = []
    for seq in self.sequences:
        sequence = self.dataset[seq]
        sequence.load_lidar()
        num_frames = len(sequence.lidar.data)
        # Frame file names are zero-padded to two digits, so at most 100 frames
        # (00..99) per sequence are representable.
        if num_frames > 100:
            raise ValueError("The implementation for this dataset assumes that each sequence is " +
                             "no longer than 100 frames. The current sequence has {}".format(num_frames))
        seq_dir = os.path.join(self.root_path, 'dataset', seq)
        for frame in range(num_frames):
            fname = "{:02d}.pkl.gz".format(frame)
            all_infos.append({
                'sequence': seq,
                'frame_idx': frame,
                'lidar_path': os.path.join(seq_dir, 'lidar', fname),
                'cuboids_path': os.path.join(seq_dir, 'annotations', 'cuboids', fname),
            })
        # Drop the loaded sequence to keep memory bounded while scanning.
        del self.dataset._sequences[seq]
    return all_infos
def create_groundtruth_database(self, info_path=None, used_classes=None, split='train'):
    """
    Crop every ground-truth object's points out of the scans and save them as
    individual .bin files, plus a dbinfos pickle, for gt-sampling augmentation.

    Args:
        info_path: path to the pandaset_infos pickle of the chosen split
        used_classes: optional whitelist of class names to index; None keeps all
        split: split name; 'train' uses the default 'gt_database' folder
    """
    database_save_path = os.path.join(self.root_path,
                                      'gt_database' if split == 'train' else 'gt_database_{}'.format(split))
    db_info_save_path = os.path.join(self.root_path,
                                     'pandaset_dbinfos_{}.pkl'.format(split))
    os.makedirs(database_save_path, exist_ok=True)
    all_db_infos = {}

    with open(info_path, 'rb') as f:
        infos = pickle.load(f)

    for k in range(len(infos)):
        print('gt_database sample: %d/%d' % (k + 1, len(infos)))
        info = infos[k]
        sample_idx = info['frame_idx']
        pose = self._get_pose(info)
        points = self._get_lidar_points(info, pose)
        gt_boxes, names, _ = self._get_annotations(info, pose)

        num_obj = gt_boxes.shape[0]
        # Membership mask of points falling inside each gt box.
        point_indices = roiaware_pool3d_utils.points_in_boxes_cpu(
            torch.from_numpy(points[:, 0:3]), torch.from_numpy(gt_boxes)
        ).numpy()  # (nboxes, npoints)

        for i in range(num_obj):
            # Strip characters that are unsafe in file names.
            tmp_name = names[i].replace("/", "").replace(" ", "")
            filename = '%s_%s_%d.bin' % (sample_idx, tmp_name, i)
            filepath = os.path.join(database_save_path, filename)
            gt_points = points[point_indices[i] > 0]

            # Store the object's points relative to its box center.
            gt_points[:, :3] -= gt_boxes[i, :3]
            with open(filepath, 'wb') as f:
                gt_points.tofile(f)

            if (used_classes is None) or names[i] in used_classes:
                db_path = os.path.relpath(filepath, self.root_path)  # gt_database/xxxxx.bin
                db_info = {'name': names[i], 'path': db_path, 'gt_idx': i,
                           'box3d_lidar': gt_boxes[i], 'num_points_in_gt': gt_points.shape[0],
                           'difficulty': -1}
                if names[i] in all_db_infos:
                    all_db_infos[names[i]].append(db_info)
                else:
                    all_db_infos[names[i]] = [db_info]
    for k, v in all_db_infos.items():
        print('Database %s: %d' % (k, len(v)))

    with open(db_info_save_path, 'wb') as f:
        pickle.dump(all_db_infos, f)
def evaluation(self, det_annos, class_names, **kwargs):
    """No-op evaluation: Pandaset has no official benchmark, so an empty
    result string and an empty AP dict are returned."""
    self.logger.warning('Evaluation is not implemented for Pandaset as there is no official one. ' +
                        'Returning an empty evaluation result.')
    return '', {}
def create_pandaset_infos(dataset_cfg, class_names, data_path, save_path):
    """
    Create dataset_infos files in order not to have it in a preprocessed pickle
    file with the info for each sample
    See PandasetDataset.get_infos for further details.
    """
    dataset = PandasetDataset(dataset_cfg=dataset_cfg, class_names=class_names,
                              root_path=data_path, training=False)

    # Pickle the per-split info lists.
    for split in ("train", "val", "test"):
        print("---------------- Start to generate {} data infos ---------------".format(split))
        dataset.set_split(split)
        split_infos = dataset.get_infos()
        info_file = os.path.join(save_path, 'pandaset_infos_{}.pkl'.format(split))
        with open(info_file, 'wb') as f:
            pickle.dump(split_infos, f)
        print("Pandaset info {} file is saved to {}".format(split, info_file))

    # The gt database for augmentation is built from the train split only.
    print('------------Start create groundtruth database for data augmentation-----------')
    dataset = PandasetDataset(dataset_cfg=dataset_cfg, class_names=class_names,
                              root_path=data_path, training=False)
    dataset.set_split("train")
    dataset.create_groundtruth_database(
        os.path.join(save_path, 'pandaset_infos_train.pkl'),
        split="train"
    )
    print('---------------Data preparation Done---------------')
if __name__ == '__main__':
    import sys

    # CLI entry point:
    #   python pandaset_dataset.py create_pandaset_infos <dataset_cfg.yaml>
    if len(sys.argv) > 1 and sys.argv[1] == 'create_pandaset_infos':
        import yaml
        from pathlib import Path
        from easydict import EasyDict

        dataset_cfg = EasyDict(yaml.safe_load(open(sys.argv[2])))
        # Repo root is three levels above this file.
        ROOT_DIR = (Path(__file__).resolve().parent / '../../../').resolve()
        create_pandaset_infos(
            dataset_cfg=dataset_cfg,
            class_names=['Car', 'Pedestrian', 'Cyclist'],
            data_path=ROOT_DIR / 'data' / 'pandaset',
            save_path=ROOT_DIR / 'data' / 'pandaset'
        )
| 19,065 | 37.910204 | 157 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/datasets/kitti/kitti_dataset.py | import copy
import pickle
import numpy as np
from skimage import io
from . import kitti_utils
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from ...utils import box_utils, calibration_kitti, common_utils, object3d_kitti
from ..dataset import DatasetTemplate
class KittiDataset(DatasetTemplate):
def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None):
    """
    Args:
        root_path: dataset root directory (resolution handled by DatasetTemplate)
        dataset_cfg: dataset configuration (provides DATA_SPLIT, INFO_PATH, ...)
        class_names: list of class names used for training
        training: True for training mode; selects the split via DATA_SPLIT[self.mode]
        logger: optional logger
    """
    super().__init__(
        dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger
    )
    self.split = self.dataset_cfg.DATA_SPLIT[self.mode]
    # KITTI layout: the 'test' split lives under 'testing', everything else under 'training'.
    self.root_split_path = self.root_path / ('training' if self.split != 'test' else 'testing')

    split_dir = self.root_path / 'ImageSets' / (self.split + '.txt')
    # One sample id per line; None when the split file is missing.
    self.sample_id_list = [x.strip() for x in open(split_dir).readlines()] if split_dir.exists() else None

    self.kitti_infos = []
    self.include_kitti_data(self.mode)
def include_kitti_data(self, mode):
if self.logger is not None:
self.logger.info('Loading KITTI dataset')
kitti_infos = []
for info_path in self.dataset_cfg.INFO_PATH[mode]:
info_path = self.root_path / info_path
if not info_path.exists():
continue
with open(info_path, 'rb') as f:
infos = pickle.load(f)
kitti_infos.extend(infos)
self.kitti_infos.extend(kitti_infos)
if self.logger is not None:
self.logger.info('Total samples for KITTI dataset: %d' % (len(kitti_infos)))
def set_split(self, split):
    """Switch the dataset to another split and rebuild the sample id list.

    NOTE: this re-runs DatasetTemplate.__init__ to reset split-dependent state
    inherited from the template before updating the split attributes.
    """
    super().__init__(
        dataset_cfg=self.dataset_cfg, class_names=self.class_names, training=self.training, root_path=self.root_path, logger=self.logger
    )
    self.split = split
    # Same layout rule as __init__: only 'test' lives under 'testing'.
    self.root_split_path = self.root_path / ('training' if self.split != 'test' else 'testing')

    split_dir = self.root_path / 'ImageSets' / (self.split + '.txt')
    self.sample_id_list = [x.strip() for x in open(split_dir).readlines()] if split_dir.exists() else None
def get_lidar(self, idx):
lidar_file = self.root_split_path / 'velodyne' / ('%s.bin' % idx)
assert lidar_file.exists()
return np.fromfile(str(lidar_file), dtype=np.float32).reshape(-1, 4)
def get_image(self, idx):
    """
    Loads image for a sample
    Args:
        idx: int, Sample index
    Returns:
        image: (H, W, 3), RGB Image with values scaled into [0, 1]
    """
    img_file = self.root_split_path / 'image_2' / ('%s.png' % idx)
    assert img_file.exists()
    # Scale 8-bit channel values into [0, 1] floats.
    return io.imread(img_file).astype(np.float32) / 255.0
def get_image_shape(self, idx):
    """Return (height, width) of the sample's left color image as an int32 array."""
    img_file = self.root_split_path / 'image_2' / ('%s.png' % idx)
    assert img_file.exists()
    height, width = io.imread(img_file).shape[:2]
    return np.array([height, width], dtype=np.int32)
def get_label(self, idx):
    """Parse the KITTI annotation file of sample *idx* into Object3d instances."""
    path = self.root_split_path / 'label_2' / ('%s.txt' % idx)
    assert path.exists()
    return object3d_kitti.get_objects_from_label(path)
def get_depth_map(self, idx):
    """
    Loads depth map for a sample
    Args:
        idx: str, Sample index
    Returns:
        depth: (H, W), Depth map
    """
    path = self.root_split_path / 'depth_2' / ('%s.png' % idx)
    assert path.exists()
    # KITTI depth maps are 16-bit PNGs scaled by 256.
    return io.imread(path).astype(np.float32) / 256.0
def get_calib(self, idx):
    """Load the camera/lidar calibration for sample *idx*."""
    path = self.root_split_path / 'calib' / ('%s.txt' % idx)
    assert path.exists()
    return calibration_kitti.Calibration(path)
def get_road_plane(self, idx):
plane_file = self.root_split_path / 'planes' / ('%s.txt' % idx)
if not plane_file.exists():
return None
with open(plane_file, 'r') as f:
lines = f.readlines()
lines = [float(i) for i in lines[3].split()]
plane = np.asarray(lines)
# Ensure normal is always facing up, this is in the rectified camera coordinate
if plane[1] > 0:
plane = -plane
norm = np.linalg.norm(plane[0:3])
plane = plane / norm
return plane
@staticmethod
def get_fov_flag(pts_rect, img_shape, calib):
"""
Args:
pts_rect:
img_shape:
calib:
Returns:
"""
pts_img, pts_rect_depth = calib.rect_to_img(pts_rect)
val_flag_1 = np.logical_and(pts_img[:, 0] >= 0, pts_img[:, 0] < img_shape[1])
val_flag_2 = np.logical_and(pts_img[:, 1] >= 0, pts_img[:, 1] < img_shape[0])
val_flag_merge = np.logical_and(val_flag_1, val_flag_2)
pts_valid_flag = np.logical_and(val_flag_merge, pts_rect_depth >= 0)
return pts_valid_flag
def get_infos(self, num_workers=4, has_label=True, count_inside_pts=True, sample_id_list=None):
    """
    Build the info dict for every sample id using a thread pool.

    Args:
        num_workers: thread pool size
        has_label: also parse label_2 annotations into each info dict
        count_inside_pts: additionally count the lidar points inside every gt box
        sample_id_list: explicit id list; defaults to self.sample_id_list
    Returns:
        list of per-sample info dicts with 'point_cloud', 'image', 'calib'
        and (optionally) 'annos' entries
    """
    import concurrent.futures as futures

    def process_single_scene(sample_idx):
        # Assemble the full info dict for one frame.
        print('%s sample_idx: %s' % (self.split, sample_idx))
        info = {}
        pc_info = {'num_features': 4, 'lidar_idx': sample_idx}
        info['point_cloud'] = pc_info

        image_info = {'image_idx': sample_idx, 'image_shape': self.get_image_shape(sample_idx)}
        info['image'] = image_info
        calib = self.get_calib(sample_idx)
        # Pad the raw 3x4 / 3x3 calibration matrices to homogeneous 4x4 form.
        P2 = np.concatenate([calib.P2, np.array([[0., 0., 0., 1.]])], axis=0)
        R0_4x4 = np.zeros([4, 4], dtype=calib.R0.dtype)
        R0_4x4[3, 3] = 1.
        R0_4x4[:3, :3] = calib.R0
        V2C_4x4 = np.concatenate([calib.V2C, np.array([[0., 0., 0., 1.]])], axis=0)
        calib_info = {'P2': P2, 'R0_rect': R0_4x4, 'Tr_velo_to_cam': V2C_4x4}

        info['calib'] = calib_info

        if has_label:
            obj_list = self.get_label(sample_idx)
            annotations = {}
            annotations['name'] = np.array([obj.cls_type for obj in obj_list])
            annotations['truncated'] = np.array([obj.truncation for obj in obj_list])
            annotations['occluded'] = np.array([obj.occlusion for obj in obj_list])
            annotations['alpha'] = np.array([obj.alpha for obj in obj_list])
            annotations['bbox'] = np.concatenate([obj.box2d.reshape(1, 4) for obj in obj_list], axis=0)
            annotations['dimensions'] = np.array([[obj.l, obj.h, obj.w] for obj in obj_list])  # lhw(camera) format
            annotations['location'] = np.concatenate([obj.loc.reshape(1, 3) for obj in obj_list], axis=0)
            annotations['rotation_y'] = np.array([obj.ry for obj in obj_list])
            annotations['score'] = np.array([obj.score for obj in obj_list])
            annotations['difficulty'] = np.array([obj.level for obj in obj_list], np.int32)

            # 'DontCare' entries come last in KITTI label files, so real objects
            # get indices [0, num_objects) and DontCare entries get -1.
            num_objects = len([obj.cls_type for obj in obj_list if obj.cls_type != 'DontCare'])
            num_gt = len(annotations['name'])
            index = list(range(num_objects)) + [-1] * (num_gt - num_objects)
            annotations['index'] = np.array(index, dtype=np.int32)

            loc = annotations['location'][:num_objects]
            dims = annotations['dimensions'][:num_objects]
            rots = annotations['rotation_y'][:num_objects]
            loc_lidar = calib.rect_to_lidar(loc)
            l, h, w = dims[:, 0:1], dims[:, 1:2], dims[:, 2:3]
            # KITTI camera boxes are anchored at the bottom face; lift to the center.
            loc_lidar[:, 2] += h[:, 0] / 2
            gt_boxes_lidar = np.concatenate([loc_lidar, l, w, h, -(np.pi / 2 + rots[..., np.newaxis])], axis=1)
            annotations['gt_boxes_lidar'] = gt_boxes_lidar

            info['annos'] = annotations

            if count_inside_pts:
                points = self.get_lidar(sample_idx)
                calib = self.get_calib(sample_idx)
                pts_rect = calib.lidar_to_rect(points[:, 0:3])

                # Only points inside the camera field of view are counted.
                fov_flag = self.get_fov_flag(pts_rect, info['image']['image_shape'], calib)
                pts_fov = points[fov_flag]
                corners_lidar = box_utils.boxes_to_corners_3d(gt_boxes_lidar)
                # -1 marks boxes (e.g. DontCare) for which no count is computed.
                num_points_in_gt = -np.ones(num_gt, dtype=np.int32)

                for k in range(num_objects):
                    flag = box_utils.in_hull(pts_fov[:, 0:3], corners_lidar[k])
                    num_points_in_gt[k] = flag.sum()
                annotations['num_points_in_gt'] = num_points_in_gt

        return info

    sample_id_list = sample_id_list if sample_id_list is not None else self.sample_id_list
    with futures.ThreadPoolExecutor(num_workers) as executor:
        infos = executor.map(process_single_scene, sample_id_list)
    return list(infos)
def create_groundtruth_database(self, info_path=None, used_classes=None, split='train'):
    """
    Crop every ground-truth object's points out of the raw scans and save them
    as individual .bin files, plus a dbinfos pickle, for gt-sampling augmentation.

    Args:
        info_path: path to the kitti_infos pickle of the chosen split
        used_classes: optional whitelist of class names to index; None keeps all
        split: split name; 'train' uses the default 'gt_database' folder
    """
    import torch
    # BUGFIX: pathlib is not imported at module level; the original code only
    # worked when the module was run as a script, because the __main__ block
    # imported Path into the module globals.
    from pathlib import Path

    database_save_path = Path(self.root_path) / ('gt_database' if split == 'train' else ('gt_database_%s' % split))
    db_info_save_path = Path(self.root_path) / ('kitti_dbinfos_%s.pkl' % split)

    database_save_path.mkdir(parents=True, exist_ok=True)
    all_db_infos = {}

    with open(info_path, 'rb') as f:
        infos = pickle.load(f)

    for k in range(len(infos)):
        print('gt_database sample: %d/%d' % (k + 1, len(infos)))
        info = infos[k]
        sample_idx = info['point_cloud']['lidar_idx']
        points = self.get_lidar(sample_idx)
        annos = info['annos']
        names = annos['name']
        difficulty = annos['difficulty']
        bbox = annos['bbox']
        gt_boxes = annos['gt_boxes_lidar']

        num_obj = gt_boxes.shape[0]
        # Membership mask of points falling inside each gt box.
        point_indices = roiaware_pool3d_utils.points_in_boxes_cpu(
            torch.from_numpy(points[:, 0:3]), torch.from_numpy(gt_boxes)
        ).numpy()  # (nboxes, npoints)

        for i in range(num_obj):
            filename = '%s_%s_%d.bin' % (sample_idx, names[i], i)
            filepath = database_save_path / filename
            gt_points = points[point_indices[i] > 0]

            # Store the object's points relative to its box center.
            gt_points[:, :3] -= gt_boxes[i, :3]
            # BUGFIX: the dump is raw binary, so the file must be opened in
            # binary mode; text mode ('w') corrupts ndarray.tofile output on
            # platforms with newline translation.
            with open(filepath, 'wb') as f:
                gt_points.tofile(f)

            if (used_classes is None) or names[i] in used_classes:
                db_path = str(filepath.relative_to(self.root_path))  # gt_database/xxxxx.bin
                db_info = {'name': names[i], 'path': db_path, 'image_idx': sample_idx, 'gt_idx': i,
                           'box3d_lidar': gt_boxes[i], 'num_points_in_gt': gt_points.shape[0],
                           'difficulty': difficulty[i], 'bbox': bbox[i], 'score': annos['score'][i]}
                if names[i] in all_db_infos:
                    all_db_infos[names[i]].append(db_info)
                else:
                    all_db_infos[names[i]] = [db_info]
    for k, v in all_db_infos.items():
        print('Database %s: %d' % (k, len(v)))

    with open(db_info_save_path, 'wb') as f:
        pickle.dump(all_db_infos, f)
@staticmethod
def generate_prediction_dicts(batch_dict, pred_dicts, class_names, output_path=None):
    """
    Args:
        batch_dict:
            frame_id:
        pred_dicts: list of pred_dicts
            pred_boxes: (N, 7), Tensor
            pred_scores: (N), Tensor
            pred_labels: (N), Tensor
        class_names:
        output_path: if not None, additionally dump one KITTI-format txt file per frame
    Returns:
        annos: list of per-frame prediction dicts in KITTI camera coordinates
    """
    def get_template_prediction(num_samples):
        # All-zero, KITTI-shaped annotation dict for num_samples boxes.
        ret_dict = {
            'name': np.zeros(num_samples), 'truncated': np.zeros(num_samples),
            'occluded': np.zeros(num_samples), 'alpha': np.zeros(num_samples),
            'bbox': np.zeros([num_samples, 4]), 'dimensions': np.zeros([num_samples, 3]),
            'location': np.zeros([num_samples, 3]), 'rotation_y': np.zeros(num_samples),
            'score': np.zeros(num_samples), 'boxes_lidar': np.zeros([num_samples, 7])
        }
        return ret_dict

    def generate_single_sample_dict(batch_index, box_dict):
        # Convert one frame's lidar-frame predictions to camera/image frames.
        # Predictions may be wrapped in a one-element list by some heads.
        pred_scores = box_dict['pred_scores'][0].cpu().numpy() if isinstance(box_dict['pred_scores'], list) else box_dict['pred_scores'].cpu().numpy()
        pred_boxes = box_dict['pred_boxes'][0].cpu().numpy() if isinstance(box_dict['pred_boxes'], list) else box_dict['pred_boxes'].cpu().numpy()
        pred_labels = box_dict['pred_labels'][0].cpu().numpy() if isinstance(box_dict['pred_labels'], list) else box_dict['pred_labels'].cpu().numpy()
        pred_dict = get_template_prediction(pred_scores.shape[0])
        if pred_scores.shape[0] == 0:
            return pred_dict

        calib = batch_dict['calib'][batch_index]
        image_shape = batch_dict['image_shape'][batch_index].cpu().numpy()
        pred_boxes_camera = box_utils.boxes3d_lidar_to_kitti_camera(pred_boxes, calib)
        pred_boxes_img = box_utils.boxes3d_kitti_camera_to_imageboxes(
            pred_boxes_camera, calib, image_shape=image_shape
        )

        # Labels are 1-based; map back to class names.
        pred_dict['name'] = np.array(class_names)[pred_labels - 1]
        pred_dict['alpha'] = -np.arctan2(-pred_boxes[:, 1], pred_boxes[:, 0]) + pred_boxes_camera[:, 6]
        pred_dict['bbox'] = pred_boxes_img
        pred_dict['dimensions'] = pred_boxes_camera[:, 3:6]
        pred_dict['location'] = pred_boxes_camera[:, 0:3]
        pred_dict['rotation_y'] = pred_boxes_camera[:, 6]
        pred_dict['score'] = pred_scores
        pred_dict['boxes_lidar'] = pred_boxes

        return pred_dict

    annos = []
    for index, box_dict in enumerate(pred_dicts):
        frame_id = batch_dict['frame_id'][index]

        single_pred_dict = generate_single_sample_dict(index, box_dict)
        single_pred_dict['frame_id'] = frame_id
        annos.append(single_pred_dict)

        if output_path is not None:
            # One txt file per frame, in the official KITTI results format.
            cur_det_file = output_path / ('%s.txt' % frame_id)
            with open(cur_det_file, 'w') as f:
                bbox = single_pred_dict['bbox']
                loc = single_pred_dict['location']
                dims = single_pred_dict['dimensions']  # lhw -> hwl

                for idx in range(len(bbox)):
                    print('%s -1 -1 %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f'
                          % (single_pred_dict['name'][idx], single_pred_dict['alpha'][idx],
                             bbox[idx][0], bbox[idx][1], bbox[idx][2], bbox[idx][3],
                             dims[idx][1], dims[idx][2], dims[idx][0], loc[idx][0],
                             loc[idx][1], loc[idx][2], single_pred_dict['rotation_y'][idx],
                             single_pred_dict['score'][idx]), file=f)

    return annos
def evaluation(self, det_annos, class_names, **kwargs):
if 'annos' not in self.kitti_infos[0].keys():
return None, {}
from .kitti_object_eval_python import eval as kitti_eval
eval_det_annos = copy.deepcopy(det_annos)
eval_gt_annos = [copy.deepcopy(info['annos']) for info in self.kitti_infos]
ap_result_str, ap_dict = kitti_eval.get_official_eval_result(eval_gt_annos, eval_det_annos, class_names)
return ap_result_str, ap_dict
def __len__(self):
if self._merge_all_iters_to_one_epoch:
return len(self.kitti_infos) * self.total_epochs
return len(self.kitti_infos)
def __getitem__(self, index):
    """Load one sample and run it through the template's prepare_data pipeline.

    The items actually loaded (points, images, depth maps, calibration
    matrices, 2D boxes) are controlled by dataset_cfg.GET_ITEM_LIST.
    """
    # index = 4
    if self._merge_all_iters_to_one_epoch:
        # Wrap around so a "merged" epoch can be longer than the dataset.
        index = index % len(self.kitti_infos)

    info = copy.deepcopy(self.kitti_infos[index])

    sample_idx = info['point_cloud']['lidar_idx']
    img_shape = info['image']['image_shape']
    calib = self.get_calib(sample_idx)
    get_item_list = self.dataset_cfg.get('GET_ITEM_LIST', ['points'])

    input_dict = {
        'frame_id': sample_idx,
        'calib': calib,
    }

    if 'annos' in info:
        annos = info['annos']
        # DontCare boxes are dropped before training.
        annos = common_utils.drop_info_with_name(annos, name='DontCare')
        loc, dims, rots = annos['location'], annos['dimensions'], annos['rotation_y']
        gt_names = annos['name']
        gt_boxes_camera = np.concatenate([loc, dims, rots[..., np.newaxis]], axis=1).astype(np.float32)
        gt_boxes_lidar = box_utils.boxes3d_kitti_camera_to_lidar(gt_boxes_camera, calib)

        input_dict.update({
            'gt_names': gt_names,
            'gt_boxes': gt_boxes_lidar
        })
        if "gt_boxes2d" in get_item_list:
            input_dict['gt_boxes2d'] = annos["bbox"]

        road_plane = self.get_road_plane(sample_idx)
        if road_plane is not None:
            input_dict['road_plane'] = road_plane

    if "points" in get_item_list:
        points = self.get_lidar(sample_idx)
        if self.dataset_cfg.FOV_POINTS_ONLY:
            # Keep only points visible from the camera.
            pts_rect = calib.lidar_to_rect(points[:, 0:3])
            fov_flag = self.get_fov_flag(pts_rect, img_shape, calib)
            points = points[fov_flag]
        input_dict['points'] = points

    if "images" in get_item_list:
        input_dict['images'] = self.get_image(sample_idx)

    if "depth_maps" in get_item_list:
        input_dict['depth_maps'] = self.get_depth_map(sample_idx)

    if "calib_matricies" in get_item_list:
        input_dict["trans_lidar_to_cam"], input_dict["trans_cam_to_img"] = kitti_utils.calib_to_matricies(calib)

    input_dict['sample_id_list'] = self.sample_id_list
    data_dict = self.prepare_data(data_dict=input_dict)

    data_dict['image_shape'] = img_shape
    return data_dict
def create_kitti_infos(dataset_cfg, class_names, data_path, save_path, workers=4):
    """Generate and pickle the KITTI info files for train/val/trainval/test,
    then build the ground-truth database for augmentation."""
    dataset = KittiDataset(dataset_cfg=dataset_cfg, class_names=class_names, root_path=data_path, training=False)
    train_split, val_split = 'train', 'val'

    train_filename = save_path / ('kitti_infos_%s.pkl' % train_split)
    val_filename = save_path / ('kitti_infos_%s.pkl' % val_split)
    trainval_filename = save_path / 'kitti_infos_trainval.pkl'
    test_filename = save_path / 'kitti_infos_test.pkl'

    def _dump(obj, path):
        # Serialize one info list to disk.
        with open(path, 'wb') as f:
            pickle.dump(obj, f)

    print('---------------Start to generate data infos---------------')

    dataset.set_split(train_split)
    kitti_infos_train = dataset.get_infos(num_workers=workers, has_label=True, count_inside_pts=True)
    _dump(kitti_infos_train, train_filename)
    print('Kitti info train file is saved to %s' % train_filename)

    dataset.set_split(val_split)
    kitti_infos_val = dataset.get_infos(num_workers=workers, has_label=True, count_inside_pts=True)
    _dump(kitti_infos_val, val_filename)
    print('Kitti info val file is saved to %s' % val_filename)

    _dump(kitti_infos_train + kitti_infos_val, trainval_filename)
    print('Kitti info trainval file is saved to %s' % trainval_filename)

    # The test split has no labels.
    dataset.set_split('test')
    _dump(dataset.get_infos(num_workers=workers, has_label=False, count_inside_pts=False), test_filename)
    print('Kitti info test file is saved to %s' % test_filename)

    print('---------------Start create groundtruth database for data augmentation---------------')
    dataset.set_split(train_split)
    dataset.create_groundtruth_database(train_filename, split=train_split)

    print('---------------Data preparation Done---------------')
if __name__ == '__main__':
    import sys

    # CLI entry point:
    #   python kitti_dataset.py create_kitti_infos <dataset_cfg.yaml>
    if len(sys.argv) > 1 and sys.argv[1] == 'create_kitti_infos':
        import yaml
        from pathlib import Path
        from easydict import EasyDict

        dataset_cfg = EasyDict(yaml.safe_load(open(sys.argv[2])))
        # Repo root is three levels above this file.
        ROOT_DIR = (Path(__file__).resolve().parent / '../../../').resolve()
        create_kitti_infos(
            dataset_cfg=dataset_cfg,
            class_names=['Car', 'Pedestrian', 'Cyclist'],
            data_path=ROOT_DIR / 'data' / 'kitti',
            save_path=ROOT_DIR / 'data' / 'kitti'
        )
| 20,757 | 41.711934 | 154 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/query_strategies/bald_sampling.py |
import torch
from .strategy import Strategy
from pcdet.models import load_data_to_gpu
import torch.nn.functional as F
import tqdm
class BALDSampling(Strategy):
    """Active-learning strategy that scores each unlabelled frame by the
    entropy of its predicted box logits and selects the most uncertain frames."""

    def __init__(self, model, labelled_loader, unlabelled_loader, rank, active_label_dir, cfg):
        super(BALDSampling, self).__init__(model, labelled_loader, unlabelled_loader, rank, active_label_dir, cfg)

    def query(self, leave_pbar=True, cur_epoch=None):
        """Score every frame in the unlabelled pool and return the
        cfg.ACTIVE_TRAIN.SELECT_NUMS frame ids with the highest aggregated
        predictive entropy.

        Args:
            leave_pbar: keep the progress bar visible after finishing
            cur_epoch: current epoch (used only in the progress-bar label)
        Returns:
            list of selected frame ids
        """
        select_dic = {}
        # select_nums = cfg.ACTIVE_TRAIN.SELECT_NUMS
        val_dataloader_iter = iter(self.unlabelled_loader)
        val_loader = self.unlabelled_loader
        total_it_each_epoch = len(self.unlabelled_loader)

        # Progress bar only on the main process.
        if self.rank == 0:
            pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar,
                             desc='evaluating_unlabelled_set_epoch_%d' % cur_epoch, dynamic_ncols=True)
        self.model.eval()
        # Keep dropout stochastic at inference time (MC-dropout style sampling).
        # NOTE(review): enable_dropout is expected to be provided by Strategy.
        self.enable_dropout(self.model)

        for cur_it in range(total_it_each_epoch):
            try:
                unlabelled_batch = next(val_dataloader_iter)
            except StopIteration:
                # BUGFIX: rebind the iterator the loop actually uses. The original
                # assigned the fresh iterator to a new name, so every following
                # iteration re-raised StopIteration and re-read the first batch.
                val_dataloader_iter = iter(val_loader)
                unlabelled_batch = next(val_dataloader_iter)
            with torch.no_grad():
                load_data_to_gpu(unlabelled_batch)
                pred_dicts, _ = self.model(unlabelled_batch)
                for batch_inx in range(len(pred_dicts)):
                    # Persist the frame's predictions for later inspection.
                    self.save_points(unlabelled_batch['frame_id'][batch_inx], pred_dicts[batch_inx])

                    final_full_cls_logits = pred_dicts[batch_inx]['pred_logits']
                    # Shannon entropy of each predicted box's class distribution.
                    box_values = \
                        -(F.softmax(final_full_cls_logits, dim=1) *
                          F.log_softmax(final_full_cls_logits, dim=1)).sum(dim=1)

                    """ Aggregate all the boxes values in one point cloud """
                    if self.cfg.ACTIVE_TRAIN.AGGREGATION == 'mean':
                        aggregated_values = torch.mean(box_values)
                    else:
                        raise NotImplementedError

                    select_dic[unlabelled_batch['frame_id'][batch_inx]] = aggregated_values

            if self.rank == 0:
                pbar.update()
                # pbar.set_postfix(disp_dict)
                pbar.refresh()
        if self.rank == 0:
            pbar.close()

        # Sort ascending by entropy and keep the SELECT_NUMS most uncertain frames.
        select_dic = dict(sorted(select_dic.items(), key=lambda item: item[1]))
        unlabelled_sample_num = len(select_dic.keys())
        selected_frames = list(select_dic.keys())[unlabelled_sample_num - self.cfg.ACTIVE_TRAIN.SELECT_NUMS:]
        return selected_frames
| 2,763 | 37.929577 | 114 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/query_strategies/crb_sampling.py |
import torch
from .strategy import Strategy
from pcdet.models import load_data_to_gpu
from pcdet.datasets import build_active_dataloader
import torch.nn.functional as F
from torch.distributions import Categorical
import tqdm
import numpy as np
import wandb
import time
import scipy
from sklearn.cluster import kmeans_plusplus, KMeans, MeanShift, Birch
from sklearn.mixture import GaussianMixture
from scipy.stats import uniform
from sklearn.neighbors import KernelDensity
from scipy.cluster.vq import vq
from typing import Dict, List
class CRBSampling(Strategy):
def __init__(self, model, labelled_loader, unlabelled_loader, rank, active_label_dir, cfg):
    """CRB sampling strategy; hyperparameters come from cfg.ACTIVE_TRAIN.ACTIVE_CONFIG
    with hard-coded fallbacks."""
    super(CRBSampling, self).__init__(model, labelled_loader, unlabelled_loader, rank, active_label_dir, cfg)
    # coefficients controls the ratio of selected subset
    self.k1 = getattr(cfg.ACTIVE_TRAIN.ACTIVE_CONFIG, 'K1', 5)
    self.k2 = getattr(cfg.ACTIVE_TRAIN.ACTIVE_CONFIG, 'K2', 3)
    # bandwidth for the KDE in the GPDB module
    # NOTE(review): the config key is spelled 'BANDWDITH' (sic); existing config
    # files presumably use this spelling -- confirm before renaming it.
    self.bandwidth = getattr(cfg.ACTIVE_TRAIN.ACTIVE_CONFIG, 'BANDWDITH', 5)
    # ablation study for prototype selection
    self.prototype = getattr(cfg.ACTIVE_TRAIN.ACTIVE_CONFIG, 'CLUSTERING', 'kmeans++')
    # controls the boundary of the uniform prior distribution
    self.alpha = 0.95
def enable_dropout(self, model):
    """ Function to enable the dropout layers during test-time """
    enabled = 0
    for module in model.modules():
        # Matches Dropout, Dropout2d, Dropout3d, ... by class-name prefix.
        if module.__class__.__name__.startswith('Dropout'):
            module.train()
            enabled += 1
    print('**found and enabled {} Dropout layers for random sampling**'.format(enabled))
def query(self, leave_pbar=True, cur_epoch=None):
select_dic = {}
# select_nums = cfg.ACTIVE_TRAIN.SELECT_NUMS
val_dataloader_iter = iter(self.unlabelled_loader)
val_loader = self.unlabelled_loader
total_it_each_epoch = len(self.unlabelled_loader)
# feed forward the model
if self.rank == 0:
pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar,
desc='evaluating_unlabelled_set_epoch_%d' % cur_epoch, dynamic_ncols=True)
self.model.eval()
self.enable_dropout(self.model)
num_class = len(self.labelled_loader.dataset.class_names)
check_value = []
cls_results = {}
reg_results = {}
density_list = {}
label_list = {}
'''
------------- Stage 1: Consise Label Sampling ----------------------
'''
for cur_it in range(total_it_each_epoch):
try:
unlabelled_batch = next(val_dataloader_iter)
except StopIteration:
unlabelled_dataloader_iter = iter(val_loader)
unlabelled_batch = next(unlabelled_dataloader_iter)
with torch.no_grad():
load_data_to_gpu(unlabelled_batch)
pred_dicts, _ = self.model(unlabelled_batch)
for batch_inx in range(len(pred_dicts)):
# save the meta information and project it to the wandb dashboard
self.save_points(unlabelled_batch['frame_id'][batch_inx], pred_dicts[batch_inx])
value, counts = torch.unique(pred_dicts[batch_inx]['pred_labels'], return_counts=True)
if len(value) == 0:
entropy = 0
else:
# calculates the shannon entropy of the predicted labels of bounding boxes
unique_proportions = torch.ones(num_class).cuda()
unique_proportions[value - 1] = counts.float()
entropy = Categorical(probs = unique_proportions / sum(counts)).entropy()
check_value.append(entropy)
# save the hypothetical labels for the regression heads at Stage 2
cls_results[unlabelled_batch['frame_id'][batch_inx]] = pred_dicts[batch_inx]['batch_rcnn_cls']
reg_results[unlabelled_batch['frame_id'][batch_inx]] = pred_dicts[batch_inx]['batch_rcnn_reg']
# used for sorting
select_dic[unlabelled_batch['frame_id'][batch_inx]] = entropy
# save the density records for the Stage 3
density_list[unlabelled_batch['frame_id'][batch_inx]] = pred_dicts[batch_inx]['pred_box_unique_density']
label_list[unlabelled_batch['frame_id'][batch_inx]] = pred_dicts[batch_inx]['pred_labels']
if self.rank == 0:
pbar.update()
# pbar.set_postfix(disp_dict)
pbar.refresh()
if self.rank == 0:
pbar.close()
check_value.sort()
log_data = [[idx, value] for idx, value in enumerate(check_value)]
table = wandb.Table(data=log_data, columns=['idx', 'selection_value'])
wandb.log({'value_dist_epoch_{}'.format(cur_epoch) : wandb.plot.line(table, 'idx', 'selection_value',
title='value_dist_epoch_{}'.format(cur_epoch))})
# sort and get selected_frames
select_dic = dict(sorted(select_dic.items(), key=lambda item: item[1]))
# narrow down the scope
selected_frames = list(select_dic.keys())[::-1][:int(self.k1 * self.cfg.ACTIVE_TRAIN.SELECT_NUMS)]
selected_id_list, selected_infos = [], []
unselected_id_list, unselected_infos = [], []
'''
------------- Stage 2: Representative Prototype Selection ----------------------
'''
# rebuild a dataloader for K1 samples
for i in range(len(self.pairs)):
if self.pairs[i][0] in selected_frames:
selected_id_list.append(self.pairs[i][0])
selected_infos.append(self.pairs[i][1])
else:
# no need for unselected part
if len(unselected_id_list) == 0:
unselected_id_list.append(self.pairs[i][0])
unselected_infos.append(self.pairs[i][1])
selected_id_list, selected_infos, \
unselected_id_list, unselected_infos = \
tuple(selected_id_list), tuple(selected_infos), \
tuple(unselected_id_list), tuple(unselected_infos)
active_training = [selected_id_list, selected_infos, unselected_id_list, unselected_infos]
labelled_set, _, \
grad_loader, _, \
_, _ = build_active_dataloader(
self.cfg.DATA_CONFIG,
self.cfg.CLASS_NAMES,
1,
False,
workers=self.labelled_loader.num_workers,
logger=None,
training=True,
active_training=active_training
)
grad_dataloader_iter = iter(grad_loader)
total_it_each_epoch = len(grad_loader)
self.model.train()
fc_grad_embedding_list = []
index_list = []
# start looping over the K1 samples
if self.rank == 0:
pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar,
desc='inf_grads_unlabelled_set_epoch_%d' % cur_epoch, dynamic_ncols=True)
for cur_it in range(total_it_each_epoch):
try:
unlabelled_batch = next(grad_dataloader_iter)
except StopIteration:
unlabelled_dataloader_iter = iter(grad_loader)
unlabelled_batch = next(grad_dataloader_iter)
load_data_to_gpu(unlabelled_batch)
pred_dicts, _, _= self.model(unlabelled_batch)
# get sample wise grads
rcnn_cls_preds_per_sample = pred_dicts['rcnn_cls']
rcnn_cls_gt_per_sample = cls_results[unlabelled_batch['frame_id'][0]]
rcnn_reg_preds_per_sample = pred_dicts['rcnn_reg']
rcnn_reg_gt_per_sample = reg_results[unlabelled_batch['frame_id'][0]]
cls_loss, _ = self.model.roi_head.get_box_cls_layer_loss({'rcnn_cls': rcnn_cls_preds_per_sample, 'rcnn_cls_labels': rcnn_cls_gt_per_sample})
reg_loss = self.model.roi_head.get_box_reg_layer_loss({'rcnn_reg': rcnn_reg_preds_per_sample, 'reg_sample_targets': rcnn_reg_gt_per_sample})
# clean cache
del rcnn_cls_preds_per_sample, rcnn_cls_gt_per_sample
del rcnn_reg_preds_per_sample, rcnn_reg_gt_per_sample
torch.cuda.empty_cache()
loss = cls_loss + reg_loss.mean()
self.model.zero_grad()
loss.backward()
fc_grads = self.model.roi_head.shared_fc_layer[4].weight.grad.clone().detach().cpu()
fc_grad_embedding_list.append(fc_grads)
index_list.append(unlabelled_batch['frame_id'][0])
if self.rank == 0:
pbar.update()
# pbar.set_postfix(disp_dict)
pbar.refresh()
if self.rank == 0:
pbar.close()
# stacking gradients for K1 candiates
fc_grad_embeddings = torch.stack(fc_grad_embedding_list, 0)
num_sample = fc_grad_embeddings.shape[0]
fc_grad_embeddings = fc_grad_embeddings.view(num_sample, -1)
start_time = time.time()
# choose the prefered prototype selection method and select the K2 medoids
if self.prototype == 'kmeans++':
_, selected_fc_idx = kmeans_plusplus(fc_grad_embeddings.numpy(), n_clusters=int(self.cfg.ACTIVE_TRAIN.SELECT_NUMS * self.k2), random_state=0)
elif self.prototype == 'kmeans':
km = KMeans(n_clusters=int(self.cfg.ACTIVE_TRAIN.SELECT_NUMS * self.k2), random_state=0).fit(fc_grad_embeddings.numpy())
selected_fc_idx, _ = vq(km.cluster_centers_, fc_grad_embeddings.numpy())
elif self.prototype == 'birch':
ms = Birch(n_clusters=int(self.cfg.ACTIVE_TRAIN.SELECT_NUMS * self.k2)).fit(fc_grad_embeddings.numpy())
selected_fc_idx, _ = vq(ms.subcluster_centers_, fc_grad_embeddings.numpy())
elif self.prototype == 'gmm':
gmm = GaussianMixture(n_components=int(self.cfg.ACTIVE_TRAIN.SELECT_NUMS * self.k2), random_state=0, covariance_type="diag").fit(fc_grad_embeddings.numpy())
selected_fc_idx, _ = vq(gmm.means_, fc_grad_embeddings.numpy())
else:
raise NotImplementedError
selected_frames = [index_list[i] for i in selected_fc_idx]
print("--- {%s} running time: %s seconds for fc grads---" % (self.prototype, time.time() - start_time))
'''
------------- Stage 3: Greedy Point Cloud Density Balancing ----------------------
'''
sampled_density_list = [density_list[i] for i in selected_frames]
sampled_label_list = [label_list[i] for i in selected_frames]
""" Build the uniform distribution for each class """
start_time = time.time()
density_all = torch.cat(list(density_list.values()), 0)
label_all = torch.cat(list(label_list.values()), 0)
unique_labels, label_counts = torch.unique(label_all, return_counts=True)
sorted_density = [torch.sort(density_all[label_all==unique_label])[0] for unique_label in unique_labels]
global_density_max = [int(sorted_density[unique_label][-1]) for unique_label in range(len(unique_labels))]
global_density_high = [int(sorted_density[unique_label][int(self.alpha * label_counts[unique_label])]) for unique_label in range(len(unique_labels))]
global_density_low = [int(sorted_density[unique_label][-int(self.alpha * label_counts[unique_label])]) for unique_label in range(len(unique_labels))]
x_axis = [np.linspace(-50, int(global_density_max[i])+50, 400) for i in range(num_class)]
uniform_dist_per_cls = [uniform.pdf(x_axis[i], global_density_low[i], global_density_high[i] - global_density_low[i]) for i in range(num_class)]
print("--- Build the uniform distribution running time: %s seconds ---" % (time.time() - start_time))
density_list, label_list, frame_id_list = sampled_density_list, sampled_label_list, selected_frames
selected_frames: List[str] = []
selected_box_densities: torch.tensor = torch.tensor([]).cuda()
selected_box_labels: torch.tensor = torch.tensor([]).cuda()
# looping over N_r samples
if self.rank == 0:
pbar = tqdm.tqdm(total=self.cfg.ACTIVE_TRAIN.SELECT_NUMS, leave=leave_pbar,
desc='global_density_div_for_epoch_%d' % cur_epoch, dynamic_ncols=True)
for j in range(self.cfg.ACTIVE_TRAIN.SELECT_NUMS):
if j == 0: # initially, we randomly select a frame.
selected_frames.append(frame_id_list[j])
selected_box_densities = torch.cat((selected_box_densities, density_list[j]))
selected_box_labels = torch.cat((selected_box_labels, label_list[j]))
# remove selected frame
del density_list[0]
del label_list[0]
del frame_id_list[0]
else: # go through all the samples and choose the frame that can most reduce the KL divergence
best_frame_id = None
best_frame_index = None
best_inverse_coff = -1
for i in range(len(density_list)):
unique_proportions = np.zeros(num_class)
KL_scores_per_cls = np.zeros(num_class)
for cls in range(num_class):
if (label_list[i] == cls + 1).sum() == 0:
unique_proportions[cls] = 1
KL_scores_per_cls[cls] = np.inf
else:
# get existing selected box densities
selected_box_densities_cls = selected_box_densities[selected_box_labels==(cls + 1)]
# append new frame's box densities to existing one
selected_box_densities_cls = torch.cat((selected_box_densities_cls,
density_list[i][label_list[i] == (cls + 1)]))
# initialize kde
kde = KernelDensity(kernel='gaussian', bandwidth=self.bandwidth).fit(
selected_box_densities_cls.cpu().numpy()[:, None])
logprob = kde.score_samples(x_axis[cls][:, None])
KL_score_per_cls = scipy.stats.entropy(uniform_dist_per_cls[cls], np.exp(logprob))
KL_scores_per_cls[cls] = KL_score_per_cls
# ranging from 0 to 1
unique_proportions[cls] = 2 / np.pi * np.arctan(np.pi / 2 * KL_score_per_cls)
inverse_coff = np.mean(1 - unique_proportions)
# KL_save_list.append(inverse_coff)
if inverse_coff > best_inverse_coff:
best_inverse_coff = inverse_coff
best_frame_index = i
best_frame_id = frame_id_list[i]
# remove selected frame
selected_box_densities = torch.cat((selected_box_densities, density_list[best_frame_index]))
selected_box_labels = torch.cat((selected_box_labels, label_list[best_frame_index]))
del density_list[best_frame_index]
del label_list[best_frame_index]
del frame_id_list[best_frame_index]
selected_frames.append(best_frame_id)
if self.rank == 0:
pbar.update()
# pbar.set_postfix(disp_dict)
pbar.refresh()
if self.rank == 0:
pbar.close()
self.model.eval()
# returned the index of acquired bounding boxes
return selected_frames
| 16,078 | 45.877551 | 168 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/query_strategies/montecarlo_sampling.py |
import torch
from .strategy import Strategy
from pcdet.models import load_data_to_gpu
import torch.nn.functional as F
import tqdm
def enable_dropout(model):
    """Switch every Dropout-family layer of *model* back to train mode.

    Called after ``model.eval()`` so that repeated forward passes remain
    stochastic (Monte-Carlo dropout) while the other layers (e.g. BatchNorm)
    stay in eval mode.
    """
    dropout_layers = [m for m in model.modules()
                      if m.__class__.__name__.startswith('Dropout')]
    for layer in dropout_layers:
        layer.train()
    print('**found {} Dropout layers for random sampling'.format(len(dropout_layers)))
class MonteCarloSampling(Strategy):
    """MC-dropout uncertainty sampling.

    Runs the detector with Dropout layers forced into train mode, measures the
    variance of the RoI head's classification/regression outputs across the
    stochastic samples, and selects the frames with the largest aggregated
    variance.
    """
    def __init__(self, model, labelled_loader, unlabelled_loader, rank, active_label_dir, cfg):
        super(MonteCarloSampling, self).__init__(model, labelled_loader, unlabelled_loader, rank, active_label_dir, cfg)

    def query(self, leave_pbar=True, cur_epoch=None):
        """Return the SELECT_NUMS frame ids with the highest MC-dropout variance."""
        select_dic = {}

        # select_nums = cfg.ACTIVE_TRAIN.SELECT_NUMS
        val_dataloader_iter = iter(self.unlabelled_loader)
        val_loader = self.unlabelled_loader
        total_it_each_epoch = len(self.unlabelled_loader)

        # feed forward the model
        if self.rank == 0:
            pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar,
                             desc='evaluating_unlabelled_set_epoch_%d' % cur_epoch, dynamic_ncols=True)
        self.model.eval()
        enable_dropout(self.model)
        for cur_it in range(total_it_each_epoch):
            try:
                unlabelled_batch = next(val_dataloader_iter)
            except StopIteration:
                unlabelled_dataloader_iter = iter(val_loader)
                unlabelled_batch = next(unlabelled_dataloader_iter)
            with torch.no_grad():
                load_data_to_gpu(unlabelled_batch)
                pred_dicts, _ = self.model(unlabelled_batch)
                for batch_inx in range(len(pred_dicts)):
                    self.save_points(unlabelled_batch['frame_id'][batch_inx], pred_dicts[batch_inx])
                    # did not apply batch mask -> directly output
                    # NOTE(review): variance is read from pred_dicts[0] even though
                    # batch_inx iterates the batch -- appears to assume batch size 1;
                    # confirm against the caller's dataloader configuration.
                    cls_var = torch.var(torch.sigmoid(pred_dicts[0]['rcnn_cls']), 0).mean()
                    reg_var = torch.var(pred_dicts[0]['rcnn_reg'], 0).mean()
                    """ Aggregate all the boxes values in one point cloud """
                    if self.cfg.ACTIVE_TRAIN.AGGREGATION == 'mean':
                        aggregated_values = cls_var + reg_var
                    else:
                        raise NotImplementedError
                    select_dic[unlabelled_batch['frame_id'][batch_inx]] = aggregated_values
            if self.rank == 0:
                pbar.update()
                # pbar.set_postfix(disp_dict)
                pbar.refresh()
        if self.rank == 0:
            pbar.close()

        # sort and get selected_frames
        # ascending sort by variance -> the tail holds the most uncertain frames
        select_dic = dict(sorted(select_dic.items(), key=lambda item: item[1]))
        unlabelled_sample_num = len(select_dic.keys())
        selected_frames = list(select_dic.keys())[unlabelled_sample_num - self.cfg.ACTIVE_TRAIN.SELECT_NUMS:]
        return selected_frames
| 3,072 | 36.938272 | 120 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/query_strategies/badge_sampling.py |
import itertools
import torch
from .strategy import Strategy
from pcdet.models import load_data_to_gpu
import torch.nn.functional as F
import tqdm
from sklearn.cluster import kmeans_plusplus
import time
import pickle
import os
from torch.utils.data import DataLoader
import itertools
from collections import Counter
import gc
import numpy as np
class BadgeSampling(Strategy):
    """BADGE-style selection: k-means++ over loss-gradient embeddings.

    A first MC-dropout inference pass produces hard hypothetical labels for
    the RPN head; a second pass in train mode back-propagates the resulting
    classification loss per frame and harvests the gradient of the RPN conv_cls
    weights as the frame's embedding. k-means++ seeding over those embeddings
    yields a diverse, high-uncertainty selection.
    """
    def __init__(self, model, labelled_loader, unlabelled_loader, rank, active_label_dir, cfg):
        super(BadgeSampling, self).__init__(model, labelled_loader, unlabelled_loader, rank, active_label_dir, cfg)
        # num_box decay rate
        # self.delta = 0.99
        # self.k = 3

    def get_grad_embedding(self, probs, feat):
        """Analytic last-layer gradient embedding from probabilities and features.

        NOTE(review): ``final_embeddings`` (the sign-flipped variant for
        probs < 0.5) is computed but never used -- the function returns the
        unconditional ``embeddings``. Confirm which variant was intended.
        """
        embeddings_pos = feat * (1 - probs)
        embeddings = torch.cat((embeddings_pos, -embeddings_pos), axis=-1)
        final_embeddings = torch.clone(embeddings)
        final_embeddings[probs < 0.5] = torch.cat((-embeddings_pos[probs < 0.5], embeddings_pos[probs < 0.5]), axis=1)
        # B x 1 true false true
        return embeddings

    def enable_dropout(self, model):
        """ Function to enable the dropout layers during test-time """
        i = 0
        for m in model.modules():
            if m.__class__.__name__.startswith('Dropout'):
                i += 1
                m.train()
        print('**found and enabled {} Dropout layers for random sampling**'.format(i))

    def query(self, leave_pbar=True, cur_epoch=None):
        """Select SELECT_NUMS frames via k-means++ seeds over RPN gradient embeddings."""
        select_dic = {}

        # select_nums = cfg.ACTIVE_TRAIN.SELECT_NUMS
        val_dataloader_iter = iter(self.unlabelled_loader)
        val_loader = self.unlabelled_loader
        total_it_each_epoch = len(self.unlabelled_loader)

        # feed forward the model
        if self.rank == 0:
            pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar,
                             desc='evaluating_unlabelled_set_epoch_%d' % cur_epoch, dynamic_ncols=True)
        self.model.eval()
        # enable dropout for multiple sampling
        self.enable_dropout(self.model)
        # self.model.roi_head.shared_fc_layer[3].train()
        rpn_preds_results = []
        # cls_results = []
        # reg_results = []
        if os.path.isfile(os.path.join(self.active_label_dir, 'grad_embeddings_epoch_{}.pkl'.format(cur_epoch))):
            # if found, then resume
            # NOTE(review): this resume path loads `grad_embeddings` but the variable
            # is never read below, and the code after the if/else relies on
            # rpn_preds_results populated by the else branch -- confirm intent.
            print('found {} epoch grad embeddings... start resuming...'.format(cur_epoch))
            with open(os.path.join(self.active_label_dir, 'grad_embeddings_epoch_{}.pkl'.format(cur_epoch)), 'rb') as f:
                grad_embeddings = pickle.load(f)
        else:
            for cur_it in range(total_it_each_epoch):
                try:
                    unlabelled_batch = next(val_dataloader_iter)
                except StopIteration:
                    unlabelled_dataloader_iter = iter(val_loader)
                    unlabelled_batch = next(unlabelled_dataloader_iter)
                with torch.no_grad():
                    load_data_to_gpu(unlabelled_batch)
                    pred_dicts, _= self.model(unlabelled_batch)
                    for batch_inx in range(len(pred_dicts)):
                        self.save_points(unlabelled_batch['frame_id'][batch_inx], pred_dicts[batch_inx])
                        # final_full_cls_logits = pred_dicts[batch_inx]['pred_logits']
                    # did not apply batch mask -> directly output
                    rpn_preds = pred_dicts[0]['rpn_preds']
                    batch_size = rpn_preds.shape[0]
                    # argmax over the class dimension = hard hypothetical RPN labels
                    rpn_preds = torch.argmax(rpn_preds.view(batch_size, -1, self.model.dense_head.num_class), -1)
                    rpn_preds_results.append(rpn_preds.cpu())
                    # cls_results.append(torch.mean(torch.sigmoid(pred_dicts[0]['rcnn_cls']), 0).view(batch_size, -1, 1))
                    # reg_results.append(torch.mean(pred_dicts[0]['rcnn_reg'], 0).view(batch_size, -1, 7))
                if self.rank == 0:
                    pbar.update()
                    # pbar.set_postfix(disp_dict)
                    pbar.refresh()
            if self.rank == 0:
                pbar.close()
            del rpn_preds
            del pred_dicts
            torch.cuda.empty_cache()
            print('start stacking cls and reg results as gt...')
            # cls_results = torch.cat(cls_results, 0)
            # reg_results = torch.cat(reg_results, 0)
            rpn_preds_results = torch.cat(rpn_preds_results, 0)

        print('retrieving grads on the training mode...')
        self.model.train()
        # fc_grad_embedding_list = []
        rpn_grad_embedding_list = []
        # preds_num_bbox_list = []
        # batch_size=1 so each backward() yields one frame's gradient
        grad_loader = DataLoader(
            self.unlabelled_set, batch_size=1, pin_memory=True, num_workers=self.unlabelled_loader.num_workers,
            shuffle=False, collate_fn=self.unlabelled_set.collate_batch,
            drop_last=False, sampler=self.unlabelled_loader.sampler, timeout=0
        )
        grad_dataloader_iter = iter(grad_loader)
        total_it_each_epoch = len(grad_loader)
        if self.rank == 0:
            pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar,
                             desc='inf_grads_unlabelled_set_epoch_%d' % cur_epoch, dynamic_ncols=True)
        for cur_it in range(total_it_each_epoch):
            try:
                unlabelled_batch = next(grad_dataloader_iter)
            except StopIteration:
                unlabelled_dataloader_iter = iter(grad_loader)
                unlabelled_batch = next(grad_dataloader_iter)
            load_data_to_gpu(unlabelled_batch)
            pred_dicts, _, _= self.model(unlabelled_batch)
            # get sample wise grads
            # rcnn_cls_preds_per_sample = pred_dicts['rcnn_cls']
            # rcnn_cls_gt_per_sample = cls_results[cur_it, :]
            # rcnn_reg_preds_per_sample = pred_dicts['rcnn_reg']
            # rcnn_reg_gt_per_sample = reg_results[cur_it, :]
            # cls_loss, _ = self.model.roi_head.get_box_cls_layer_loss({'rcnn_cls': rcnn_cls_preds_per_sample, 'rcnn_cls_labels': rcnn_cls_gt_per_sample})
            # reg_loss = self.model.roi_head.get_box_reg_layer_loss({'rcnn_reg': rcnn_reg_preds_per_sample, 'reg_sample_targets': rcnn_reg_gt_per_sample})
            # del rcnn_reg_preds_per_sample, rcnn_reg_gt_per_sample
            # torch.cuda.empty_cache()
            # assign the rpn hypothetical labels for loss calculation
            # self.model.dense_head.forward_red_dict['box_cls_labels'] = rpn_preds_results[cur_it, :].cuda().unsqueeze(0)
            new_data = {'box_cls_labels': rpn_preds_results[cur_it, :].cuda().unsqueeze(0), 'cls_preds': pred_dicts['rpn_preds']}
            rpn_loss = self.model.dense_head.get_cls_layer_loss(new_data=new_data)[0]
            # since the rpn head does not have dropout, we cannot get MC dropout labels for regression
            loss = rpn_loss
            self.model.zero_grad()
            loss.backward()
            # fc_grads = self.model.roi_head.shared_fc_layer[4].weight.grad.clone().detach().cpu()
            # fc_grad_embedding_list.append(fc_grads)
            rpn_grads = self.model.dense_head.conv_cls.weight.grad.clone().detach().cpu()
            rpn_grad_embedding_list.append(rpn_grads)
            # preds_num_bbox = (torch.norm(self.model.roi_head.reg_layers[-1].weight.grad, dim=0) > 0).sum()
            # preds_num_bbox_list.append(preds_num_bbox)
            # del fc_grads
            # del rpn_grads
            # torch.cuda.empty_cache()
            if self.rank == 0:
                pbar.update()
                # pbar.set_postfix(disp_dict)
                pbar.refresh()
            # if cur_it > 100:
            #     print('check here')
            #     break
        if self.rank == 0:
            pbar.close()
        rpn_grad_embeddings = torch.stack(rpn_grad_embedding_list, 0)
        del rpn_grad_embedding_list
        gc.collect()
        num_sample = rpn_grad_embeddings.shape[0]
        rpn_grad_embeddings = rpn_grad_embeddings.view(num_sample, -1)
        start_time = time.time()
        # k-means++ seeding alone (no full k-means fit) picks diverse,
        # high-magnitude gradient embeddings -- the BADGE acquisition rule
        _, selected_rpn_idx = kmeans_plusplus(rpn_grad_embeddings.numpy(), n_clusters=self.cfg.ACTIVE_TRAIN.SELECT_NUMS, random_state=0)
        print("--- kmeans++ running time: %s seconds for rpn grads---" % (time.time() - start_time))
        selected_idx = selected_rpn_idx
        self.model.zero_grad()
        self.model.eval()
        selected_frames = [self.unlabelled_set.sample_id_list[idx] for idx in selected_idx]
        return selected_frames
| 9,089 | 43.558824 | 158 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/query_strategies/entropy_sampling.py |
import torch
from .strategy import Strategy
from pcdet.models import load_data_to_gpu
import torch.nn.functional as F
import tqdm
class EntropySampling(Strategy):
    """Entropy-based uncertainty sampling over per-box classification logits."""
    def __init__(self, model, labelled_loader, unlabelled_loader, rank, active_label_dir, cfg):
        super(EntropySampling, self).__init__(model, labelled_loader, unlabelled_loader, rank, active_label_dir, cfg)

    def query(self, leave_pbar=True, cur_epoch=None):
        """Return the SELECT_NUMS frames with the highest mean per-box entropy."""
        select_dic = {}

        # select_nums = cfg.ACTIVE_TRAIN.SELECT_NUMS
        val_dataloader_iter = iter(self.unlabelled_loader)
        val_loader = self.unlabelled_loader
        total_it_each_epoch = len(self.unlabelled_loader)

        # feed forward the model
        if self.rank == 0:
            pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar,
                             desc='evaluating_unlabelled_set_epoch_%d' % cur_epoch, dynamic_ncols=True)
        self.model.eval()
        for cur_it in range(total_it_each_epoch):
            try:
                unlabelled_batch = next(val_dataloader_iter)
            except StopIteration:
                unlabelled_dataloader_iter = iter(val_loader)
                unlabelled_batch = next(unlabelled_dataloader_iter)
            with torch.no_grad():
                load_data_to_gpu(unlabelled_batch)
                pred_dicts, _ = self.model(unlabelled_batch)
                for batch_inx in range(len(pred_dicts)):
                    self.save_points(unlabelled_batch['frame_id'][batch_inx], pred_dicts[batch_inx])
                    final_full_cls_logits = pred_dicts[batch_inx]['pred_logits']
                    # Shannon entropy of each box's softmax class distribution
                    box_values = \
                        -(F.softmax(final_full_cls_logits, dim=1) *
                          F.log_softmax(final_full_cls_logits, dim=1)).sum(dim=1)
                    """ Aggregate all the boxes values in one point cloud """
                    if self.cfg.ACTIVE_TRAIN.AGGREGATION == 'mean':
                        aggregated_values = torch.mean(box_values)
                    else:
                        raise NotImplementedError
                    select_dic[unlabelled_batch['frame_id'][batch_inx]] = aggregated_values
            if self.rank == 0:
                pbar.update()
                # pbar.set_postfix(disp_dict)
                pbar.refresh()
        if self.rank == 0:
            pbar.close()

        # sort and get selected_frames
        # ascending sort -> take the tail (highest-entropy frames)
        select_dic = dict(sorted(select_dic.items(), key=lambda item: item[1]))
        unlabelled_sample_num = len(select_dic.keys())
        selected_frames = list(select_dic.keys())[unlabelled_sample_num - self.cfg.ACTIVE_TRAIN.SELECT_NUMS:]
        return selected_frames
| 2,721 | 37.885714 | 117 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/query_strategies/strategy.py | import os
import pickle
import wandb
import torch
class Strategy:
    """Common scaffolding for active-learning query strategies.

    Holds the detector, the labelled/unlabelled loaders and per-frame
    bookkeeping (box counts, point statistics). Subclasses implement
    :meth:`query` and return the frame ids to annotate.
    """
    def __init__(self, model, labelled_loader, unlabelled_loader, rank, active_label_dir, cfg):
        self.cfg = cfg
        self.active_label_dir = active_label_dir
        self.rank = rank
        self.model = model
        self.labelled_loader = labelled_loader
        self.unlabelled_loader = unlabelled_loader
        self.labelled_set = labelled_loader.dataset
        self.unlabelled_set = unlabelled_loader.dataset
        # per-frame number of predicted boxes, filled via save_points()
        self.bbox_records = {}
        # creates mean/median/variance_point_records dicts dynamically
        self.point_measures = ['mean', 'median', 'variance']
        for met in self.point_measures:
            setattr(self, '{}_point_records'.format(met), {})
        # (frame_id, info) pairs for the unlabelled pool; attribute names
        # differ between the KITTI dataset and the other datasets
        if cfg.DATA_CONFIG.DATASET == 'KittiDataset':
            self.pairs = list(zip(self.unlabelled_set.sample_id_list, self.unlabelled_set.kitti_infos))
        else:
            self.pairs = list(zip(self.unlabelled_set.frame_ids, self.unlabelled_set.infos))

    def save_points(self, frame_id, batch_dict):
        """Record a frame's box count and per-box point statistics.

        ``batch_dict`` must provide 'num_bbox', 'mean_points',
        'median_points' and 'variance_points'.
        """
        # 'num_bbox': num_bbox,
        # 'mean_points': mean_points,
        # 'median_points': median_points,
        # 'variance_points': variance_points,
        self.bbox_records[frame_id] = batch_dict['num_bbox']
        self.mean_point_records[frame_id] = batch_dict['mean_points']
        self.median_point_records[frame_id] = batch_dict['median_points']
        self.variance_point_records[frame_id] = batch_dict['variance_points']

    def update_dashboard(self, cur_epoch=None, accumulated_iter=None):
        """Log per-class box counts and point statistics of the current
        selection (set by save_active_labels) to the wandb dashboard."""
        classes = list(self.selected_bbox[0].keys())
        total_bbox = 0
        for cls_idx in classes:
            num_cls_bbox = sum([i[cls_idx] for i in self.selected_bbox])
            wandb.log({'active_selection/num_bbox_{}'.format(cls_idx): num_cls_bbox}, step=accumulated_iter)
            total_bbox += num_cls_bbox
            if num_cls_bbox:
                for met in self.point_measures:
                    stats_point = sum([i[cls_idx] for i in getattr(self, 'selected_{}_points'.format(met))]) / len(getattr(self, 'selected_{}_points'.format(met)))
                    wandb.log({'active_selection/{}_points_{}'.format(met, cls_idx): stats_point}, step=accumulated_iter)
            else:
                # no boxes of this class selected -> log explicit zeros
                for met in self.point_measures:
                    wandb.log({'active_selection/{}_points_{}'.format(met, cls_idx): 0}, step=accumulated_iter)
        wandb.log({'active_selection/total_bbox_selected': total_bbox}, step=accumulated_iter)

    def save_active_labels(self, selected_frames=None, grad_embeddings=None, cur_epoch=None):
        """Persist the selection and/or gradient embeddings as pickles
        under ``active_label_dir``; also caches selected_* attributes used
        by update_dashboard()."""
        if selected_frames is not None:
            self.selected_bbox = [self.bbox_records[i] for i in selected_frames]
            for met in self.point_measures:
                setattr(self, 'selected_{}_points'.format(met), [getattr(self, '{}_point_records'.format(met))[i] for i in selected_frames])
            with open(os.path.join(self.active_label_dir, 'selected_frames_epoch_{}_rank_{}.pkl'.format(cur_epoch, self.rank)), 'wb') as f:
                pickle.dump({'frame_id': selected_frames, 'selected_mean_points': self.selected_mean_points, 'selected_bbox': self.selected_bbox, \
                    'selected_median_points': self.selected_median_points, 'selected_variance_points': self.selected_variance_points}, f)
            print('successfully saved selected frames for epoch {} for rank {}'.format(cur_epoch, self.rank))
        if grad_embeddings is not None:
            with open(os.path.join(self.active_label_dir, 'grad_embeddings_epoch_{}.pkl'.format(cur_epoch)), 'wb') as f:
                pickle.dump(grad_embeddings, f)
            print('successfully saved grad embeddings for epoch {}'.format(cur_epoch))

    def query(self, leave_pbar=True, cur_epoch=None):
        """Hook: subclasses return the list of frame ids to annotate."""
        pass
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/query_strategies/llal_sampling.py |
import torch
from .strategy import Strategy
from pcdet.models import load_data_to_gpu
import torch.nn.functional as F
import tqdm
class LLALSampling(Strategy):
    """'Learning Loss for Active Learning' selection: rank frames by the
    loss value predicted by the model's auxiliary loss-prediction module."""
    def __init__(self, model, labelled_loader, unlabelled_loader, rank, active_label_dir, cfg):
        super(LLALSampling, self).__init__(model, labelled_loader, unlabelled_loader, rank, active_label_dir, cfg)

    def query(self, leave_pbar=True, cur_epoch=None):
        """Return the SELECT_NUMS frames with the highest predicted loss."""
        select_dic = {}

        # select_nums = cfg.ACTIVE_TRAIN.SELECT_NUMS
        val_dataloader_iter = iter(self.unlabelled_loader)
        val_loader = self.unlabelled_loader
        total_it_each_epoch = len(self.unlabelled_loader)

        # feed forward the model
        if self.rank == 0:
            pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar,
                             desc='evaluating_unlabelled_set_epoch_%d' % cur_epoch, dynamic_ncols=True)
        self.model.eval()
        for cur_it in range(total_it_each_epoch):
            try:
                unlabelled_batch = next(val_dataloader_iter)
            except StopIteration:
                unlabelled_dataloader_iter = iter(val_loader)
                unlabelled_batch = next(unlabelled_dataloader_iter)
            with torch.no_grad():
                load_data_to_gpu(unlabelled_batch)
                pred_dicts, _ = self.model(unlabelled_batch)
                for batch_inx in range(len(pred_dicts)):
                    self.save_points(unlabelled_batch['frame_id'][batch_inx], pred_dicts[batch_inx])
                    # NOTE(review): 'loss_predictions' is indexed with batch_inx a
                    # second time after pred_dicts[batch_inx] -- looks like double
                    # batch indexing; confirm the tensor layout of that entry.
                    pred_loss = pred_dicts[batch_inx]['loss_predictions'][batch_inx]
                    select_dic[unlabelled_batch['frame_id'][batch_inx]] = pred_loss
            if self.rank == 0:
                pbar.update()
                # pbar.set_postfix(disp_dict)
                pbar.refresh()
        if self.rank == 0:
            pbar.close()

        # sort and get selected_frames
        # low to high
        select_dic = dict(sorted(select_dic.items(), key=lambda item: item[1]))
        unlabelled_sample_num = len(select_dic.keys())
        # fetch the samples with highest pred loss
        selected_frames = list(select_dic.keys())[unlabelled_sample_num - self.cfg.ACTIVE_TRAIN.SELECT_NUMS:]
        return selected_frames
| 2,318 | 36.403226 | 114 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/query_strategies/confidence_sampling.py |
import torch
from .strategy import Strategy
from pcdet.models import load_data_to_gpu
import torch.nn.functional as F
import tqdm
class ConfidenceSampling(Strategy):
    """Uncertainty sampling over the model's 'confidence' outputs.

    Despite the name, the score is the Shannon entropy of the softmax over
    the per-box 'confidence' logits (same formula as EntropySampling, but on
    a different prediction key).
    """
    def __init__(self, model, labelled_loader, unlabelled_loader, rank, active_label_dir, cfg):
        super(ConfidenceSampling, self).__init__(model, labelled_loader, unlabelled_loader, rank, active_label_dir, cfg)

    def query(self, leave_pbar=True, cur_epoch=None):
        """Return the SELECT_NUMS frames with the highest aggregated score."""
        select_dic = {}

        # select_nums = cfg.ACTIVE_TRAIN.SELECT_NUMS
        val_dataloader_iter = iter(self.unlabelled_loader)
        val_loader = self.unlabelled_loader
        total_it_each_epoch = len(self.unlabelled_loader)

        # feed forward the model
        if self.rank == 0:
            pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar,
                             desc='evaluating_unlabelled_set_epoch_%d' % cur_epoch, dynamic_ncols=True)
        self.model.eval()
        for cur_it in range(total_it_each_epoch):
            try:
                unlabelled_batch = next(val_dataloader_iter)
            except StopIteration:
                unlabelled_dataloader_iter = iter(val_loader)
                unlabelled_batch = next(unlabelled_dataloader_iter)
            with torch.no_grad():
                load_data_to_gpu(unlabelled_batch)
                pred_dicts, _ = self.model(unlabelled_batch)
                for batch_inx in range(len(pred_dicts)):
                    self.save_points(unlabelled_batch['frame_id'][batch_inx], pred_dicts[batch_inx])
                    final_full_cls_logits = pred_dicts[batch_inx]['confidence']
                    # per-box entropy of the softmax over the confidence logits
                    box_values = \
                        -(F.softmax(final_full_cls_logits, dim=1) *
                          F.log_softmax(final_full_cls_logits, dim=1)).sum(dim=1)
                    """ Aggregate all the boxes values in one point cloud """
                    if self.cfg.ACTIVE_TRAIN.AGGREGATION == 'mean':
                        aggregated_values = torch.mean(box_values)
                    else:
                        raise NotImplementedError
                    select_dic[unlabelled_batch['frame_id'][batch_inx]] = aggregated_values
            if self.rank == 0:
                pbar.update()
                # pbar.set_postfix(disp_dict)
                pbar.refresh()
        if self.rank == 0:
            pbar.close()

        # sort and get selected_frames
        # ascending sort -> take the tail (highest-score frames)
        select_dic = dict(sorted(select_dic.items(), key=lambda item: item[1]))
        unlabelled_sample_num = len(select_dic.keys())
        selected_frames = list(select_dic.keys())[unlabelled_sample_num - self.cfg.ACTIVE_TRAIN.SELECT_NUMS:]
        return selected_frames
| 2,726 | 37.957143 | 120 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/query_strategies/random_sampling.py | import random
from .strategy import Strategy
import tqdm
import torch
from pcdet.models import load_data_to_gpu
class RandomSampling(Strategy):
    """Uniformly random frame selection -- the active-learning baseline."""
    def __init__(self, model, labelled_loader, unlabelled_loader, rank, active_label_dir, cfg):
        super(RandomSampling, self).__init__(model, labelled_loader, unlabelled_loader, rank, active_label_dir, cfg)

    def query(self, leave_pbar=True, cur_epoch=None):
        """Return SELECT_NUMS frames drawn uniformly at random.

        The forward pass below exists only to populate per-frame statistics
        via save_points(); it does not influence the selection itself.
        """
        if len(self.bbox_records) == 0:
            # select_nums = cfg.ACTIVE_TRAIN.SELECT_NUMS
            val_dataloader_iter = iter(self.unlabelled_loader)
            val_loader = self.unlabelled_loader
            total_it_each_epoch = len(self.unlabelled_loader)
            all_frames = []

            # feed forward the model
            if self.rank == 0:
                pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar,
                                 desc='going through_unlabelled_set_epoch_%d' % cur_epoch, dynamic_ncols=True)
            self.model.eval()
            for cur_it in range(total_it_each_epoch):
                try:
                    unlabelled_batch = next(val_dataloader_iter)
                except StopIteration:
                    unlabelled_dataloader_iter = iter(val_loader)
                    unlabelled_batch = next(unlabelled_dataloader_iter)
                with torch.no_grad():
                    load_data_to_gpu(unlabelled_batch)
                    pred_dicts, _ = self.model(unlabelled_batch)
                    for batch_inx in range(len(pred_dicts)):
                        self.save_points(unlabelled_batch['frame_id'][batch_inx], pred_dicts[batch_inx])
                        all_frames.append(unlabelled_batch['frame_id'][batch_inx])
                if self.rank == 0:
                    pbar.update()
                    # pbar.set_postfix(disp_dict)
                    pbar.refresh()
            if self.rank == 0:
                pbar.close()

        # NOTE(review): all_frames is only bound when the guard above was taken;
        # a second call (bbox_records already filled) would raise NameError --
        # confirm query() is invoked at most once per strategy instance.
        random.shuffle(all_frames)
        selected_frames = all_frames[:self.cfg.ACTIVE_TRAIN.SELECT_NUMS]
        # selected_frames, _ = zip(*self.pairs[:self.cfg.ACTIVE_TRAIN.SELECT_NUMS])
        return selected_frames
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/query_strategies/coreset_sampling.py |
import torch
from .strategy import Strategy
from pcdet.models import load_data_to_gpu
import torch.nn.functional as F
import tqdm
class CoresetSampling(Strategy):
    """Core-set selection (k-center greedy) over model feature embeddings."""
    def __init__(self, model, labelled_loader, unlabelled_loader, rank, active_label_dir, cfg):
        super(CoresetSampling, self).__init__(model, labelled_loader, unlabelled_loader, rank, active_label_dir, cfg)
def pairwise_squared_distances(self, x, y):
# x: N * D * D * 1
# y: M * D * D * 1
# return: N * M
assert (len(x.shape)) > 1
assert (len(y.shape)) > 1
n = x.shape[0]
m = y.shape[0]
x = x.view(n, -1)
y = y.view(m, -1)
x_norm = (x**2).sum(1).view(n, 1)
y_t = y.permute(1, 0).contiguous()
y_norm = (y**2).sum(1).view(1, m)
dist = x_norm + y_norm - 2.0 * torch.mm(x, y_t)
dist[dist != dist] = 0 # replace nan values with 0
return torch.clamp(dist, 0.0, float('inf'))
def furthest_first(self, X, X_set, n):
m = X.shape[0]
m_label = X_set.shape[0]
X = X.view(m, -1)
X_set = X_set.view(m_label, -1)
# dist_ctr: num_unlabel * num_label
dist_ctr = self.pairwise_squared_distances(X, X_set)
# min_dist: [num_unlabel]-D -> record the nearest distance to the orig X_set
min_dist = dist_ctr.mean(1)
idxs = []
for i in range(n):
# choose the furthest to orig X_set
idx = torch.argmax(min_dist)
idxs.append(idx)
# no need to calculate if the last iter
if i < (n-1):
dist_new_ctr = self.pairwise_squared_distances(X, X[idx, :].unsqueeze(0))
for j in range(m):
min_dist[j] = min(min_dist[j], dist_new_ctr[j, 0])
return idxs
def query(self, leave_pbar=True, cur_epoch=None):
select_dic = {}
self.model.eval()
unlabeled_embeddings = []
labeled_embeddings = []
unlabeled_frame_ids = []
# feed unlabeled data forward the model
total_it_each_epoch = len(self.unlabelled_loader)
if self.rank == 0:
pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar,
desc='evaluating_unlabelled_set_epoch_%d' % cur_epoch, dynamic_ncols=True)
# select_nums = cfg.ACTIVE_TRAIN.SELECT_NUMS
val_dataloader_iter = iter(self.unlabelled_loader)
val_loader = self.unlabelled_loader
for cur_it in range(total_it_each_epoch):
try:
unlabelled_batch = next(val_dataloader_iter)
except StopIteration:
unlabelled_dataloader_iter = iter(val_loader)
unlabelled_batch = next(unlabelled_dataloader_iter)
with torch.no_grad():
load_data_to_gpu(unlabelled_batch)
pred_dicts, _ = self.model(unlabelled_batch)
batch_size = len(pred_dicts)
for batch_inx in range(len(pred_dicts)):
self.save_points(unlabelled_batch['frame_id'][batch_inx], pred_dicts[batch_inx])
unlabeled_frame_ids.append(unlabelled_batch['frame_id'][batch_inx])
embeddings = pred_dicts[0]['embeddings'].view(-1, 128, self.cfg.MODEL.ROI_HEAD.SHARED_FC[-1])
unlabeled_embeddings.append(embeddings)
if self.rank == 0:
pbar.update()
# pbar.set_postfix(disp_dict)
pbar.refresh()
if self.rank == 0:
pbar.close()
# feed labeled data forward the model
total_it_each_epoch = len(self.labelled_loader)
if self.rank == 0:
pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar,
desc='evaluating_labelled_set_epoch_%d' % cur_epoch, dynamic_ncols=True)
# select_nums = cfg.ACTIVE_TRAIN.SELECT_NUMS
val_dataloader_iter = iter(self.labelled_loader)
val_loader = self.labelled_loader
for cur_it in range(total_it_each_epoch):
try:
labelled_batch = next(val_dataloader_iter)
except StopIteration:
labelled_dataloader_iter = iter(val_loader)
labelled_batch = next(labelled_dataloader_iter)
with torch.no_grad():
load_data_to_gpu(labelled_batch)
pred_dicts, _ = self.model(labelled_batch)
batch_size = len(pred_dicts)
embeddings = pred_dicts[0]['embeddings'].view(-1, 128, self.cfg.MODEL.ROI_HEAD.SHARED_FC[-1])
labeled_embeddings.append(embeddings)
if self.rank == 0:
pbar.update()
# pbar.set_postfix(disp_dict)
pbar.refresh()
if self.rank == 0:
pbar.close()
print('** [Coreset] start searching...**')
selected_idx = self.furthest_first(torch.cat(unlabeled_embeddings, 0), torch.cat(labeled_embeddings, 0), n=self.cfg.ACTIVE_TRAIN.SELECT_NUMS)
selected_frames = [unlabeled_frame_ids[idx] for idx in selected_idx]
return selected_frames
| 5,359 | 39.300752 | 149 | py |
from itertools import accumulate
import torch
import os
import glob
import tqdm
import numpy as np
import torch.distributed as dist
from pcdet.config import cfg
from pcdet.models import load_data_to_gpu
from pcdet.utils import common_utils, commu_utils, memory_ensemble_utils
import pickle as pkl
import re
from pcdet.datasets import build_active_dataloader
from .. import query_strategies
ACTIVE_LABELS = {}
NEW_ACTIVE_LABELS = {}
def check_already_exist_active_label(active_label_dir, start_epoch):
    """
    if we continue training, use this to directly
    load pseudo labels from existing result pkls

    if one exists, load the latest result pkl into ACTIVE_LABELS;
    otherwise return None

    Args:
        active_label_dir: dir to save pseudo label results pkls.
        start_epoch: start epoch
    Returns:
        path of the pkl that was loaded into ACTIVE_LABELS, or None
    """
    # support init ps_label given by cfg
    if start_epoch == 0 and cfg.ACTIVE_TRAIN.get('INITIAL_SELECTION', None):
        if os.path.exists(cfg.ACTIVE_TRAIN.INITIAL_SELECTION):
            init_active_label = pkl.load(open(cfg.ACTIVE_TRAIN.INITIAL_SELECTION, 'rb'))
            ACTIVE_LABELS.update(init_active_label)

            if cfg.LOCAL_RANK == 0:
                ps_path = os.path.join(active_label_dir, "ps_label_e0.pkl")
                with open(ps_path, 'wb') as f:
                    pkl.dump(ACTIVE_LABELS, f)

            return cfg.ACTIVE_TRAIN.INITIAL_SELECTION

    active_label_list = glob.glob(os.path.join(active_label_dir, 'active_label_e*.pkl'))
    if len(active_label_list) == 0:
        return None

    # newest first, so the most recent eligible checkpoint wins
    active_label_list.sort(key=os.path.getmtime, reverse=True)
    for cur_pkl in active_label_list:
        # BUGFIX: the regex previously looked for 'ps_label_e(.*).pkl', which
        # never matches the 'active_label_e*.pkl' files globbed above, so the
        # assert below always failed when resuming.
        num_epoch = re.findall('active_label_e(.*).pkl', cur_pkl)
        assert len(num_epoch) == 1

        # load the most recent label file at or before start_epoch
        if int(num_epoch[0]) <= start_epoch:
            latest_ps_label = pkl.load(open(cur_pkl, 'rb'))
            ACTIVE_LABELS.update(latest_ps_label)
            return cur_pkl

    return None
def save_active_label_epoch(model, val_loader, rank, leave_pbar, active_label_dir, cur_epoch):
    """
    Generate active labels for every batch of val_loader with the given model,
    then gather and dump them (see gather_and_dump_pseudo_label_result).

    Args:
        model: model to predict result for active label
        val_loader: data_loader to predict labels
        rank: process rank
        leave_pbar: tqdm bar controller
        active_label_dir: dir to save active label
        cur_epoch: current epoch number (used in the dump filename)
    """
    val_dataloader_iter = iter(val_loader)
    total_it_each_epoch = len(val_loader)

    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar,
                         desc='generate_active_e%d' % cur_epoch, dynamic_ncols=True)

    model.eval()

    for cur_it in range(total_it_each_epoch):
        try:
            unlabelled_batch = next(val_dataloader_iter)
        except StopIteration:
            # BUGFIX: rebind the loop iterator; a throwaway local iterator was
            # created before, so every later step restarted at the first batch.
            val_dataloader_iter = iter(val_loader)
            unlabelled_batch = next(val_dataloader_iter)

        # generate boxes for the unlabelled batch without tracking gradients
        with torch.no_grad():
            load_data_to_gpu(unlabelled_batch)
            pred_dicts, recall_dicts = model(unlabelled_batch)

        # record labels for this batch into NEW_ACTIVE_LABELS
        save_active_label_batch(
            unlabelled_batch, pred_dicts=pred_dicts
        )

        if rank == 0:
            pbar.update()
            pbar.refresh()

    if rank == 0:
        pbar.close()

    gather_and_dump_pseudo_label_result(rank, active_label_dir, cur_epoch)
def gather_and_dump_pseudo_label_result(rank, ps_label_dir, cur_epoch):
    """Merge NEW_ACTIVE_LABELS across ranks, dump them on rank 0, then
    promote them to be the current ACTIVE_LABELS."""
    commu_utils.synchronize()

    if dist.is_initialized():
        # collect every rank's partial label dict and merge them
        gathered_parts = commu_utils.all_gather(NEW_ACTIVE_LABELS)
        merged = {}
        for part in gathered_parts:
            merged.update(part)
        NEW_ACTIVE_LABELS.update(merged)

    # dump new pseudo label to given dir
    if rank == 0:
        ps_path = os.path.join(ps_label_dir, "active_label_e{}.pkl".format(cur_epoch))
        with open(ps_path, 'wb') as f:
            pkl.dump(NEW_ACTIVE_LABELS, f)

    commu_utils.synchronize()

    # the new labels replace the current active-label table
    ACTIVE_LABELS.clear()
    ACTIVE_LABELS.update(NEW_ACTIVE_LABELS)
    NEW_ACTIVE_LABELS.clear()
NEW_ACTIVE_LABELS.clear()
def save_active_label_batch(input_dict,
                            pred_dicts=None,
                            need_update=True):
    """
    Record labels for each frame of the given batch into the module-level
    NEW_ACTIVE_LABELS dict, keyed by frame id.

    Args:
        input_dict: batch data read from dataloader; must contain
            'frame_id' and 'gt_boxes'
        pred_dicts: per-frame prediction dicts; only its length (the batch
            size) is used here
        need_update: kept for interface compatibility; currently unused
    """
    batch_size = len(pred_dicts)
    for b_idx in range(batch_size):
        # NOTE(review): the whole batch's 'gt_boxes' tensor is stored for
        # every frame id (not input_dict['gt_boxes'][b_idx]) -- confirm this
        # is what downstream consumers expect.
        NEW_ACTIVE_LABELS[input_dict['frame_id'][b_idx]] = input_dict['gt_boxes']
    return
def load_ps_label(frame_id):
    """
    :param frame_id: file name of pseudo label
    :return gt_box: loaded gt boxes (N, 9) [x, y, z, w, l, h, ry, label, scores]
    """
    if frame_id not in ACTIVE_LABELS:
        raise ValueError('Cannot find pseudo label for frame: %s' % frame_id)
    return ACTIVE_LABELS[frame_id]['gt_boxes']
def select_active_labels(model,
                         labelled_loader,
                         unlabelled_loader,
                         rank,
                         logger,
                         method,
                         leave_pbar=True,
                         cur_epoch=None,
                         dist_train=False,
                         active_label_dir=None,
                         accumulated_iter=None):
    """Run one round of active selection and rebuild the train dataloaders.

    Builds the query strategy for `method`, obtains the frames to label
    (resuming a previously saved selection for this epoch if one exists),
    moves those frames from the unlabelled pool into the labelled pool and
    returns freshly built labelled/unlabelled dataloaders.
    """
    strategy = query_strategies.build_strategy(method=method, model=model,
                                               labelled_loader=labelled_loader,
                                               unlabelled_loader=unlabelled_loader,
                                               rank=rank,
                                               active_label_dir=active_label_dir,
                                               cfg=cfg)

    selection_path = os.path.join(active_label_dir, 'selected_frames_epoch_{}.pkl'.format(cur_epoch))
    if os.path.isfile(selection_path):
        # if found, then resume
        print('found {} epoch saved selections...start resuming...'.format(cur_epoch))
        with open(selection_path, 'rb') as f:
            # BUGFIX: Unpickler.load() takes no arguments; the old code called
            # unpickler.load(f), which raised a TypeError on every resume.
            selected_frames = pkl.Unpickler(f).load()
    else:
        selected_frames = strategy.query(leave_pbar, cur_epoch)
        strategy.save_active_labels(selected_frames=selected_frames, cur_epoch=cur_epoch)
    strategy.update_dashboard(cur_epoch=cur_epoch, accumulated_iter=accumulated_iter)

    # get selected frame id list and infos (dataset-specific attribute names)
    if cfg.DATA_CONFIG.DATASET == 'KittiDataset':
        selected_id_list, selected_infos = list(strategy.labelled_set.sample_id_list), \
                                           list(strategy.labelled_set.kitti_infos)
        unselected_id_list, unselected_infos = list(strategy.unlabelled_set.sample_id_list), \
                                               list(strategy.unlabelled_set.kitti_infos)
    else:  # Waymo
        selected_id_list, selected_infos = list(strategy.labelled_set.frame_ids), \
                                           list(strategy.labelled_set.infos)
        unselected_id_list, unselected_infos = list(strategy.unlabelled_set.frame_ids), \
                                               list(strategy.unlabelled_set.infos)

    # move every newly selected frame from the unlabelled to the labelled pool
    for i in range(len(strategy.pairs)):
        if strategy.pairs[i][0] in selected_frames:
            selected_id_list.append(strategy.pairs[i][0])
            selected_infos.append(strategy.pairs[i][1])
            unselected_id_list.remove(strategy.pairs[i][0])
            unselected_infos.remove(strategy.pairs[i][1])

    selected_id_list, selected_infos, \
    unselected_id_list, unselected_infos = \
        tuple(selected_id_list), tuple(selected_infos), \
        tuple(unselected_id_list), tuple(unselected_infos)

    active_training = [selected_id_list, selected_infos, unselected_id_list, unselected_infos]

    # Create new dataloaders
    batch_size = unlabelled_loader.batch_size
    print("Batch_size of a single loader: %d" % (batch_size))
    workers = unlabelled_loader.num_workers

    # delete old sets and loaders, save space for new sets and loaders
    del labelled_loader.dataset, unlabelled_loader.dataset, \
        labelled_loader, unlabelled_loader

    labelled_set, unlabelled_set, \
    labelled_loader, unlabelled_loader, \
    sampler_labelled, sampler_unlabelled = build_active_dataloader(
        cfg.DATA_CONFIG,
        cfg.CLASS_NAMES,
        batch_size,
        dist_train,
        workers=workers,
        logger=logger,
        training=True,
        active_training=active_training
    )

    return labelled_loader, unlabelled_loader
import numpy as np
import scipy
import torch
import copy
from scipy.spatial import Delaunay
from ..ops.roiaware_pool3d import roiaware_pool3d_utils
from . import common_utils
def in_hull(p, hull):
    """
    :param p: (N, K) test points
    :param hull: (M, K) M corners of a box
    :return (N) bool
    """
    try:
        if not isinstance(hull, Delaunay):
            hull = Delaunay(hull)
        flag = hull.find_simplex(p) >= 0
    # BUGFIX: scipy.spatial.qhull is private/removed in modern SciPy; the
    # error class is publicly exposed as scipy.spatial.QhullError.
    except scipy.spatial.QhullError:
        # degenerate corner set (e.g. coplanar points): treat as empty hull
        print('Warning: not a hull %s' % str(hull))
        # BUGFIX: np.bool was removed in NumPy >= 1.24; use the builtin bool
        flag = np.zeros(p.shape[0], dtype=bool)
    return flag
def boxes_to_corners_3d(boxes3d):
    """Return the 8 corner coordinates of each 3D box.

        7 -------- 4
       /|         /|
      6 -------- 5 .
      | |        | |
      . 3 -------- 0
      |/         |/
      2 -------- 1

    Args:
        boxes3d:  (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center

    Returns:
        (N, 8, 3) corners, numpy if the input was numpy, else torch
    """
    boxes3d, is_numpy = common_utils.check_numpy_to_torch(boxes3d)

    # unit-cube corner offsets in the canonical order shown above
    unit_offsets = boxes3d.new_tensor((
        [1, 1, -1], [1, -1, -1], [-1, -1, -1], [-1, 1, -1],
        [1, 1, 1], [1, -1, 1], [-1, -1, 1], [-1, 1, 1],
    )) / 2

    # scale to box size, rotate around z by heading, then translate to center
    corners = boxes3d[:, None, 3:6] * unit_offsets[None, :, :]
    corners = common_utils.rotate_points_along_z(corners.view(-1, 8, 3), boxes3d[:, 6]).view(-1, 8, 3)
    corners = corners + boxes3d[:, None, 0:3]

    return corners.numpy() if is_numpy else corners
def mask_boxes_outside_range_numpy(boxes, limit_range, min_num_corners=1):
    """
    Args:
        boxes: (N, 7) [x, y, z, dx, dy, dz, heading, ...], (x, y, z) is the box center
        limit_range: [minx, miny, minz, maxx, maxy, maxz]
        min_num_corners: keep a box if at least this many corners lie inside

    Returns:
        (N,) bool mask of boxes to keep
    """
    if boxes.shape[1] > 7:
        boxes = boxes[:, 0:7]
    corners = boxes_to_corners_3d(boxes)  # (N, 8, 3)
    # a corner is "inside" when all 3 coordinates fall within the range
    corner_inside = np.logical_and(corners >= limit_range[0:3],
                                   corners <= limit_range[3:6]).all(axis=2)
    return corner_inside.sum(axis=1) >= min_num_corners  # (N)
def remove_points_in_boxes3d(points, boxes3d):
    """
    Args:
        points: (num_points, 3 + C)
        boxes3d: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center, each box DO NOT overlaps

    Returns:
        points that fall outside every box (same type as input)
    """
    boxes3d, is_numpy = common_utils.check_numpy_to_torch(boxes3d)
    points, is_numpy = common_utils.check_numpy_to_torch(points)
    # (N_boxes, N_points) membership matrix
    membership = roiaware_pool3d_utils.points_in_boxes_cpu(points[:, 0:3], boxes3d)
    # keep points that belong to no box at all
    kept = points[membership.sum(dim=0) == 0]
    return kept.numpy() if is_numpy else kept
def boxes3d_kitti_camera_to_lidar(boxes3d_camera, calib):
    """
    Args:
        boxes3d_camera: (N, 7) [x, y, z, l, h, w, r] in rect camera coords
        calib:

    Returns:
        boxes3d_lidar: [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
    """
    cam_boxes = copy.deepcopy(boxes3d_camera)
    xyz_camera = cam_boxes[:, 0:3]
    r = cam_boxes[:, 6:7]
    l = cam_boxes[:, 3:4]
    h = cam_boxes[:, 4:5]
    w = cam_boxes[:, 5:6]

    xyz_lidar = calib.rect_to_lidar(xyz_camera)
    # lift z from the bottom face up to the geometric center
    xyz_lidar[:, 2] += h[:, 0] / 2
    return np.concatenate([xyz_lidar, l, w, h, -(r + np.pi / 2)], axis=-1)
def boxes3d_kitti_fakelidar_to_lidar(boxes3d_lidar):
    """
    Args:
        boxes3d_fakelidar: (N, 7) [x, y, z, w, l, h, r] in old LiDAR coordinates, z is bottom center

    Returns:
        boxes3d_lidar: [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
    """
    boxes = copy.deepcopy(boxes3d_lidar)
    w = boxes[:, 3:4]
    l = boxes[:, 4:5]
    h = boxes[:, 5:6]
    r = boxes[:, 6:7]
    # shift z from bottom center up to the geometric center
    boxes[:, 2] += h[:, 0] / 2
    return np.concatenate([boxes[:, 0:3], l, w, h, -(r + np.pi / 2)], axis=-1)
def boxes3d_kitti_lidar_to_fakelidar(boxes3d_lidar):
    """
    Args:
        boxes3d_lidar: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center

    Returns:
        boxes3d_fakelidar: [x, y, z, w, l, h, r] in old LiDAR coordinates, z is bottom center
    """
    boxes = copy.deepcopy(boxes3d_lidar)
    dx = boxes[:, 3:4]
    dy = boxes[:, 4:5]
    dz = boxes[:, 5:6]
    heading = boxes[:, 6:7]
    # drop z from the geometric center down to the bottom center
    boxes[:, 2] -= dz[:, 0] / 2
    return np.concatenate([boxes[:, 0:3], dy, dx, dz, -heading - np.pi / 2], axis=-1)
def enlarge_box3d(boxes3d, extra_width=(0, 0, 0)):
    """
    Args:
        boxes3d: [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
        extra_width: [extra_x, extra_y, extra_z]

    Returns:
        enlarged boxes as a torch tensor (same centers, grown extents)
    """
    boxes3d, is_numpy = common_utils.check_numpy_to_torch(boxes3d)
    enlarged = boxes3d.clone()
    # grow only the size columns; center and heading stay untouched
    enlarged[:, 3:6] = enlarged[:, 3:6] + boxes3d.new_tensor(extra_width)[None, :]
    return enlarged
def boxes3d_lidar_to_kitti_camera(boxes3d_lidar, calib):
    """
    :param boxes3d_lidar: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
    :param calib:
    :return:
        boxes3d_camera: (N, 7) [x, y, z, l, h, w, r] in rect camera coords
    """
    lidar_boxes = copy.deepcopy(boxes3d_lidar)
    xyz_lidar = lidar_boxes[:, 0:3]
    l = lidar_boxes[:, 3:4]
    w = lidar_boxes[:, 4:5]
    h = lidar_boxes[:, 5:6]
    r = lidar_boxes[:, 6:7]

    # move the reference point from box center down to the bottom center
    xyz_lidar[:, 2] -= h.reshape(-1) / 2
    xyz_cam = calib.lidar_to_rect(xyz_lidar)
    # camera yaw: negate the lidar heading and rotate by -pi/2
    r = -r - np.pi / 2
    return np.concatenate([xyz_cam, l, h, w, r], axis=-1)
def boxes3d_to_corners3d_kitti_camera(boxes3d, bottom_center=True):
    """
    :param boxes3d: (N, 7) [x, y, z, l, h, w, ry] in camera coords, see the definition of ry in KITTI dataset
    :param bottom_center: whether y is on the bottom center of object
    :return: corners3d: (N, 8, 3)
        7 -------- 4
       /|         /|
      6 -------- 5 .
      | |        | |
      . 3 -------- 0
      |/         |/
      2 -------- 1
    """
    boxes_num = boxes3d.shape[0]
    l, h, w = boxes3d[:, 3], boxes3d[:, 4], boxes3d[:, 5]
    # corner offsets in the canonical order above (camera frame: x right,
    # y down, z forward); each array is (N, 8) after the transpose
    x_corners = np.array([l / 2., l / 2., -l / 2., -l / 2., l / 2., l / 2., -l / 2., -l / 2], dtype=np.float32).T
    z_corners = np.array([w / 2., -w / 2., -w / 2., w / 2., w / 2., -w / 2., -w / 2., w / 2.], dtype=np.float32).T
    if bottom_center:
        # box origin on the bottom face: bottom corners at y=0, top at y=-h
        y_corners = np.zeros((boxes_num, 8), dtype=np.float32)
        y_corners[:, 4:8] = -h.reshape(boxes_num, 1).repeat(4, axis=1)  # (N, 8)
    else:
        # box origin at the geometric center: corners at +/- h/2
        y_corners = np.array([h / 2., h / 2., h / 2., h / 2., -h / 2., -h / 2., -h / 2., -h / 2.], dtype=np.float32).T

    # per-box rotation matrix around the camera y axis by ry
    ry = boxes3d[:, 6]
    zeros, ones = np.zeros(ry.size, dtype=np.float32), np.ones(ry.size, dtype=np.float32)
    rot_list = np.array([[np.cos(ry), zeros, -np.sin(ry)],
                         [zeros, ones, zeros],
                         [np.sin(ry), zeros, np.cos(ry)]])  # (3, 3, N)
    R_list = np.transpose(rot_list, (2, 0, 1))  # (N, 3, 3)

    temp_corners = np.concatenate((x_corners.reshape(-1, 8, 1), y_corners.reshape(-1, 8, 1),
                                   z_corners.reshape(-1, 8, 1)), axis=2)  # (N, 8, 3)
    rotated_corners = np.matmul(temp_corners, R_list)  # (N, 8, 3)
    x_corners, y_corners, z_corners = rotated_corners[:, :, 0], rotated_corners[:, :, 1], rotated_corners[:, :, 2]

    # translate the rotated offsets to each box's location
    x_loc, y_loc, z_loc = boxes3d[:, 0], boxes3d[:, 1], boxes3d[:, 2]

    x = x_loc.reshape(-1, 1) + x_corners.reshape(-1, 8)
    y = y_loc.reshape(-1, 1) + y_corners.reshape(-1, 8)
    z = z_loc.reshape(-1, 1) + z_corners.reshape(-1, 8)

    corners = np.concatenate((x.reshape(-1, 8, 1), y.reshape(-1, 8, 1), z.reshape(-1, 8, 1)), axis=2)

    return corners.astype(np.float32)
def boxes3d_kitti_camera_to_imageboxes(boxes3d, calib, image_shape=None):
    """
    :param boxes3d: (N, 7) [x, y, z, l, h, w, r] in rect camera coords
    :param calib:
    :return:
        box_2d_preds: (N, 4) [x1, y1, x2, y2]
    """
    corners3d = boxes3d_to_corners3d_kitti_camera(boxes3d)
    img_pts, _ = calib.rect_to_img(corners3d.reshape(-1, 3))
    img_corners = img_pts.reshape(-1, 8, 2)

    # axis-aligned 2D bounds of the 8 projected corners
    boxes2d_image = np.concatenate([img_corners.min(axis=1), img_corners.max(axis=1)], axis=1)
    if image_shape is not None:
        # clip to the image: columns 0/2 are u (width), 1/3 are v (height)
        boxes2d_image[:, 0] = np.clip(boxes2d_image[:, 0], a_min=0, a_max=image_shape[1] - 1)
        boxes2d_image[:, 1] = np.clip(boxes2d_image[:, 1], a_min=0, a_max=image_shape[0] - 1)
        boxes2d_image[:, 2] = np.clip(boxes2d_image[:, 2], a_min=0, a_max=image_shape[1] - 1)
        boxes2d_image[:, 3] = np.clip(boxes2d_image[:, 3], a_min=0, a_max=image_shape[0] - 1)

    return boxes2d_image
def boxes_iou_normal(boxes_a, boxes_b):
    """
    Args:
        boxes_a: (N, 4) [x1, y1, x2, y2]
        boxes_b: (M, 4) [x1, y1, x2, y2]

    Returns:
        (N, M) pairwise IoU matrix
    """
    assert boxes_a.shape[1] == boxes_b.shape[1] == 4
    # pairwise intersection rectangle via broadcasting
    left = torch.max(boxes_a[:, 0, None], boxes_b[None, :, 0])
    right = torch.min(boxes_a[:, 2, None], boxes_b[None, :, 2])
    top = torch.max(boxes_a[:, 1, None], boxes_b[None, :, 1])
    bottom = torch.min(boxes_a[:, 3, None], boxes_b[None, :, 3])
    intersection = torch.clamp_min(right - left, min=0) * torch.clamp_min(bottom - top, min=0)

    area_a = (boxes_a[:, 2] - boxes_a[:, 0]) * (boxes_a[:, 3] - boxes_a[:, 1])
    area_b = (boxes_b[:, 2] - boxes_b[:, 0]) * (boxes_b[:, 3] - boxes_b[:, 1])
    # clamp the union to avoid division by zero on degenerate boxes
    union = torch.clamp_min(area_a[:, None] + area_b[None, :] - intersection, min=1e-6)
    return intersection / union
def boxes3d_lidar_to_aligned_bev_boxes(boxes3d):
    """
    Args:
        boxes3d: (N, 7 + C) [x, y, z, dx, dy, dz, heading] in lidar coordinate

    Returns:
        aligned_bev_boxes: (N, 4) [x1, y1, x2, y2] in the above lidar coordinate
    """
    # fold heading into [0, pi) and take its magnitude
    rot_angle = common_utils.limit_period(boxes3d[:, 6], offset=0.5, period=np.pi).abs()
    # near 0 keep (dx, dy); near pi/2 the box is rotated, so swap the extents
    dims = torch.where(rot_angle[:, None] < np.pi / 4, boxes3d[:, [3, 4]], boxes3d[:, [4, 3]])
    half = dims / 2
    return torch.cat((boxes3d[:, 0:2] - half, boxes3d[:, 0:2] + half), dim=1)
def boxes3d_nearest_bev_iou(boxes_a, boxes_b):
    """
    Args:
        boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
        boxes_b: (N, 7) [x, y, z, dx, dy, dz, heading]

    Returns:
        pairwise IoU of the nearest axis-aligned BEV projections
    """
    bev_a = boxes3d_lidar_to_aligned_bev_boxes(boxes_a)
    bev_b = boxes3d_lidar_to_aligned_bev_boxes(boxes_b)
    return boxes_iou_normal(bev_a, bev_b)
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import box_utils
class SigmoidFocalClassificationLoss(nn.Module):
    """
    Sigmoid focal cross entropy loss.
    """

    def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
        """
        Args:
            gamma: Weighting parameter to balance loss for hard and easy examples.
            alpha: Weighting parameter to balance loss for positive and negative examples.
        """
        super(SigmoidFocalClassificationLoss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma

    @staticmethod
    def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
        """Numerically stable sigmoid cross entropy, equivalent to
        tf.nn.sigmoid_cross_entropy_with_logits:
            max(x, 0) - x * z + log(1 + exp(-abs(x)))

        Args:
            input: (B, #anchors, #classes) predicted logits
            target: (B, #anchors, #classes) one-hot targets
        Returns:
            (B, #anchors, #classes) element-wise loss without reduction
        """
        return torch.clamp(input, min=0) - input * target + \
            torch.log1p(torch.exp(-torch.abs(input)))

    def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
        """
        Args:
            input: (B, #anchors, #classes) predicted logits
            target: (B, #anchors, #classes) one-hot targets
            weights: (B, #anchors) anchor-wise weights
        Returns:
            (B, #anchors, #classes) weighted focal loss without reduction
        """
        prob = torch.sigmoid(input)
        # alpha balances positive vs negative examples
        alpha_factor = target * self.alpha + (1 - target) * (1 - self.alpha)
        # pt is small for well-classified examples and large for hard ones
        pt = target * (1.0 - prob) + (1.0 - target) * prob
        modulation = alpha_factor * torch.pow(pt, self.gamma)

        loss = modulation * self.sigmoid_cross_entropy_with_logits(input, target)

        # broadcast anchor weights over the class dimension when needed
        if weights.dim() == 2 or (weights.dim() == 1 and target.dim() == 2):
            weights = weights.unsqueeze(-1)

        assert weights.dim() == loss.dim()
        return loss * weights
class WeightedSmoothL1Loss(nn.Module):
    """
    Code-wise Weighted Smooth L1 Loss modified based on fvcore.nn.smooth_l1_loss
    https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
                  | 0.5 * x ** 2 / beta   if abs(x) < beta
    smoothl1(x) = |
                  | abs(x) - 0.5 * beta   otherwise,
    where x = input - target.
    """
    def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):
        """
        Args:
            beta: Scalar float.
                L1 to L2 change point.
                For beta values < 1e-5, L1 loss is computed.
            code_weights: (#codes) float list if not None.
                Code-wise weights.
        """
        super(WeightedSmoothL1Loss, self).__init__()
        self.beta = beta
        if code_weights is not None:
            self.code_weights = np.array(code_weights, dtype=np.float32)
            self.code_weights = torch.from_numpy(self.code_weights).cuda()
        else:
            # BUGFIX: previously left unset, so forward() raised
            # AttributeError whenever code_weights was not configured.
            self.code_weights = None

    @staticmethod
    def smooth_l1_loss(diff, beta):
        # for tiny beta fall back to plain L1 to avoid division blow-up
        if beta < 1e-5:
            loss = torch.abs(diff)
        else:
            n = torch.abs(diff)
            loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)

        return loss

    def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
        """
        Args:
            input: (B, #anchors, #codes) float tensor.
                Ecoded predicted locations of objects.
            target: (B, #anchors, #codes) float tensor.
                Regression targets.
            weights: (B, #anchors) float tensor if not None.

        Returns:
            loss: (B, #anchors) float tensor.
                Weighted smooth l1 loss without reduction.
        """
        target = torch.where(torch.isnan(target), input, target)  # ignore nan targets

        diff = input - target
        # code-wise weighting
        if self.code_weights is not None:
            diff = diff * self.code_weights.view(1, 1, -1)

        loss = self.smooth_l1_loss(diff, self.beta)

        # anchor-wise weighting
        if weights is not None:
            assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
            loss = loss * weights.unsqueeze(-1)

        return loss
class WeightedL1Loss(nn.Module):
    """Element-wise L1 loss with optional code-wise and anchor-wise weights."""

    def __init__(self, code_weights: list = None):
        """
        Args:
            code_weights: (#codes) float list if not None.
                Code-wise weights.
        """
        super(WeightedL1Loss, self).__init__()
        if code_weights is not None:
            self.code_weights = np.array(code_weights, dtype=np.float32)
            self.code_weights = torch.from_numpy(self.code_weights).cuda()
        else:
            # BUGFIX: previously left unset, so forward() raised
            # AttributeError whenever code_weights was not configured.
            self.code_weights = None

    def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
        """
        Args:
            input: (B, #anchors, #codes) float tensor.
                Ecoded predicted locations of objects.
            target: (B, #anchors, #codes) float tensor.
                Regression targets.
            weights: (B, #anchors) float tensor if not None.

        Returns:
            loss: (B, #anchors, #codes) float tensor.
                Weighted L1 loss without reduction.
        """
        target = torch.where(torch.isnan(target), input, target)  # ignore nan targets

        diff = input - target
        # code-wise weighting
        if self.code_weights is not None:
            diff = diff * self.code_weights.view(1, 1, -1)

        loss = torch.abs(diff)

        # anchor-wise weighting
        if weights is not None:
            assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
            loss = loss * weights.unsqueeze(-1)

        return loss
class WeightedCrossEntropyLoss(nn.Module):
    """
    Transform input to fit the fomation of PyTorch offical cross entropy loss
    with anchor-wise weighting.
    """
    def __init__(self):
        super(WeightedCrossEntropyLoss, self).__init__()

    def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
        """
        Args:
            input: (B, #anchors, #classes) predicted logits
            target: (B, #anchors, #classes) one-hot classification targets
            weights: (B, #anchors) anchor-wise weights
        Returns:
            (B, #anchors) weighted cross entropy loss without reduction
        """
        # torch's cross_entropy expects (B, #classes, #anchors) logits and
        # integer class indices, so rearrange and decode the one-hot target
        logits = input.permute(0, 2, 1)
        labels = target.argmax(dim=-1)
        return F.cross_entropy(logits, labels, reduction='none') * weights
def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
    """
    Args:
        pred_bbox3d: (N, 7) float Tensor.
        gt_bbox3d: (N, 7) float Tensor.

    Returns:
        corner_loss: (N) float Tensor.
    """
    assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]

    pred_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
    gt_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)

    # heading is ambiguous up to pi: also compare against the flipped gt box
    flipped_gt = gt_bbox3d.clone()
    flipped_gt[:, 6] += np.pi
    flipped_corners = box_utils.boxes_to_corners_3d(flipped_gt)

    # (N, 8): per-corner distance to the closer of the two orientations
    corner_dist = torch.min(torch.norm(pred_corners - gt_corners, dim=2),
                            torch.norm(pred_corners - flipped_corners, dim=2))
    # (N, 8)
    corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)

    return corner_loss.mean(dim=1)
def compute_fg_mask(gt_boxes2d, shape, downsample_factor=1, device=torch.device("cpu")):
    """
    Compute foreground mask for images
    Args:
        gt_boxes2d: (B, N, 4), 2D box labels (modified in place, as before)
        shape: torch.Size or tuple, Foreground mask desired shape
        downsample_factor: int, Downsample factor for image
        device: torch.device, Foreground mask desired device
    Returns:
        fg_mask (shape), Foreground mask
    """
    fg_mask = torch.zeros(shape, dtype=torch.bool, device=device)

    # snap the box corners outwards on the downsampled grid
    gt_boxes2d /= downsample_factor
    gt_boxes2d[:, :, :2] = torch.floor(gt_boxes2d[:, :, :2])
    gt_boxes2d[:, :, 2:] = torch.ceil(gt_boxes2d[:, :, 2:])
    gt_boxes2d = gt_boxes2d.long()

    # mark every pixel covered by any box as foreground
    batch, num_boxes = gt_boxes2d.shape[:2]
    for batch_idx in range(batch):
        for box_idx in range(num_boxes):
            u1, v1, u2, v2 = gt_boxes2d[batch_idx, box_idx]
            fg_mask[batch_idx, v1:v2, u1:u2] = True

    return fg_mask
def neg_loss_cornernet(pred, gt, mask=None):
    """
    Refer to https://github.com/tianweiy/CenterPoint.
    Modified focal loss. Exactly the same as CornerNet. Runs faster and costs a little bit more memory
    Args:
        pred: (batch x c x h x w)
        gt: (batch x c x h x w)
        mask: (batch x h x w)
    Returns:
        scalar focal loss
    """
    positives = gt.eq(1).float()
    negatives = gt.lt(1).float()
    # down-weight negatives that lie close to a ground-truth peak
    neg_weights = torch.pow(1 - gt, 4)

    pos_term = torch.log(pred) * torch.pow(1 - pred, 2) * positives
    neg_term = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * negatives

    if mask is not None:
        # restrict the loss (and the positive count) to the masked region
        mask = mask[:, None, :, :].float()
        pos_term = pos_term * mask
        neg_term = neg_term * mask
        num_pos = (positives.float() * mask).sum()
    else:
        num_pos = positives.float().sum()

    pos_term = pos_term.sum()
    neg_term = neg_term.sum()

    # normalize by the positive count; with no positives use only negatives
    if num_pos == 0:
        return -neg_term
    return -(pos_term + neg_term) / num_pos
class FocalLossCenterNet(nn.Module):
    """
    Refer to https://github.com/tianweiy/CenterPoint
    nn.Module wrapper around neg_loss_cornernet so the CenterNet focal loss
    can be used like the other loss modules in this file.
    """
    def __init__(self):
        super(FocalLossCenterNet, self).__init__()
        # delegate the actual computation to the module-level function
        self.neg_loss = neg_loss_cornernet

    def forward(self, out, target, mask=None):
        # out/target: (batch x c x h x w); mask: (batch x h x w) or None
        return self.neg_loss(out, target, mask=mask)
def _reg_loss(regr, gt_regr, mask):
"""
Refer to https://github.com/tianweiy/CenterPoint
L1 regression loss
Args:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
Returns:
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
loss = torch.abs(regr - gt_regr)
loss = loss.transpose(2, 0)
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
# else:
# # D x M x B
# loss = loss.reshape(loss.shape[0], -1)
# loss = loss / (num + 1e-4)
loss = loss / torch.clamp_min(num, min=1.0)
# import pdb; pdb.set_trace()
return loss
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _transpose_and_gather_feat(feat, ind):
    """Flatten feat (B, C, H, W) to (B, H*W, C) and gather rows at ind (B, M)."""
    channels_last = feat.permute(0, 2, 3, 1).contiguous()
    flat = channels_last.view(channels_last.size(0), -1, channels_last.size(3))
    return _gather_feat(flat, ind)
class RegLossCenterNet(nn.Module):
    """CenterNet L1 regression loss over gathered feature-map locations.

    Refer to https://github.com/tianweiy/CenterPoint
    """

    def __init__(self):
        super(RegLossCenterNet, self).__init__()

    def forward(self, output, mask, ind=None, target=None):
        """
        Args:
            output: (batch x dim x h x w) or (batch x max_objects)
            mask: (batch x max_objects)
            ind: (batch x max_objects); when given, output is gathered at these
                flattened spatial indices before computing the loss
            target: (batch x max_objects x dim)
        Returns:
            per-dimension masked L1 loss (see _reg_loss)
        """
        pred = output if ind is None else _transpose_and_gather_feat(output, ind)
        return _reg_loss(pred, target, mask)
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/utils/box_coder_utils.py | import numpy as np
import torch
class ResidualCoder(object):
    """Standard anchor-based residual encoder/decoder for 7-DoF boxes
    (x, y, z, dx, dy, dz, heading), with optional sin/cos heading encoding."""

    def __init__(self, code_size=7, encode_angle_by_sincos=False, **kwargs):
        super().__init__()
        self.code_size = code_size
        self.encode_angle_by_sincos = encode_angle_by_sincos
        # sin/cos encoding spends two channels on the heading instead of one
        if self.encode_angle_by_sincos:
            self.code_size += 1

    def encode_torch(self, boxes, anchors):
        """
        Args:
            boxes: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
            anchors: (N, 7 + C) [x, y, z, dx, dy, dz, heading or *[cos, sin], ...]
        Returns:
            (N, code_size) residual encodings
        """
        # NOTE: the clamp writes back into the caller's tensors (kept as-is)
        anchors[:, 3:6] = torch.clamp_min(anchors[:, 3:6], min=1e-5)
        boxes[:, 3:6] = torch.clamp_min(boxes[:, 3:6], min=1e-5)

        xa, ya, za, dxa, dya, dza, ra, *cas = torch.split(anchors, 1, dim=-1)
        xg, yg, zg, dxg, dyg, dzg, rg, *cgs = torch.split(boxes, 1, dim=-1)

        # centers are normalized by the anchor's bird's-eye diagonal / height
        diag = torch.sqrt(dxa ** 2 + dya ** 2)
        center_res = [(xg - xa) / diag, (yg - ya) / diag, (zg - za) / dza]
        size_res = [torch.log(dxg / dxa), torch.log(dyg / dya), torch.log(dzg / dza)]
        if self.encode_angle_by_sincos:
            angle_res = [torch.cos(rg) - torch.cos(ra), torch.sin(rg) - torch.sin(ra)]
        else:
            angle_res = [rg - ra]
        extra_res = [g - a for g, a in zip(cgs, cas)]
        return torch.cat(center_res + size_res + angle_res + extra_res, dim=-1)

    def decode_torch(self, box_encodings, anchors):
        """
        Args:
            box_encodings: (B, N, 7 + C) or (N, 7 + C) [x, y, z, dx, dy, dz, heading or *[cos, sin], ...]
            anchors: (B, N, 7 + C) or (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
        Returns:
            decoded boxes, same layout as anchors
        """
        xa, ya, za, dxa, dya, dza, ra, *cas = torch.split(anchors, 1, dim=-1)
        if self.encode_angle_by_sincos:
            xt, yt, zt, dxt, dyt, dzt, cost, sint, *cts = torch.split(box_encodings, 1, dim=-1)
        else:
            xt, yt, zt, dxt, dyt, dzt, rt, *cts = torch.split(box_encodings, 1, dim=-1)

        diag = torch.sqrt(dxa ** 2 + dya ** 2)
        center = [xt * diag + xa, yt * diag + ya, zt * dza + za]
        size = [torch.exp(dxt) * dxa, torch.exp(dyt) * dya, torch.exp(dzt) * dza]
        if self.encode_angle_by_sincos:
            heading = torch.atan2(sint + torch.sin(ra), cost + torch.cos(ra))
        else:
            heading = rt + ra
        extras = [t + a for t, a in zip(cts, cas)]
        return torch.cat(center + size + [heading] + extras, dim=-1)
class PreviousResidualDecoder(object):
    """Decoder for the legacy residual encoding whose size channels are
    ordered (w, l, h) instead of (dx, dy, dz)."""

    def __init__(self, code_size=7, **kwargs):
        super().__init__()
        self.code_size = code_size

    @staticmethod
    def decode_torch(box_encodings, anchors):
        """
        Args:
            box_encodings: (B, N, 7 + ?) x, y, z, w, l, h, r, custom values
            anchors: (B, N, 7 + C) or (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
        Returns:
            decoded boxes, same layout as anchors
        """
        xa, ya, za, dxa, dya, dza, ra, *cas = torch.split(anchors, 1, dim=-1)
        xt, yt, zt, wt, lt, ht, rt, *cts = torch.split(box_encodings, 1, dim=-1)

        diag = torch.sqrt(dxa ** 2 + dya ** 2)
        parts = [
            xt * diag + xa,
            yt * diag + ya,
            zt * dza + za,
            torch.exp(lt) * dxa,  # legacy layout: l pairs with dx
            torch.exp(wt) * dya,  # and w pairs with dy
            torch.exp(ht) * dza,
            rt + ra,
        ]
        parts += [t + a for t, a in zip(cts, cas)]
        return torch.cat(parts, dim=-1)
class PreviousResidualRoIDecoder(object):
    """RoI variant of the legacy (w, l, h ordered) residual decoder; the only
    difference is that the heading residual is subtracted (rg = ra - rt)."""

    def __init__(self, code_size=7, **kwargs):
        super().__init__()
        self.code_size = code_size

    @staticmethod
    def decode_torch(box_encodings, anchors):
        """
        Args:
            box_encodings: (B, N, 7 + ?) x, y, z, w, l, h, r, custom values
            anchors: (B, N, 7 + C) or (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
        Returns:
            decoded boxes, same layout as anchors
        """
        xa, ya, za, dxa, dya, dza, ra, *cas = torch.split(anchors, 1, dim=-1)
        xt, yt, zt, wt, lt, ht, rt, *cts = torch.split(box_encodings, 1, dim=-1)

        diag = torch.sqrt(dxa ** 2 + dya ** 2)
        parts = [
            xt * diag + xa,
            yt * diag + ya,
            zt * dza + za,
            torch.exp(lt) * dxa,  # legacy layout: l pairs with dx
            torch.exp(wt) * dya,  # and w pairs with dy
            torch.exp(ht) * dza,
            ra - rt,              # RoI decoding subtracts the heading residual
        ]
        parts += [t + a for t, a in zip(cts, cas)]
        return torch.cat(parts, dim=-1)
class PointResidualCoder(object):
    """Point-based box coder: residuals are computed relative to each point,
    with sizes encoded in log space and the heading as (cos, sin)."""

    def __init__(self, code_size=8, use_mean_size=True, **kwargs):
        super().__init__()
        self.code_size = code_size
        self.use_mean_size = use_mean_size
        if self.use_mean_size:
            # per-class mean sizes act as anchor dimensions centered on each point
            self.mean_size = torch.from_numpy(np.array(kwargs['mean_size'])).cuda().float()
            assert self.mean_size.min() > 0

    def encode_torch(self, gt_boxes, points, gt_classes=None):
        """
        Args:
            gt_boxes: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
            points: (N, 3) [x, y, z]
            gt_classes: (N) [1, num_classes]
        Returns:
            box_coding: (N, 8 + C)
        """
        gt_boxes[:, 3:6] = torch.clamp_min(gt_boxes[:, 3:6], min=1e-5)

        xg, yg, zg, dxg, dyg, dzg, rg, *cgs = torch.split(gt_boxes, 1, dim=-1)
        xa, ya, za = torch.split(points, 1, dim=-1)

        if self.use_mean_size:
            assert gt_classes.max() <= self.mean_size.shape[0]
            dxa, dya, dza = torch.split(self.mean_size[gt_classes - 1], 1, dim=-1)
            diag = torch.sqrt(dxa ** 2 + dya ** 2)
            center = [(xg - xa) / diag, (yg - ya) / diag, (zg - za) / dza]
            size = [torch.log(dxg / dxa), torch.log(dyg / dya), torch.log(dzg / dza)]
        else:
            # no class anchors: raw offsets and absolute log sizes
            center = [xg - xa, yg - ya, zg - za]
            size = [torch.log(dxg), torch.log(dyg), torch.log(dzg)]

        return torch.cat(center + size + [torch.cos(rg), torch.sin(rg)] + list(cgs), dim=-1)

    def decode_torch(self, box_encodings, points, pred_classes=None):
        """
        Args:
            box_encodings: (N, 8 + C) [x, y, z, dx, dy, dz, cos, sin, ...]
            points: [x, y, z]
            pred_classes: (N) [1, num_classes]
        Returns:
            (N, 7 + C) decoded boxes
        """
        xt, yt, zt, dxt, dyt, dzt, cost, sint, *cts = torch.split(box_encodings, 1, dim=-1)
        xa, ya, za = torch.split(points, 1, dim=-1)

        if self.use_mean_size:
            assert pred_classes.max() <= self.mean_size.shape[0]
            dxa, dya, dza = torch.split(self.mean_size[pred_classes - 1], 1, dim=-1)
            diag = torch.sqrt(dxa ** 2 + dya ** 2)
            center = [xt * diag + xa, yt * diag + ya, zt * dza + za]
            size = [torch.exp(dxt) * dxa, torch.exp(dyt) * dya, torch.exp(dzt) * dza]
        else:
            center = [xt + xa, yt + ya, zt + za]
            size = list(torch.split(torch.exp(box_encodings[..., 3:6]), 1, dim=-1))

        heading = torch.atan2(sint, cost)
        return torch.cat(center + size + [heading] + list(cts), dim=-1)
| 7,601 | 33.089686 | 105 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/utils/common_utils.py | import logging
import os
import pickle
import random
import shutil
import subprocess
import SharedArray
import numpy as np
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
def check_numpy_to_torch(x):
    """Return (float tensor, True) for numpy input, (x unchanged, False) otherwise."""
    if not isinstance(x, np.ndarray):
        return x, False
    return torch.from_numpy(x).float(), True
def limit_period(val, offset=0.5, period=np.pi):
    """Wrap values into [-offset * period, (1 - offset) * period); numpy in, numpy out."""
    val, is_numpy = check_numpy_to_torch(val)
    wrapped = val - torch.floor(val / period + offset) * period
    if is_numpy:
        return wrapped.numpy()
    return wrapped
def drop_info_with_name(info, name):
    """Return a copy of info with every entry whose 'name' equals *name* removed.

    All arrays in info are filtered with the same kept indices so they stay aligned.
    """
    keep = [i for i, entry_name in enumerate(info['name']) if entry_name != name]
    return {key: value[keep] for key, value in info.items()}
def rotate_points_along_z(points, angle):
    """
    Args:
        points: (B, N, 3 + C)
        angle: (B), angle along z-axis, angle increases x ==> y
    Returns:
        rotated points, same type (numpy/torch) and shape as the input
    """
    points, is_numpy = check_numpy_to_torch(points)
    angle, _ = check_numpy_to_torch(angle)

    cosa, sina = torch.cos(angle), torch.sin(angle)
    zeros = angle.new_zeros(points.shape[0])
    ones = angle.new_ones(points.shape[0])
    # one row-major 3x3 z-rotation matrix per batch element
    rot = torch.stack(
        (cosa, sina, zeros,
         -sina, cosa, zeros,
         zeros, zeros, ones), dim=1
    ).view(-1, 3, 3).float()

    rotated_xyz = torch.matmul(points[:, :, 0:3], rot)
    # extra feature channels pass through untouched
    out = torch.cat((rotated_xyz, points[:, :, 3:]), dim=-1)
    return out.numpy() if is_numpy else out
def mask_points_by_range(points, limit_range):
    """Boolean mask of points inside the x/y extents of limit_range
    [x_min, y_min, z_min, x_max, y_max, z_max]. NOTE: z is not checked here."""
    in_x = (points[:, 0] >= limit_range[0]) & (points[:, 0] <= limit_range[3])
    in_y = (points[:, 1] >= limit_range[1]) & (points[:, 1] <= limit_range[4])
    return in_x & in_y
def get_voxel_centers(voxel_coords, downsample_times, voxel_size, point_cloud_range):
    """
    Convert integer voxel grid coordinates (z, y, x order) to metric voxel-center
    positions.

    Args:
        voxel_coords: (N, 3) integer coordinates in (z, y, x) order
        downsample_times: grid downsampling factor applied to voxel_size
        voxel_size: per-axis voxel edge lengths (x, y, z)
        point_cloud_range: [x_min, y_min, z_min, x_max, y_max, z_max]
    Returns:
        (N, 3) voxel centers in (x, y, z) metric coordinates
    """
    assert voxel_coords.shape[1] == 3
    centers = voxel_coords[:, [2, 1, 0]].float()  # reorder (z, y, x) -> (x, y, z)
    scaled_size = torch.tensor(voxel_size, device=centers.device).float() * downsample_times
    origin = torch.tensor(point_cloud_range[0:3], device=centers.device).float()
    # +0.5 moves from the voxel corner to its center
    return (centers + 0.5) * scaled_size + origin
def create_logger(log_file=None, rank=0, log_level=logging.INFO):
    """Build a logger writing to console (and optionally a file).

    Non-zero ranks are raised to ERROR so only rank 0 produces normal output.
    """
    logger = logging.getLogger(__name__)
    effective_level = log_level if rank == 0 else 'ERROR'
    logger.setLevel(effective_level)
    formatter = logging.Formatter('%(asctime)s  %(levelname)5s  %(message)s')

    console = logging.StreamHandler()
    console.setLevel(effective_level)
    console.setFormatter(formatter)
    logger.addHandler(console)

    if log_file is not None:
        file_handler = logging.FileHandler(filename=log_file)
        file_handler.setLevel(effective_level)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)

    # avoid duplicate lines from the root logger
    logger.propagate = False
    return logger
def set_random_seed(seed):
    """Seed the python, numpy and torch RNGs and force deterministic cuDNN
    (trades kernel autotuning speed for reproducibility)."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def get_pad_params(desired_size, cur_size):
    """
    Get padding parameters for np.pad function
    Args:
        desired_size: int, Desired padded output size
        cur_size: int, Current size. Should always be less than or equal to desired_size
    Returns:
        pad_params: tuple(int), Number of values padded to the edges (before, after)
    """
    assert desired_size >= cur_size
    # pad only at the end
    return 0, desired_size - cur_size
def keep_arrays_by_name(gt_names, used_classes):
    """Return int64 indices of entries in gt_names that belong to used_classes."""
    kept = [idx for idx, cls_name in enumerate(gt_names) if cls_name in used_classes]
    return np.array(kept, dtype=np.int64)
def init_dist_slurm(tcp_port, local_rank, backend='nccl'):
    """
    modified from https://github.com/open-mmlab/mmdetection

    Initialize torch.distributed from SLURM environment variables
    (SLURM_PROCID / SLURM_NTASKS / SLURM_NODELIST).

    Args:
        tcp_port: port exported as MASTER_PORT for rendezvous
        local_rank: unused here; the GPU is chosen from SLURM_PROCID
        backend: torch.distributed backend (default 'nccl')
    Returns:
        total_gpus: world size reported by torch.distributed
        rank: this process's global rank
    """
    proc_id = int(os.environ['SLURM_PROCID'])
    ntasks = int(os.environ['SLURM_NTASKS'])
    node_list = os.environ['SLURM_NODELIST']
    num_gpus = torch.cuda.device_count()
    # pin this process to one of the node's GPUs (round-robin by proc id)
    torch.cuda.set_device(proc_id % num_gpus)
    # first hostname in the SLURM node list becomes the rendezvous master
    addr = subprocess.getoutput('scontrol show hostname {} | head -n1'.format(node_list))
    os.environ['MASTER_PORT'] = str(tcp_port)
    os.environ['MASTER_ADDR'] = addr
    os.environ['WORLD_SIZE'] = str(ntasks)
    os.environ['RANK'] = str(proc_id)
    dist.init_process_group(backend=backend)

    total_gpus = dist.get_world_size()
    rank = dist.get_rank()
    return total_gpus, rank
def init_dist_pytorch(tcp_port, local_rank, backend='nccl'):
    """
    Initialize torch.distributed for torchrun / torch.distributed.launch style
    startup (rendezvous parameters are taken from the environment).

    Args:
        tcp_port: kept for API compatibility; not used (see commented init_method)
        local_rank: rank on this machine, used to pick the GPU
        backend: torch.distributed backend (default 'nccl')
    Returns:
        num_gpus: number of visible GPUs on this machine
        rank: this process's global rank
    """
    if mp.get_start_method(allow_none=True) is None:
        # 'spawn' is required for CUDA-safe worker processes
        mp.set_start_method('spawn')

    num_gpus = torch.cuda.device_count()
    torch.cuda.set_device(local_rank % num_gpus)
    dist.init_process_group(
        backend=backend,
        # init_method='tcp://127.0.0.1:%d' % tcp_port,
        # rank=local_rank,
        # world_size=num_gpus
    )
    rank = dist.get_rank()
    return num_gpus, rank
def get_dist_info(return_gpu_per_machine=False):
    """Return (rank, world_size) — or (rank, world_size, gpus_per_machine) —
    for the current process; (0, 1) when torch.distributed is not initialized."""
    if torch.__version__ < '1.0':
        # very old torch exposed a private flag instead of is_initialized()
        initialized = dist._initialized
    elif dist.is_available():
        initialized = dist.is_initialized()
    else:
        initialized = False

    if initialized:
        rank, world_size = dist.get_rank(), dist.get_world_size()
    else:
        rank, world_size = 0, 1

    if return_gpu_per_machine:
        return rank, world_size, torch.cuda.device_count()
    return rank, world_size
def merge_results_dist(result_part, size, tmpdir):
    """
    Merge per-rank partial results through a shared temp directory.

    Each rank pickles its own part into tmpdir; rank 0 then loads all parts,
    interleaves them rank-by-rank with zip, truncates to `size` entries and
    removes tmpdir.

    Args:
        result_part: this rank's list of results.
        size: total number of samples (drops duplicated padding entries).
        tmpdir: directory reachable by all ranks (e.g. shared filesystem).
    Returns:
        Ordered result list on rank 0; None on every other rank.
    """
    rank, world_size = get_dist_info()
    os.makedirs(tmpdir, exist_ok=True)

    dist.barrier()
    pickle.dump(result_part, open(os.path.join(tmpdir, 'result_part_{}.pkl'.format(rank)), 'wb'))
    # second barrier: ensure every rank has finished writing before rank 0 reads
    dist.barrier()

    if rank != 0:
        return None

    part_list = []
    for i in range(world_size):
        part_file = os.path.join(tmpdir, 'result_part_{}.pkl'.format(i))
        part_list.append(pickle.load(open(part_file, 'rb')))

    ordered_results = []
    # zip interleaves one item per rank at a time; tail items beyond the
    # shortest part are dropped, then the list is cut to the true sample count
    for res in zip(*part_list):
        ordered_results.extend(list(res))
    ordered_results = ordered_results[:size]
    shutil.rmtree(tmpdir)
    return ordered_results
def scatter_point_inds(indices, point_inds, shape):
    """
    Scatter point_inds into a dense tensor of the given shape; cells without a
    point stay -1.

    Args:
        indices: (K, ndim) integer coordinates, one row per point
        point_inds: (K,) values to place at those coordinates
        shape: target dense shape (list/tuple of length ndim)
    Returns:
        dense tensor of `shape` with point_inds scattered in and -1 elsewhere
    """
    ret = torch.full(shape, -1, dtype=point_inds.dtype, device=point_inds.device)
    ndim = indices.shape[-1]
    flattened_indices = indices.view(-1, ndim)
    # BUGFIX: advanced indexing must use a tuple of index tensors; passing a
    # Python list of tensors is deprecated/ambiguous in modern PyTorch.
    ret[tuple(flattened_indices[:, i] for i in range(ndim))] = point_inds
    return ret
def generate_voxel2pinds(sparse_tensor):
    """Build a dense (batch_size, *spatial_shape) lookup table mapping each
    voxel coordinate to its row index in the sparse tensor (-1 where empty)."""
    indices = sparse_tensor.indices.long()
    point_indices = torch.arange(
        indices.shape[0], device=sparse_tensor.indices.device, dtype=torch.int32
    )
    output_shape = [sparse_tensor.batch_size] + list(sparse_tensor.spatial_shape)
    return scatter_point_inds(indices, point_indices, output_shape)
def sa_create(name, var):
    """Copy *var* into a new read-only SharedArray named *name* and return it."""
    shared = SharedArray.create(name, var.shape, dtype=var.dtype)
    shared[...] = var[...]
    # read-only guards against accidental cross-process writes
    shared.flags.writeable = False
    return shared
class AverageMeter(object):
    """Tracks the latest value and the running average of a scalar metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics."""
        self.val = 0    # most recent value
        self.avg = 0    # running mean
        self.sum = 0    # weighted sum of values
        self.count = 0  # total weight seen

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
| 7,799 | 28.323308 | 97 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/utils/transform_utils.py | import math
import torch
try:
from kornia.geometry.conversions import (
convert_points_to_homogeneous,
convert_points_from_homogeneous,
)
except:
pass
# print('Warning: kornia is not installed. This package is only required by CaDDN')
def project_to_image(project, points):
    """
    Project points to image
    Args:
        project [torch.tensor(..., 3, 4)]: Projection matrix
        points [torch.Tensor(..., 3)]: 3D points
    Returns:
        points_img [torch.Tensor(..., 2)]: Points in image
        points_depth [torch.Tensor(...)]: Depth of each point
    """
    # Reshape tensors to expected shape
    points = convert_points_to_homogeneous(points)
    points = points.unsqueeze(dim=-1)
    project = project.unsqueeze(dim=1)

    # Transform points to image and get depths
    points_t = project @ points
    points_t = points_t.squeeze(dim=-1)
    points_img = convert_points_from_homogeneous(points_t)
    # NOTE(review): depth = homogeneous scale minus the projection matrix's
    # (2, 3) translation term — presumably to remove the camera-offset
    # component; confirm against the calibration convention used by callers.
    points_depth = points_t[..., -1] - project[..., 2, 3]

    return points_img, points_depth
def normalize_coords(coords, shape):
    """
    Normalize coordinates of a grid between [-1, 1]
    Args:
        coords: (..., 3), Coordinates in grid
        shape: (3), Grid shape
    Returns:
        norm_coords: (..., 3), Normalized coordinates in grid
    """
    # reverse shape ordering to match the coordinate axis ordering
    shape = torch.flip(shape, dims=[0])
    # pixels index [0, shape - 1]; map that range onto [-1, 1]
    return coords / (shape - 1) * 2.0 - 1.0
def bin_depths(depth_map, mode, depth_min, depth_max, num_bins, target=False):
    """
    Converts depth map into bin indices
    Args:
        depth_map: (H, W), Depth Map
        mode: string, Discretiziation mode (See https://arxiv.org/pdf/2005.13423.pdf for more details)
            UD: Uniform discretiziation
            LID: Linear increasing discretiziation
            SID: Spacing increasing discretiziation
        depth_min: float, Minimum depth value
        depth_max: float, Maximum depth value
        num_bins: int, Number of depth bins
        target: bool, Whether the depth bins indices will be used for a target tensor in loss comparison
    Returns:
        indices: (H, W), Depth bin indices (int64 overflow-clamped when target=True,
        otherwise raw float bin positions)
    """
    if mode == "UD":
        # equal-width bins
        bin_size = (depth_max - depth_min) / num_bins
        indices = (depth_map - depth_min) / bin_size
    elif mode == "LID":
        # bin widths grow linearly with depth
        bin_size = 2 * (depth_max - depth_min) / (num_bins * (1 + num_bins))
        indices = -0.5 + 0.5 * torch.sqrt(1 + 8 * (depth_map - depth_min) / bin_size)
    elif mode == "SID":
        # bin widths grow logarithmically with depth
        log_min = math.log(1 + depth_min)
        log_max = math.log(1 + depth_max)
        indices = num_bins * (torch.log(1 + depth_map) - log_min) / (log_max - log_min)
    else:
        raise NotImplementedError

    if target:
        # out-of-range and non-finite depths all map to the overflow bin `num_bins`
        invalid = (indices < 0) | (indices > num_bins) | (~torch.isfinite(indices))
        indices[invalid] = num_bins

        # Convert to integer
        indices = indices.type(torch.int64)

    return indices
| 3,092 | 32.619565 | 104 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/utils/self_training_utils.py | import torch
import os
import glob
import tqdm
import numpy as np
import torch.distributed as dist
from pcdet.config import cfg
from pcdet.models import load_data_to_gpu
from pcdet.utils import common_utils, commu_utils, memory_ensemble_utils
import pickle as pkl
import re
PSEUDO_LABELS = {}
NEW_PSEUDO_LABELS = {}
def check_already_exsit_pseudo_label(ps_label_dir, start_epoch):
    """
    Resume helper: populate the global PSEUDO_LABELS dict from a pickle dumped
    by a previous run.

    Priority:
      1. When starting from epoch 0 and cfg.SELF_TRAIN.INIT_PS points at an
         existing pickle, load it (rank 0 also re-dumps it as ps_label_e0.pkl).
      2. Otherwise load the newest ps_label_e*.pkl whose epoch number is
         <= start_epoch.

    Args:
        ps_label_dir: dir to save pseudo label results pkls.
        start_epoch: epoch training resumes from.
    Returns:
        Path of the pickle that was loaded, or None when nothing was loaded.
    """
    # support init ps_label given by cfg
    if start_epoch == 0 and cfg.SELF_TRAIN.get('INIT_PS', None):
        if os.path.exists(cfg.SELF_TRAIN.INIT_PS):
            init_ps_label = pkl.load(open(cfg.SELF_TRAIN.INIT_PS, 'rb'))
            PSEUDO_LABELS.update(init_ps_label)

            if cfg.LOCAL_RANK == 0:
                # re-dump under the standard epoch-0 name so later resumes find it
                ps_path = os.path.join(ps_label_dir, "ps_label_e0.pkl")
                with open(ps_path, 'wb') as f:
                    pkl.dump(PSEUDO_LABELS, f)

            return cfg.SELF_TRAIN.INIT_PS

    ps_label_list = glob.glob(os.path.join(ps_label_dir, 'ps_label_e*.pkl'))
    if len(ps_label_list) == 0:
        return

    # newest first, so the first epoch-compatible pickle wins
    ps_label_list.sort(key=os.path.getmtime, reverse=True)
    for cur_pkl in ps_label_list:
        num_epoch = re.findall('ps_label_e(.*).pkl', cur_pkl)
        assert len(num_epoch) == 1

        # load pseudo label and return
        if int(num_epoch[0]) <= start_epoch:
            latest_ps_label = pkl.load(open(cur_pkl, 'rb'))
            PSEUDO_LABELS.update(latest_ps_label)
            return cur_pkl

    return None
def save_pseudo_label_epoch(model, val_loader, rank, leave_pbar, ps_label_dir, cur_epoch):
    """
    Generate pseudo label with given model.

    Args:
        model: model to predict result for pseudo label
        val_loader: data_loader to predict pseudo label
        rank: process rank
        leave_pbar: tqdm bar controller
        ps_label_dir: dir to save pseudo label
        cur_epoch: current epoch (memory-ensemble updating only applies when > 0)
    """
    val_dataloader_iter = iter(val_loader)
    total_it_each_epoch = len(val_loader)

    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar,
                         desc='generate_ps_e%d' % cur_epoch, dynamic_ncols=True)

    pos_ps_meter = common_utils.AverageMeter()
    ign_ps_meter = common_utils.AverageMeter()

    model.eval()

    for cur_it in range(total_it_each_epoch):
        try:
            target_batch = next(val_dataloader_iter)
        except StopIteration:
            # BUGFIX: re-assign the shared iterator. The old code bound the
            # fresh iterator to a new name (target_dataloader_iter), so every
            # subsequent loop iteration hit StopIteration again and re-read
            # the FIRST batch of a brand-new iterator repeatedly.
            val_dataloader_iter = iter(val_loader)
            target_batch = next(val_dataloader_iter)

        # generate gt_boxes for target_batch and update model weights
        with torch.no_grad():
            load_data_to_gpu(target_batch)
            pred_dicts, ret_dict = model(target_batch)

        pos_ps_batch, ign_ps_batch = save_pseudo_label_batch(
            target_batch, pred_dicts=pred_dicts,
            need_update=(cfg.SELF_TRAIN.get('MEMORY_ENSEMBLE', None) and
                         cfg.SELF_TRAIN.MEMORY_ENSEMBLE.ENABLED and
                         cur_epoch > 0)
        )

        # log to console and tensorboard
        pos_ps_meter.update(pos_ps_batch)
        ign_ps_meter.update(ign_ps_batch)
        disp_dict = {'pos_ps_box': "{:.3f}({:.3f})".format(pos_ps_meter.val, pos_ps_meter.avg),
                     'ign_ps_box': "{:.3f}({:.3f})".format(ign_ps_meter.val, ign_ps_meter.avg)}

        if rank == 0:
            pbar.update()
            pbar.set_postfix(disp_dict)
            pbar.refresh()

    if rank == 0:
        pbar.close()

    gather_and_dump_pseudo_label_result(rank, ps_label_dir, cur_epoch)
def gather_and_dump_pseudo_label_result(rank, ps_label_dir, cur_epoch):
    """
    Collect NEW_PSEUDO_LABELS from every rank, dump the merged dict to
    ps_label_e{cur_epoch}.pkl (rank 0 only) and promote it into the global
    PSEUDO_LABELS dict; NEW_PSEUDO_LABELS is cleared afterwards.
    """
    commu_utils.synchronize()

    if dist.is_initialized():
        part_pseudo_labels_list = commu_utils.all_gather(NEW_PSEUDO_LABELS)

        new_pseudo_label_dict = {}
        for pseudo_labels in part_pseudo_labels_list:
            new_pseudo_label_dict.update(pseudo_labels)

        NEW_PSEUDO_LABELS.update(new_pseudo_label_dict)

    # dump new pseudo label to given dir
    if rank == 0:
        ps_path = os.path.join(ps_label_dir, "ps_label_e{}.pkl".format(cur_epoch))
        with open(ps_path, 'wb') as f:
            pkl.dump(NEW_PSEUDO_LABELS, f)

    commu_utils.synchronize()
    # swap the freshly gathered labels into the active dict
    PSEUDO_LABELS.clear()
    PSEUDO_LABELS.update(NEW_PSEUDO_LABELS)
    NEW_PSEUDO_LABELS.clear()
def save_pseudo_label_batch(input_dict,
                            pred_dicts=None,
                            need_update=True):
    """
    Save pseudo label for give batch.
    If model is given, use model to inference pred_dicts,
    otherwise, directly use given pred_dicts.

    Args:
        input_dict: batch data read from dataloader
        pred_dicts: Dict if not given model.
            predict results to be generated pseudo label and saved
        need_update: Bool.
            If set to true, use consistency matching to update pseudo label
    Returns:
        (avg positive pseudo boxes per frame, avg ignored pseudo boxes per frame)
    """
    pos_ps_meter = common_utils.AverageMeter()
    ign_ps_meter = common_utils.AverageMeter()

    batch_size = len(pred_dicts)
    for b_idx in range(batch_size):
        pred_cls_scores = pred_iou_scores = None
        if 'pred_boxes' in pred_dicts[b_idx]:
            # Exist predicted boxes passing self-training score threshold
            pred_boxes = pred_dicts[b_idx]['pred_boxes'].detach().cpu().numpy()
            pred_labels = pred_dicts[b_idx]['pred_labels'].detach().cpu().numpy()
            pred_scores = pred_dicts[b_idx]['pred_scores'].detach().cpu().numpy()
            if 'pred_cls_scores' in pred_dicts[b_idx]:
                pred_cls_scores = pred_dicts[b_idx]['pred_cls_scores'].detach().cpu().numpy()
            if 'pred_iou_scores' in pred_dicts[b_idx]:
                pred_iou_scores = pred_dicts[b_idx]['pred_iou_scores'].detach().cpu().numpy()

            # remove boxes under negative threshold
            if cfg.SELF_TRAIN.get('NEG_THRESH', None):
                # per-class negative thresholds, indexed by (label - 1)
                labels_remove_scores = np.array(cfg.SELF_TRAIN.NEG_THRESH)[pred_labels - 1]
                remain_mask = pred_scores >= labels_remove_scores
                pred_labels = pred_labels[remain_mask]
                pred_scores = pred_scores[remain_mask]
                pred_boxes = pred_boxes[remain_mask]
                if 'pred_cls_scores' in pred_dicts[b_idx]:
                    pred_cls_scores = pred_cls_scores[remain_mask]
                if 'pred_iou_scores' in pred_dicts[b_idx]:
                    pred_iou_scores = pred_iou_scores[remain_mask]

            # boxes between NEG_THRESH and SCORE_THRESH are kept but marked
            # as ignore (label -1) instead of being used as positives
            labels_ignore_scores = np.array(cfg.SELF_TRAIN.SCORE_THRESH)[pred_labels - 1]
            ignore_mask = pred_scores < labels_ignore_scores
            pred_labels[ignore_mask] = -1

            # gt_box layout: [x, y, z, dx, dy, dz, heading, label, score]
            gt_box = np.concatenate((pred_boxes,
                                     pred_labels.reshape(-1, 1),
                                     pred_scores.reshape(-1, 1)), axis=1)

        else:
            # no predicted boxes passes self-training score threshold
            gt_box = np.zeros((0, 9), dtype=np.float32)

        gt_infos = {
            'gt_boxes': gt_box,
            'cls_scores': pred_cls_scores,
            'iou_scores': pred_iou_scores,
            'memory_counter': np.zeros(gt_box.shape[0])
        }

        # record pseudo label to pseudo label dict
        if need_update:
            # merge with the previous epoch's labels via the configured
            # memory-ensemble strategy (e.g. consistency_ensemble)
            ensemble_func = getattr(memory_ensemble_utils, cfg.SELF_TRAIN.MEMORY_ENSEMBLE.NAME)
            gt_infos = ensemble_func(PSEUDO_LABELS[input_dict['frame_id'][b_idx]],
                                     gt_infos, cfg.SELF_TRAIN.MEMORY_ENSEMBLE)

        if gt_infos['gt_boxes'].shape[0] > 0:
            # label column < 0 marks ignored boxes
            ign_ps_meter.update((gt_infos['gt_boxes'][:, 7] < 0).sum())
        else:
            ign_ps_meter.update(0)
        pos_ps_meter.update(gt_infos['gt_boxes'].shape[0] - ign_ps_meter.val)

        NEW_PSEUDO_LABELS[input_dict['frame_id'][b_idx]] = gt_infos

    return pos_ps_meter.avg, ign_ps_meter.avg
def load_ps_label(frame_id):
    """
    :param frame_id: file name of pseudo label
    :return gt_box: loaded gt boxes (N, 9) [x, y, z, w, l, h, ry, label, scores]
    """
    if frame_id not in PSEUDO_LABELS:
        raise ValueError('Cannot find pseudo label for frame: %s' % frame_id)
    return PSEUDO_LABELS[frame_id]['gt_boxes']
| 8,550 | 35.080169 | 95 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/utils/spconv_utils.py | from typing import Set
try:
import spconv.pytorch as spconv
except:
import spconv as spconv
import torch.nn as nn
def find_all_spconv_keys(model: nn.Module, prefix="") -> Set[str]:
    """
    Finds all spconv keys that need to have weight's transposed
    """
    keys: Set[str] = set()
    for child_name, child in model.named_children():
        child_prefix = child_name if prefix == "" else f"{prefix}.{child_name}"
        if isinstance(child, spconv.conv.SparseConvolution):
            # record the weight key and (matching original behaviour)
            # recurse with the '.weight' suffix already appended
            weight_key = f"{child_prefix}.weight"
            keys.add(weight_key)
            keys.update(find_all_spconv_keys(child, prefix=weight_key))
        else:
            keys.update(find_all_spconv_keys(child, prefix=child_prefix))
    return keys
def replace_feature(out, new_features):
    """Swap the feature tensor of a sparse tensor, supporting both spconv APIs."""
    if "replace_feature" not in out.__dir__():
        # spconv 1.x: the features attribute is directly writable
        out.features = new_features
        return out
    # spconv 2.x behaviour: method returns a new sparse tensor
    return out.replace_feature(new_features)
| 896 | 24.628571 | 73 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/utils/memory_ensemble_utils.py | import torch
import numpy as np
from scipy.optimize import linear_sum_assignment
from pcdet.utils import common_utils
from pcdet.ops.iou3d_nms import iou3d_nms_utils
from pcdet.models.model_utils.model_nms_utils import class_agnostic_nms
def consistency_ensemble(gt_infos_a, gt_infos_b, memory_ensemble_cfg):
    """
    Merge the previous pseudo-label set (A) with the current one (B) by greedy
    IoU matching; matched pairs keep the higher-confidence (or weighted) box,
    unmatched A boxes age via the memory counter, unmatched B boxes are added.

    Args:
        gt_infos_a:
            gt_boxes: (N, 9) [x, y, z, dx, dy, dz, heading, label, scores] in LiDAR for previous pseudo boxes
            cls_scores: (N)
            iou_scores: (N)
            memory_counter: (N)
        gt_infos_b:
            gt_boxes: (M, 9) [x, y, z, dx, dy, dz, heading, label, scores] in LiDAR for current pseudo boxes
            cls_scores: (M)
            iou_scores: (M)
            memory_counter: (M)
        memory_ensemble_cfg:
    Returns:
        gt_infos:
            gt_boxes: (K, 9) [x, y, z, dx, dy, dz, heading, label, scores] in LiDAR for merged pseudo boxes
            cls_scores: (K)
            iou_scores: (K)
            memory_counter: (K)
    """
    gt_box_a, _ = common_utils.check_numpy_to_torch(gt_infos_a['gt_boxes'])
    gt_box_b, _ = common_utils.check_numpy_to_torch(gt_infos_b['gt_boxes'])
    gt_box_a, gt_box_b = gt_box_a.cuda(), gt_box_b.cuda()

    # start from A; matched/new entries are written into these arrays below
    new_gt_box = gt_infos_a['gt_boxes']
    new_cls_scores = gt_infos_a['cls_scores']
    new_iou_scores = gt_infos_a['iou_scores']
    new_memory_counter = gt_infos_a['memory_counter']

    # if gt_box_b or gt_box_a don't have any predictions
    if gt_box_b.shape[0] == 0:
        gt_infos_a['memory_counter'] += 1
        return gt_infos_a
    elif gt_box_a.shape[0] == 0:
        return gt_infos_b

    # get ious
    iou_matrix = iou3d_nms_utils.boxes_iou3d_gpu(gt_box_a[:, :7], gt_box_b[:, :7]).cpu()

    # greedy: each A box matches its best-overlapping B box
    ious, match_idx = torch.max(iou_matrix, dim=1)
    ious, match_idx = ious.numpy(), match_idx.numpy()
    gt_box_a, gt_box_b = gt_box_a.cpu().numpy(), gt_box_b.cpu().numpy()

    match_pairs_idx = np.concatenate((
        np.array(list(range(gt_box_a.shape[0]))).reshape(-1, 1),
        match_idx.reshape(-1, 1)), axis=1)

    #########################################################
    # filter matched pair boxes by IoU
    # if matching succeeded, use boxes with higher confidence
    #########################################################
    iou_mask = (ious >= memory_ensemble_cfg.IOU_THRESH)

    matching_selected = match_pairs_idx[iou_mask]
    gt_box_selected_a = gt_box_a[matching_selected[:, 0]]
    gt_box_selected_b = gt_box_b[matching_selected[:, 1]]

    # assign boxes with higher confidence
    score_mask = gt_box_selected_a[:, 8] < gt_box_selected_b[:, 8]
    if memory_ensemble_cfg.get('WEIGHTED', None):
        # confidence-weighted interpolation of the two matched boxes
        weight = gt_box_selected_a[:, 8] / (gt_box_selected_a[:, 8] + gt_box_selected_b[:, 8])
        min_scores = np.minimum(gt_box_selected_a[:, 8], gt_box_selected_b[:, 8])
        max_scores = np.maximum(gt_box_selected_a[:, 8], gt_box_selected_b[:, 8])
        weighted_score = weight * (max_scores - min_scores) + min_scores
        new_gt_box[matching_selected[:, 0], :7] = weight.reshape(-1, 1) * gt_box_selected_a[:, :7] + \
                                                  (1 - weight.reshape(-1, 1)) * gt_box_selected_b[:, :7]
        new_gt_box[matching_selected[:, 0], 8] = weighted_score
    else:
        new_gt_box[matching_selected[score_mask, 0], :] = gt_box_selected_b[score_mask, :]

    if gt_infos_a['cls_scores'] is not None:
        new_cls_scores[matching_selected[score_mask, 0]] = gt_infos_b['cls_scores'][
            matching_selected[score_mask, 1]]
    if gt_infos_a['iou_scores'] is not None:
        new_iou_scores[matching_selected[score_mask, 0]] = gt_infos_b['iou_scores'][
            matching_selected[score_mask, 1]]

    # for matching pairs, clear the ignore counter
    new_memory_counter[matching_selected[:, 0]] = 0

    #######################################################
    # If previous bboxes disappeared: ious <= 0.1
    #######################################################
    disappear_idx = (ious < memory_ensemble_cfg.IOU_THRESH).nonzero()[0]

    if memory_ensemble_cfg.get('MEMORY_VOTING', None) and memory_ensemble_cfg.MEMORY_VOTING.ENABLED:
        new_memory_counter[disappear_idx] += 1
        # ignore gt_boxes that ignore_count == IGNORE_THRESH
        ignore_mask = new_memory_counter >= memory_ensemble_cfg.MEMORY_VOTING.IGNORE_THRESH
        new_gt_box[ignore_mask, 7] = -1

        # remove gt_boxes that ignore_count >= RM_THRESH
        remain_mask = new_memory_counter < memory_ensemble_cfg.MEMORY_VOTING.RM_THRESH
        new_gt_box = new_gt_box[remain_mask]
        new_memory_counter = new_memory_counter[remain_mask]
        if gt_infos_a['cls_scores'] is not None:
            new_cls_scores = new_cls_scores[remain_mask]
        if gt_infos_a['iou_scores'] is not None:
            new_iou_scores = new_iou_scores[remain_mask]

    # Add new appear boxes
    # reverse direction: B boxes whose best overlap with A is below threshold
    ious_b2a, match_idx_b2a = torch.max(iou_matrix, dim=0)
    ious_b2a, match_idx_b2a = ious_b2a.numpy(), match_idx_b2a.numpy()
    newboxes_idx = (ious_b2a < memory_ensemble_cfg.IOU_THRESH).nonzero()[0]
    if newboxes_idx.shape[0] != 0:
        new_gt_box = np.concatenate((new_gt_box, gt_infos_b['gt_boxes'][newboxes_idx, :]), axis=0)
        if gt_infos_a['cls_scores'] is not None:
            new_cls_scores = np.concatenate((new_cls_scores, gt_infos_b['cls_scores'][newboxes_idx]), axis=0)
        if gt_infos_a['iou_scores'] is not None:
            new_iou_scores = np.concatenate((new_iou_scores, gt_infos_b['iou_scores'][newboxes_idx]), axis=0)
        new_memory_counter = np.concatenate((new_memory_counter, gt_infos_b['memory_counter'][newboxes_idx]), axis=0)

    new_gt_infos = {
        'gt_boxes': new_gt_box,
        'cls_scores': new_cls_scores if gt_infos_a['cls_scores'] is not None else None,
        'iou_scores': new_iou_scores if gt_infos_a['iou_scores'] is not None else None,
        'memory_counter': new_memory_counter
    }

    return new_gt_infos
def nms_ensemble(gt_infos_a, gt_infos_b, memory_ensemble_cfg):
    """
    Merge the previous pseudo-label set (A) with the current one (B) by
    concatenating both sets and running class-agnostic NMS on the combined
    boxes; optional memory voting ages/ignores/removes stale A boxes.

    Args:
        gt_infos_a:
            gt_boxes: (N, 9) [x, y, z, dx, dy, dz, heading, label, scores] in LiDAR for previous pseudo boxes
            cls_scores: (N)
            iou_scores: (N)
            memory_counter: (N)
        gt_infos_b:
            gt_boxes: (M, 9) [x, y, z, dx, dy, dz, heading, label, scores] in LiDAR for current pseudo boxes
            cls_scores: (M)
            iou_scores: (M)
            memory_counter: (M)
        memory_ensemble_cfg:
    Returns:
        gt_infos:
            gt_boxes: (K, 9) [x, y, z, dx, dy, dz, heading, label, scores] in LiDAR for merged pseudo boxes
            cls_scores: (K)
            iou_scores: (K)
            memory_counter: (K)
    """
    gt_box_a, _ = common_utils.check_numpy_to_torch(gt_infos_a['gt_boxes'])
    gt_box_b, _ = common_utils.check_numpy_to_torch(gt_infos_b['gt_boxes'])

    # degenerate cases: one side has no boxes
    if gt_box_b.shape[0] == 0:
        if memory_ensemble_cfg.get('MEMORY_VOTING', None) and memory_ensemble_cfg.MEMORY_VOTING.ENABLED:
            gt_infos_a['memory_counter'] += 1
        return gt_infos_a
    elif gt_box_a.shape[0] == 0:
        return gt_infos_b

    # A boxes come first in the concatenation; indices < len(A) refer to A
    gt_box_a, gt_box_b = gt_box_a.cuda(), gt_box_b.cuda()
    gt_boxes = torch.cat((gt_box_a, gt_box_b), dim=0)
    if gt_infos_a['cls_scores'] is not None:
        new_cls_scores = np.concatenate((gt_infos_a['cls_scores'], gt_infos_b['cls_scores']), axis=0)
    if gt_infos_a['iou_scores'] is not None:
        new_iou_scores = np.concatenate((gt_infos_a['iou_scores'], gt_infos_b['iou_scores']), axis=0)

    new_memory_counter = np.concatenate((gt_infos_a['memory_counter'], gt_infos_b['memory_counter']), axis=0)

    # column -1 holds the confidence score (see gt_boxes layout above)
    selected, selected_scores = class_agnostic_nms(
        box_scores=gt_boxes[:, -1], box_preds=gt_boxes[:, :7], nms_config=memory_ensemble_cfg.NMS_CONFIG
    )

    gt_boxes = gt_boxes.cpu().numpy()
    if isinstance(selected, list):
        selected = np.array(selected)
    else:
        selected = selected.cpu().numpy()

    if memory_ensemble_cfg.get('MEMORY_VOTING', None) and memory_ensemble_cfg.MEMORY_VOTING.ENABLED:
        iou_matrix = iou3d_nms_utils.boxes_iou3d_gpu(gt_box_a[:, :7], gt_box_b[:, :7])
        ious, _ = torch.max(iou_matrix, dim=1)
        ious = ious.cpu().numpy()

        # surviving A boxes with a confirming B overlap reset their counter
        gt_box_a_size = gt_box_a.shape[0]
        selected_a = selected[selected < gt_box_a_size]
        matched_mask = (ious[selected_a] > memory_ensemble_cfg.NMS_CONFIG.NMS_THRESH)
        match_idx = selected_a[matched_mask]
        new_memory_counter[match_idx] = 0

        # for previous bboxes disappeared
        disappear_idx = (ious < memory_ensemble_cfg.NMS_CONFIG.NMS_THRESH).nonzero()[0]
        new_memory_counter[disappear_idx] += 1

        # ignore gt_boxes that ignore_count == IGNORE_THRESH
        ignore_mask = new_memory_counter >= memory_ensemble_cfg.MEMORY_VOTING.IGNORE_THRESH
        gt_boxes[ignore_mask, 7] = -1

        # remove gt_boxes that ignore_count >= RM_THRESH
        rm_idx = (new_memory_counter >= memory_ensemble_cfg.MEMORY_VOTING.RM_THRESH).nonzero()[0]
        selected = np.setdiff1d(selected, rm_idx)

    selected_gt_boxes = gt_boxes[selected]
    new_gt_infos = {
        'gt_boxes': selected_gt_boxes,
        'cls_scores': new_cls_scores[selected] if gt_infos_a['cls_scores'] is not None else None,
        'iou_scores': new_iou_scores[selected] if gt_infos_a['iou_scores'] is not None else None,
        'memory_counter': new_memory_counter[selected]
    }

    return new_gt_infos
def bipartite_ensemble(gt_infos_a, gt_infos_b, memory_ensemble_cfg):
    """
    Merge two generations of pseudo boxes via one-to-one (Hungarian) matching.

    Args:
        gt_infos_a:
            gt_boxes: (N, 9) [x, y, z, dx, dy, dz, heading, label, scores] in LiDAR for previous pseudo boxes
            cls_scores: (N)
            iou_scores: (N)
            memory_counter: (N)
        gt_infos_b:
            gt_boxes: (M, 9) [x, y, z, dx, dy, dz, heading, label, scores] in LiDAR for current pseudo boxes
            cls_scores: (M)
            iou_scores: (M)
            memory_counter: (M)
        memory_ensemble_cfg:
    Returns:
        gt_infos:
            gt_boxes: (K, 9) [x, y, z, dx, dy, dz, heading, label, scores] in LiDAR for merged pseudo boxes
            cls_scores: (K)
            iou_scores: (K)
            memory_counter: (K)
    """
    gt_box_a, _ = common_utils.check_numpy_to_torch(gt_infos_a['gt_boxes'])
    gt_box_b, _ = common_utils.check_numpy_to_torch(gt_infos_b['gt_boxes'])
    gt_box_a, gt_box_b = gt_box_a.cuda(), gt_box_b.cuda()

    # NOTE(review): these are *aliases* of the arrays stored in gt_infos_a, so the
    # in-place assignments below also mutate the caller's gt_infos_a — presumably
    # intended (memory-bank update); verify against callers.
    new_gt_box = gt_infos_a['gt_boxes']
    new_cls_scores = gt_infos_a['cls_scores']
    new_iou_scores = gt_infos_a['iou_scores']
    new_memory_counter = gt_infos_a['memory_counter']

    # if gt_box_b or gt_box_a don't have any predictions
    if gt_box_b.shape[0] == 0:
        # no current boxes: age every cached box by one frame
        gt_infos_a['memory_counter'] += 1
        return gt_infos_a
    elif gt_box_a.shape[0] == 0:
        # empty memory: adopt the current predictions as-is
        return gt_infos_b

    # bipartite matching: Hungarian assignment on negated 3D IoU maximizes total IoU
    iou_matrix = iou3d_nms_utils.boxes_iou3d_gpu(gt_box_a[:, :7], gt_box_b[:, :7])
    iou_matrix = iou_matrix.cpu().numpy()
    a_idx, b_idx = linear_sum_assignment(-iou_matrix)

    gt_box_a, gt_box_b = gt_box_a.cpu().numpy(), gt_box_b.cpu().numpy()
    matching_paris_idx = np.concatenate((a_idx.reshape(-1, 1), b_idx.reshape(-1, 1)), axis=1)
    ious = iou_matrix[matching_paris_idx[:, 0], matching_paris_idx[:, 1]]

    # matched a boxes: keep only assignments whose IoU clears the threshold
    matched_mask = ious > memory_ensemble_cfg.IOU_THRESH
    matching_selected = matching_paris_idx[matched_mask]
    gt_box_selected_a = gt_box_a[matching_selected[:, 0]]
    gt_box_selected_b = gt_box_b[matching_selected[:, 1]]

    # assign boxes with higher confidence (column 8 holds the confidence score)
    score_mask = gt_box_selected_a[:, 8] < gt_box_selected_b[:, 8]
    new_gt_box[matching_selected[score_mask, 0], :] = gt_box_selected_b[score_mask, :]
    if gt_infos_a['cls_scores'] is not None:
        new_cls_scores[matching_selected[score_mask, 0]] = gt_infos_b['cls_scores'][
            matching_selected[score_mask, 1]]
    if gt_infos_a['iou_scores'] is not None:
        new_iou_scores[matching_selected[score_mask, 0]] = gt_infos_b['iou_scores'][
            matching_selected[score_mask, 1]]

    # for matched pairs, clear the ignore counter
    new_memory_counter[matching_selected[:, 0]] = 0

    ##############################################
    # disppeared boxes for previous pseudo boxes
    ##############################################
    gt_box_a_idx = np.array(list(range(gt_box_a.shape[0])))
    disappear_idx = np.setdiff1d(gt_box_a_idx, matching_selected[:, 0])

    if memory_ensemble_cfg.get('MEMORY_VOTING', None) and memory_ensemble_cfg.MEMORY_VOTING.ENABLED:
        # unmatched cached boxes age by one frame
        new_memory_counter[disappear_idx] += 1
        # ignore gt_boxes that ignore_count == IGNORE_THRESH (label column 7 set to -1)
        ignore_mask = new_memory_counter >= memory_ensemble_cfg.MEMORY_VOTING.IGNORE_THRESH
        new_gt_box[ignore_mask, 7] = -1

        # remove gt_boxes that ignore_count >= RM_THRESH
        remain_mask = new_memory_counter < memory_ensemble_cfg.MEMORY_VOTING.RM_THRESH
        new_gt_box = new_gt_box[remain_mask]
        new_memory_counter = new_memory_counter[remain_mask]
        if gt_infos_a['cls_scores'] is not None:
            new_cls_scores = new_cls_scores[remain_mask]
        if gt_infos_a['iou_scores'] is not None:
            new_iou_scores = new_iou_scores[remain_mask]

    ##############################################
    # new appear boxes for current pseudo boxes
    ##############################################
    gt_box_b_idx = np.array(list(range(gt_box_b.shape[0])))
    newboxes_idx = np.setdiff1d(gt_box_b_idx, matching_selected[:, 1])

    if newboxes_idx.shape[0] != 0:
        # unmatched current boxes enter the memory with their own scores/counters
        new_gt_box = np.concatenate((new_gt_box, gt_infos_b['gt_boxes'][newboxes_idx, :]), axis=0)
        if gt_infos_a['cls_scores'] is not None:
            new_cls_scores = np.concatenate((new_cls_scores,
                                             gt_infos_b['cls_scores'][newboxes_idx]), axis=0)
        if gt_infos_a['iou_scores'] is not None:
            new_iou_scores = np.concatenate((new_iou_scores,
                                             gt_infos_b['iou_scores'][newboxes_idx]), axis=0)
        new_memory_counter = np.concatenate((new_memory_counter,
                                             gt_infos_b['memory_counter'][newboxes_idx]), axis=0)

    new_gt_infos = {
        'gt_boxes': new_gt_box,
        'cls_scores': new_cls_scores if gt_infos_a['cls_scores'] is not None else None,
        'iou_scores': new_iou_scores if gt_infos_a['iou_scores'] is not None else None,
        'memory_counter': new_memory_counter
    }

    return new_gt_infos
| 14,715 | 41.90379 | 117 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/utils/commu_utils.py | """
This file contains primitives for multi-gpu communication.
This is useful when doing distributed training.
deeply borrow from maskrcnn-benchmark and ST3D
"""
import pickle
import time
import torch
import torch.distributed as dist
def get_world_size():
    """Return the number of processes in the default group (1 when not distributed)."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size()
    return 1
def get_rank():
    """Return this process's rank in the default group (0 when not distributed)."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank()
    return 0
def is_main_process():
    """True only on rank 0 (the process that should log / save checkpoints)."""
    rank = get_rank()
    return rank == 0
def synchronize():
    """
    Helper function to synchronize (barrier) among all processes when
    using distributed training.

    No-op when torch.distributed is unavailable, uninitialized, or
    running single-process.
    """
    if not (dist.is_available() and dist.is_initialized()):
        return
    if dist.get_world_size() > 1:
        dist.barrier()
def all_gather(data):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors)
    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank
    """
    world_size = get_world_size()
    if world_size == 1:
        # single process: nothing to gather
        return [data]

    # serialized to a Tensor
    origin_size = None
    if not isinstance(data, torch.Tensor):
        # non-tensor payloads are pickled into a CUDA byte tensor
        buffer = pickle.dumps(data)
        storage = torch.ByteStorage.from_buffer(buffer)
        tensor = torch.ByteTensor(storage).to("cuda")
    else:
        # tensors are flattened; origin_size lets us restore trailing dims later
        origin_size = data.size()
        tensor = data.reshape(-1)

    tensor_type = tensor.dtype

    # obtain Tensor size of each rank
    local_size = torch.LongTensor([tensor.numel()]).to("cuda")
    size_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)

    # receiving Tensor from all ranks
    # we pad the tensor because torch all_gather does not support
    # gathering tensors of different shapes
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.FloatTensor(size=(max_size,)).cuda().to(tensor_type))
    if local_size != max_size:
        padding = torch.FloatTensor(size=(max_size - local_size,)).cuda().to(tensor_type)
        tensor = torch.cat((tensor, padding), dim=0)
    dist.all_gather(tensor_list, tensor)

    data_list = []
    for size, tensor in zip(size_list, tensor_list):
        if origin_size is None:
            # unpickle only the valid (unpadded) prefix of each gathered buffer
            buffer = tensor.cpu().numpy().tobytes()[:size]
            data_list.append(pickle.loads(buffer))
        else:
            # keep as tensor; padding beyond `size` is dropped
            buffer = tensor[:size]
            data_list.append(buffer)

    if origin_size is not None:
        new_shape = [-1] + list(origin_size[1:])
        resized_list = []
        for data in data_list:
            # suppose the difference of tensor size exist in first dimension
            data = data.reshape(new_shape)
            resized_list.append(data)
        return resized_list
    else:
        return data_list
def reduce_dict(input_dict, average=True):
    """
    Args:
        input_dict (dict): all the values will be reduced
        average (bool): whether to do average or sum
    Reduce the values in the dictionary from all processes so that process with rank
    0 has the averaged results. Returns a dict with the same fields as
    input_dict, after reduction.

    NOTE(review): values are stacked with torch.stack, so they are assumed to be
    tensors of identical shape on every rank — confirm at call sites.
    """
    world_size = get_world_size()
    if world_size < 2:
        # single process: nothing to reduce
        return input_dict
    with torch.no_grad():
        names = []
        values = []
        # sort the keys so that they are consistent across processes
        for k in sorted(input_dict.keys()):
            names.append(k)
            values.append(input_dict[k])
        values = torch.stack(values, dim=0)
        dist.reduce(values, dst=0)
        if dist.get_rank() == 0 and average:
            # only main process gets accumulated, so only divide by
            # world_size in this case
            values /= world_size
        reduced_dict = {k: v for k, v in zip(names, values)}
    return reduced_dict
def average_reduce_value(data):
    """Average *data* over all ranks (single-process: returns data unchanged)."""
    gathered = all_gather(data)
    return sum(gathered) / len(gathered)
def all_reduce(data, op="sum", average=False):
    """
    All-reduce a tensor across ranks.

    Args:
        data: tensor to reduce (a clone is reduced; *data* itself is untouched)
        op: one of 'sum' / 'max' / 'min' / 'product' (case-insensitive)
        average: divide the reduced result by world size (requires op='sum')
    Returns:
        the reduced tensor, or *data* itself when running single-process
    """
    if get_world_size() <= 1:
        return data

    op_table = {
        "SUM": dist.ReduceOp.SUM,
        "MAX": dist.ReduceOp.MAX,
        "MIN": dist.ReduceOp.MIN,
        "PRODUCT": dist.ReduceOp.PRODUCT,
    }
    reduced = data.clone()
    dist.all_reduce(reduced, op=op_table[op.upper()])
    if not average:
        return reduced
    # averaging only makes sense for a sum reduction
    assert op.upper() == 'SUM'
    return reduced / get_world_size()
@torch.no_grad()
def concat_all_gather(tensor):
    """
    Performs all_gather operation on the provided tensors.
    *** Warning ***: torch.distributed.all_gather has no gradient.
    """
    world_size = torch.distributed.get_world_size()
    gathered = [torch.ones_like(tensor) for _ in range(world_size)]
    torch.distributed.all_gather(gathered, tensor, async_op=False)
    return torch.cat(gathered, dim=0)
| 5,253 | 27.710383 | 89 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/ops/roipoint_pool3d/roipoint_pool3d_utils.py | import torch
import torch.nn as nn
from torch.autograd import Function
from ...utils import box_utils
from . import roipoint_pool3d_cuda
class RoIPointPool3d(nn.Module):
    """Module wrapper around RoIPointPool3dFunction: pools raw points and their
    features that fall inside (slightly enlarged) 3D boxes."""

    def __init__(self, num_sampled_points=512, pool_extra_width=1.0):
        super().__init__()
        # number of points kept per box / margin added around each box before pooling
        self.num_sampled_points = num_sampled_points
        self.pool_extra_width = pool_extra_width

    def forward(self, points, point_features, boxes3d):
        """
        Args:
            points: (B, N, 3)
            point_features: (B, N, C)
            boxes3d: (B, M, 7), [x, y, z, dx, dy, dz, heading]
        Returns:
            pooled_features: (B, M, 512, 3 + C)
            pooled_empty_flag: (B, M)
        """
        return RoIPointPool3dFunction.apply(
            points, point_features, boxes3d,
            self.pool_extra_width, self.num_sampled_points
        )
class RoIPointPool3dFunction(Function):
    @staticmethod
    def forward(ctx, points, point_features, boxes3d, pool_extra_width, num_sampled_points=512):
        """
        Args:
            ctx:
            points: (B, N, 3)
            point_features: (B, N, C)
            boxes3d: (B, num_boxes, 7), [x, y, z, dx, dy, dz, heading]
            pool_extra_width:
            num_sampled_points:
        Returns:
            pooled_features: (B, num_boxes, 512, 3 + C)
            pooled_empty_flag: (B, num_boxes)
        """
        assert points.shape.__len__() == 3 and points.shape[2] == 3
        batch_size, boxes_num, feature_len = points.shape[0], boxes3d.shape[1], point_features.shape[2]
        # enlarge each box by pool_extra_width so points on the border are kept
        pooled_boxes3d = box_utils.enlarge_box3d(boxes3d.view(-1, 7), pool_extra_width).view(batch_size, -1, 7)

        # output buffers filled in-place by the CUDA kernel
        pooled_features = point_features.new_zeros((batch_size, boxes_num, num_sampled_points, 3 + feature_len))
        pooled_empty_flag = point_features.new_zeros((batch_size, boxes_num)).int()

        roipoint_pool3d_cuda.forward(
            points.contiguous(), pooled_boxes3d.contiguous(),
            point_features.contiguous(), pooled_features, pooled_empty_flag
        )

        return pooled_features, pooled_empty_flag

    @staticmethod
    def backward(ctx, grad_out):
        # pooling indices are not saved in forward, so no gradient path exists
        raise NotImplementedError
if __name__ == '__main__':
pass
| 2,226 | 31.75 | 112 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/ops/pointnet2/pointnet2_stack/voxel_query_utils.py | import torch
from torch.autograd import Variable
from torch.autograd import Function
import torch.nn as nn
from typing import List
from . import pointnet2_stack_cuda as pointnet2
from . import pointnet2_utils
class VoxelQuery(Function):
    @staticmethod
    def forward(ctx, max_range: int, radius: float, nsample: int, xyz: torch.Tensor, \
                new_xyz: torch.Tensor, new_coords: torch.Tensor, point_indices: torch.Tensor):
        """
        Args:
            ctx:
            max_range: int, max range of voxels to be grouped
                NOTE(review): despite the `int` hint, this is unpacked into three
                values (z, y, x ranges) below — it must be a 3-element sequence.
            nsample: int, maximum number of features in the balls
            new_coords: (M1 + M2, 4), [batch_id, z, y, x] cooridnates of keypoints
            new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
            point_indices: (batch_size, Z, Y, X) 4-D tensor recording the point indices of voxels
        Returns:
            idx: (M1 + M2, nsample) tensor with the indicies of the features that form the query balls
        """
        assert new_xyz.is_contiguous()
        assert xyz.is_contiguous()
        assert new_coords.is_contiguous()
        assert point_indices.is_contiguous()

        M = new_coords.shape[0]
        B, Z, Y, X = point_indices.shape
        idx = torch.cuda.IntTensor(M, nsample).zero_()

        z_range, y_range, x_range = max_range
        pointnet2.voxel_query_wrapper(M, Z, Y, X, nsample, radius, z_range, y_range, x_range, \
            new_xyz, xyz, new_coords, point_indices, idx)

        # the kernel writes -1 into slot 0 for query centers with no neighbors;
        # remap those rows to index 0 and report them through the mask
        empty_ball_mask = (idx[:, 0] == -1)
        idx[empty_ball_mask] = 0

        return idx, empty_ball_mask

    @staticmethod
    def backward(ctx, a=None):
        # index selection is not differentiable
        return None, None, None, None
voxel_query = VoxelQuery.apply  # functional entry point for the autograd op
class VoxelQueryAndGrouping(nn.Module):
    """Voxel-hash neighbor query followed by feature grouping (stacked-batch layout)."""

    def __init__(self, max_range: int, radius: float, nsample: int):
        """
        Args:
            max_range: 3-element (z, y, x) voxel search range passed to voxel_query
            radius: float, radius of ball
            nsample: int, maximum number of features to gather in the ball
        """
        super().__init__()
        self.max_range, self.radius, self.nsample = max_range, radius, nsample

    def forward(self, new_coords: torch.Tensor, xyz: torch.Tensor, xyz_batch_cnt: torch.Tensor,
                new_xyz: torch.Tensor, new_xyz_batch_cnt: torch.Tensor,
                features: torch.Tensor, voxel2point_indices: torch.Tensor):
        """
        Args:
            new_coords: (M1 + M2 ..., 3) centers voxel indices of the ball query
            xyz: (N1 + N2 ..., 3) xyz coordinates of the features
            xyz_batch_cnt: (batch_size), [N1, N2, ...]
            new_xyz: (M1 + M2 ..., 3) centers of the ball query
            new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
            features: (N1 + N2 ..., C) tensor of features to group
            voxel2point_indices: (B, Z, Y, X) tensor of points indices of voxels
        Returns:
            grouped_features: (M1 + M2, C, nsample) tensor
            grouped_xyz: (M1 + M2, 3, nsample) tensor
            empty_ball_mask: (M1 + M2,) bool mask of query centers with no neighbors
        """
        # bug fix: the error message previously printed new_xyz_batch_cnt for a
        # check against xyz_batch_cnt, producing a misleading diagnostic
        assert xyz.shape[0] == xyz_batch_cnt.sum(), 'xyz: %s, xyz_batch_cnt: %s' % (str(xyz.shape), str(xyz_batch_cnt))
        assert new_coords.shape[0] == new_xyz_batch_cnt.sum(), \
            'new_coords: %s, new_xyz_batch_cnt: %s' % (str(new_coords.shape), str(new_xyz_batch_cnt))
        batch_size = xyz_batch_cnt.shape[0]

        # idx: (M1 + M2 ..., nsample), empty_ball_mask: (M1 + M2 ...)
        idx1, empty_ball_mask1 = voxel_query(self.max_range, self.radius, self.nsample, xyz, new_xyz, new_coords, voxel2point_indices)

        # subtract each batch's cumulative point count so the indices are offset
        # per-batch as grouping_operation expects — TODO confirm against the kernel
        idx1 = idx1.view(batch_size, -1, self.nsample)
        count = 0
        for bs_idx in range(batch_size):
            idx1[bs_idx] -= count
            count += xyz_batch_cnt[bs_idx]
        idx1 = idx1.view(-1, self.nsample)
        idx1[empty_ball_mask1] = 0

        idx = idx1
        empty_ball_mask = empty_ball_mask1

        grouped_xyz = pointnet2_utils.grouping_operation(xyz, xyz_batch_cnt, idx, new_xyz_batch_cnt)
        # grouped_features: (M1 + M2, C, nsample)
        grouped_features = pointnet2_utils.grouping_operation(features, xyz_batch_cnt, idx, new_xyz_batch_cnt)

        return grouped_features, grouped_xyz, empty_ball_mask
| 4,148 | 40.079208 | 134 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/ops/pointnet2/pointnet2_stack/pointnet2_utils.py | import torch
import torch.nn as nn
from torch.autograd import Function, Variable
from . import pointnet2_stack_cuda as pointnet2
class BallQuery(Function):
    @staticmethod
    def forward(ctx, radius: float, nsample: int, xyz: torch.Tensor, xyz_batch_cnt: torch.Tensor,
                new_xyz: torch.Tensor, new_xyz_batch_cnt):
        """
        Args:
            ctx:
            radius: float, radius of the balls
            nsample: int, maximum number of features in the balls
            xyz: (N1 + N2 ..., 3) xyz coordinates of the features
            xyz_batch_cnt: (batch_size), [N1, N2, ...]
            new_xyz: (M1 + M2 ..., 3) centers of the ball query
            new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
        Returns:
            idx: (M1 + M2, nsample) tensor with the indicies of the features that form the query balls
        """
        assert new_xyz.is_contiguous()
        assert new_xyz_batch_cnt.is_contiguous()
        assert xyz.is_contiguous()
        assert xyz_batch_cnt.is_contiguous()

        B = xyz_batch_cnt.shape[0]
        M = new_xyz.shape[0]
        idx = torch.cuda.IntTensor(M, nsample).zero_()

        pointnet2.ball_query_wrapper(B, M, radius, nsample, new_xyz, new_xyz_batch_cnt, xyz, xyz_batch_cnt, idx)
        # the kernel writes -1 into slot 0 for centers with no in-radius neighbors;
        # remap those rows to index 0 and report them through the mask
        empty_ball_mask = (idx[:, 0] == -1)
        idx[empty_ball_mask] = 0
        return idx, empty_ball_mask

    @staticmethod
    def backward(ctx, a=None):
        # index selection is not differentiable
        return None, None, None, None
ball_query = BallQuery.apply  # functional entry point for the autograd op
class GroupingOperation(Function):
    @staticmethod
    def forward(ctx, features: torch.Tensor, features_batch_cnt: torch.Tensor,
                idx: torch.Tensor, idx_batch_cnt: torch.Tensor):
        """
        Args:
            ctx:
            features: (N1 + N2 ..., C) tensor of features to group
            features_batch_cnt: (batch_size) [N1 + N2 ...] tensor containing the indicies of features to group with
            idx: (M1 + M2 ..., nsample) tensor containing the indicies of features to group with
            idx_batch_cnt: (batch_size) [M1 + M2 ...] tensor containing the indicies of features to group with
        Returns:
            output: (M1 + M2, C, nsample) tensor
        """
        assert features.is_contiguous()
        assert features_batch_cnt.is_contiguous()
        assert idx.is_contiguous()
        assert idx_batch_cnt.is_contiguous()

        # batch counts must agree with the stacked tensor lengths
        assert features.shape[0] == features_batch_cnt.sum(), \
            'features: %s, features_batch_cnt: %s' % (str(features.shape), str(features_batch_cnt))
        assert idx.shape[0] == idx_batch_cnt.sum(), \
            'idx: %s, idx_batch_cnt: %s' % (str(idx.shape), str(idx_batch_cnt))

        M, nsample = idx.size()
        N, C = features.size()
        B = idx_batch_cnt.shape[0]
        output = torch.cuda.FloatTensor(M, C, nsample)

        pointnet2.group_points_wrapper(B, M, C, nsample, features, features_batch_cnt, idx, idx_batch_cnt, output)

        # save what the backward scatter needs
        ctx.for_backwards = (B, N, idx, features_batch_cnt, idx_batch_cnt)
        return output

    @staticmethod
    def backward(ctx, grad_out: torch.Tensor):
        """
        Args:
            ctx:
            grad_out: (M1 + M2 ..., C, nsample) tensor of the gradients of the output from forward
        Returns:
            grad_features: (N1 + N2 ..., C) gradient of the features
        """
        B, N, idx, features_batch_cnt, idx_batch_cnt = ctx.for_backwards

        M, C, nsample = grad_out.size()
        # gradients are scatter-added back onto the source features
        grad_features = Variable(torch.cuda.FloatTensor(N, C).zero_())

        grad_out_data = grad_out.data.contiguous()
        pointnet2.group_points_grad_wrapper(B, M, C, N, nsample, grad_out_data, idx,
                                            idx_batch_cnt, features_batch_cnt, grad_features.data)
        return grad_features, None, None, None
grouping_operation = GroupingOperation.apply  # functional entry point for the autograd op
class QueryAndGroup(nn.Module):
    """Ball query + grouping for stacked-batch point clouds."""

    def __init__(self, radius: float, nsample: int, use_xyz: bool = True):
        """
        Args:
            radius: float, radius of ball
            nsample: int, maximum number of features to gather in the ball
            use_xyz: prepend the relative xyz offsets as 3 extra feature channels
        """
        super().__init__()
        self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz

    def forward(self, xyz: torch.Tensor, xyz_batch_cnt: torch.Tensor,
                new_xyz: torch.Tensor, new_xyz_batch_cnt: torch.Tensor,
                features: torch.Tensor = None):
        """
        Args:
            xyz: (N1 + N2 ..., 3) xyz coordinates of the features
            xyz_batch_cnt: (batch_size), [N1, N2, ...]
            new_xyz: (M1 + M2 ..., 3) centers of the ball query
            new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
            features: (N1 + N2 ..., C) tensor of features to group
        Returns:
            new_features: (M1 + M2, C, nsample) tensor
            idx: (M1 + M2, nsample) grouping indices
        """
        # bug fix: the error message previously printed new_xyz_batch_cnt for a
        # check against xyz_batch_cnt, producing a misleading diagnostic
        assert xyz.shape[0] == xyz_batch_cnt.sum(), 'xyz: %s, xyz_batch_cnt: %s' % (str(xyz.shape), str(xyz_batch_cnt))
        assert new_xyz.shape[0] == new_xyz_batch_cnt.sum(), \
            'new_xyz: %s, new_xyz_batch_cnt: %s' % (str(new_xyz.shape), str(new_xyz_batch_cnt))

        # idx: (M1 + M2 ..., nsample), empty_ball_mask: (M1 + M2 ...)
        idx, empty_ball_mask = ball_query(self.radius, self.nsample, xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt)
        grouped_xyz = grouping_operation(xyz, xyz_batch_cnt, idx, new_xyz_batch_cnt)  # (M1 + M2, 3, nsample)
        # re-center neighbors on their query point; zero groups with no neighbors
        grouped_xyz -= new_xyz.unsqueeze(-1)
        grouped_xyz[empty_ball_mask] = 0

        if features is not None:
            grouped_features = grouping_operation(features, xyz_batch_cnt, idx, new_xyz_batch_cnt)  # (M1 + M2, C, nsample)
            grouped_features[empty_ball_mask] = 0
            if self.use_xyz:
                new_features = torch.cat([grouped_xyz, grouped_features], dim=1)  # (M1 + M2 ..., C + 3, nsample)
            else:
                new_features = grouped_features
        else:
            assert self.use_xyz, "Cannot have not features and not use xyz as a feature!"
            new_features = grouped_xyz

        return new_features, idx
class FarthestPointSampling(Function):
    @staticmethod
    def forward(ctx, xyz: torch.Tensor, npoint: int):
        """
        Args:
            ctx:
            xyz: (B, N, 3) where N > npoint
            npoint: int, number of features in the sampled set
        Returns:
            output: (B, npoint) tensor containing the set
        """
        assert xyz.is_contiguous()

        B, N, _ = xyz.size()
        output = torch.cuda.IntTensor(B, npoint)
        # scratch buffer: running min distance from each point to the selected set
        temp = torch.cuda.FloatTensor(B, N).fill_(1e10)

        pointnet2.farthest_point_sampling_wrapper(B, N, npoint, xyz, temp, output)
        return output

    @staticmethod
    def backward(xyz, a=None):
        # NOTE(review): the first positional parameter is conventionally `ctx`;
        # harmless here since sampling has no gradient, but worth normalizing.
        return None, None
farthest_point_sample = furthest_point_sample = FarthestPointSampling.apply  # both spellings alias the same op
class StackFarthestPointSampling(Function):
    @staticmethod
    def forward(ctx, xyz, xyz_batch_cnt, npoint):
        """
        Args:
            ctx:
            xyz: (N1 + N2 + ..., 3) where N > npoint
            xyz_batch_cnt: [N1, N2, ...]
            npoint: int, number of features in the sampled set
        Returns:
            output: (npoint.sum()) tensor containing the set,
            npoint: (M1, M2, ...)
        """
        assert xyz.is_contiguous() and xyz.shape[1] == 3

        batch_size = xyz_batch_cnt.__len__()
        # normalize npoint to a per-batch int tensor (a scalar means the same
        # sample count for every batch element)
        if not isinstance(npoint, torch.Tensor):
            if not isinstance(npoint, list):
                npoint = [npoint for i in range(batch_size)]
            npoint = torch.tensor(npoint, device=xyz.device).int()

        N, _ = xyz.size()
        # scratch buffer: running min distance from each point to the selected set
        temp = torch.cuda.FloatTensor(N).fill_(1e10)
        output = torch.cuda.IntTensor(npoint.sum().item())

        pointnet2.stack_farthest_point_sampling_wrapper(xyz, temp, xyz_batch_cnt, output, npoint)
        return output

    @staticmethod
    def backward(xyz, a=None):
        # sampling is not differentiable
        return None, None
stack_farthest_point_sample = StackFarthestPointSampling.apply  # functional entry point for the autograd op
class ThreeNN(Function):
    @staticmethod
    def forward(ctx, unknown, unknown_batch_cnt, known, known_batch_cnt):
        """
        Args:
            ctx:
            unknown: (N1 + N2..., 3)
            unknown_batch_cnt: (batch_size), [N1, N2, ...]
            known: (M1 + M2..., 3)
            known_batch_cnt: (batch_size), [M1, M2, ...]
        Returns:
            dist: (N1 + N2 ..., 3) l2 distance to the three nearest neighbors
            idx: (N1 + N2 ..., 3) index of the three nearest neighbors, range [0, M1+M2+...]
        """
        assert unknown.shape.__len__() == 2 and unknown.shape[1] == 3
        assert known.shape.__len__() == 2 and known.shape[1] == 3
        assert unknown_batch_cnt.__len__() == known_batch_cnt.__len__()

        # output buffers filled in-place by the CUDA kernel
        dist2 = unknown.new_zeros(unknown.shape)
        idx = unknown_batch_cnt.new_zeros(unknown.shape).int()

        pointnet2.three_nn_wrapper(
            unknown.contiguous(), unknown_batch_cnt.contiguous(),
            known.contiguous(), known_batch_cnt.contiguous(), dist2, idx
        )
        # kernel produces squared distances; return true (sqrt) distances
        return torch.sqrt(dist2), idx

    @staticmethod
    def backward(ctx, a=None, b=None):
        # nearest-neighbor search is not differentiable
        return None, None
three_nn = ThreeNN.apply  # functional entry point for the autograd op
class ThreeInterpolate(Function):
    @staticmethod
    def forward(ctx, features: torch.Tensor, idx: torch.Tensor, weight: torch.Tensor):
        """
        Weighted interpolation from the three nearest known features.

        Args:
            ctx:
            features: (M1 + M2 ..., C)
            idx: [N1 + N2 ..., 3]
            weight: [N1 + N2 ..., 3]
        Returns:
            out_tensor: (N1 + N2 ..., C)
        """
        assert idx.shape[0] == weight.shape[0] and idx.shape[1] == weight.shape[1] == 3

        # idx/weight and the source feature count are needed to scatter gradients back
        ctx.three_interpolate_for_backward = (idx, weight, features.shape[0])
        output = features.new_zeros((idx.shape[0], features.shape[1]))
        pointnet2.three_interpolate_wrapper(features.contiguous(), idx.contiguous(), weight.contiguous(), output)
        return output

    @staticmethod
    def backward(ctx, grad_out: torch.Tensor):
        """
        Args:
            ctx:
            grad_out: (N1 + N2 ..., C)
        Returns:
            grad_features: (M1 + M2 ..., C)
        """
        idx, weight, M = ctx.three_interpolate_for_backward
        # scatter-add weighted output gradients back onto the source features
        grad_features = grad_out.new_zeros((M, grad_out.shape[1]))

        pointnet2.three_interpolate_grad_wrapper(
            grad_out.contiguous(), idx.contiguous(), weight.contiguous(), grad_features
        )
        return grad_features, None, None
three_interpolate = ThreeInterpolate.apply  # functional entry point for the autograd op
class ThreeNNForVectorPoolByTwoStep(Function):
    @staticmethod
    def forward(ctx, support_xyz, xyz_batch_cnt, new_xyz, new_xyz_grid_centers, new_xyz_batch_cnt,
                max_neighbour_distance, nsample, neighbor_type, avg_length_of_neighbor_idxs, num_total_grids,
                neighbor_distance_multiplier):
        """
        Args:
            ctx:
            // support_xyz: (N1 + N2 ..., 3) xyz coordinates of the features
            // xyz_batch_cnt: (batch_size), [N1, N2, ...]
            // new_xyz: (M1 + M2 ..., 3) centers of the ball query
            // new_xyz_grid_centers: (M1 + M2 ..., num_total_grids, 3) grids centers of each grid
            // new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
            // nsample: find all (-1), find limited number(>0)
            // neighbor_type: 1: ball, others: cube
            // neighbor_distance_multiplier: query_distance = neighbor_distance_multiplier * max_neighbour_distance
        Returns:
            // new_xyz_grid_idxs: (M1 + M2 ..., num_total_grids, 3) three-nn
            // new_xyz_grid_dist2: (M1 + M2 ..., num_total_grids, 3) square of dist of three-nn
        """
        num_new_xyz = new_xyz.shape[0]
        new_xyz_grid_dist2 = new_xyz_grid_centers.new_zeros(new_xyz_grid_centers.shape)
        new_xyz_grid_idxs = new_xyz_grid_centers.new_zeros(new_xyz_grid_centers.shape).int().fill_(-1)

        # step 1: collect neighbor indices into a flat buffer whose size is only an
        # estimate (avg_length_of_neighbor_idxs * num_new_xyz). If the kernel reports
        # more neighbors than fit, grow the estimate and retry.
        while True:
            num_max_sum_points = avg_length_of_neighbor_idxs * num_new_xyz
            stack_neighbor_idxs = new_xyz_grid_idxs.new_zeros(num_max_sum_points)
            start_len = new_xyz_grid_idxs.new_zeros(num_new_xyz, 2).int()
            cumsum = new_xyz_grid_idxs.new_zeros(1)

            pointnet2.query_stacked_local_neighbor_idxs_wrapper_stack(
                support_xyz.contiguous(), xyz_batch_cnt.contiguous(),
                new_xyz.contiguous(), new_xyz_batch_cnt.contiguous(),
                stack_neighbor_idxs.contiguous(), start_len.contiguous(), cumsum,
                avg_length_of_neighbor_idxs, max_neighbour_distance * neighbor_distance_multiplier,
                nsample, neighbor_type
            )
            # update the estimate (ceil of actual neighbors per query) for next call
            avg_length_of_neighbor_idxs = cumsum[0].item() // num_new_xyz + int(cumsum[0].item() % num_new_xyz > 0)

            if cumsum[0] <= num_max_sum_points:
                break

        stack_neighbor_idxs = stack_neighbor_idxs[:cumsum[0]]

        # step 2: among the collected neighbors, find the 3-NN of every grid center
        pointnet2.query_three_nn_by_stacked_local_idxs_wrapper_stack(
            support_xyz, new_xyz, new_xyz_grid_centers, new_xyz_grid_idxs, new_xyz_grid_dist2,
            stack_neighbor_idxs, start_len, num_new_xyz, num_total_grids
        )

        # the grown estimate is returned so callers can reuse it next iteration
        return torch.sqrt(new_xyz_grid_dist2), new_xyz_grid_idxs, torch.tensor(avg_length_of_neighbor_idxs)
three_nn_for_vector_pool_by_two_step = ThreeNNForVectorPoolByTwoStep.apply  # functional entry point for the autograd op
class VectorPoolWithVoxelQuery(Function):
    @staticmethod
    def forward(ctx, support_xyz: torch.Tensor, xyz_batch_cnt: torch.Tensor, support_features: torch.Tensor,
                new_xyz: torch.Tensor, new_xyz_batch_cnt: torch.Tensor, num_grid_x, num_grid_y, num_grid_z,
                max_neighbour_distance, num_c_out_each_grid, use_xyz,
                num_mean_points_per_grid=100, nsample=-1, neighbor_type=0, pooling_type=0):
        """
        Args:
            ctx:
            support_xyz: (N1 + N2 ..., 3) xyz coordinates of the features
            xyz_batch_cnt: (batch_size), [N1, N2, ...]
            support_features: (N1 + N2 ..., C)
            new_xyz: (M1 + M2 ..., 3) centers of new positions
            new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
            num_grid_x: number of grids in each local area centered at new_xyz
            num_grid_y:
            num_grid_z:
            max_neighbour_distance:
            num_c_out_each_grid:
            use_xyz:
            neighbor_type: 1: ball, others: cube:
            pooling_type: 0: avg_pool, 1: random choice
        Returns:
            new_features: (M1 + M2 ..., num_c_out)
        """
        assert support_xyz.is_contiguous()
        assert support_features.is_contiguous()
        assert xyz_batch_cnt.is_contiguous()
        assert new_xyz.is_contiguous()
        assert new_xyz_batch_cnt.is_contiguous()
        num_total_grids = num_grid_x * num_grid_y * num_grid_z
        num_c_out = num_c_out_each_grid * num_total_grids
        N, num_c_in = support_features.shape
        M = new_xyz.shape[0]

        assert num_c_in % num_c_out_each_grid == 0, \
            f'the input channels ({num_c_in}) should be an integral multiple of num_c_out_each_grid({num_c_out_each_grid})'

        # the grouped-index buffer is sized from an *estimate* of points per grid;
        # if the kernel reports overflow, grow the estimate and retry
        while True:
            new_features = support_features.new_zeros((M, num_c_out))
            new_local_xyz = support_features.new_zeros((M, 3 * num_total_grids))
            point_cnt_of_grid = xyz_batch_cnt.new_zeros((M, num_total_grids))

            num_max_sum_points = num_mean_points_per_grid * M
            grouped_idxs = xyz_batch_cnt.new_zeros((num_max_sum_points, 3))

            num_cum_sum = pointnet2.vector_pool_wrapper(
                support_xyz, xyz_batch_cnt, support_features, new_xyz, new_xyz_batch_cnt,
                new_features, new_local_xyz, point_cnt_of_grid, grouped_idxs,
                num_grid_x, num_grid_y, num_grid_z, max_neighbour_distance, use_xyz,
                num_max_sum_points, nsample, neighbor_type, pooling_type
            )
            # ceil of actual points per grid becomes the next estimate
            num_mean_points_per_grid = num_cum_sum // M + int(num_cum_sum % M > 0)
            if num_cum_sum <= num_max_sum_points:
                break

        # drop the unused tail of the index buffer
        grouped_idxs = grouped_idxs[:num_cum_sum]

        # kernel accumulates sums per grid cell; divide by (clamped) point counts
        # to obtain averages
        normalizer = torch.clamp_min(point_cnt_of_grid[:, :, None].float(), min=1e-6)
        new_features = (new_features.view(-1, num_total_grids, num_c_out_each_grid) / normalizer).view(-1, num_c_out)

        if use_xyz:
            new_local_xyz = (new_local_xyz.view(-1, num_total_grids, 3) / normalizer).view(-1, num_total_grids * 3)

        num_mean_points_per_grid = torch.Tensor([num_mean_points_per_grid]).int()
        nsample = torch.Tensor([nsample]).int()
        ctx.vector_pool_for_backward = (point_cnt_of_grid, grouped_idxs, N, num_c_in)
        ctx.mark_non_differentiable(new_local_xyz, num_mean_points_per_grid, nsample, point_cnt_of_grid)
        return new_features, new_local_xyz, num_mean_points_per_grid, point_cnt_of_grid

    @staticmethod
    def backward(ctx, grad_new_features: torch.Tensor, grad_local_xyz: torch.Tensor, grad_num_cum_sum, grad_point_cnt_of_grid):
        """
        Args:
            ctx:
            grad_new_features: (M1 + M2 ..., num_c_out), num_c_out = num_c_out_each_grid * num_total_grids
        Returns:
            grad_support_features: (N1 + N2 ..., C_in)
        """
        point_cnt_of_grid, grouped_idxs, N, num_c_in = ctx.vector_pool_for_backward
        grad_support_features = grad_new_features.new_zeros((N, num_c_in))

        pointnet2.vector_pool_grad_wrapper(
            grad_new_features.contiguous(), point_cnt_of_grid, grouped_idxs,
            grad_support_features
        )

        # only support_features (3rd forward argument) receives a gradient;
        # the other 14 forward inputs are non-differentiable
        return None, None, grad_support_features, None, None, None, None, None, None, None, None, None, None, None, None
vector_pool_with_voxel_query_op = VectorPoolWithVoxelQuery.apply  # functional entry point for the autograd op
if __name__ == '__main__':
pass
| 17,903 | 38.523179 | 127 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/ops/pointnet2/pointnet2_stack/voxel_pool_modules.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from . import voxel_query_utils
from typing import List
class NeighborVoxelSAModuleMSG(nn.Module):
    def __init__(self, *, query_ranges: List[List[int]], radii: List[float],
                 nsamples: List[int], mlps: List[List[int]], use_xyz: bool = True, pool_method='max_pool'):
        """
        Args:
            query_ranges: list of int, list of neighbor ranges to group with
            nsamples: list of int, number of samples in each ball query
            mlps: list of list of int, spec of the pointnet before the global pooling for each scale
            use_xyz:
                NOTE(review): accepted but never stored or used in this constructor.
            pool_method: max_pool / avg_pool
        """
        super().__init__()

        assert len(query_ranges) == len(nsamples) == len(mlps)

        # one grouper + three small MLP branches per scale
        self.groupers = nn.ModuleList()
        self.mlps_in = nn.ModuleList()
        self.mlps_pos = nn.ModuleList()
        self.mlps_out = nn.ModuleList()
        for i in range(len(query_ranges)):
            max_range = query_ranges[i]
            nsample = nsamples[i]
            radius = radii[i]
            self.groupers.append(voxel_query_utils.VoxelQueryAndGrouping(max_range, radius, nsample))
            mlp_spec = mlps[i]

            # 1x1 conv + BN applied to input features before grouping; the ReLU is
            # applied in forward, after fusing with the position encoding
            cur_mlp_in = nn.Sequential(
                nn.Conv1d(mlp_spec[0], mlp_spec[1], kernel_size=1, bias=False),
                nn.BatchNorm1d(mlp_spec[1])
            )

            # position-encoding branch over relative xyz offsets (3 -> C channels)
            cur_mlp_pos = nn.Sequential(
                nn.Conv2d(3, mlp_spec[1], kernel_size=1, bias=False),
                nn.BatchNorm2d(mlp_spec[1])
            )

            # output projection applied after pooling
            cur_mlp_out = nn.Sequential(
                nn.Conv1d(mlp_spec[1], mlp_spec[2], kernel_size=1, bias=False),
                nn.BatchNorm1d(mlp_spec[2]),
                nn.ReLU()
            )

            self.mlps_in.append(cur_mlp_in)
            self.mlps_pos.append(cur_mlp_pos)
            self.mlps_out.append(cur_mlp_out)

        self.relu = nn.ReLU()
        self.pool_method = pool_method

        self.init_weights()
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
nn.init.kaiming_normal_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
if isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 0)
def forward(self, xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt, \
new_coords, features, voxel2point_indices):
"""
:param xyz: (N1 + N2 ..., 3) tensor of the xyz coordinates of the features
:param xyz_batch_cnt: (batch_size), [N1, N2, ...]
:param new_xyz: (M1 + M2 ..., 3)
:param new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
:param features: (N1 + N2 ..., C) tensor of the descriptors of the the features
:param point_indices: (B, Z, Y, X) tensor of point indices
:return:
new_xyz: (M1 + M2 ..., 3) tensor of the new features' xyz
new_features: (M1 + M2 ..., \sum_k(mlps[k][-1])) tensor of the new_features descriptors
"""
# change the order to [batch_idx, z, y, x]
new_coords = new_coords[:, [0, 3, 2, 1]].contiguous()
new_features_list = []
for k in range(len(self.groupers)):
# features_in: (1, C, M1+M2)
features_in = features.permute(1, 0).unsqueeze(0)
features_in = self.mlps_in[k](features_in)
# features_in: (1, M1+M2, C)
features_in = features_in.permute(0, 2, 1).contiguous()
# features_in: (M1+M2, C)
features_in = features_in.view(-1, features_in.shape[-1])
# grouped_features: (M1+M2, C, nsample)
# grouped_xyz: (M1+M2, 3, nsample)
grouped_features, grouped_xyz, empty_ball_mask = self.groupers[k](
new_coords, xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt, features_in, voxel2point_indices
)
grouped_features[empty_ball_mask] = 0
# grouped_features: (1, C, M1+M2, nsample)
grouped_features = grouped_features.permute(1, 0, 2).unsqueeze(dim=0)
# grouped_xyz: (M1+M2, 3, nsample)
grouped_xyz = grouped_xyz - new_xyz.unsqueeze(-1)
grouped_xyz[empty_ball_mask] = 0
# grouped_xyz: (1, 3, M1+M2, nsample)
grouped_xyz = grouped_xyz.permute(1, 0, 2).unsqueeze(0)
# grouped_xyz: (1, C, M1+M2, nsample)
position_features = self.mlps_pos[k](grouped_xyz)
new_features = grouped_features + position_features
new_features = self.relu(new_features)
if self.pool_method == 'max_pool':
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
).squeeze(dim=-1) # (1, C, M1 + M2 ...)
elif self.pool_method == 'avg_pool':
new_features = F.avg_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
).squeeze(dim=-1) # (1, C, M1 + M2 ...)
else:
raise NotImplementedError
new_features = self.mlps_out[k](new_features)
new_features = new_features.squeeze(dim=0).permute(1, 0) # (M1 + M2 ..., C)
new_features_list.append(new_features)
# (M1 + M2 ..., C)
new_features = torch.cat(new_features_list, dim=1)
return new_features
| 5,672 | 41.977273 | 108 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/ops/pointnet2/pointnet2_stack/pointnet2_modules.py | from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import pointnet2_utils
def build_local_aggregation_module(input_channels, config):
    """Factory for the local feature-aggregation layer used by stack-based SA modules.

    Args:
        input_channels: int, channel count of the incoming point features
        config: EasyDict-like config; NAME selects the aggregation module

    Returns:
        (cur_layer, num_c_out): the constructed layer and its output channel count
    """
    local_aggregation_name = config.get('NAME', 'StackSAModuleMSG')

    if local_aggregation_name == 'StackSAModuleMSG':
        mlps = config.MLPS
        # NOTE(review): this prepends input_channels to config.MLPS in place, so
        # calling this factory twice with the same config object would grow the
        # specs again — confirm each config is consumed only once.
        for k in range(len(mlps)):
            mlps[k] = [input_channels] + mlps[k]
        cur_layer = StackSAModuleMSG(
            radii=config.POOL_RADIUS, nsamples=config.NSAMPLE, mlps=mlps, use_xyz=True, pool_method='max_pool',
        )
        num_c_out = sum([x[-1] for x in mlps])
    elif local_aggregation_name == 'VectorPoolAggregationModuleMSG':
        cur_layer = VectorPoolAggregationModuleMSG(input_channels=input_channels, config=config)
        num_c_out = config.MSG_POST_MLPS[-1]
    else:
        raise NotImplementedError

    return cur_layer, num_c_out
class StackSAModuleMSG(nn.Module):
    """Multi-scale set abstraction for stacked point clouds: points of all
    samples are flattened into one tensor, with per-sample counts instead of a
    batch dimension.
    """

    def __init__(self, *, radii: List[float], nsamples: List[int], mlps: List[List[int]],
                 use_xyz: bool = True, pool_method='max_pool'):
        """
        Args:
            radii: list of float, list of radii to group with
            nsamples: list of int, number of samples in each ball query
            mlps: list of list of int, spec of the pointnet before the global pooling for each scale
            use_xyz: if True, centroid-relative xyz offsets are prepended to the grouped features
            pool_method: max_pool / avg_pool
        """
        super().__init__()

        assert len(radii) == len(nsamples) == len(mlps)

        self.groupers = nn.ModuleList()
        self.mlps = nn.ModuleList()
        for i in range(len(radii)):
            radius = radii[i]
            nsample = nsamples[i]
            self.groupers.append(pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz))
            mlp_spec = mlps[i]
            if use_xyz:
                # NOTE(review): mutates the caller's spec in place (adds the 3
                # xyz channels) — confirm each mlps list is used only once.
                mlp_spec[0] += 3
            shared_mlps = []
            for k in range(len(mlp_spec) - 1):
                shared_mlps.extend([
                    nn.Conv2d(mlp_spec[k], mlp_spec[k + 1], kernel_size=1, bias=False),
                    nn.BatchNorm2d(mlp_spec[k + 1]),
                    nn.ReLU()
                ])
            self.mlps.append(nn.Sequential(*shared_mlps))
        self.pool_method = pool_method

        self.init_weights()

    def init_weights(self):
        # Kaiming init for convs; unit weight / zero bias for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            if isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1.0)
                nn.init.constant_(m.bias, 0)

    def forward(self, xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt, features=None, empty_voxel_set_zeros=True):
        """
        :param xyz: (N1 + N2 ..., 3) tensor of the xyz coordinates of the features
        :param xyz_batch_cnt: (batch_size), [N1, N2, ...]
        :param new_xyz: (M1 + M2 ..., 3)
        :param new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
        :param features: (N1 + N2 ..., C) tensor of the descriptors of the the features
        :param empty_voxel_set_zeros: unused; kept for interface compatibility
        :return:
            new_xyz: (M1 + M2 ..., 3) tensor of the new features' xyz
            new_features: (M1 + M2 ..., \sum_k(mlps[k][-1])) tensor of the new_features descriptors
        """
        new_features_list = []
        for k in range(len(self.groupers)):
            new_features, ball_idxs = self.groupers[k](
                xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt, features
            )  # (M1 + M2, C, nsample)
            new_features = new_features.permute(1, 0, 2).unsqueeze(dim=0)  # (1, C, M1 + M2 ..., nsample)
            new_features = self.mlps[k](new_features)  # (1, C, M1 + M2 ..., nsample)

            if self.pool_method == 'max_pool':
                new_features = F.max_pool2d(
                    new_features, kernel_size=[1, new_features.size(3)]
                ).squeeze(dim=-1)  # (1, C, M1 + M2 ...)
            elif self.pool_method == 'avg_pool':
                new_features = F.avg_pool2d(
                    new_features, kernel_size=[1, new_features.size(3)]
                ).squeeze(dim=-1)  # (1, C, M1 + M2 ...)
            else:
                raise NotImplementedError
            new_features = new_features.squeeze(dim=0).permute(1, 0)  # (M1 + M2 ..., C)
            new_features_list.append(new_features)

        # concatenate all scales along channels
        new_features = torch.cat(new_features_list, dim=1)  # (M1 + M2 ..., C)

        return new_xyz, new_features
class StackPointnetFPModule(nn.Module):
    """Feature propagation for stacked point clouds: interpolates known
    features onto unknown points via inverse-distance-weighted 3-NN
    interpolation, then applies a shared MLP.
    """

    def __init__(self, *, mlp: List[int]):
        """
        Args:
            mlp: list of int, channel spec of the shared MLP
        """
        super().__init__()
        shared_mlps = []
        for k in range(len(mlp) - 1):
            shared_mlps.extend([
                nn.Conv2d(mlp[k], mlp[k + 1], kernel_size=1, bias=False),
                nn.BatchNorm2d(mlp[k + 1]),
                nn.ReLU()
            ])
        self.mlp = nn.Sequential(*shared_mlps)

    def forward(self, unknown, unknown_batch_cnt, known, known_batch_cnt, unknown_feats=None, known_feats=None):
        """
        Args:
            unknown: (N1 + N2 ..., 3)
            unknown_batch_cnt: (batch_size), [N1, N2, ...]
            known: (M1 + M2 ..., 3)
            known_batch_cnt: (batch_size), [M1, M2, ...]
            unknown_feats: (N1 + N2 ..., C1), optional features already on the unknown points
            known_feats: (M1 + M2 ..., C2)

        Returns:
            new_features: (N1 + N2 ..., C_out)
        """
        dist, idx = pointnet2_utils.three_nn(unknown, unknown_batch_cnt, known, known_batch_cnt)
        dist_recip = 1.0 / (dist + 1e-8)  # inverse-distance weights; eps avoids div-by-zero
        norm = torch.sum(dist_recip, dim=-1, keepdim=True)
        weight = dist_recip / norm

        interpolated_feats = pointnet2_utils.three_interpolate(known_feats, idx, weight)

        if unknown_feats is not None:
            # skip-connection with the features already on the unknown points
            new_features = torch.cat([interpolated_feats, unknown_feats], dim=1)  # (N1 + N2 ..., C2 + C1)
        else:
            new_features = interpolated_feats

        new_features = new_features.permute(1, 0)[None, :, :, None]  # (1, C, N1 + N2 ..., 1)
        new_features = self.mlp(new_features)

        new_features = new_features.squeeze(dim=0).squeeze(dim=-1).permute(1, 0)  # (N1 + N2 ..., C)
        return new_features
class VectorPoolLocalInterpolateModule(nn.Module):
    """Interpolates support-point features onto a dense grid of voxel centers
    around each query point (the 'local_interpolation' mode of VectorPool
    aggregation).
    """

    def __init__(self, mlp, num_voxels, max_neighbour_distance, nsample, neighbor_type, use_xyz=True,
                 neighbour_distance_multiplier=1.0, xyz_encoding_type='concat'):
        """
        Args:
            mlp: optional channel spec of a shared MLP applied after interpolation (None: skip)
            num_voxels: [num_grid_x, num_grid_y, num_grid_z]
            max_neighbour_distance: float, neighbor search radius
            neighbor_type: 1: ball, others: cube
            nsample: find all (-1), find limited number(>0)
            use_xyz: if True, concatenate the xyz offsets to the 3 nearest neighbors
            neighbour_distance_multiplier: search-radius multiplier for the two-step NN query
            xyz_encoding_type: only 'concat' is implemented
        """
        super().__init__()
        self.num_voxels = num_voxels  # [num_grid_x, num_grid_y, num_grid_z]: number of grids in each local area centered at new_xyz
        self.num_total_grids = self.num_voxels[0] * self.num_voxels[1] * self.num_voxels[2]
        self.max_neighbour_distance = max_neighbour_distance
        self.neighbor_distance_multiplier = neighbour_distance_multiplier
        self.nsample = nsample
        self.neighbor_type = neighbor_type
        self.use_xyz = use_xyz
        self.xyz_encoding_type = xyz_encoding_type

        if mlp is not None:
            if self.use_xyz:
                # 3 neighbors x 3 coords of relative offsets are concatenated
                mlp[0] += 9 if self.xyz_encoding_type == 'concat' else 0
            shared_mlps = []
            for k in range(len(mlp) - 1):
                shared_mlps.extend([
                    nn.Conv2d(mlp[k], mlp[k + 1], kernel_size=1, bias=False),
                    nn.BatchNorm2d(mlp[k + 1]),
                    nn.ReLU()
                ])
            self.mlp = nn.Sequential(*shared_mlps)
        else:
            self.mlp = None

        # Adaptive CUDA-side buffer size; grown whenever the kernel reports a
        # larger requirement.
        self.num_avg_length_of_neighbor_idxs = 1000

    def forward(self, support_xyz, support_features, xyz_batch_cnt, new_xyz, new_xyz_grid_centers, new_xyz_batch_cnt):
        """
        Args:
            support_xyz: (N1 + N2 ..., 3) xyz coordinates of the features
            support_features: (N1 + N2 ..., C) point-wise features
            xyz_batch_cnt: (batch_size), [N1, N2, ...]
            new_xyz: (M1 + M2 ..., 3) centers of the ball query
            new_xyz_grid_centers: (M1 + M2 ..., num_total_grids, 3) grids centers of each grid
            new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
        Returns:
            new_features: ((M1 + M2 ...) * num_total_grids, C_out)
        """
        with torch.no_grad():
            dist, idx, num_avg_length_of_neighbor_idxs = pointnet2_utils.three_nn_for_vector_pool_by_two_step(
                support_xyz, xyz_batch_cnt, new_xyz, new_xyz_grid_centers, new_xyz_batch_cnt,
                self.max_neighbour_distance, self.nsample, self.neighbor_type,
                self.num_avg_length_of_neighbor_idxs, self.num_total_grids, self.neighbor_distance_multiplier
            )
        # grow the buffer estimate if the kernel needed more space
        self.num_avg_length_of_neighbor_idxs = max(self.num_avg_length_of_neighbor_idxs, num_avg_length_of_neighbor_idxs.item())

        dist_recip = 1.0 / (dist + 1e-8)  # inverse-distance weighting
        norm = torch.sum(dist_recip, dim=-1, keepdim=True)
        weight = dist_recip / torch.clamp_min(norm, min=1e-8)

        # Grid centers with no valid neighbor are flagged with idx == -1;
        # redirect them to index 0 here and zero their features afterwards.
        empty_mask = (idx.view(-1, 3)[:, 0] == -1)
        idx.view(-1, 3)[empty_mask] = 0

        interpolated_feats = pointnet2_utils.three_interpolate(support_features, idx.view(-1, 3), weight.view(-1, 3))
        interpolated_feats = interpolated_feats.view(idx.shape[0], idx.shape[1], -1)  # (M1 + M2 ..., num_total_grids, C)
        if self.use_xyz:
            near_known_xyz = support_xyz[idx.view(-1, 3).long()].view(-1, 3, 3)  # ( (M1 + M2 ...)*num_total_grids, 3)
            local_xyz = (new_xyz_grid_centers.view(-1, 1, 3) - near_known_xyz).view(-1, idx.shape[1], 9)
            if self.xyz_encoding_type == 'concat':
                interpolated_feats = torch.cat((interpolated_feats, local_xyz), dim=-1)  # ( M1 + M2 ..., num_total_grids, 9+C)
            else:
                raise NotImplementedError

        new_features = interpolated_feats.view(-1, interpolated_feats.shape[-1])  # ((M1 + M2 ...) * num_total_grids, C)
        new_features[empty_mask, :] = 0
        if self.mlp is not None:
            new_features = new_features.permute(1, 0)[None, :, :, None]  # (1, C, N1 + N2 ..., 1)
            new_features = self.mlp(new_features)

            new_features = new_features.squeeze(dim=0).squeeze(dim=-1).permute(1, 0)  # (N1 + N2 ..., C)
        return new_features
class VectorPoolAggregationModule(nn.Module):
    """VectorPool aggregation (PV-RCNN++ style): summarizes the neighborhood of
    each query point with a fixed set of local voxels — either by voxel pooling
    or by interpolating features onto dense voxel centers — then applies a
    grouped 1x1 conv (one group per voxel) and post MLPs.
    """

    def __init__(
            self, input_channels, num_local_voxel=(3, 3, 3), local_aggregation_type='local_interpolation',
            num_reduced_channels=30, num_channels_of_local_aggregation=32, post_mlps=(128,),
            max_neighbor_distance=None, neighbor_nsample=-1, neighbor_type=0, neighbor_distance_multiplier=2.0):
        super().__init__()
        self.num_local_voxel = num_local_voxel
        self.total_voxels = self.num_local_voxel[0] * self.num_local_voxel[1] * self.num_local_voxel[2]
        self.local_aggregation_type = local_aggregation_type
        assert self.local_aggregation_type in ['local_interpolation', 'voxel_avg_pool', 'voxel_random_choice']
        self.input_channels = input_channels
        self.num_reduced_channels = input_channels if num_reduced_channels is None else num_reduced_channels
        self.num_channels_of_local_aggregation = num_channels_of_local_aggregation
        self.max_neighbour_distance = max_neighbor_distance
        self.neighbor_nsample = neighbor_nsample
        self.neighbor_type = neighbor_type  # 1: ball, others: cube

        if self.local_aggregation_type == 'local_interpolation':
            self.local_interpolate_module = VectorPoolLocalInterpolateModule(
                mlp=None, num_voxels=self.num_local_voxel,
                max_neighbour_distance=self.max_neighbour_distance,
                nsample=self.neighbor_nsample,
                neighbor_type=self.neighbor_type,
                neighbour_distance_multiplier=neighbor_distance_multiplier,
            )
            num_c_in = (self.num_reduced_channels + 9) * self.total_voxels  # +9: offsets to the 3 NNs
        else:
            self.local_interpolate_module = None
            num_c_in = (self.num_reduced_channels + 3) * self.total_voxels  # +3: local voxel xyz

        num_c_out = self.total_voxels * self.num_channels_of_local_aggregation

        # One conv group per local voxel: each voxel position is transformed
        # with its own weights.
        self.separate_local_aggregation_layer = nn.Sequential(
            nn.Conv1d(num_c_in, num_c_out, kernel_size=1, groups=self.total_voxels, bias=False),
            nn.BatchNorm1d(num_c_out),
            nn.ReLU()
        )

        post_mlp_list = []
        c_in = num_c_out
        for cur_num_c in post_mlps:
            post_mlp_list.extend([
                nn.Conv1d(c_in, cur_num_c, kernel_size=1, bias=False),
                nn.BatchNorm1d(cur_num_c),
                nn.ReLU()
            ])
            c_in = cur_num_c
        self.post_mlps = nn.Sequential(*post_mlp_list)

        # Adaptive CUDA-side buffer size, grown on demand in
        # vector_pool_with_voxel_query().
        self.num_mean_points_per_grid = 20
        self.init_weights()

    def init_weights(self):
        # Kaiming init for convs; unit weight / zero bias for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
                nn.init.kaiming_normal_(m.weight)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            if isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1.0)
                nn.init.constant_(m.bias, 0)

    def extra_repr(self) -> str:
        # NOTE(review): the '(' after 'local_voxels=' is never closed — purely
        # cosmetic in the printed repr.
        ret = f'radius={self.max_neighbour_distance}, local_voxels=({self.num_local_voxel}, ' \
              f'local_aggregation_type={self.local_aggregation_type}, ' \
              f'num_c_reduction={self.input_channels}->{self.num_reduced_channels}, ' \
              f'num_c_local_aggregation={self.num_channels_of_local_aggregation}'
        return ret

    def vector_pool_with_voxel_query(self, xyz, xyz_batch_cnt, features, new_xyz, new_xyz_batch_cnt):
        """Pools features into each query point's local voxels with the fused
        CUDA vector-pool op (avg-pool or random-choice)."""
        use_xyz = 1
        pooling_type = 0 if self.local_aggregation_type == 'voxel_avg_pool' else 1

        new_features, new_local_xyz, num_mean_points_per_grid, point_cnt_of_grid = pointnet2_utils.vector_pool_with_voxel_query_op(
            xyz, xyz_batch_cnt, features, new_xyz, new_xyz_batch_cnt,
            self.num_local_voxel[0], self.num_local_voxel[1], self.num_local_voxel[2],
            self.max_neighbour_distance, self.num_reduced_channels, use_xyz,
            self.num_mean_points_per_grid, self.neighbor_nsample, self.neighbor_type,
            pooling_type
        )
        # grow the CUDA buffer estimate if the kernel needed more space
        self.num_mean_points_per_grid = max(self.num_mean_points_per_grid, num_mean_points_per_grid.item())

        num_new_pts = new_features.shape[0]
        new_local_xyz = new_local_xyz.view(num_new_pts, -1, 3)  # (N, num_voxel, 3)
        new_features = new_features.view(num_new_pts, -1, self.num_reduced_channels)  # (N, num_voxel, C)
        new_features = torch.cat((new_local_xyz, new_features), dim=-1).view(num_new_pts, -1)

        return new_features, point_cnt_of_grid

    @staticmethod
    def get_dense_voxels_by_center(point_centers, max_neighbour_distance, num_voxels):
        """
        Args:
            point_centers: (N, 3)
            max_neighbour_distance: float
            num_voxels: [num_x, num_y, num_z]

        Returns:
            voxel_centers: (N, total_voxels, 3)
        """
        R = max_neighbour_distance
        device = point_centers.device
        # Evenly-spaced voxel-center offsets covering [-R, R] in each axis;
        # the 1e-5 keeps the last center inside arange's half-open range.
        x_grids = torch.arange(-R + R / num_voxels[0], R - R / num_voxels[0] + 1e-5, 2 * R / num_voxels[0], device=device)
        y_grids = torch.arange(-R + R / num_voxels[1], R - R / num_voxels[1] + 1e-5, 2 * R / num_voxels[1], device=device)
        z_grids = torch.arange(-R + R / num_voxels[2], R - R / num_voxels[2] + 1e-5, 2 * R / num_voxels[2], device=device)
        x_offset, y_offset, z_offset = torch.meshgrid(x_grids, y_grids, z_grids)  # shape: [num_x, num_y, num_z]
        xyz_offset = torch.cat((
            x_offset.contiguous().view(-1, 1),
            y_offset.contiguous().view(-1, 1),
            z_offset.contiguous().view(-1, 1)), dim=-1
        )
        voxel_centers = point_centers[:, None, :] + xyz_offset[None, :, :]
        return voxel_centers

    def vector_pool_with_local_interpolate(self, xyz, xyz_batch_cnt, features, new_xyz, new_xyz_batch_cnt):
        """
        Args:
            xyz: (N, 3)
            xyz_batch_cnt: (batch_size)
            features: (N, C)
            new_xyz: (M, 3)
            new_xyz_batch_cnt: (batch_size)

        Returns:
            new_features: (M, total_voxels * C)
        """
        voxel_centers = self.get_dense_voxels_by_center(
            point_centers=new_xyz, max_neighbour_distance=self.max_neighbour_distance, num_voxels=self.num_local_voxel
        )  # (M1 + M2 + ..., total_voxels, 3)
        voxel_features = self.local_interpolate_module.forward(
            support_xyz=xyz, support_features=features, xyz_batch_cnt=xyz_batch_cnt,
            new_xyz=new_xyz, new_xyz_grid_centers=voxel_centers, new_xyz_batch_cnt=new_xyz_batch_cnt
        )  # ((M1 + M2 ...) * total_voxels, C)

        voxel_features = voxel_features.contiguous().view(-1, self.total_voxels * voxel_features.shape[-1])
        return voxel_features

    def forward(self, xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt, features, **kwargs):
        """
        :param xyz: (N1 + N2 ..., 3) tensor of the xyz coordinates of the features
        :param xyz_batch_cnt: (batch_size), [N1, N2, ...]
        :param new_xyz: (M1 + M2 ..., 3)
        :param new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
        :param features: (N1 + N2 ..., C) tensor of the descriptors of the the features
        :return:
            new_xyz: (M1 + M2 ..., 3) tensor of the new features' xyz
            new_features: (M1 + M2 ..., \sum_k(mlps[k][-1])) tensor of the new_features descriptors
        """
        N, C = features.shape

        assert C % self.num_reduced_channels == 0, \
            f'the input channels ({C}) should be an integral multiple of num_reduced_channels({self.num_reduced_channels})'

        # channel reduction: sum consecutive groups of num_reduced_channels
        features = features.view(N, -1, self.num_reduced_channels).sum(dim=1)

        if self.local_aggregation_type in ['voxel_avg_pool', 'voxel_random_choice']:
            vector_features, point_cnt_of_grid = self.vector_pool_with_voxel_query(
                xyz=xyz, xyz_batch_cnt=xyz_batch_cnt, features=features,
                new_xyz=new_xyz, new_xyz_batch_cnt=new_xyz_batch_cnt
            )
        elif self.local_aggregation_type == 'local_interpolation':
            vector_features = self.vector_pool_with_local_interpolate(
                xyz=xyz, xyz_batch_cnt=xyz_batch_cnt, features=features,
                new_xyz=new_xyz, new_xyz_batch_cnt=new_xyz_batch_cnt
            )  # (M1 + M2 + ..., total_voxels * C)
        else:
            raise NotImplementedError

        vector_features = vector_features.permute(1, 0)[None, :, :]  # (1, num_voxels * C, M1 + M2 ...)

        new_features = self.separate_local_aggregation_layer(vector_features)

        new_features = self.post_mlps(new_features)
        new_features = new_features.squeeze(dim=0).permute(1, 0)
        return new_xyz, new_features
class VectorPoolAggregationModuleMSG(nn.Module):
    """Multi-scale wrapper around VectorPoolAggregationModule: runs one
    aggregation per configured group, concatenates their outputs (plus the
    query xyz) and fuses them with shared post MLPs.
    """

    def __init__(self, input_channels, config):
        super().__init__()
        self.model_cfg = config
        self.num_groups = self.model_cfg.NUM_GROUPS
        self.layers = []  # NOTE(review): unused; submodules are registered via __setattr__ below

        c_in = 0
        for k in range(self.num_groups):
            cur_config = self.model_cfg[f'GROUP_CFG_{k}']
            cur_vector_pool_module = VectorPoolAggregationModule(
                input_channels=input_channels, num_local_voxel=cur_config.NUM_LOCAL_VOXEL,
                post_mlps=cur_config.POST_MLPS,
                max_neighbor_distance=cur_config.MAX_NEIGHBOR_DISTANCE,
                neighbor_nsample=cur_config.NEIGHBOR_NSAMPLE,
                local_aggregation_type=self.model_cfg.LOCAL_AGGREGATION_TYPE,
                num_reduced_channels=self.model_cfg.get('NUM_REDUCED_CHANNELS', None),
                num_channels_of_local_aggregation=self.model_cfg.NUM_CHANNELS_OF_LOCAL_AGGREGATION,
                neighbor_distance_multiplier=2.0
            )
            self.__setattr__(f'layer_{k}', cur_vector_pool_module)
            c_in += cur_config.POST_MLPS[-1]

        c_in += 3  # use_xyz

        shared_mlps = []
        for cur_num_c in self.model_cfg.MSG_POST_MLPS:
            shared_mlps.extend([
                nn.Conv1d(c_in, cur_num_c, kernel_size=1, bias=False),
                nn.BatchNorm1d(cur_num_c),
                nn.ReLU()
            ])
            c_in = cur_num_c
        self.msg_post_mlps = nn.Sequential(*shared_mlps)

    def forward(self, **kwargs):
        """Runs every group on the same kwargs (see
        VectorPoolAggregationModule.forward) and returns (new_xyz, fused_features)."""
        features_list = []
        for k in range(self.num_groups):
            cur_xyz, cur_features = self.__getattr__(f'layer_{k}')(**kwargs)
            features_list.append(cur_features)

        features = torch.cat(features_list, dim=-1)
        # append the query coordinates themselves as extra channels
        features = torch.cat((cur_xyz, features), dim=-1)
        features = features.permute(1, 0)[None, :, :]  # (1, C, N)
        new_features = self.msg_post_mlps(features)
        new_features = new_features.squeeze(dim=0).permute(1, 0)  # (N, C)

        return cur_xyz, new_features
| 21,385 | 44.40552 | 132 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/ops/pointnet2/pointnet2_batch/pointnet2_utils.py | from typing import Tuple
import torch
import torch.nn as nn
from torch.autograd import Function, Variable
from . import pointnet2_batch_cuda as pointnet2
class FarthestPointSampling(Function):
    """CUDA farthest point sampling; index selection is not differentiable."""

    @staticmethod
    def forward(ctx, xyz: torch.Tensor, npoint: int) -> torch.Tensor:
        """
        Uses iterative farthest point sampling to select a set of npoint features that have the largest
        minimum distance
        :param ctx:
        :param xyz: (B, N, 3) where N > npoint
        :param npoint: int, number of features in the sampled set
        :return:
             output: (B, npoint) tensor containing the set
        """
        assert xyz.is_contiguous()

        B, N, _ = xyz.size()
        output = torch.cuda.IntTensor(B, npoint)
        # running minimum squared distance of each point to the selected set
        temp = torch.cuda.FloatTensor(B, N).fill_(1e10)

        pointnet2.farthest_point_sampling_wrapper(B, N, npoint, xyz, temp, output)
        return output

    @staticmethod
    def backward(xyz, a=None):
        # sampling is not differentiable
        return None, None


# Both spellings are used across the codebase.
farthest_point_sample = furthest_point_sample = FarthestPointSampling.apply
class GatherOperation(Function):
    """Gathers feature columns by index (CUDA); differentiable w.r.t. features."""

    @staticmethod
    def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
        """
        :param ctx:
        :param features: (B, C, N)
        :param idx: (B, npoint) index tensor of the features to gather
        :return:
            output: (B, C, npoint)
        """
        assert features.is_contiguous()
        assert idx.is_contiguous()

        B, npoint = idx.size()
        _, C, N = features.size()
        output = torch.cuda.FloatTensor(B, C, npoint)

        pointnet2.gather_points_wrapper(B, C, N, npoint, features, idx, output)

        ctx.for_backwards = (idx, C, N)  # saved for the backward scatter
        return output

    @staticmethod
    def backward(ctx, grad_out):
        # scatter the output gradients back to the gathered positions
        idx, C, N = ctx.for_backwards
        B, npoint = idx.size()

        grad_features = Variable(torch.cuda.FloatTensor(B, C, N).zero_())
        grad_out_data = grad_out.data.contiguous()
        pointnet2.gather_points_grad_wrapper(B, C, N, npoint, grad_out_data, idx, grad_features.data)
        return grad_features, None


gather_operation = GatherOperation.apply
class ThreeNN(Function):
    """Finds the 3 nearest known neighbors of each unknown point (CUDA);
    not differentiable."""

    @staticmethod
    def forward(ctx, unknown: torch.Tensor, known: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Find the three nearest neighbors of unknown in known
        :param ctx:
        :param unknown: (B, N, 3)
        :param known: (B, M, 3)
        :return:
            dist: (B, N, 3) l2 distance to the three nearest neighbors
            idx: (B, N, 3) index of 3 nearest neighbors
        """
        assert unknown.is_contiguous()
        assert known.is_contiguous()

        B, N, _ = unknown.size()
        m = known.size(1)
        dist2 = torch.cuda.FloatTensor(B, N, 3)  # kernel fills squared distances
        idx = torch.cuda.IntTensor(B, N, 3)

        pointnet2.three_nn_wrapper(B, N, m, unknown, known, dist2, idx)
        return torch.sqrt(dist2), idx

    @staticmethod
    def backward(ctx, a=None, b=None):
        # nearest-neighbor search is not differentiable
        return None, None


three_nn = ThreeNN.apply
class ThreeInterpolate(Function):
    """Weighted interpolation from 3 neighbor features (CUDA); differentiable
    w.r.t. features."""

    @staticmethod
    def forward(ctx, features: torch.Tensor, idx: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
        """
        Performs weight linear interpolation on 3 features
        :param ctx:
        :param features: (B, C, M) Features descriptors to be interpolated from
        :param idx: (B, n, 3) three nearest neighbors of the target features in features
        :param weight: (B, n, 3) weights
        :return:
            output: (B, C, N) tensor of the interpolated features
        """
        assert features.is_contiguous()
        assert idx.is_contiguous()
        assert weight.is_contiguous()

        B, c, m = features.size()
        n = idx.size(1)
        ctx.three_interpolate_for_backward = (idx, weight, m)  # saved for backward
        output = torch.cuda.FloatTensor(B, c, n)

        pointnet2.three_interpolate_wrapper(B, c, m, n, features, idx, weight, output)
        return output

    @staticmethod
    def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        :param ctx:
        :param grad_out: (B, C, N) tensor with gradients of outputs
        :return:
            grad_features: (B, C, M) tensor with gradients of features
            None:
            None:
        """
        idx, weight, m = ctx.three_interpolate_for_backward
        B, c, n = grad_out.size()

        grad_features = Variable(torch.cuda.FloatTensor(B, c, m).zero_())
        grad_out_data = grad_out.data.contiguous()

        pointnet2.three_interpolate_grad_wrapper(B, c, n, m, grad_out_data, idx, weight, grad_features.data)
        return grad_features, None, None


three_interpolate = ThreeInterpolate.apply
class GroupingOperation(Function):
    """Groups feature columns by per-centroid neighbor indices (CUDA);
    differentiable w.r.t. features."""

    @staticmethod
    def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
        """
        :param ctx:
        :param features: (B, C, N) tensor of features to group
        :param idx: (B, npoint, nsample) tensor containing the indicies of features to group with
        :return:
            output: (B, C, npoint, nsample) tensor
        """
        assert features.is_contiguous()
        assert idx.is_contiguous()

        B, nfeatures, nsample = idx.size()
        _, C, N = features.size()
        output = torch.cuda.FloatTensor(B, C, nfeatures, nsample)

        pointnet2.group_points_wrapper(B, C, N, nfeatures, nsample, features, idx, output)

        ctx.for_backwards = (idx, N)  # saved for the backward scatter
        return output

    @staticmethod
    def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        :param ctx:
        :param grad_out: (B, C, npoint, nsample) tensor of the gradients of the output from forward
        :return:
            grad_features: (B, C, N) gradient of the features
        """
        idx, N = ctx.for_backwards

        B, C, npoint, nsample = grad_out.size()
        grad_features = Variable(torch.cuda.FloatTensor(B, C, N).zero_())

        grad_out_data = grad_out.data.contiguous()
        pointnet2.group_points_grad_wrapper(B, C, N, npoint, nsample, grad_out_data, idx, grad_features.data)
        return grad_features, None


grouping_operation = GroupingOperation.apply
class BallQuery(Function):
    """CUDA ball query; index selection is not differentiable."""

    @staticmethod
    def forward(ctx, radius: float, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor) -> torch.Tensor:
        """
        :param ctx:
        :param radius: float, radius of the balls
        :param nsample: int, maximum number of features in the balls
        :param xyz: (B, N, 3) xyz coordinates of the features
        :param new_xyz: (B, npoint, 3) centers of the ball query
        :return:
            idx: (B, npoint, nsample) tensor with the indicies of the features that form the query balls
        """
        assert new_xyz.is_contiguous()
        assert xyz.is_contiguous()

        B, N, _ = xyz.size()
        npoint = new_xyz.size(1)
        idx = torch.cuda.IntTensor(B, npoint, nsample).zero_()

        pointnet2.ball_query_wrapper(B, N, npoint, radius, nsample, new_xyz, xyz, idx)
        return idx

    @staticmethod
    def backward(ctx, a=None):
        # ball query is not differentiable
        return None, None, None, None


ball_query = BallQuery.apply
class QueryAndGroup(nn.Module):
    """Ball-query grouping: gathers up to nsample neighbor features per
    centroid and expresses their xyz relative to the centroid."""

    def __init__(self, radius: float, nsample: int, use_xyz: bool = True):
        """
        :param radius: float, radius of ball
        :param nsample: int, maximum number of features to gather in the ball
        :param use_xyz: if True, prepend the relative xyz offsets to the grouped features
        """
        super().__init__()
        self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz

    def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor, features: torch.Tensor = None) -> Tuple[torch.Tensor]:
        """
        :param xyz: (B, N, 3) xyz coordinates of the features
        :param new_xyz: (B, npoint, 3) centroids
        :param features: (B, C, N) descriptors of the features
        :return:
            new_features: (B, 3 + C, npoint, nsample)
        """
        idx = ball_query(self.radius, self.nsample, xyz, new_xyz)
        xyz_trans = xyz.transpose(1, 2).contiguous()
        grouped_xyz = grouping_operation(xyz_trans, idx)  # (B, 3, npoint, nsample)
        # translate to centroid-relative coordinates
        grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)

        if features is not None:
            grouped_features = grouping_operation(features, idx)
            if self.use_xyz:
                new_features = torch.cat([grouped_xyz, grouped_features], dim=1)  # (B, C + 3, npoint, nsample)
            else:
                new_features = grouped_features
        else:
            assert self.use_xyz, "Cannot have not features and not use xyz as a feature!"
            new_features = grouped_xyz

        return new_features
class GroupAll(nn.Module):
    """Grouping module that treats the entire point set as a single group
    (used when no centroids are sampled, i.e. npoint is None)."""

    def __init__(self, use_xyz: bool = True):
        super().__init__()
        self.use_xyz = use_xyz

    def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor, features: torch.Tensor = None):
        """
        :param xyz: (B, N, 3) xyz coordinates of the features
        :param new_xyz: ignored
        :param features: (B, C, N) descriptors of the features
        :return:
            new_features: (B, C + 3, 1, N)
        """
        coords = xyz.transpose(1, 2).unsqueeze(2)  # (B, 3, 1, N)
        if features is None:
            return coords
        feats = features.unsqueeze(2)  # (B, C, 1, N)
        if not self.use_xyz:
            return feats
        return torch.cat([coords, feats], dim=1)  # (B, 3 + C, 1, N)
| 9,717 | 32.395189 | 118 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/ops/pointnet2/pointnet2_batch/pointnet2_modules.py | from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import pointnet2_utils
class _PointnetSAModuleBase(nn.Module):
    """Shared forward logic for batch-mode PointNet++ set-abstraction layers;
    subclasses populate self.npoint / self.groupers / self.mlps."""

    def __init__(self):
        super().__init__()
        self.npoint = None          # number of sampled centroids; None -> group all points
        self.groupers = None        # nn.ModuleList of grouping modules (set by subclass)
        self.mlps = None            # nn.ModuleList of per-scale shared MLPs (set by subclass)
        self.pool_method = 'max_pool'

    def forward(self, xyz: torch.Tensor, features: torch.Tensor = None, new_xyz=None) -> (torch.Tensor, torch.Tensor):
        """
        :param xyz: (B, N, 3) tensor of the xyz coordinates of the features
        :param features: (B, N, C) tensor of the descriptors of the the features
        :param new_xyz: optional precomputed centroids; if None, farthest point sampling picks them
        :return:
            new_xyz: (B, npoint, 3) tensor of the new features' xyz
            new_features: (B, npoint, \sum_k(mlps[k][-1])) tensor of the new_features descriptors
        """
        new_features_list = []

        xyz_flipped = xyz.transpose(1, 2).contiguous()
        if new_xyz is None:
            # farthest point sampling selects centroid indices; gather coords
            new_xyz = pointnet2_utils.gather_operation(
                xyz_flipped,
                pointnet2_utils.farthest_point_sample(xyz, self.npoint)
            ).transpose(1, 2).contiguous() if self.npoint is not None else None

        for i in range(len(self.groupers)):
            new_features = self.groupers[i](xyz, new_xyz, features)  # (B, C, npoint, nsample)

            new_features = self.mlps[i](new_features)  # (B, mlp[-1], npoint, nsample)

            if self.pool_method == 'max_pool':
                new_features = F.max_pool2d(
                    new_features, kernel_size=[1, new_features.size(3)]
                )  # (B, mlp[-1], npoint, 1)
            elif self.pool_method == 'avg_pool':
                new_features = F.avg_pool2d(
                    new_features, kernel_size=[1, new_features.size(3)]
                )  # (B, mlp[-1], npoint, 1)
            else:
                raise NotImplementedError

            new_features = new_features.squeeze(-1)  # (B, mlp[-1], npoint)
            new_features_list.append(new_features)

        return new_xyz, torch.cat(new_features_list, dim=1)
class PointnetSAModuleMSG(_PointnetSAModuleBase):
    """Pointnet set abstraction layer with multiscale grouping"""

    def __init__(self, *, npoint: int, radii: List[float], nsamples: List[int], mlps: List[List[int]], bn: bool = True,
                 use_xyz: bool = True, pool_method='max_pool'):
        """
        :param npoint: int, number of sampled centroids (None -> group all points)
        :param radii: list of float, list of radii to group with
        :param nsamples: list of int, number of samples in each ball query
        :param mlps: list of list of int, spec of the pointnet before the global pooling for each scale
        :param bn: whether to use batchnorm (kept for interface compatibility; BatchNorm2d is always used)
        :param use_xyz: if True, centroid-relative xyz offsets are prepended to the grouped features
        :param pool_method: max_pool / avg_pool
        """
        super().__init__()

        assert len(radii) == len(nsamples) == len(mlps)

        self.npoint = npoint
        self.groupers = nn.ModuleList()
        self.mlps = nn.ModuleList()
        for i in range(len(radii)):
            radius = radii[i]
            nsample = nsamples[i]
            self.groupers.append(
                pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz)
                if npoint is not None else pointnet2_utils.GroupAll(use_xyz)
            )
            # Copy the per-scale spec before adjusting its input channel count:
            # the previous code mutated the caller's nested list in place
            # (mlps[i][0] += 3), so constructing this module twice from the
            # same config produced different channel counts.
            mlp_spec = list(mlps[i])
            if use_xyz:
                mlp_spec[0] += 3  # 3 extra channels for the relative xyz offsets

            shared_mlps = []
            for k in range(len(mlp_spec) - 1):
                shared_mlps.extend([
                    nn.Conv2d(mlp_spec[k], mlp_spec[k + 1], kernel_size=1, bias=False),
                    nn.BatchNorm2d(mlp_spec[k + 1]),
                    nn.ReLU()
                ])
            self.mlps.append(nn.Sequential(*shared_mlps))
        self.pool_method = pool_method
class PointnetSAModule(PointnetSAModuleMSG):
    """Pointnet set abstraction layer (single-scale convenience wrapper)"""

    def __init__(self, *, mlp: List[int], npoint: int = None, radius: float = None, nsample: int = None,
                 bn: bool = True, use_xyz: bool = True, pool_method='max_pool'):
        """
        :param mlp: list of int, spec of the pointnet before the global max_pool
        :param npoint: int, number of features (None -> group all points)
        :param radius: float, radius of ball
        :param nsample: int, number of samples in the ball query
        :param bn: whether to use batchnorm
        :param use_xyz: if True, centroid-relative xyz offsets are prepended to the grouped features
        :param pool_method: max_pool / avg_pool
        """
        # delegate to the multi-scale module with a single scale
        super().__init__(
            mlps=[mlp], npoint=npoint, radii=[radius], nsamples=[nsample], bn=bn, use_xyz=use_xyz,
            pool_method=pool_method
        )
class PointnetFPModule(nn.Module):
    r"""Propigates the features of one set to another"""

    def __init__(self, *, mlp: List[int], bn: bool = True):
        """
        :param mlp: list of int, channel spec of the shared MLP
        :param bn: whether to use batchnorm
        """
        super().__init__()
        shared_mlps = []
        for k in range(len(mlp) - 1):
            shared_mlps.extend([
                nn.Conv2d(mlp[k], mlp[k + 1], kernel_size=1, bias=False),
                nn.BatchNorm2d(mlp[k + 1]),
                nn.ReLU()
            ])
        self.mlp = nn.Sequential(*shared_mlps)

    def forward(
            self, unknown: torch.Tensor, known: torch.Tensor, unknow_feats: torch.Tensor, known_feats: torch.Tensor
    ) -> torch.Tensor:
        """
        :param unknown: (B, n, 3) tensor of the xyz positions of the unknown features
        :param known: (B, m, 3) tensor of the xyz positions of the known features
        :param unknow_feats: (B, C1, n) tensor of the features to be propigated to
        :param known_feats: (B, C2, m) tensor of features to be propigated
        :return:
            new_features: (B, mlp[-1], n) tensor of the features of the unknown features
        """
        if known is not None:
            # inverse-distance-weighted 3-NN interpolation
            dist, idx = pointnet2_utils.three_nn(unknown, known)
            dist_recip = 1.0 / (dist + 1e-8)  # eps avoids division by zero
            norm = torch.sum(dist_recip, dim=2, keepdim=True)
            weight = dist_recip / norm

            interpolated_feats = pointnet2_utils.three_interpolate(known_feats, idx, weight)
        else:
            # no known coords: broadcast the (global) known features to every unknown point
            interpolated_feats = known_feats.expand(*known_feats.size()[0:2], unknown.size(1))

        if unknow_feats is not None:
            # skip-connection with the features already on the unknown points
            new_features = torch.cat([interpolated_feats, unknow_feats], dim=1)  # (B, C2 + C1, n)
        else:
            new_features = interpolated_feats

        new_features = new_features.unsqueeze(-1)
        new_features = self.mlp(new_features)

        return new_features.squeeze(-1)
# Import-only module; no standalone behaviour when executed directly.
if __name__ == "__main__":
    pass
| 6,631 | 36.897143 | 119 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/ops/iou3d_nms/iou3d_nms_utils.py | """
3D IoU Calculation and Rotated NMS
Written by Shaoshuai Shi
All Rights Reserved 2019-2020.
"""
import torch
from ...utils import common_utils
from . import iou3d_nms_cuda
def boxes_bev_iou_cpu(boxes_a, boxes_b):
    """Pairwise bird's-eye-view IoU between two box sets, computed on the CPU.

    Args:
        boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
        boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading]

    Returns:
        (N, M) IoU matrix; returned as numpy if either input was numpy.
    """
    boxes_a, is_numpy = common_utils.check_numpy_to_torch(boxes_a)
    boxes_b, is_numpy = common_utils.check_numpy_to_torch(boxes_b)
    assert not (boxes_a.is_cuda or boxes_b.is_cuda), 'Only support CPU tensors'
    assert boxes_a.shape[1] == 7 and boxes_b.shape[1] == 7
    # The extension kernel fills this matrix in place (CPU variant despite the
    # module name).
    iou_mat = boxes_a.new_zeros((boxes_a.shape[0], boxes_b.shape[0]))
    iou3d_nms_cuda.boxes_iou_bev_cpu(boxes_a.contiguous(), boxes_b.contiguous(), iou_mat)
    if is_numpy:
        return iou_mat.numpy()
    return iou_mat
def boxes_iou_bev(boxes_a, boxes_b):
    """Pairwise bird's-eye-view IoU between two box sets on the GPU.

    Args:
        boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
        boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading]

    Returns:
        (N, M) IoU matrix as a CUDA float tensor.
    """
    assert boxes_a.shape[1] == 7 and boxes_b.shape[1] == 7
    # Output buffer filled in place by the CUDA kernel.
    iou_mat = torch.cuda.FloatTensor(boxes_a.shape[0], boxes_b.shape[0]).zero_()
    iou3d_nms_cuda.boxes_iou_bev_gpu(boxes_a.contiguous(), boxes_b.contiguous(), iou_mat)
    return iou_mat
def boxes_iou3d_gpu(boxes_a, boxes_b):
    """Pairwise 3D IoU between two box sets on the GPU.

    Args:
        boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
        boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading]

    Returns:
        (N, M) 3D IoU matrix.
    """
    assert boxes_a.shape[1] == 7 and boxes_b.shape[1] == 7
    # Vertical extents of every box: z is the box centre, dz its height.
    top_a = (boxes_a[:, 2] + boxes_a[:, 5] / 2).view(-1, 1)
    bottom_a = (boxes_a[:, 2] - boxes_a[:, 5] / 2).view(-1, 1)
    top_b = (boxes_b[:, 2] + boxes_b[:, 5] / 2).view(1, -1)
    bottom_b = (boxes_b[:, 2] - boxes_b[:, 5] / 2).view(1, -1)
    # Rotated overlap area in the ground plane, filled in place by the kernel.
    overlaps_bev = torch.cuda.FloatTensor(boxes_a.shape[0], boxes_b.shape[0]).zero_()
    iou3d_nms_cuda.boxes_overlap_bev_gpu(boxes_a.contiguous(), boxes_b.contiguous(), overlaps_bev)
    # Height overlap per pair, clamped at zero for vertically disjoint boxes.
    overlaps_h = torch.clamp(torch.min(top_a, top_b) - torch.max(bottom_a, bottom_b), min=0)
    # 3D intersection = BEV area overlap times height overlap.
    overlaps_3d = overlaps_bev * overlaps_h
    vol_a = (boxes_a[:, 3] * boxes_a[:, 4] * boxes_a[:, 5]).view(-1, 1)
    vol_b = (boxes_b[:, 3] * boxes_b[:, 4] * boxes_b[:, 5]).view(1, -1)
    union = torch.clamp(vol_a + vol_b - overlaps_3d, min=1e-6)
    return overlaps_3d / union
def nms_gpu(boxes, scores, thresh, pre_maxsize=None, **kwargs):
    """Rotated non-maximum suppression on the GPU.

    :param boxes: (N, 7) [x, y, z, dx, dy, dz, heading]
    :param scores: (N)
    :param thresh: BEV IoU threshold above which lower-scored boxes are dropped
    :param pre_maxsize: optional cap on the number of top-scored boxes considered
    :return: (kept indices into the original ordering, None)
    """
    assert boxes.shape[1] == 7
    _, order = scores.sort(0, descending=True)
    if pre_maxsize is not None:
        order = order[:pre_maxsize]
    ranked_boxes = boxes[order].contiguous()
    # `keep` is an output buffer the kernel fills with indices into ranked_boxes.
    keep = torch.LongTensor(ranked_boxes.size(0))
    num_out = iou3d_nms_cuda.nms_gpu(ranked_boxes, keep, thresh)
    return order[keep[:num_out].cuda()].contiguous(), None
def nms_normal_gpu(boxes, scores, thresh, **kwargs):
    """Axis-aligned ("normal") non-maximum suppression on the GPU.

    :param boxes: (N, 7) [x, y, z, dx, dy, dz, heading]
    :param scores: (N)
    :param thresh: IoU threshold above which lower-scored boxes are dropped
    :return: (kept indices into the original ordering, None)
    """
    assert boxes.shape[1] == 7
    _, order = scores.sort(0, descending=True)
    ranked_boxes = boxes[order].contiguous()
    # Output buffer the kernel fills with indices into ranked_boxes.
    keep = torch.LongTensor(ranked_boxes.size(0))
    num_out = iou3d_nms_cuda.nms_normal_gpu(ranked_boxes, keep, thresh)
    return order[keep[:num_out].cuda()].contiguous(), None
| 3,673 | 30.401709 | 109 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/pcdet/ops/roiaware_pool3d/roiaware_pool3d_utils.py | import torch
import torch.nn as nn
from torch.autograd import Function
from ...utils import common_utils
from . import roiaware_pool3d_cuda
def points_in_boxes_cpu(points, boxes):
    """Compute point-in-box membership on the CPU.

    Args:
        points: (num_points, 3)
        boxes: (N, 7) [x, y, z, dx, dy, dz, heading]; (x, y, z) is the box
            centre and the boxes are assumed not to overlap each other.

    Returns:
        (N, num_points) int membership matrix; numpy if inputs were numpy.
    """
    assert boxes.shape[1] == 7
    assert points.shape[1] == 3
    points, is_numpy = common_utils.check_numpy_to_torch(points)
    boxes, is_numpy = common_utils.check_numpy_to_torch(boxes)
    # The extension kernel writes the membership flags in place.
    membership = points.new_zeros((boxes.shape[0], points.shape[0]), dtype=torch.int)
    roiaware_pool3d_cuda.points_in_boxes_cpu(boxes.float().contiguous(), points.float().contiguous(), membership)
    if is_numpy:
        return membership.numpy()
    return membership
def points_in_boxes_gpu(points, boxes):
    """Assign every point to the box containing it, on the GPU.

    :param points: (B, M, 3)
    :param boxes: (B, T, 7), num_valid_boxes <= T
    :return box_idxs_of_pts: (B, M) box index per point; -1 means background
    """
    assert boxes.shape[0] == points.shape[0]
    assert boxes.shape[2] == 7 and points.shape[2] == 3
    batch_size, num_points = points.shape[0], points.shape[1]
    # Initialize every point as background; the kernel overwrites in place.
    box_ids = points.new_full((batch_size, num_points), -1, dtype=torch.int)
    roiaware_pool3d_cuda.points_in_boxes_gpu(boxes.contiguous(), points.contiguous(), box_ids)
    return box_ids
class RoIAwarePool3d(nn.Module):
    """nn.Module wrapper around :class:`RoIAwarePool3dFunction`."""

    def __init__(self, out_size, max_pts_each_voxel=128):
        """
        :param out_size: int or (x, y, z) tuple giving the pooled grid resolution
        :param max_pts_each_voxel: maximum number of points gathered per voxel
        """
        super().__init__()
        self.max_pts_each_voxel = max_pts_each_voxel
        self.out_size = out_size

    def forward(self, rois, pts, pts_feature, pool_method='max'):
        assert pool_method in ['max', 'avg']
        return RoIAwarePool3dFunction.apply(
            rois, pts, pts_feature, self.out_size, self.max_pts_each_voxel, pool_method
        )
class RoIAwarePool3dFunction(Function):
    """Autograd wrapper around the roiaware_pool3d CUDA kernels.

    Forward voxelizes each RoI into an (out_x, out_y, out_z) grid and pools the
    features of the points falling in each voxel; backward scatters the output
    gradient back onto the contributing points.
    """
    @staticmethod
    def forward(ctx, rois, pts, pts_feature, out_size, max_pts_each_voxel, pool_method):
        """
        Args:
            ctx:
            rois: (N, 7) [x, y, z, dx, dy, dz, heading] (x, y, z) is the box center
            pts: (npoints, 3)
            pts_feature: (npoints, C)
            out_size: int or tuple, like 7 or (7, 7, 7)
            max_pts_each_voxel: maximum number of points recorded per voxel
            pool_method: 'max' or 'avg'
        Returns:
            pooled_features: (N, out_x, out_y, out_z, C)
        """
        assert rois.shape[1] == 7 and pts.shape[1] == 3
        # An int out_size means a cubic output grid.
        if isinstance(out_size, int):
            out_x = out_y = out_z = out_size
        else:
            assert len(out_size) == 3
            for k in range(3):
                assert isinstance(out_size[k], int)
            out_x, out_y, out_z = out_size
        num_rois = rois.shape[0]
        num_channels = pts_feature.shape[-1]
        num_pts = pts.shape[0]
        pooled_features = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, num_channels))
        # argmax: for max pooling, which point supplied each output value
        # (used to route gradients in backward).
        argmax = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, num_channels), dtype=torch.int)
        # pts_idx_of_voxels: which points landed in each voxel (used by the
        # avg-pooling backward pass).
        pts_idx_of_voxels = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, max_pts_each_voxel), dtype=torch.int)
        # The CUDA kernel expects the pooling mode as an integer code.
        pool_method_map = {'max': 0, 'avg': 1}
        pool_method = pool_method_map[pool_method]
        roiaware_pool3d_cuda.forward(rois, pts, pts_feature, argmax, pts_idx_of_voxels, pooled_features, pool_method)
        # Stash exactly what backward needs; only pts_feature gets a gradient.
        ctx.roiaware_pool3d_for_backward = (pts_idx_of_voxels, argmax, pool_method, num_pts, num_channels)
        return pooled_features
    @staticmethod
    def backward(ctx, grad_out):
        """
        :param grad_out: (N, out_x, out_y, out_z, C)
        :return:
            grad_in: (npoints, C)
        """
        pts_idx_of_voxels, argmax, pool_method, num_pts, num_channels = ctx.roiaware_pool3d_for_backward
        grad_in = grad_out.new_zeros((num_pts, num_channels))
        roiaware_pool3d_cuda.backward(pts_idx_of_voxels, argmax, grad_out.contiguous(), grad_in, pool_method)
        # Gradient only flows to pts_feature (3rd forward argument); all other
        # forward inputs are non-differentiable.
        return None, None, grad_in, None, None, None
# Import-only module; no standalone behaviour when executed directly.
if __name__ == '__main__':
    pass
| 4,075 | 35.392857 | 120 | py |
hyperparam-sensitivity | hyperparam-sensitivity-main/models/estimators_tf/_two_head.py | import os
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from sklearn.model_selection import ParameterGrid
from ._common import get_params
from helpers.utils import get_params_df
def _get_ct_data(t, y):
y_c = np.zeros_like(y)
w_c = np.zeros_like(y)
y_c[t < 1] = y[t < 1]
w_c[t < 1] = 1
y_t = np.zeros_like(y)
w_t = np.zeros_like(y)
y_t[t > 0] = y[t > 0]
w_t[t > 0] = 1
return y_c, y_t, w_c, w_t
class TwoHeadSearch():
    """Grid search over TwoHeadNN hyper-parameters for CATE estimation.

    For each parameter combination, a model is trained on (X, t, y) and its
    factual / counterfactual predictions on the test split are accumulated and
    saved as a compressed .npz file whose naming encodes the iteration (and
    optionally fold) id.
    """
    def __init__(self, opt):
        # opt: options namespace; must provide estimation_model, scale_y and
        # output_path (presumably argparse — verify against caller).
        self.opt = opt
        self.params_grid = get_params(self.opt.estimation_model)
    def run(self, train, test, scaler, iter_id, fold_id):
        """Train every grid point and dump predictions to `<output_path>`.

        train/test are (X, t, y[, ...]) tuples; scaler inverse-transforms the
        outcome when opt.scale_y is set; iter_id/fold_id only affect the
        output filename (fold_id <= 0 omits the fold suffix).
        """
        X_tr = train[0]
        t_tr = train[1].flatten()
        y_tr = train[2].flatten()
        X_test = test[0]
        t_test = test[1].flatten()
        input_size = X_tr.shape[1]
        y_hats = []
        y0_hats = []
        y1_hats = []
        cate_hats = []
        # ParameterGrid iterates deterministically (sorted keys), so array rows
        # line up with the rows of the params CSV from save_params_info().
        for params in ParameterGrid(self.params_grid):
            model = TwoHeadNN(input_size, params['n_layers'], params['n_layers2'], params['n_units'], params['learning_rate'], params['activation'], params['dropout'], params['l2'], 'linear', params['batch_size'], params['epochs'], params['epochs_are_steps'])
            model.fit(X_tr, t_tr, y_tr)
            y_hat = model.predict_factual(X_test, t_test)
            y0_hat, y1_hat = model.predict_all(X_test)
            # NOTE(review): only the potential outcomes are inverse-scaled; the
            # factual prediction y_hat is saved on the scaled axis — confirm
            # downstream consumers expect this.
            if self.opt.scale_y:
                y0_hat = scaler.inverse_transform(y0_hat)
                y1_hat = scaler.inverse_transform(y1_hat)
            cate_hat = y1_hat - y0_hat
            y_hats.append(y_hat)
            y0_hats.append(y0_hat)
            y1_hats.append(y1_hat)
            cate_hats.append(cate_hat)
        if fold_id > 0:
            filename = f'{self.opt.estimation_model}_iter{iter_id}_fold{fold_id}'
        else:
            filename = f'{self.opt.estimation_model}_iter{iter_id}'
        # dtype=object tolerates per-grid-point shape differences.
        y_hats_arr = np.array(y_hats, dtype=object)
        y0_hats_arr = np.array(y0_hats, dtype=object)
        y1_hats_arr = np.array(y1_hats, dtype=object)
        cate_hats_arr = np.array(cate_hats, dtype=object)
        np.savez_compressed(os.path.join(self.opt.output_path, filename), y_hat=y_hats_arr, y0_hat=y0_hats_arr, y1_hat=y1_hats_arr, cate_hat=cate_hats_arr)
    def save_params_info(self):
        """Write the hyper-parameter grid as a CSV next to the prediction files."""
        df_params = get_params_df(self.params_grid)
        df_params.to_csv(os.path.join(self.opt.output_path, f'{self.opt.estimation_model}_params.csv'), index=False)
class TwoHeadNN():
    """Two-headed feed-forward outcome model (TF1 graph mode).

    A shared trunk feeds two heads: 'c_head' predicts the control outcome and
    't_head' the treated outcome; per-sample weights mask out the
    counterfactual head during training. Each instance owns a private
    tf.Graph / tf.compat.v1.Session so several models can coexist in one
    process; every model call therefore re-enters those contexts.
    """
    def __init__(self, input_size, n_layers, n_layers2, n_units, learning_rate, activation, dropout, l2, out_layer, batch_size, epochs, epochs_are_steps=False):
        # n_layers: depth of the shared trunk; n_layers2: depth of each head
        # (0 means the heads are single linear layers on the trunk output).
        self.input_size = input_size
        self.n_layers = n_layers
        self.n_layers2 = n_layers2
        self.n_units = n_units
        self.learning_rate = learning_rate
        self.activation = activation
        self.dropout = dropout
        self.l2 = l2
        self.out_layer = out_layer
        # batch_size <= 0 means full-batch training; epochs_are_steps makes
        # `epochs` count minibatch updates instead of dataset passes.
        self.batch_size = batch_size
        self.epochs = epochs
        self.epochs_are_steps = epochs_are_steps
        # Build the Keras model inside this instance's private graph/session.
        self.graph = tf.Graph()
        with self.graph.as_default():
            self.sess = tf.compat.v1.Session()
            with self.sess.as_default():
                self.model = self._build_model()
    def _build_model(self):
        """Assemble and compile the shared-trunk / two-head Keras model."""
        # L2 regularization is applied only to the final head layers.
        kernel_reg = keras.regularizers.l2(self.l2) if self.l2 > 0.0 else None
        input = layers.Input(shape=(self.input_size,))
        x = input
        # Dense('linear') + separate Activation so dropout acts on
        # pre-activation values.
        x = layers.Dense(self.n_units, activation='linear')(x)
        if self.dropout > 0.0: x = layers.Dropout(self.dropout)(x)
        x = layers.Activation(self.activation)(x)
        for _ in range(self.n_layers - 1):
            x = layers.Dense(self.n_units, activation='linear')(x)
            if self.dropout > 0.0: x = layers.Dropout(self.dropout)(x)
            x = layers.Activation(self.activation)(x)
        # Separate hidden layers for the heads.
        if self.n_layers2 > 0:
            # Each head gets half the trunk width.
            n_units2 = int(self.n_units/2)
            c_head = layers.Dense(n_units2, activation='linear')(x)
            if self.dropout > 0.0: c_head = layers.Dropout(self.dropout)(c_head)
            c_head = layers.Activation(self.activation)(c_head)
            for _ in range(self.n_layers2 - 1):
                c_head = layers.Dense(n_units2, activation='linear')(c_head)
                if self.dropout > 0.0: c_head = layers.Dropout(self.dropout)(c_head)
                c_head = layers.Activation(self.activation)(c_head)
            c_head = layers.Dense(1, activation=self.out_layer, kernel_regularizer=kernel_reg, name='c_head')(c_head)
            t_head = layers.Dense(n_units2, activation='linear')(x)
            if self.dropout > 0.0: t_head = layers.Dropout(self.dropout)(t_head)
            t_head = layers.Activation(self.activation)(t_head)
            for _ in range(self.n_layers2 - 1):
                t_head = layers.Dense(n_units2, activation='linear')(t_head)
                if self.dropout > 0.0: t_head = layers.Dropout(self.dropout)(t_head)
                t_head = layers.Activation(self.activation)(t_head)
            t_head = layers.Dense(1, activation=self.out_layer, kernel_regularizer=kernel_reg, name='t_head')(t_head)
        else:
            c_head = layers.Dense(1, activation=self.out_layer, kernel_regularizer=kernel_reg, name='c_head')(x)
            t_head = layers.Dense(1, activation=self.out_layer, kernel_regularizer=kernel_reg, name='t_head')(x)
        model = keras.Model(inputs=input, outputs=[c_head, t_head])
        optimiser = keras.optimizers.Adam(self.learning_rate)
        model.compile(optimizer=optimiser, loss=['mse', 'mse'], metrics=None)
        return model
    def fit(self, X, t, y):
        """Train on covariates X, binary treatment t, and factual outcome y.

        Sample weights zero out each head's loss on samples of the opposite
        treatment arm, so every head learns only from its own group.
        """
        bs = self.batch_size if self.batch_size > 0 else len(X)
        y_c, y_t, sw_c, sw_t = _get_ct_data(t, y)
        if self.epochs_are_steps:
            # `epochs` counts minibatch updates; sweep the data until reached.
            epoch_i = 0
            while(epoch_i < self.epochs):
                for idx_start in range(0, X.shape[0], bs):
                    # Last batch can be smaller than bs.
                    idx_end = min(idx_start + bs, X.shape[0])
                    X_batch = X[idx_start:idx_end]
                    y_c_batch, y_t_batch = y_c[idx_start:idx_end], y_t[idx_start:idx_end]
                    sw_c_batch, sw_t_batch = sw_c[idx_start:idx_end], sw_t[idx_start:idx_end]
                    with self.graph.as_default():
                        with self.sess.as_default():
                            _ = self.model.train_on_batch(X_batch, (y_c_batch, y_t_batch), sample_weight=(sw_c_batch, sw_t_batch))
                    epoch_i += 1
                    if epoch_i >= self.epochs:
                        break
        else:
            with self.graph.as_default():
                with self.sess.as_default():
                    _ = self.model.fit(X, (y_c, y_t), batch_size=bs, epochs=self.epochs, verbose=False, sample_weight=(sw_c, sw_t))
    def predict_all(self, X):
        """Return both potential-outcome predictions (Y0, Y1) for every row of X."""
        with self.graph.as_default():
            with self.sess.as_default():
                preds = self.model.predict(X)
        # [Y0, Y1]
        return preds[0], preds[1]
    def predict_factual(self, X, t):
        """Return, per sample, the head matching the observed treatment t."""
        with self.graph.as_default():
            with self.sess.as_default():
                preds = self.model.predict(X)
        # [n_samples, (y0, y1)]
        # NOTE(review): np.squeeze collapses the sample axis when X has a
        # single row — verify callers never pass n_samples == 1.
        ct = np.squeeze(np.array(preds)).T
        # [n_samples, (t0, t1)] — boolean mask selects the factual column.
        mask = np.vstack((t==0, t==1)).T
        return ct[mask]
hyperparam-sensitivity | hyperparam-sensitivity-main/models/estimators_tf/_mlp.py | import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
class MLP():
    """Single-output feed-forward regressor/classifier (TF1 graph mode).

    Treatment-as-feature model: the treatment indicator is expected as the
    last input column. Each instance owns a private tf.Graph / Session so
    several models can coexist in one process; all model calls re-enter
    those contexts.
    """
    def __init__(self, input_size, n_layers, n_units, learning_rate, activation, dropout, l2, out_layer, batch_size, epochs, epochs_are_steps=False):
        # out_layer == 'sigmoid' selects binary cross-entropy; anything else
        # gives a linear output trained with MSE (see _build_model).
        self.input_size = input_size
        self.n_layers = n_layers
        self.n_units = n_units
        self.learning_rate = learning_rate
        self.activation = activation
        self.dropout = dropout
        self.l2 = l2
        self.out_layer = out_layer
        # batch_size <= 0 means full-batch; epochs_are_steps makes `epochs`
        # count minibatch updates instead of dataset passes.
        self.batch_size = batch_size
        self.epochs = epochs
        self.epochs_are_steps = epochs_are_steps
        # Build the Keras model inside this instance's private graph/session.
        self.graph = tf.Graph()
        with self.graph.as_default():
            self.sess = tf.compat.v1.Session()
            with self.sess.as_default():
                self.model = self._build_model()
    def _build_model(self):
        """Assemble and compile the MLP; loss/output follow self.out_layer."""
        if self.out_layer == 'sigmoid':
            out_act = 'sigmoid'
            loss = keras.losses.BinaryCrossentropy(from_logits=False)
        else:
            out_act = 'linear'
            loss = keras.losses.MeanSquaredError()
        input = layers.Input(shape=(self.input_size,))
        x = layers.Dense(self.n_units, activation=self.activation)(input)
        if self.dropout > 0.0: x = layers.Dropout(self.dropout)(x)
        for _ in range(self.n_layers - 1):
            x = layers.Dense(self.n_units, activation=self.activation)(x)
            if self.dropout > 0.0: x = layers.Dropout(self.dropout)(x)
        # L2 regularization is applied to the output layer only.
        kernel_reg = keras.regularizers.l2(self.l2) if self.l2 > 0.0 else None
        x = layers.Dense(1, activation=out_act, kernel_regularizer=kernel_reg)(x)
        model = keras.Model(inputs=input, outputs=x)
        optimiser = keras.optimizers.Adam(self.learning_rate)
        model.compile(optimizer=optimiser, loss=loss)
        return model
    def fit(self, X, y):
        """Train on X (including the treatment column) and targets y."""
        bs = self.batch_size if self.batch_size > 0 else X.shape[0]
        if self.epochs_are_steps:
            # Number of steps
            epoch_i = 0
            while(epoch_i < self.epochs):
                for idx_start in range(0, X.shape[0], bs):
                    # Last batch can be smaller
                    idx_end = min(idx_start + bs, X.shape[0])
                    X_batch, y_batch = X[idx_start:idx_end], y[idx_start:idx_end]
                    with self.graph.as_default():
                        with self.sess.as_default():
                            _ = self.model.train_on_batch(X_batch, y_batch)
                    epoch_i += 1
                    # Exit if reached desired number of steps
                    if epoch_i >= self.epochs:
                        break
        else:
            with self.graph.as_default():
                with self.sess.as_default():
                    _ = self.model.fit(X, y, batch_size=bs, epochs=self.epochs, verbose=False)
    def predict(self, X):
        # All-purpose predict.
        # Assumes X includes T.
        with self.graph.as_default():
            with self.sess.as_default():
                return self.model.predict(X)
    def predict_proba(self, X):
        # Just for compatibility purposes.
        # (if sigmoid in the output, this returns probabilities)
        return self.predict(X)
    def predict_factual(self, X, t):
        """Predict with the observed treatment t appended as the last column."""
        with self.graph.as_default():
            with self.sess.as_default():
                return self.model.predict(np.concatenate([X, t.reshape(-1, 1)], axis=1))
    def predict_all(self, X):
        # Assume X excludes T.
        # X = [units, features]
        # Evaluate both counterfactual arms by appending t=0 and t=1 columns.
        X0 = np.concatenate([X, np.zeros((X.shape[0], 1))], axis=1)
        X1 = np.concatenate([X, np.ones((X.shape[0], 1))], axis=1)
        with self.graph.as_default():
            with self.sess.as_default():
                y0 = self.model.predict(X0)
                y1 = self.model.predict(X1)
        return y0, y1
tps-torch | tps-torch-main/setup.py | from setuptools import setup, find_packages
import os
"""
def copy_dir(dir_path):
base_dir = os.path.join('tpstorch/', dir_path)
for (dirpath, dirnames, files) in os.walk(base_dir):
for f in files:
if "git" in f:
continue
else:
yield os.path.join(dirpath.split('/', 1)[1], f)
"""
# Package metadata for tpstorch; the prebuilt extension (.so), CMake helpers
# and C++ headers are shipped alongside the Python sources via package_data.
setup(name='tpstorch',
      version='0.1',
      description='Some Random Package',
      url='http://github.com/muhammadhasyim/tps-torch',
      author='M.R. Hasyim, C.B. Batton',
      author_email='muhammad_hasyim@berkeley.edu, chbatton@berkeley.edu',
      license='MIT',
      packages=find_packages(),
      package_data={"":["*.so",
                        "CMake/*.cmake",
                        "include/tpstorch/*.h",
                        "include/tpstorch/examples/mullerbrown/*.h",
                        "include/tpstorch/examples/mullerbrown/*.hpp",
                        "include/tpstorch/fts/*.h",
                        "include/tpstorch/ml/*.h"]},
      ##"include/tpstorch/sm/*.h"]},
      install_requires=['numpy','scipy','pybind11','tqdm'],
      zip_safe=False)
| 1,164 | 36.580645 | 73 | py |
tps-torch | tps-torch-main/dimer_solv/fts_test/dimer_fts.py | import scipy.spatial
import sys
sys.path.insert(0,'/global/home/users/muhammad_hasyim/tps-torch/build/')
#Import necessarry tools from torch
import torch
import torch.distributed as dist
import torch.nn as nn
#Import necessarry tools from tpstorch
from tpstorch.examples.dimer_solv_ml import MyMLFTSSampler
from tpstorch import _rank, _world_size
import numpy as np
from tpstorch.ml.nn import FTSLayer#US
#Import any other thing
import tqdm, sys
class FTSLayerCustom(FTSLayer):
    r""" A linear layer, where the parameters correspond to the string obtained by the
    general FTS method. Customized to take into account rotational and translational symmetry of the dimer problem.
    Args:
        react_config (torch.Tensor): starting configuration in the reactant basin.
        prod_config (torch.Tensor): starting configuration in the product basin.
        num_nodes (int): number of nodes along the string.
        boxsize (float): edge length of the cubic periodic box.
        num_particles (int): number of particles stored per string node.
    """
    def __init__(self, react_config, prod_config, num_nodes,boxsize,num_particles):
        super(FTSLayerCustom,self).__init__(react_config, prod_config, num_nodes)
        self.boxsize = boxsize
        self.Np = num_particles
    @torch.no_grad()
    def compute_metric(self,x):
        # Symmetry-reduced squared distances from configuration x to every
        # string node: remove the dimer's centre of mass, then optimally rotate
        # (Kabsch) before measuring RMSD.
        # NOTE(review): x is assumed to hold 32 particles (hard-coded view) —
        # confirm against the simulation's particle count.
        ##(1) Pre-processing so that dimer is at the center
        old_x = x.view(32,3).clone()
        #Compute the pair distance (minimum-image convention)
        dx = (old_x[0]-old_x[1])
        dx = dx-torch.round(dx/self.boxsize)*self.boxsize
        #Re-compute one of the coordinates and shift to origin
        old_x[0] = dx+old_x[1]
        x_com = 0.5*(old_x[0]+old_x[1])
        #Do the same thing to the string configurations
        new_string = self.string.view(_world_size, self.Np,3).clone()
        #Compute the pair distance
        ds = (new_string[:,0]-new_string[:,1])
        ds = ds-torch.round(ds/self.boxsize)*self.boxsize
        #Re-compute one of the coordinates and shift to origin
        new_string[:,0] = ds+new_string[:,1]
        s_com = 0.5*(new_string[:,0]+new_string[:,1])
        # Shift everything into the dimer-centred frame and re-wrap into the box.
        for i in range(self.Np):
            old_x[i] -= x_com
            new_string[:,i] -= s_com
            old_x[i] -= torch.round(old_x[i]/self.boxsize)*self.boxsize
            new_string[:,i] -= torch.round(new_string[:,i]/self.boxsize)*self.boxsize
        ##(2) Rotate the system using Kabsch algorithm
        dist_sq_list = torch.zeros(_world_size)
        # All particles are weighted equally in the alignment.
        weights = np.ones(self.Np)#/(self.Np-2)
        #weights = np.zeros(self.Np)#np.ones(self.Np)/(self.Np-2)
        #weights[0] = 1.0
        #weights[1] = 1.0
        for i in range(_world_size):
            # align_vectors returns the best-fit rotation and the weighted RMSD.
            _, rmsd = scipy.spatial.transform.Rotation.align_vectors(old_x.numpy(), new_string[i].numpy(),weights=weights)
            dist_sq_list[i] = rmsd**2
        return dist_sq_list
#WARNING! Needs to be changed
#Only testing if configurations are constrained properly
"""
def forward(self,x):
##(1) Remove center of mass
new_x = x.view(self.Np,3).clone()
#Compute the pair distance
dx = (new_x[0]-new_x[1])
dx = dx-torch.round(dx/self.boxsize)*self.boxsize
#Re-compute one of the coordinates and shift to origin
new_x[0] = dx+new_x[1]
x_com = 0.5*(new_x[0]+new_x[1])
for i in range(self.Np):
new_x[i] -= x_com
new_x[i] -= torch.round(new_x[i]/self.boxsize)*self.boxsize
new_string = self.string[_rank].view(self.Np,3).clone()
#Compute the pair distance
ds = (new_string[0]-new_string[1])
ds = ds-torch.round(ds/self.boxsize)*self.boxsize
#Re-compute one of the coordinates and shift to origin
#new_string[0] = ds+new_string[1]
#s_com = 0.5*(new_string[0]+new_string[1])#.detach().clone()#,dim=1)
#for i in range(self.Np):
# new_string[i] -= s_com
##(2) Rotate the configuration
dx /= torch.norm(dx)
ds /= torch.norm(ds)
v = torch.cross(dx,ds)
cosine = torch.dot(ds,dx)
val = 0
for i in range(self.Np):
new_x[i] += torch.cross(v,new_x[i])+torch.cross(v,torch.cross(v,new_x[i]))/(1+cosine)
DR = new_x[i] - new_string[i]#= torch.round(new_x[i]/self.boxsize)*self.boxsize
DR = DR-torch.round(DR/self.boxsize)*self.boxsize
val += torch.sum(DR**2)
return val
"""
# This is a sketch of the intended design: the committor still has to be made to
# play nicely within this function. One option is a generic committor stub here
# that the actual committor implementation overrides.
class DimerFTS(MyMLFTSSampler):
    """Dimer-in-solvent sampler constrained to this rank's FTS cell.

    Wraps the C++ MD backend (MyMLFTSSampler): every biased step is accepted
    only if the new configuration stays inside the Voronoi cell of this rank's
    string node, as measured by ftslayer.compute_metric.
    """
    def __init__(self,param,config,rank,beta,mpi_group,ftslayer,output_time, save_config=False):
        # param: backend parameter file name; beta: inverse temperature;
        # output_time: dump period (in timesteps) when save_config is set.
        super(DimerFTS, self).__init__(param,config.detach().clone(),rank,beta,mpi_group)
        self.output_time = output_time
        self.save_config = save_config
        self.timestep = 0
        self.torch_config = config
        #Save config size and its flattened version
        self.config_size = config.size()
        self.flattened_size = config.flatten().size()
        self.invkT = beta
        self.ftslayer = ftslayer
        #tconfig = ftslayer.string[_rank].view(2,3).detach().clone()
        #tconfig.requires_grad = False
        self.setConfig(config)
        # 30 solvent particles plus the 2 dimer beads.
        self.Np = 30+2
        #Configs file Save Alternative, since the default XYZ format is an overkill
        #self.file = open("newconfigs_{}.txt".format(dist.get_rank()), "w")
    @torch.no_grad()
    def initialize_from_torchconfig(self, config):
        # Don't have to worry about all that much all, can just set it
        self.setConfig(config.detach().clone())
    @torch.no_grad()
    def reset(self):
        """Reset the step counter; snap back to the string node if outside the cell."""
        self.steps = 0
        self.computeMetric()
        inftscell = self.checkFTSCell(_rank, _world_size)
        if inftscell:
            pass
        else:
            self.setConfig(self.ftslayer.string[_rank].view(self.Np,3).detach().clone())
    def computeMetric(self):
        # Squared distances from the current configuration to every string node.
        self.distance_sq_list = self.ftslayer.compute_metric(self.getConfig().flatten())
    @torch.no_grad()
    def step(self):
        """One MD step, rejected (configuration restored) if it leaves the FTS cell."""
        state_old = self.getConfig().detach().clone()
        self.stepUnbiased()
        self.computeMetric()
        inftscell = self.checkFTSCell(_rank, _world_size)
        if inftscell:
            pass
        else:
            self.setConfig(state_old)
        self.torch_config.requires_grad_()
        # NOTE(review): bare except silently skips the case where .grad is not
        # yet allocated; `except AttributeError` would be more precise.
        try:
            self.torch_config.grad.data.zero_()
        except:
            pass
    def step_unbiased(self):
        """One unconstrained MD step (no FTS-cell rejection)."""
        with torch.no_grad():
            self.stepUnbiased()
        self.torch_config.requires_grad_()
        try:
            self.torch_config.grad.data.zero_()
        except:
            pass
    @torch.no_grad()
    def isReactant(self, x = None):
        """True if the dimer bond is at or below the LJ minimum r0 = 2^(1/6)."""
        r0 = 2**(1/6.0)
        s = 0.5*r0
        #Compute the pair distance
        if x is None:
            if self.getBondLength() <= r0:
                return True
            else:
                return False
        else:
            if self.getBondLengthConfig(x) <= r0:
                return True
            else:
                return False
    @torch.no_grad()
    def isProduct(self,x = None):
        """True if the dimer bond is extended to at least r0 + 2s (s = r0/2)."""
        r0 = 2**(1/6.0)
        s = 0.5*r0
        if x is None:
            if self.getBondLength() >= r0+2*s:
                return True
            else:
                return False
        else:
            if self.getBondLengthConfig(x) >= r0+2*s:
                return True
            else:
                return False
    def step_bc(self):
        """Unbiased step restricted to the reactant/product basins (boundary conditions)."""
        with torch.no_grad():
            state_old = self.getConfig().detach().clone()
            self.step_unbiased()
            if self.isReactant() or self.isProduct():
                pass
            else:
                #If it's not in the reactant or product state, reset!
                self.setConfig(state_old)
    def save(self):
        """Advance the step counter and periodically dump the configuration."""
        self.timestep += 1
        if self.save_config and self.timestep % self.output_time == 0:
            self.dumpConfig(self.timestep)
| 8,078 | 34.906667 | 122 | py |
tps-torch | tps-torch-main/dimer_solv/fts_test/fts_simulation.py | #import sys
#sys.path.insert(0,"/global/home/users/muhammad_hasyim/tps-torch/build")
#Import necessarry tools from torch
import tpstorch
import torch
import torch.distributed as dist
import torch.nn as nn
import scipy.spatial
#Import necessarry tools from tpstorch
#from mb_fts import MullerBrown as MullerBrownFTS#, CommittorNet
from dimer_fts import DimerFTS
from dimer_fts import FTSLayerCustom as FTSLayer
#from tpstorch.ml.data import FTSSimulation#, EXPReweightStringSimulation
from tpstorch.ml.data import FTSSimulation#, EXPReweightStringSimulation
from tpstorch.ml.optim import ParallelSGD, ParallelAdam, FTSImplicitUpdate, FTSUpdate
#from tpstorch.ml.nn import FTSLayer#BKELossFTS, BKELossEXP, FTSCommittorLoss, FTSLayer
import numpy as np
#Grag the MPI group in tpstorch
mpi_group = tpstorch._mpi_group
world_size = tpstorch._world_size
rank = tpstorch._rank
#Import any other thing
import tqdm, sys
torch.manual_seed(5070)
np.random.seed(5070)
prefix = 'simple'
# Build the initial string by linear interpolation between the reactant
# (start) and product (end) configurations; s in [0, 1] indexes the image.
def initializer(s):
    return (1-s)*start+s*end
# Total particle count: 30 solvent particles plus the 2 dimer beads.
Np = 30+2
#This function only assumes that the string consists of the dimer without solvent particles
@torch.no_grad()
def dimer_nullspace(vec,x,boxsize):
    """Project configuration x into the frame of string image vec.

    Removes the translational and rotational null space of the dimer problem:
    the dimer centre of mass is shifted to the origin and the whole system is
    rotated (Kabsch best fit, dimer beads up-weighted) onto vec, with periodic
    wrapping into the cubic box of edge `boxsize`. Returns the flattened,
    aligned coordinates of x.
    """
    Np = 32
    ##(1) Pre-processing so that dimer is at the center
    old_x = x.view(Np,3).clone()
    #Compute the pair distance (minimum-image convention)
    dx = (old_x[0]-old_x[1])
    dx = dx-torch.round(dx/boxsize)*boxsize
    #Re-compute one of the coordinates and shift to origin
    old_x[0] = dx+old_x[1]
    x_com = 0.5*(old_x[0]+old_x[1])
    new_vec = vec.view(Np,3).clone()
    #Compute the pair distance
    ds = (new_vec[0]-new_vec[1])
    ds = ds-torch.round(ds/boxsize)*boxsize
    #Re-compute one of the coordinates and shift to origin
    new_vec[0] = ds+new_vec[1]
    s_com = 0.5*(new_vec[0]+new_vec[1])
    # Shift both systems into the dimer-centred frame and re-wrap into the box.
    for i in range(Np):
        old_x[i] -= x_com
        new_vec[i] -= s_com
        old_x[i] -= torch.round(old_x[i]/boxsize)*boxsize
        new_vec[i] -= torch.round(new_vec[i]/boxsize)*boxsize
    ##(2) Rotate the system using Kabsch algorithm
    #weights = np.ones(Np)
    #weights = np.zeros(Np)
    #weights[0] = 1.0
    #weights[1] = 1.0
    # NOTE(review): with weights = ones, the two assignments below are no-ops;
    # they look like leftovers from the dimer-only weighting experiments above.
    weights = np.ones(Np)#/(Np-2)
    #weights = np.zeros(self.Np)#np.ones(self.Np)/(self.Np-2)
    weights[0] = 1.0
    weights[1] = 1.0
    rotate,rmsd = scipy.spatial.transform.Rotation.align_vectors(new_vec.numpy(),old_x.numpy(), weights=weights)
    # Apply the best-fit rotation to x and wrap back into the periodic box.
    for i in range(Np):
        old_x[i] = torch.tensor(rotate.apply(old_x[i].numpy()))
        old_x[i] -= torch.round(old_x[i]/boxsize)*boxsize
    return old_x.flatten()
#Initialization
# Lennard-Jones minimum: the compact (reactant) dimer bond length.
r0 = 2**(1/6.0)
# Half-width of the dimer's bistable bond potential.
width = 0.5*r0
#Reactant
dist_init_start = r0
#Product state
dist_init_end = r0+2*width
#scale down/up the distance of one of the particle dimer
# Edge lengths of the cubic periodic simulation box.
box = [14.736125994561544, 14.736125994561544, 14.736125994561544]
def CubicLattice(dist_init):
    """Build an Np-particle simple-cubic lattice inside `box`, set the dimer
    (particles 0 and 1) to bond length `dist_init`, and shift the dimer's
    centre of mass to the origin, wrapping everything into the periodic box.
    """
    lattice = torch.zeros(Np, 3)
    cells = np.ceil(Np**(1/3.0))
    spacing = (box[0]/cells, box[1]/cells, box[2]/cells)
    placed = 0
    ix, iy, iz = 0, 0, 0
    # Fill lattice sites in z-fastest order until every particle is placed.
    while placed < Np:
        site = int(iz + iy*cells + ix*cells*cells)
        lattice[site][0] = spacing[0]*ix - 0.5*box[0]
        lattice[site][1] = spacing[1]*iy - 0.5*box[1]
        lattice[site][2] = spacing[2]*iz - 0.5*box[2]
        placed += 1
        iz += 1
        if iz == cells:
            iz = 0
            iy += 1
            if iy == cells:
                iy = 0
                ix += 1
    # Set the dimer bond to the requested length (minimum-image convention).
    bond = lattice[0] - lattice[1]
    bond = bond - torch.round(bond/box[0])*box[0]
    lattice[0] = bond/torch.norm(bond)*dist_init + lattice[1]
    # Shift the dimer centre of mass to the origin and re-wrap all particles.
    center = 0.5*(lattice[0] + lattice[1])
    for i in range(Np):
        lattice[i] -= center
        lattice[i] -= torch.round(lattice[i]/box[0])*box[0]
    return lattice
start = CubicLattice(dist_init_start)
end = CubicLattice(dist_init_end)
# NOTE(review): initial_config is currently unused — the sampler below is
# seeded from this rank's string node instead (see commented-out constructor).
initial_config = initializer(rank/(world_size-1))
#Initialize the string
#For now I'm only going to interpolate through the dimer, ignoring solvent particles
ftslayer = FTSLayer(react_config=start.flatten(),prod_config=end.flatten(),num_nodes=world_size,boxsize=box[0],num_particles=Np).to('cpu')
#Construct FTSSimulation
ftsoptimizer = FTSUpdate(ftslayer.parameters(), deltatau=0.01,kappa=0.1,periodic=True,dim=3)
kT = 1.0
batch_size = 10
period = 10
#Initialize the dimer simulation
#dimer_sim_fts = DimerFTS(param="param",config=initial_config.detach().clone(), rank=rank, beta=1/kT, save_config=True, mpi_group = mpi_group, ftslayer=ftslayer,output_time=batch_size*period)
# Each MPI rank simulates one string node, starting from that node's configuration.
dimer_sim_fts = DimerFTS(param="param",config=ftslayer.string[rank].view(Np,3).detach().clone(), rank=rank, beta=1/kT, save_config=True, mpi_group = mpi_group, ftslayer=ftslayer,output_time=batch_size*period)
datarunner_fts = FTSSimulation(dimer_sim_fts, nn_training = False, period=period, batch_size=batch_size, dimN=Np*3)
#FTS Simulation Training Loop
# Each iteration samples configurations restricted to this rank's FTS cell,
# updates the string, then appends this rank's string node (extended-XYZ frame)
# and the current dimer bond length to per-rank output files.
with open("string_{}_config.xyz".format(rank),"w") as f, open("string_{}_log.txt".format(rank),"w") as g:
    for i in tqdm.tqdm(range(10000)):
        # get data and reweighting factors
        configs = datarunner_fts.runSimulation()
        ftsoptimizer.step(configs,len(configs),boxsize=box[0],remove_nullspace=dimer_nullspace)
        string_temp = ftslayer.string[rank].view(Np,3)
        # Extended-XYZ frame header for this rank's string node.
        f.write("32 \n")
        f.write('Lattice=\"{} 0.0 0.0 0.0 {} 0.0 0.0 0.0 {}\" '.format(box[0],box[1],box[2]))
        f.write('Origin=\"{} {} {}\" '.format(-0.5*box[0],-0.5*box[1],-0.5*box[2]))
        f.write("Properties=type:S:1:pos:R:3:aux1:R:1 \n")
        f.write("B {} {} {} {} \n".format(string_temp[0,0],string_temp[0,1], string_temp[0,2],0.5*r0))
        f.write("B {} {} {} {} \n".format(string_temp[1,0],string_temp[1,1], string_temp[1,2],0.5*r0))
        # BUGFIX: this inner loop previously reused `i`, clobbering the epoch
        # counter so the log time column below was the constant Np*period.
        for j in range(2,Np):
            f.write("A {} {} {} {} \n".format(string_temp[j,0],string_temp[j,1], string_temp[j,2],0.5*r0))
        f.flush()
        g.write("{} {} \n".format((i+1)*period,torch.norm(string_temp[0]-string_temp[1])))
        g.flush()
    if rank == 0:
        torch.save(ftslayer.state_dict(), "test_string_config")
| 6,357 | 36.621302 | 208 | py |
tps-torch | tps-torch-main/dimer_solv/fts_test/dimer_fts_nosolv.py | import sys
sys.path.insert(0,'/global/home/users/muhammad_hasyim/tps-torch/build/')
import scipy.spatial
#Import necessarry tools from torch
import torch
import torch.distributed as dist
import torch.nn as nn
#Import necessarry tools from tpstorch
from tpstorch.examples.dimer_solv_ml import MyMLFTSSampler
from tpstorch import _rank, _world_size
import numpy as np
from tpstorch.ml.nn import FTSLayer#US
#Import any other thing
import tqdm, sys
class FTSLayerCustom(FTSLayer):
r""" A linear layer, where the paramaters correspond to the string obtained by the
general FTS method. Customized to take into account rotational and translational symmetry of the dimer problem
Args:
react_config (torch.Tensor): starting configuration in the reactant basin.
prod_config (torch.Tensor): starting configuration in the product basin.
"""
    def __init__(self, react_config, prod_config, num_nodes,boxsize,num_particles):
        # boxsize: edge length of the cubic periodic box; num_particles: number
        # of particles stored per string node (here the dimer beads only).
        super(FTSLayerCustom,self).__init__(react_config, prod_config, num_nodes)
        self.boxsize = boxsize
        self.Np = num_particles
    @torch.no_grad()
    def compute_metric(self,x):
        # Symmetry-reduced squared distances from configuration x to every
        # string node, comparing only the two dimer beads: remove the dimer's
        # centre of mass, then optimally rotate (Kabsch) before measuring RMSD.
        # NOTE(review): old_x keeps just 2 rows while the loops below run over
        # self.Np — this only works when num_particles == 2 (dimer-only string);
        # confirm against how the layer is constructed.
        ##(1) Pre-processing so that dimer is at the center
        old_x = x.view(32,3)[:2].clone()
        #Compute the pair distance (minimum-image convention)
        dx = (old_x[0]-old_x[1])
        dx = dx-torch.round(dx/self.boxsize)*self.boxsize
        #Re-compute one of the coordinates and shift to origin
        old_x[0] = dx+old_x[1]
        x_com = 0.5*(old_x[0]+old_x[1])
        #Do the same thing to the string configurations
        new_string = self.string.view(_world_size, self.Np,3).clone()
        #Compute the pair distance
        ds = (new_string[:,0]-new_string[:,1])
        ds = ds-torch.round(ds/self.boxsize)*self.boxsize
        #Re-compute one of the coordinates and shift to origin
        new_string[:,0] = ds+new_string[:,1]
        s_com = 0.5*(new_string[:,0]+new_string[:,1])
        # Shift everything into the dimer-centred frame and re-wrap into the box.
        for i in range(self.Np):
            old_x[i] -= x_com
            new_string[:,i] -= s_com
            old_x[i] -= torch.round(old_x[i]/self.boxsize)*self.boxsize
            new_string[:,i] -= torch.round(new_string[:,i]/self.boxsize)*self.boxsize
        ##(2) Rotate the system using Kabsch algorithm
        dist_sq_list = torch.zeros(_world_size)
        # Only the dimer beads carry weight in the alignment.
        #weights = np.ones(self.Np)/(self.Np-2)
        weights = np.zeros(self.Np)#np.ones(self.Np)/(self.Np-2)
        weights[0] = 1.0
        weights[1] = 1.0
        for i in range(_world_size):
            # align_vectors returns the best-fit rotation and the weighted RMSD.
            _, rmsd = scipy.spatial.transform.Rotation.align_vectors(old_x.numpy(), new_string[i].numpy(),weights=weights)
            dist_sq_list[i] = rmsd**2
        return dist_sq_list
#WARNING! Needs to be changed
#Only testing if configurations are constrained properly
"""
def forward(self,x):
##(1) Remove center of mass
new_x = x.view(self.Np,3).clone()
#Compute the pair distance
dx = (new_x[0]-new_x[1])
dx = dx-torch.round(dx/self.boxsize)*self.boxsize
#Re-compute one of the coordinates and shift to origin
new_x[0] = dx+new_x[1]
x_com = 0.5*(new_x[0]+new_x[1])
for i in range(self.Np):
new_x[i] -= x_com
new_x[i] -= torch.round(new_x[i]/self.boxsize)*self.boxsize
new_string = self.string[_rank].view(self.Np,3).clone()
#Compute the pair distance
ds = (new_string[0]-new_string[1])
ds = ds-torch.round(ds/self.boxsize)*self.boxsize
#Re-compute one of the coordinates and shift to origin
#new_string[0] = ds+new_string[1]
#s_com = 0.5*(new_string[0]+new_string[1])#.detach().clone()#,dim=1)
#for i in range(self.Np):
# new_string[i] -= s_com
##(2) Rotate the configuration
dx /= torch.norm(dx)
ds /= torch.norm(ds)
v = torch.cross(dx,ds)
cosine = torch.dot(ds,dx)
val = 0
for i in range(self.Np):
new_x[i] += torch.cross(v,new_x[i])+torch.cross(v,torch.cross(v,new_x[i]))/(1+cosine)
DR = new_x[i] - new_string[i]#= torch.round(new_x[i]/self.boxsize)*self.boxsize
DR = DR-torch.round(DR/self.boxsize)*self.boxsize
val += torch.sum(DR**2)
return val
"""
# Sketch/TODO: the committor evaluation still has to be integrated cleanly
# with this sampler class -- e.g. give the class a placeholder committor
# method that the actual trained committor then overrides.
class DimerFTS(MyMLFTSSampler):
    """Sampler for the dimer-in-solvent system, constrained to this rank's
    Voronoi cell around its string node (FTS method).

    NOTE(review): the primitive operations (``setConfig``, ``getConfig``,
    ``stepUnbiased``, ``checkFTSCell``, ``getBondLength``,
    ``getBondLengthConfig``, ``dumpConfig``) are presumably implemented by the
    C++/pybind base class ``MyMLFTSSampler`` -- confirm against tpstorch.
    """
    def __init__(self,param,config,rank,beta,mpi_group,ftslayer,output_time, save_config=False):
        super(DimerFTS, self).__init__(param,config.detach().clone(),rank,beta,mpi_group)
        # Dump a snapshot every ``output_time`` saves, only if ``save_config``.
        self.output_time = output_time
        self.save_config = save_config
        self.timestep = 0
        self.torch_config = config
        #Save config size and its flattened version
        self.config_size = config.size()
        self.flattened_size = config.flatten().size()
        self.invkT = beta
        # FTS string layer used for the cell metric and for re-seating walkers.
        self.ftslayer = ftslayer
        #tconfig = ftslayer.string[_rank].view(2,3).detach().clone()
        #tconfig.requires_grad = False
        self.setConfig(config)
        # 30 solvent particles + 2 dimer particles.
        self.Np = 30+2
        #Configs file Save Alternative, since the default XYZ format is an overkill
        #self.file = open("newconfigs_{}.txt".format(dist.get_rank()), "w")
    @torch.no_grad()
    def initialize_from_torchconfig(self, config):
        """Reset the sampler state from a torch configuration tensor."""
        # Don't have to worry about all that much all, can just set it
        self.setConfig(config.detach().clone())
    @torch.no_grad()
    def reset(self):
        """Re-seat the walker inside this rank's Voronoi cell: if the current
        configuration has left the cell, overwrite the dimer coordinates with
        this rank's string node (solvent keeps its current coordinates)."""
        self.steps = 0
        self.computeMetric()
        inftscell = self.checkFTSCell(_rank, _world_size)
        if inftscell:
            pass
        else:
            state_old = self.getConfig().detach().clone()
            # Only the first two particles (the dimer) are replaced.
            state_old[:2] = self.ftslayer.string[_rank].view(2,3).detach().clone()
            self.setConfig(state_old)
        #self.setConfig(self.ftslayer.string[_rank].view(self.Np,3).detach().clone())
    def computeMetric(self):
        """Cache squared distances from the current configuration to every
        string node; presumably consumed by ``checkFTSCell`` -- confirm."""
        self.distance_sq_list = self.ftslayer.compute_metric(self.getConfig().flatten())
    @torch.no_grad()
    def step(self):
        """One cell-constrained step: attempt an unbiased move and reject it
        (restore the previous state) if it leaves this rank's Voronoi cell."""
        state_old = self.getConfig().detach().clone()
        self.stepUnbiased()
        self.computeMetric()
        inftscell = self.checkFTSCell(_rank, _world_size)
        if inftscell:
            pass
        else:
            self.setConfig(state_old)
        # Re-enable autograd on the configuration and clear any stale gradient.
        self.torch_config.requires_grad_()
        try:
            self.torch_config.grad.data.zero_()
        except:
            pass
    def step_unbiased(self):
        """One unconstrained dynamics step (no cell rejection)."""
        with torch.no_grad():
            self.stepUnbiased()
        # Re-enable autograd on the configuration and clear any stale gradient.
        self.torch_config.requires_grad_()
        try:
            self.torch_config.grad.data.zero_()
        except:
            pass
    @torch.no_grad()
    def isReactant(self, x = None):
        """True when the dimer bond length is at or below the compact-state
        cutoff r0 = 2^(1/6); checks the current state when ``x`` is None."""
        r0 = 2**(1/6.0)
        s = 0.5*r0
        #Compute the pair distance
        if x is None:
            if self.getBondLength() <= r0:
                return True
            else:
                return False
        else:
            if self.getBondLengthConfig(x) <= r0:
                return True
            else:
                return False
    @torch.no_grad()
    def isProduct(self,x = None):
        """True when the dimer bond length is at or beyond the extended-state
        cutoff r0 + 2*(0.5*r0); checks the current state when ``x`` is None."""
        r0 = 2**(1/6.0)
        s = 0.5*r0
        if x is None:
            if self.getBondLength() >= r0+2*s:
                return True
            else:
                return False
        else:
            if self.getBondLengthConfig(x) >= r0+2*s:
                return True
            else:
                return False
    def step_bc(self):
        """Step with basin boundary conditions: keep the move only if the
        system remains inside the reactant or product basin."""
        with torch.no_grad():
            state_old = self.getConfig().detach().clone()
            self.step_unbiased()
            if self.isReactant() or self.isProduct():
                pass
            else:
                #If it's not in the reactant or product state, reset!
                self.setConfig(state_old)
    def save(self):
        """Advance the internal clock and periodically dump a configuration."""
        self.timestep += 1
        if self.save_config and self.timestep % self.output_time == 0:
            self.dumpConfig(self.timestep)
| 8,260 | 35.074236 | 122 | py |
tps-torch | tps-torch-main/dimer_solv/fts_test/fts_simulation_nosolv.py | #import sys
#sys.path.insert(0,"/global/home/users/muhammad_hasyim/tps-torch/build")
#Import necessarry tools from torch
import tpstorch
import torch
import torch.distributed as dist
import torch.nn as nn
import scipy.spatial
#Import necessarry tools from tpstorch
#from mb_fts import MullerBrown as MullerBrownFTS#, CommittorNet
from dimer_fts_nosolv import DimerFTS
from dimer_fts_nosolv import FTSLayerCustom as FTSLayer
#from tpstorch.ml.data import FTSSimulation#, EXPReweightStringSimulation
from tpstorch.ml.data import FTSSimulation#, EXPReweightStringSimulation
from tpstorch.ml.optim import ParallelSGD, ParallelAdam, FTSImplicitUpdate, FTSUpdate
#from tpstorch.ml.nn import FTSLayer#BKELossFTS, BKELossEXP, FTSCommittorLoss, FTSLayer
import numpy as np
# Grab the MPI communicator, world size, and this process's rank from tpstorch.
mpi_group = tpstorch._mpi_group
world_size = tpstorch._world_size
rank = tpstorch._rank
# Remaining imports.
import tqdm, sys
# Fixed seeds for reproducibility of the sampling run.
torch.manual_seed(5070)
np.random.seed(5070)
prefix = 'simple'
#Initialize neural net
def initializer(s):
    """Linearly interpolate between the module-level reactant (``start``)
    and product (``end``) configurations; ``s`` runs from 0 to 1."""
    return s * end + (1 - s) * start
# Total particle count: 30 solvent particles + 2 dimer particles.
Np = 30+2
# The function below assumes the string consists of the dimer only (no solvent).
@torch.no_grad()
def dimer_nullspace(vec, x, boxsize):
    """Remove the translational/rotational null space from a dimer
    configuration ``x`` relative to a reference string node ``vec``.

    Both inputs are flattened (2, 3) dimer coordinates in a cubic periodic
    box of edge ``boxsize``. The dimer's center of mass is moved to the
    origin (with minimum-image wrapping) and the pair is rigidly rotated
    onto the reference via a Kabsch fit weighted on the two dimer particles.
    Returns the aligned configuration, flattened.
    """
    n_dimer = 2
    frame = x.view(n_dimer, 3).clone()
    target = vec.view(n_dimer, 3).clone()

    # Minimum-image bond vectors; rebuild particle 0 next to particle 1.
    bond = frame[0] - frame[1]
    bond = bond - torch.round(bond / boxsize) * boxsize
    frame[0] = bond + frame[1]

    ref_bond = target[0] - target[1]
    ref_bond = ref_bond - torch.round(ref_bond / boxsize) * boxsize
    target[0] = ref_bond + target[1]

    # Center both dimers on their centers of mass, wrapping each particle
    # back into the box.
    frame -= 0.5 * (frame[0] + frame[1])
    target -= 0.5 * (target[0] + target[1])
    frame -= torch.round(frame / boxsize) * boxsize
    target -= torch.round(target / boxsize) * boxsize

    # Kabsch fit, weighting only the dimer particles.
    fit_weights = np.zeros(n_dimer)
    fit_weights[0] = 1.0
    fit_weights[1] = 1.0
    rotation, _ = scipy.spatial.transform.Rotation.align_vectors(
        target.numpy(), frame.numpy(), weights=fit_weights)

    # Apply the rotation row by row (preserves the original dtype) and re-wrap.
    for k in range(n_dimer):
        frame[k] = torch.tensor(rotation.apply(frame[k].numpy()))
        frame[k] -= torch.round(frame[k] / boxsize) * boxsize
    return frame.flatten()
#Initialization
# r0 is the Lennard-Jones minimum separation, 2^(1/6) (in units of sigma).
r0 = 2**(1/6.0)
width = 0.5*r0
#Reactant basin: compact dimer at the LJ minimum distance.
dist_init_start = r0
#Product basin: extended dimer, two "widths" beyond the minimum.
dist_init_end = r0+2*width
#scale down/up the distance of one of the particle dimer
# Cubic periodic box edge lengths (the wrapping code assumes a cubic box).
box = [14.736125994561544, 14.736125994561544, 14.736125994561544]
def CubicLattice(dist_init, num_particles=None, box_dims=None):
    """Build an initial configuration: particles on a simple cubic lattice,
    with the dimer (particles 0 and 1) set ``dist_init`` apart and the
    dimer's center of mass shifted to the origin.

    Args:
        dist_init (float): desired dimer bond length (must be < box/2 for the
            minimum-image construction to be meaningful).
        num_particles (int, optional): number of particles; defaults to the
            module-level ``Np`` (backward compatible with the old signature).
        box_dims (sequence of float, optional): box edge lengths; defaults to
            the module-level ``box``. The wrapping below assumes a cubic box
            and uses the first edge length for all components.

    Returns:
        torch.Tensor: configuration of shape ``(num_particles, 3)``.
    """
    n = Np if num_particles is None else num_particles
    dims = box if box_dims is None else box_dims

    state = torch.zeros(n, 3)
    # Smallest cubic lattice that fits all particles; sites are filled in
    # z-fastest order (same ordering as the original id_x/id_y/id_z loop).
    per_side = int(np.ceil(n ** (1 / 3.0)))
    spacing = [dims[d] / per_side for d in range(3)]
    for idx in range(n):
        id_x, rem = divmod(idx, per_side * per_side)
        id_y, id_z = divmod(rem, per_side)
        state[idx][0] = spacing[0] * id_x - 0.5 * dims[0]
        state[idx][1] = spacing[1] * id_y - 0.5 * dims[1]
        state[idx][2] = spacing[2] * id_z - 0.5 * dims[2]

    # Set the dimer bond: minimum-image direction from particle 1 to
    # particle 0, rescaled to ``dist_init``.
    dx = state[0] - state[1]
    dx = dx - torch.round(dx / dims[0]) * dims[0]
    state[0] = dx / torch.norm(dx) * dist_init + state[1]

    # Shift the dimer's center of mass to the origin and wrap all particles
    # back into the box.
    x_com = 0.5 * (state[0] + state[1])
    state -= x_com
    state -= torch.round(state / dims[0]) * dims[0]
    return state
# Reactant/product lattice configurations, and this rank's linear interpolant
# between them as the starting point.
start = CubicLattice(dist_init_start)
end = CubicLattice(dist_init_end)
initial_config = initializer(rank/(world_size-1))
#Initialize the string
#For now I'm only going to interpolate through the dimer, ignoring solvent particles
ftslayer = FTSLayer(react_config=start[:2].flatten(),prod_config=end[:2].flatten(),num_nodes=world_size,boxsize=box[0],num_particles=2).to('cpu')
# String optimizer (gradient step with momentum plus smoothing kappa).
ftsoptimizer = FTSUpdate(ftslayer.parameters(), deltatau=0.01,momentum=0.9,nesterov=True, kappa=0.1,periodic=True,dim=3)
kT = 1.0
batch_size = 10
period = 10
#Initialize the dimer simulation
dimer_sim_fts = DimerFTS(param="param",config=initial_config.detach().clone(), rank=rank, beta=1/kT, save_config=True, mpi_group = mpi_group, ftslayer=ftslayer,output_time=batch_size*period)
#dimer_sim_fts = DimerFTS(param="param",config=ftslayer.string[rank].view(Np,3).detach().clone(), rank=rank, beta=1/kT, save_config=True, mpi_group = mpi_group, ftslayer=ftslayer,output_time=batch_size*period)
# Data runner that drives the constrained sampler and batches configurations.
datarunner_fts = FTSSimulation(dimer_sim_fts, nn_training = False, period=period, batch_size=batch_size, dimN=Np*3)
#FTS Simulation Training Loop
with open("string_{}_config.xyz".format(rank),"w") as f, open("string_{}_log.txt".format(rank),"w") as g:
    for i in tqdm.tqdm(range(10000)):
        # Sample a batch of configurations constrained to this rank's cell.
        configs = datarunner_fts.runSimulation()
        # Update the string; only the dimer coordinates (first 6 components)
        # feed the update, with the null-space projection applied.
        ftsoptimizer.step(configs[:,:6],len(configs),boxsize=box[0],remove_nullspace=dimer_nullspace)
        string_temp = ftslayer.string[rank].view(2,3)
        # Append this node's dimer as an extended-XYZ frame.
        f.write("2 \n")
        f.write('Lattice=\"{} 0.0 0.0 0.0 {} 0.0 0.0 0.0 {}\" '.format(box[0],box[1],box[2]))
        f.write('Origin=\"{} {} {}\" '.format(-0.5*box[0],-0.5*box[1],-0.5*box[2]))
        f.write("Properties=type:S:1:pos:R:3:aux1:R:1 \n")
        f.write("B {} {} {} {} \n".format(string_temp[0,0],string_temp[0,1], string_temp[0,2],0.5*r0))
        f.write("B {} {} {} {} \n".format(string_temp[1,0],string_temp[1,1], string_temp[1,2],0.5*r0))
        #for i in range(2,Np):
        #    f.write("A {} {} {} {} \n".format(string_temp[i,0],string_temp[i,1], string_temp[i,2],0.5*r0))
        f.flush()
        # Log the instantaneous bond length of this string node.
        g.write("{} {} \n".format((i+1)*period,torch.norm(string_temp[0]-string_temp[1])))
        g.flush()
        # NOTE(review): indentation was ambiguous in the source dump; this
        # checkpoint may belong after the loop (a single save). Placed inside
        # the loop it re-saves the (tiny) string state every iteration --
        # behaviorally safe but worth confirming against the original.
        if rank == 0:
            torch.save(ftslayer.state_dict(), "test_string_config")
| 6,405 | 36.905325 | 209 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.