repo
stringlengths 2
99
| file
stringlengths 13
225
| code
stringlengths 0
18.3M
| file_length
int64 0
18.3M
| avg_line_length
float64 0
1.36M
| max_line_length
int64 0
4.26M
| extension_type
stringclasses 1
value |
|---|---|---|---|---|---|---|
3DTrans
|
3DTrans-master/tools/ssl_utils/iou_match_3d.py
|
import torch
from .semi_utils import reverse_transform, load_data_to_gpu, construct_pseudo_label
from pcdet.models.model_utils.model_nms_utils import class_agnostic_nms
@torch.no_grad()
def iou_match_3d_filter(batch_dict, cfgs):
    """Build per-frame pseudo-label dicts from RoI predictions.

    For every frame in the batch, RoIs are kept only if their predicted IoU
    exceeds a per-class threshold, then reduced with class-agnostic NMS and
    finally stripped of degenerate boxes whose size entries are all zero.

    Args:
        batch_dict: model output dict with batched 'rois', 'roi_ious',
            'roi_scores', 'roi_labels' and the 'cls_preds_normalized' flag.
        cfgs: config node providing IOU_SCORE_THRESH (one entry per class),
            CLS_SCORE_THRESH and NMS_CONFIG.

    Returns:
        list (one entry per frame) of dicts holding 'pred_boxes',
        'pred_scores' and 'pred_labels'.
    """
    pred_dicts = []
    for frame_idx in range(batch_dict['batch_size']):
        boxes = batch_dict['rois'][frame_idx]
        ious = batch_dict['roi_ious'][frame_idx]
        scores = batch_dict['roi_scores'][frame_idx]
        labels = batch_dict['roi_labels'][frame_idx]
        if not batch_dict['cls_preds_normalized']:
            # raw logits -> probabilities
            ious = torch.sigmoid(ious)
            scores = torch.sigmoid(scores)
        ious = ious.squeeze(-1)

        # Per-class IoU gate: labels are 1-based, threshold list is 0-based.
        iou_gate = ious.new_zeros(ious.shape)
        for cls_idx, cls_th in enumerate(cfgs.IOU_SCORE_THRESH):
            iou_gate[labels == (cls_idx + 1)] = cls_th
        keep = ious >= iou_gate
        ious = ious[keep]
        boxes = boxes[keep]
        scores = scores[keep]
        labels = labels[keep]

        # NMS ranks by classification score (not by predicted IoU).
        selected, selected_scores = class_agnostic_nms(
            box_scores=scores, box_preds=boxes,
            nms_config=cfgs.NMS_CONFIG,
            score_thresh=cfgs.CLS_SCORE_THRESH
        )
        final_boxes = boxes[selected]
        final_labels = labels[selected]
        final_scores = selected_scores

        # Drop degenerate boxes: any of the l/w/h entries equal to zero.
        nonzero = (final_boxes[:, 3:6] != 0).all(1)
        pred_dicts.append({
            'pred_boxes': final_boxes[nonzero],
            'pred_scores': final_scores[nonzero],
            'pred_labels': final_labels[nonzero],
        })
    return pred_dicts
def iou_match_3d(teacher_model, student_model,
                 ld_teacher_batch_dict, ld_student_batch_dict,
                 ud_teacher_batch_dict, ud_student_batch_dict,
                 cfgs, epoch_id, dist
                 ):
    """One SSL step: the teacher pseudo-labels unlabeled data, the student
    trains on both labeled and pseudo-labeled batches.

    Returns (loss, tb_dict, disp_dict); tb_dict/disp_dict come from the
    student pass over the unlabeled split.
    """
    assert ld_teacher_batch_dict is None  # Only generate labels for unlabeled data
    for bd in (ld_student_batch_dict, ud_student_batch_dict, ud_teacher_batch_dict):
        load_data_to_gpu(bd)

    if dist:
        _, ud_teacher_batch_dict = teacher_model(ld_teacher_batch_dict, ud_teacher_batch_dict)
    else:
        ud_teacher_batch_dict = teacher_model(ud_teacher_batch_dict)

    # Filter teacher RoIs into pseudo boxes, map them into the student's
    # augmented frame, then attach them as ground truth for the unlabeled batch.
    teacher_boxes = iou_match_3d_filter(ud_teacher_batch_dict, cfgs.TEACHER)
    teacher_boxes = reverse_transform(teacher_boxes, ud_teacher_batch_dict, ud_student_batch_dict)
    ud_student_batch_dict['gt_boxes'] = construct_pseudo_label(teacher_boxes)

    if dist:
        (_, ld_ret_dict, _, _), (_, ud_ret_dict, tb_dict, disp_dict) = \
            student_model(ld_student_batch_dict, ud_student_batch_dict)
    else:
        _, ld_ret_dict, _, _ = student_model(ld_student_batch_dict)
        _, ud_ret_dict, tb_dict, disp_dict = student_model(ud_student_batch_dict)

    loss = ld_ret_dict['loss'].mean() + ud_ret_dict['loss'].mean()
    return loss, tb_dict, disp_dict
| 3,447
| 38.181818
| 130
|
py
|
3DTrans
|
3DTrans-master/tools/ssl_utils/sess.py
|
import torch
import torch.nn.functional as F
import numpy as np
from .semi_utils import reverse_transform, load_data_to_gpu, filter_boxes
def get_consistency_loss(teacher_boxes, student_boxes):
    """Centre / size / class consistency between teacher and student boxes.

    Boxes are matched by nearest centre with an additive penalty that forbids
    cross-class matches; only pairs with squared centre distance below 1
    contribute.  Returns three scalars, each averaged over the batch:
    (center_loss, size_loss, cls_loss).
    """
    center_terms, size_terms, cls_terms = [], [], []
    n_frames = 0
    for t_frame, s_frame in zip(teacher_boxes, student_boxes):
        t_cls = t_frame['pred_cls_preds'].detach_()
        t_boxes = t_frame['pred_boxes'].detach_()
        s_cls = s_frame['pred_cls_preds']
        s_boxes = s_frame['pred_boxes']
        n_t = t_boxes.shape[0]
        n_s = s_boxes.shape[0]
        n_frames += 1
        if n_t == 0 or n_s == 0:
            # nothing to match in this frame; still counts toward the average
            continue
        t_centers, t_sizes = t_boxes[:, :3], t_boxes[:, 3:6]
        s_centers, s_sizes = s_boxes[:, :3], s_boxes[:, 3:6]
        with torch.no_grad():
            t_label = torch.max(t_cls, dim=-1, keepdim=True)[1]   # [Nt, 1]
            s_label = torch.max(s_cls, dim=-1, keepdim=True)[1]   # [Ns, 1]
            class_mismatch = (t_label != s_label.T).float()       # [Nt, Ns]
        MAX_DISTANCE = 1000000
        sq_dist = ((t_centers[:, None, :] - s_centers[None, :, :]) ** 2).sum(-1)  # [Nt, Ns]
        sq_dist = sq_dist + class_mismatch * MAX_DISTANCE  # forbid cross-class matches
        s_dist_per_t, s_idx_per_t = sq_dist.min(1)  # best student per teacher [Nt]
        t_dist_per_s, t_idx_per_s = sq_dist.min(0)  # best teacher per student [Ns]
        # different from standard sess, we only consider distance<1m as matching
        MATCHED_DISTANCE = 1
        t_match_mask = (t_dist_per_s < MATCHED_DISTANCE).float().unsqueeze(-1)  # [Ns, 1]
        s_match_mask = (s_dist_per_t < MATCHED_DISTANCE).float().unsqueeze(-1)  # [Nt, 1]
        matched_t_centers = t_centers[t_idx_per_s]   # [Ns, 3]
        matched_s_centers = s_centers[s_idx_per_t]   # [Nt, 3]
        matched_s_sizes = s_sizes[s_idx_per_t]       # [Nt, 3]
        matched_s_cls = s_cls[s_idx_per_t]           # [Nt, C]
        # symmetric L1 on matched centres, normalized by the total box count
        center_term = (((s_centers - matched_t_centers) * t_match_mask).abs().sum()
                       + ((t_centers - matched_s_centers) * s_match_mask).abs().sum()) \
            / (n_t + n_s)
        size_term = (F.mse_loss(matched_s_sizes, t_sizes, reduction='none')
                     * s_match_mask).sum() / n_t
        # sigmoid class scores: kl_div is not applicable, so MSE is used instead
        cls_term = (F.mse_loss(matched_s_cls, t_cls, reduction='none')
                    * s_match_mask).sum() / n_t
        center_terms.append(center_term)
        size_terms.append(size_term)
        cls_terms.append(cls_term)
    return (sum(center_terms) / n_frames,
            sum(size_terms) / n_frames,
            sum(cls_terms) / n_frames)
def sigmoid_rampup(current, rampup_start, rampup_end):
    """Sigmoid-shaped ramp: 0 before rampup_start, 1 from rampup_end on.

    Inside the ramp the weight follows exp(-5 * phase^2), the usual
    mean-teacher ramp-up curve, where phase shrinks from 1 to 0 as
    `current` approaches rampup_end.
    """
    assert rampup_start <= rampup_end
    if current < rampup_start:
        return 0
    elif (current >= rampup_start) and (current < rampup_end):
        span = max(rampup_end, 0) - max(rampup_start, 0)
        if span == 0:  # degenerate ramp: jump straight to 1
            return 1
        phase = 1.0 - (current - max(rampup_start, 0)) / span
        return float(np.exp(-5.0 * phase * phase))
    elif current >= rampup_end:
        return 1
    else:
        raise Exception('Impossible condition for sigmoid rampup')
def sess(teacher_model, student_model,
         ld_teacher_batch_dict, ld_student_batch_dict,
         ud_teacher_batch_dict, ud_student_batch_dict,
         cfgs, epoch_id, dist
         ):
    """SESS step: supervised loss on labeled data plus a ramped consistency
    loss between teacher and student predictions on both splits.

    Returns (loss, tb_dict, disp_dict) from the labeled student pass.
    """
    for bd in (ld_teacher_batch_dict, ld_student_batch_dict,
               ud_teacher_batch_dict, ud_student_batch_dict):
        load_data_to_gpu(bd)

    # Forward both models on both splits; only the labeled student pass
    # yields the supervised loss.
    if dist:
        ld_teacher_batch_dict, ud_teacher_batch_dict = teacher_model(ld_teacher_batch_dict, ud_teacher_batch_dict)
        (ld_student_batch_dict, ret_dict, tb_dict, disp_dict), (ud_student_batch_dict) = \
            student_model(ld_student_batch_dict, ud_student_batch_dict)
    else:
        ld_teacher_batch_dict = teacher_model(ld_teacher_batch_dict)
        ud_teacher_batch_dict = teacher_model(ud_teacher_batch_dict)
        ld_student_batch_dict, ret_dict, tb_dict, disp_dict = student_model(ld_student_batch_dict)
        ud_student_batch_dict = student_model(ud_student_batch_dict)
    sup_loss = ret_dict['loss'].mean()

    ld_teacher_boxes = filter_boxes(ld_teacher_batch_dict, cfgs)
    ud_teacher_boxes = filter_boxes(ud_teacher_batch_dict, cfgs)
    ld_student_boxes = filter_boxes(ld_student_batch_dict, cfgs)
    ud_student_boxes = filter_boxes(ud_student_batch_dict, cfgs)
    # Since the teacher model did not perform the Point-level Data Transform,
    # its predictions are mapped into each student frame before comparison.
    ld_teacher_boxes = reverse_transform(ld_teacher_boxes, ld_teacher_batch_dict, ld_student_batch_dict)
    ud_teacher_boxes = reverse_transform(ud_teacher_boxes, ud_teacher_batch_dict, ud_student_batch_dict)

    ld_losses = get_consistency_loss(ld_teacher_boxes, ld_student_boxes)
    ud_losses = get_consistency_loss(ud_teacher_boxes, ud_student_boxes)
    weights = (cfgs.CENTER_WEIGHT, cfgs.SIZE_WEIGHT, cfgs.CLASS_WEIGHT)
    # weighted sum over the (center, size, class) component pairs
    consistency_loss = sum((ld_l + ud_l) * w for ld_l, ud_l, w in zip(ld_losses, ud_losses, weights))

    # consistency term ramps up between the two EMA_EPOCH boundaries
    consistency_weight = cfgs.CONSISTENCY_WEIGHT * sigmoid_rampup(epoch_id, cfgs.TEACHER.EMA_EPOCH[0], cfgs.TEACHER.EMA_EPOCH[1])
    loss = sup_loss + consistency_weight * consistency_loss
    return loss, tb_dict, disp_dict
| 6,566
| 54.184874
| 148
|
py
|
3DTrans
|
3DTrans-master/tools/ssl_utils/se_ssd.py
|
import torch
import torch.nn.functional as F
import numpy as np
from .semi_utils import reverse_transform, load_data_to_gpu, filter_boxes
from pcdet.ops.iou3d_nms.iou3d_nms_utils import boxes_iou3d_gpu
def get_iou_consistency_loss(teacher_boxes, student_boxes):
    """Box / class consistency between teacher and student predictions.

    Each student box is matched to the teacher box of the same class with the
    highest 3D IoU; pairs with IoU >= 0.7 contribute a smooth-L1 box loss
    (rotation handled via a sin-difference term) and a smooth-L1 class loss.
    Returns (box_loss, cls_loss), each averaged over the batch.
    """
    box_terms, cls_terms = [], []
    n_frames = 0
    for t_frame, s_frame in zip(teacher_boxes, student_boxes):
        t_cls = t_frame['pred_cls_preds'].detach_()
        t_boxes = t_frame['pred_boxes'].detach_()
        s_cls = s_frame['pred_cls_preds']
        s_boxes = s_frame['pred_boxes']
        n_t = t_boxes.shape[0]
        n_s = s_boxes.shape[0]
        n_frames += 1
        if n_t == 0 or n_s == 0:
            # nothing to match in this frame; still counts toward the average
            continue
        with torch.no_grad():
            t_label = torch.max(t_cls, dim=-1, keepdim=True)[1]  # [Nt, 1]
            s_label = torch.max(s_cls, dim=-1, keepdim=True)[1]  # [Ns, 1]
            class_mismatch = (t_label != s_label.T).float()      # [Nt, Ns]
        iou_3d = boxes_iou3d_gpu(t_boxes, s_boxes)  # [Nt, Ns]
        iou_3d -= class_mismatch  # cross-class pairs get iou < 0 and never match
        matched_iou_of_student, matched_t_idx = iou_3d.max(0)  # per student [Ns]
        MATCHED_IOU_TH = 0.7
        match_mask = (matched_iou_of_student >= MATCHED_IOU_TH).float().unsqueeze(-1)
        num_matched = match_mask.sum()
        if num_matched == 0:
            num_matched = 1  # avoid 0/0; all terms are masked out anyway
        matched_t_boxes = t_boxes[matched_t_idx]
        matched_t_cls = t_cls[matched_t_idx]
        s_reg, s_rot = s_boxes[:, :6], s_boxes[:, [6]]
        t_reg, t_rot = matched_t_boxes[:, :6], matched_t_boxes[:, [6]]
        reg_loss = (F.smooth_l1_loss(s_reg, t_reg, reduction='none')
                    * match_mask).sum() / num_matched
        # sin of the angle difference keeps the rotation loss periodic-safe
        rot_loss = (F.smooth_l1_loss(torch.sin(s_rot - t_rot), torch.zeros_like(s_rot),
                                     reduction='none') * match_mask).sum() / num_matched
        box_terms.append(reg_loss + rot_loss)
        cls_terms.append((F.smooth_l1_loss(s_cls, matched_t_cls, reduction='none')
                          * match_mask).sum() / num_matched)
    return sum(box_terms) / n_frames, sum(cls_terms) / n_frames
def sigmoid_rampup(current, rampup_start, rampup_end):
    """Sigmoid-shaped ramp: 0 before rampup_start, 1 from rampup_end on.

    Inside the ramp the weight follows exp(-5 * phase^2), where phase
    shrinks from 1 to 0 as `current` approaches rampup_end.
    """
    assert rampup_start <= rampup_end
    if current < rampup_start:
        return 0
    elif (current >= rampup_start) and (current < rampup_end):
        span = max(rampup_end, 0) - max(rampup_start, 0)
        if span == 0:  # degenerate ramp: jump straight to 1
            return 1
        phase = 1.0 - (current - max(rampup_start, 0)) / span
        return float(np.exp(-5.0 * phase * phase))
    elif current >= rampup_end:
        return 1
    else:
        raise Exception('Impossible condition for sigmoid rampup')
def se_ssd(teacher_model, student_model,
           ld_teacher_batch_dict, ld_student_batch_dict,
           ud_teacher_batch_dict, ud_student_batch_dict,
           cfgs, epoch_id, dist
           ):
    """SE-SSD step: supervised loss plus a ramped IoU-matched consistency loss.

    Returns (loss, tb_dict, disp_dict) from the labeled student pass.
    """
    for bd in (ld_teacher_batch_dict, ld_student_batch_dict,
               ud_teacher_batch_dict, ud_student_batch_dict):
        load_data_to_gpu(bd)

    # Forward both models on both splits; only the labeled student pass
    # yields the supervised loss.
    if dist:
        ld_teacher_batch_dict, ud_teacher_batch_dict = teacher_model(ld_teacher_batch_dict, ud_teacher_batch_dict)
        (ld_student_batch_dict, ret_dict, tb_dict, disp_dict), (ud_student_batch_dict) = \
            student_model(ld_student_batch_dict, ud_student_batch_dict)
    else:
        ld_teacher_batch_dict = teacher_model(ld_teacher_batch_dict)
        ud_teacher_batch_dict = teacher_model(ud_teacher_batch_dict)
        ld_student_batch_dict, ret_dict, tb_dict, disp_dict = student_model(ld_student_batch_dict)
        ud_student_batch_dict = student_model(ud_student_batch_dict)
    sup_loss = ret_dict['loss'].mean()

    ld_teacher_boxes = filter_boxes(ld_teacher_batch_dict, cfgs)
    ud_teacher_boxes = filter_boxes(ud_teacher_batch_dict, cfgs)
    ld_student_boxes = filter_boxes(ld_student_batch_dict, cfgs)
    ud_student_boxes = filter_boxes(ud_student_batch_dict, cfgs)
    # Teacher boxes are mapped into each student's augmented frame first.
    ld_teacher_boxes = reverse_transform(ld_teacher_boxes, ld_teacher_batch_dict, ld_student_batch_dict)
    ud_teacher_boxes = reverse_transform(ud_teacher_boxes, ud_teacher_batch_dict, ud_student_batch_dict)

    ld_box_loss, ld_cls_loss = get_iou_consistency_loss(ld_teacher_boxes, ld_student_boxes)
    ud_box_loss, ud_cls_loss = get_iou_consistency_loss(ud_teacher_boxes, ud_student_boxes)
    consistency_loss = ((ld_box_loss + ud_box_loss) * cfgs.CONSIST_BOX_WEIGHT
                        + (ld_cls_loss + ud_cls_loss) * cfgs.CONSIST_CLS_WEIGHT)

    # consistency term ramps up between the two EMA_EPOCH boundaries
    ramp = sigmoid_rampup(epoch_id, cfgs.TEACHER.EMA_EPOCH[0], cfgs.TEACHER.EMA_EPOCH[1])
    loss = sup_loss + cfgs.CONSISTENCY_WEIGHT * ramp * consistency_loss
    return loss, tb_dict, disp_dict
| 5,617
| 51.018519
| 148
|
py
|
3DTrans
|
3DTrans-master/tools/ssl_utils/pseudo_label.py
|
import torch
from .semi_utils import reverse_transform, load_data_to_gpu, construct_pseudo_label
def pseudo_label(teacher_model, student_model,
                 ld_teacher_batch_dict, ld_student_batch_dict,
                 ud_teacher_batch_dict, ud_student_batch_dict,
                 cfgs, epoch_id, dist
                 ):
    """Train the student on teacher-generated pseudo labels for the unlabeled split.

    Returns (loss, tb_dict, disp_dict); tb_dict/disp_dict come from the
    student pass over the unlabeled split.
    """
    assert ld_teacher_batch_dict is None  # Only generate labels for unlabeled data
    for bd in (ld_student_batch_dict, ud_student_batch_dict, ud_teacher_batch_dict):
        load_data_to_gpu(bd)

    if dist:
        _, ud_teacher_batch_dict = teacher_model(ld_teacher_batch_dict, ud_teacher_batch_dict)
        teacher_boxes, _ = teacher_model.module.onepass.post_processing(ud_teacher_batch_dict)
    else:
        ud_teacher_batch_dict = teacher_model(ud_teacher_batch_dict)
        teacher_boxes, _ = teacher_model.post_processing(ud_teacher_batch_dict)

    # Teacher predictions are mapped into the student's augmented frame.
    teacher_boxes = reverse_transform(teacher_boxes, ud_teacher_batch_dict, ud_student_batch_dict)

    if cfgs.get('FILTER_BY_SCORE_THRESHOLD', False):
        # keep only confident teacher boxes before constructing labels
        pred_dicts = []
        for index in range(ud_teacher_batch_dict['batch_size']):
            frame = teacher_boxes[index]
            keep = frame['pred_scores'] > cfgs.SCORE_THRESHOLD
            pred_dicts.append({
                'pred_boxes': frame['pred_boxes'][keep],
                'pred_scores': frame['pred_scores'][keep],
                'pred_labels': frame['pred_labels'][keep]
            })
        gt_boxes = construct_pseudo_label(pred_dicts)
    else:
        gt_boxes = construct_pseudo_label(teacher_boxes)
    ud_student_batch_dict['gt_boxes'] = gt_boxes

    if dist:
        (_, ld_ret_dict, _, _), (_, ud_ret_dict, tb_dict, disp_dict) = \
            student_model(ld_student_batch_dict, ud_student_batch_dict)
    else:
        _, ld_ret_dict, _, _ = student_model(ld_student_batch_dict)
        _, ud_ret_dict, tb_dict, disp_dict = student_model(ud_student_batch_dict)

    loss = ld_ret_dict['loss'].mean() + ud_ret_dict['loss'].mean()
    return loss, tb_dict, disp_dict
| 2,249
| 42.269231
| 130
|
py
|
3DTrans
|
3DTrans-master/tools/train_utils/train_active_CLUE.py
|
import glob
import os
import pickle
from symbol import parameters
import torch
import tqdm
import time
from torch.nn.utils import clip_grad_norm_
from pcdet.utils import common_utils, commu_utils, self_training_utils
from pcdet.models import load_data_to_gpu
from pcdet.datasets import build_dataloader, build_dataloader_ada
from pcdet.utils import active_learning_2D_utils
def train_detector(model, model_func, optimizer, lr_scheduler, source_loader, sample_loader, source_loader_iter, sample_loader_iter,
                   dist_train, optim_cfg, rank, total_it_each_epoch, accumulated_iter_detector, tb_log, tbar, leave_pbar=False):
    """Train the detector for one epoch on source + actively-sampled batches.

    Each iteration draws one batch from the source loader and one from the
    sample loader (restarting either iterator when it is exhausted), combines
    the losses as loss_src + SAMPLE_LOSS_SCALE * loss_sam, clips gradients
    and steps the optimizer.  Progress and metrics are written to tqdm and
    tensorboard on rank 0 only.

    Returns:
        the updated accumulated_iter_detector counter.

    NOTE(review): unlike the variant in train_active_source_utils, this
    version does not guard against sample_loader/sample_loader_iter being
    None — verify callers never pass None here.
    """
    model.train()
    # enable gradients for every detector parameter
    if dist_train:
        for p in model.module.parameters():
            p.requires_grad = True
    else:
        for p in model.parameters():
            p.requires_grad = True
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train_detector', dynamic_ncols=True)
        data_time = common_utils.AverageMeter()
        batch_time = common_utils.AverageMeter()
        forward_time = common_utils.AverageMeter()
    forward_args = {
        'mode': 'train_detector'
    }
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        # cycle the source loader indefinitely
        try:
            batch_src = next(source_loader_iter)
        except StopIteration:
            source_loader_iter = iter(source_loader)
            batch_src = next(source_loader_iter)
            print('new source iter')
        # cycle the actively-sampled loader indefinitely
        try:
            batch_sample = next(sample_loader_iter)
        except StopIteration:
            sample_loader_iter = iter(sample_loader)
            batch_sample = next(sample_loader_iter)
            print('new sample iter')
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter_detector)
        # some optimizer wrappers expose .lr directly; fall back to param_groups
        try:
            cur_lr = float(optimizer.lr)
        except:
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter_detector)
        optimizer.zero_grad()
        loss_sam, tb_dict_sam, disp_dict = model_func(model, batch_sample, **forward_args)
        loss_src, tb_dict_src, disp_dict = model_func(model, batch_src, **forward_args)
        # loss_sam, tb_dict_sam, disp_dict = model_func(model, batch_sample, forward_args)
        # weighted sum of source and sampled-target losses
        loss = loss_src + optim_cfg.SAMPLE_LOSS_SCALE * loss_sam
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter_detector += 1
        cur_batch_time = time.time() - end
        # average the timings across ranks for logging
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr_detector': cur_lr,
                'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})',
                'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter_detector))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss_detector', loss, accumulated_iter_detector)
                tb_log.add_scalar('meta_data/learning_rate_detector', cur_lr, accumulated_iter_detector)
                for key, val in tb_dict_src.items():
                    tb_log.add_scalar('train/detector_src' + key, val, accumulated_iter_detector)
                for key, val in tb_dict_sam.items():
                    tb_log.add_scalar('train/detector_sam' + key, val, accumulated_iter_detector)
    if rank == 0:
        pbar.close()
    return accumulated_iter_detector
def train_active_model_target(model, optimizer, source_train_loader, target_train_loader, model_func, lr_scheduler, optim_cfg,
                              start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir, sample_epoch,
                              annotation_budget, target_file_path, sample_save_path, cfg, batch_size, workers, dist_train,
                              source_sampler=None, target_sampler=None, lr_warmup_scheduler=None, ckpt_save_interval=1,
                              max_ckpt_save_num=50, merge_all_iters_to_one_epoch=False, logger=None, ema_model=None):
    """Active-learning training loop with CLUE sampling on the target domain.

    At every epoch listed in sample_epoch, target frames are scored, a budget
    of frames is selected via CLUE, the selected frames are moved from the
    target pool into the sample pool, and both dataloaders are rebuilt.
    Every epoch then trains the detector on source + sampled batches and
    periodically checkpoints on rank 0.

    NOTE(review): if the first processed epoch is not in sample_epoch,
    sample_train_loader is still None and train_detector in this file will
    call next() on a None iterator — confirm sample_epoch always includes the
    first epoch, or that callers guarantee an initial sampling round.
    NOTE(review): cur_scheduler (warmup vs normal) is computed but
    train_detector receives lr_scheduler — the warmup scheduler appears
    unused; verify intent.
    """
    target_list = active_learning_2D_utils.get_dataset_list(target_file_path, oss=True)
    sample_list = []
    sample_train_loader = None  # populated after the first active-sampling round
    target_name = cfg['DATA_CONFIG_TAR']['DATASET']
    accumulated_iter_detector = start_iter
    source_reader = common_utils.DataReader(source_train_loader, source_sampler)
    source_reader.construct_iter()
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True,
                     leave=(rank == 0)) as tbar:
        if merge_all_iters_to_one_epoch:
            assert hasattr(source_train_loader.dataset, 'merge_all_iters_to_one_epoch')
            source_train_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
            total_iters_each_epoch = len(source_train_loader) // max(total_epochs, 1)
        dataloader_iter_src = iter(source_train_loader)
        dataloader_iter_tar = iter(target_train_loader) if target_train_loader is not None else None
        for cur_epoch in tbar:
            # keep distributed samplers shuffling differently each epoch
            if source_sampler is not None:
                source_sampler.set_epoch(cur_epoch)
            if target_sampler is not None:
                target_sampler.set_epoch(cur_epoch)
            # train one epoch
            if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            # active evaluate and sample
            if cur_epoch in sample_epoch:
                # sample from target_domain
                frame_score = active_learning_2D_utils.active_evaluate_dual(model, target_train_loader, rank, domain='target')
                sampled_frame_id, _ = active_learning_2D_utils.active_sample_CLUE(frame_score, budget=annotation_budget)
                # move the selected frames from the target pool into the sample pool
                sample_list, info_path = active_learning_2D_utils.update_sample_list_dual(
                    sample_list, target_list, sampled_frame_id, cur_epoch, sample_save_path, target_name, rank, domain='target'
                )
                target_list, target_info_path = active_learning_2D_utils.update_target_list(target_list, sampled_frame_id, cur_epoch, sample_save_path, target_name, rank)
                # rebuild both loaders from the updated info files
                sample_train_set, sample_train_loader, sample_train_sampler = build_dataloader_ada(
                    dataset_cfg=cfg.DATA_CONFIG_SAMPLE,
                    class_names=cfg.DATA_CONFIG_SAMPLE.CLASS_NAMES,
                    batch_size=batch_size,
                    dist=dist_train, workers=workers,
                    logger=logger,
                    training=True,
                    info_path=info_path,
                    merge_all_iters_to_one_epoch=merge_all_iters_to_one_epoch,
                    total_epochs=total_epochs-cur_epoch
                )
                target_train_set, target_train_loader, target_train_sampler = build_dataloader_ada(
                    dataset_cfg=cfg.DATA_CONFIG_TAR,
                    class_names=cfg.DATA_CONFIG_TAR.CLASS_NAMES,
                    batch_size=batch_size,
                    dist=dist_train, workers=workers,
                    logger=logger,
                    training=True,
                    info_path=target_info_path,
                    merge_all_iters_to_one_epoch=merge_all_iters_to_one_epoch,
                    total_epochs=total_epochs-cur_epoch
                )
                dataloader_iter_tar = iter(target_train_loader)
            dataloader_iter_sample = iter(sample_train_loader) if sample_train_loader is not None else None
            accumulated_iter_detector = train_detector(
                model,
                model_func,
                optimizer,
                lr_scheduler,
                source_train_loader,
                sample_train_loader,
                dataloader_iter_src,
                dataloader_iter_sample,
                dist_train,
                optim_cfg,
                rank,
                len(sample_train_loader),
                accumulated_iter_detector,
                tb_log, tbar
            )
            # save trained model
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                # keep at most max_ckpt_save_num checkpoints, oldest removed first
                if ckpt_list.__len__() >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                save_checkpoint(
                    checkpoint_state(model, epoch=trained_epoch, it=accumulated_iter_detector), filename=ckpt_name,
                )
def model_state_to_cpu(model_state):
    """Return a copy of *model_state* (same mapping type, e.g. OrderedDict)
    with every tensor moved to CPU."""
    cpu_state = type(model_state)()  # preserve the original mapping type
    for name, tensor in model_state.items():
        cpu_state[name] = tensor.cpu()
    return cpu_state
def checkpoint_state(model=None, optimizer=None, epoch=None, it=None):
    """Assemble a serializable checkpoint dict.

    Args:
        model: network (plain module or DistributedDataParallel wrapper) or None.
        optimizer: optimizer whose state_dict is stored, or None.
        epoch: epoch counter to record.
        it: iteration counter to record.

    Returns:
        dict with keys 'epoch', 'it', 'model_state', 'optimizer_state', 'version'.
    """
    optim_state = optimizer.state_dict() if optimizer is not None else None
    if model is not None:
        if isinstance(model, torch.nn.parallel.DistributedDataParallel):
            # unwrap DDP and move tensors to CPU so the checkpoint loads anywhere
            model_state = model_state_to_cpu(model.module.state_dict())
        else:
            model_state = model.state_dict()
    else:
        model_state = None
    try:
        import pcdet
        version = 'pcdet+' + pcdet.__version__
    except Exception:
        # narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are not swallowed; missing pcdet (or __version__) falls back here
        version = 'none'
    return {'epoch': epoch, 'it': it, 'model_state': model_state, 'optimizer_state': optim_state, 'version': version}
def save_checkpoint(state, filename='checkpoint'):
    """Serialize a checkpoint dict to '<filename>.pth'.

    Args:
        state: checkpoint dict (see checkpoint_state).
        filename: path prefix; the '.pth' suffix is appended here.
    """
    # NOTE: an earlier revision could split 'optimizer_state' into a separate
    # '<filename>_optim.pth' file; that path was disabled ('if False and ...')
    # and has been removed as dead code.
    filename = '{}.pth'.format(filename)
    torch.save(state, filename)
| 11,006
| 41.334615
| 170
|
py
|
3DTrans
|
3DTrans-master/tools/train_utils/train_active_source_utils.py
|
from dis import dis
import glob
import os
import pickle
import torch
import tqdm
import time
from torch.nn.utils import clip_grad_norm_
from pcdet.utils import common_utils, commu_utils, self_training_utils
from pcdet.models import load_data_to_gpu
from pcdet.datasets import build_dataloader, build_dataloader_ada
from pcdet.utils import active_learning_utils
def train_detector(model, model_func, optimizer, lr_scheduler, source_loader, sample_loader, source_loader_iter, sample_loader_iter,
                   dist_train, optim_cfg, rank, total_it_each_epoch, accumulated_iter_detector, tb_log, tbar, leave_pbar=False):
    """Train the detector for one epoch while the domain discriminator stays frozen.

    Draws one source batch per iteration (plus one actively-sampled batch when
    sample_loader is given) and minimizes loss_src + SAMPLE_LOSS_SCALE *
    loss_sam (or loss_src alone when no sample loader exists yet).  Metrics
    go to tqdm/tensorboard on rank 0 only.

    Returns:
        the updated accumulated_iter_detector counter.
    """
    model.train()
    # detector weights train; discriminator weights are frozen in this phase
    if dist_train:
        for p in model.module.parameters():
            p.requires_grad = True
        for p in model.module.discriminator.parameters():
            p.requires_grad = False
    else:
        for p in model.parameters():
            p.requires_grad = True
        for p in model.discriminator.parameters():
            p.requires_grad = False
    # NOTE(review): the caller-provided total_it_each_epoch is overridden here
    total_it_each_epoch = len(source_loader)
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train_detector', dynamic_ncols=True)
        data_time = common_utils.AverageMeter()
        batch_time = common_utils.AverageMeter()
        forward_time = common_utils.AverageMeter()
    forward_args = {
        'mode': 'train_detector'
    }
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        # cycle the source loader indefinitely
        try:
            batch_src = next(source_loader_iter)
        except StopIteration:
            source_loader_iter = iter(source_loader)
            batch_src = next(source_loader_iter)
            print('new source iter')
        # the sampled loader is optional (None before the first active round)
        if sample_loader is not None:
            try:
                batch_sample = next(sample_loader_iter)
            except StopIteration:
                sample_loader_iter = iter(sample_loader)
                batch_sample = next(sample_loader_iter)
                print('new sample iter')
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter_detector)
        # some optimizer wrappers expose .lr directly; fall back to param_groups
        try:
            cur_lr = float(optimizer.lr)
        except:
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter_detector)
        optimizer.zero_grad()
        if sample_loader is not None:
            loss_sam, tb_dict_sam, disp_dict = model_func(model, batch_sample, **forward_args)
        loss_src, tb_dict_src, disp_dict = model_func(model, batch_src, **forward_args)
        # loss_sam, tb_dict_sam, disp_dict = model_func(model, batch_sample, forward_args)
        # weighted sum when sampled batches exist; source-only otherwise
        if sample_loader is not None:
            loss = loss_src + optim_cfg.SAMPLE_LOSS_SCALE * loss_sam
        else:
            loss = loss_src
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter_detector += 1
        cur_batch_time = time.time() - end
        # average the timings across ranks for logging
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr_detector': cur_lr,
                'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})',
                'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter_detector))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss_detector', loss, accumulated_iter_detector)
                tb_log.add_scalar('meta_data/learning_rate_detector', cur_lr, accumulated_iter_detector)
                for key, val in tb_dict_src.items():
                    tb_log.add_scalar('train/detector_src' + key, val, accumulated_iter_detector)
                if sample_loader is not None:
                    for key, val in tb_dict_sam.items():
                        tb_log.add_scalar('train/detector_sam' + key, val, accumulated_iter_detector)
    if rank == 0:
        pbar.close()
    return accumulated_iter_detector
def train_discriminator(model, optimizer, lr_scheduler, source_loader, target_loader,
                        source_loader_iter, target_loader_iter, dist_train, total_it_each_epoch,
                        accumulated_iter_discriminator, optim_cfg, tb_log, rank, tbar, leave_pbar=False):
    """Train only the domain discriminator for total_it_each_epoch iterations.

    The detector is frozen; each iteration forwards one source batch
    (source=True) and one target batch (source=False) in
    'train_discriminator' mode and averages the two losses.

    Returns:
        the updated accumulated_iter_discriminator counter.
    """
    model.train()
    # freeze the detector, unfreeze only the discriminator head
    if dist_train:
        for p in model.module.parameters():
            p.requires_grad = False
        for p in model.module.discriminator.parameters():
            p.requires_grad = True
    else:
        for p in model.parameters():
            p.requires_grad = False
        for p in model.discriminator.parameters():
            p.requires_grad = True
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train_discriminator', dynamic_ncols=True)
        data_time = common_utils.AverageMeter()
        batch_time = common_utils.AverageMeter()
        forward_time = common_utils.AverageMeter()
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        # cycle the source loader indefinitely
        try:
            batch_src = next(source_loader_iter)
        except StopIteration:
            source_loader_iter = iter(source_loader)
            batch_src = next(source_loader_iter)
            print('new source iter')
        # cycle the target loader indefinitely
        try:
            batch_tar = next(target_loader_iter)
        except StopIteration:
            target_loader_iter = iter(target_loader)
            batch_tar = next(target_loader_iter)
            print('new target iter')
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter_discriminator)
        # some optimizer wrappers expose .lr directly; fall back to param_groups
        try:
            cur_lr = float(optimizer.lr)
        except:
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter_discriminator)
        optimizer.zero_grad()
        load_data_to_gpu(batch_src)
        forward_args = {
            'mode': 'train_discriminator',
            'source': True
        }
        loss_src = model(batch_src, **forward_args)
        load_data_to_gpu(batch_tar)
        forward_args = {
            'mode': 'train_discriminator',
            'source': False
        }
        loss_tar = model(batch_tar, **forward_args)
        # symmetric average of the two domain losses
        loss = (loss_src + loss_tar) / 2
        tb_dict = {
            'discriminator_loss': loss.item()
        }
        disp_dict = {}
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter_discriminator += 1
        cur_batch_time = time.time() - end
        # average the timings across ranks for logging
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr_discriminator': cur_lr,
                'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})',
                'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter_discriminator))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss_discriminator', loss, accumulated_iter_discriminator)
                tb_log.add_scalar('meta_data/learning_rate_discriminator', cur_lr, accumulated_iter_discriminator)
                for key, val in tb_dict.items():
                    tb_log.add_scalar('train/' + key, val, accumulated_iter_discriminator)
    if rank == 0:
        pbar.close()
    return accumulated_iter_discriminator
def train_one_epoch(model, optimizer_detector, optimizer_discriminator, source_train_loader, target_train_loader,
                    sample_train_loader, model_func, lr_scheduler_detector, lr_scheduler_discriminator,
                    accumulated_iter_detector, accumulated_iter_discriminator, optim_cfg, rank, tbar,
                    dist_train, total_it_each_epoch, dataloader_iter_src, dataloader_iter_tar, dataloader_iter_sample,
                    tb_log=None, leave_pbar=False, ema_model=None):
    """Run one adversarial-adaptation epoch: first update the detector on source
    plus sampled data, then update the domain discriminator on source vs. target.

    Returns the updated (detector, discriminator) accumulated iteration counters.
    """
    # assert total_it_each_epoch == len(source_train_loader)
    # Phase 1: detector update (discriminator frozen inside train_detector).
    accumulated_iter_detector = train_detector(
        model, model_func, optimizer_detector, lr_scheduler_detector,
        source_train_loader, sample_train_loader,
        dataloader_iter_src, dataloader_iter_sample,
        dist_train, optim_cfg, rank, total_it_each_epoch,
        accumulated_iter_detector, tb_log, tbar)
    # Phase 2: discriminator update for 2 iterations (detector frozen inside).
    accumulated_iter_discriminator = train_discriminator(
        model, optimizer_discriminator, lr_scheduler_discriminator,
        source_train_loader, target_train_loader,
        dataloader_iter_src, dataloader_iter_tar,
        dist_train, 2, accumulated_iter_discriminator,
        optim_cfg.DISCRIMINATOR, tb_log, rank, tbar)
    return accumulated_iter_detector, accumulated_iter_discriminator
def train_active_model_source(model, optimizer, source_train_loader, target_train_loader, sample_loader, model_func, lr_scheduler,
                              optim_cfg, total_iters_each_epoch, start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir,
                              sample_epoch, source_budget, source_file_path, sample_save_path, cfg, batch_size, workers, dist_train,
                              source_sampler=None, target_sampler=None, sample_sampler=None, lr_warmup_scheduler=None, ckpt_save_interval=1,
                              max_ckpt_save_num=50, merge_all_iters_to_one_epoch=False, logger=None, ema_model=None):
    """Active-domain-adaptation training driver.

    Per epoch it (1) trains the domain discriminator on source vs. target
    batches, (2) on epochs listed in ``sample_epoch`` actively scores all source
    frames and adds ``source_budget`` of them to the accumulated sample list
    (rebuilding a dataloader over just those frames), and (3) trains the
    detector on the sampled source frames plus ``sample_loader`` data.

    ``optimizer`` and ``lr_scheduler`` are 2-element sequences: index 0 drives
    the detector, index 1 the discriminator.  Rank 0 writes a checkpoint every
    ``ckpt_save_interval`` epochs, keeping at most ``max_ckpt_save_num`` files.
    """
    # Full candidate list of source frames to sample from (read via OSS storage).
    source_list = active_learning_utils.get_dataset_list(source_file_path, oss=True)
    sample_list_source = []      # source frames selected so far across sample epochs
    sample_train_loader = None   # NOTE(review): assigned but never used in this function
    target_name = cfg['DATA_CONFIG_TAR']['DATASET']   # NOTE(review): unused here
    source_name = cfg['DATA_CONFIG']['DATASET']
    accumulated_iter_detector, accumulated_iter_mul_cls, accumulated_iter_discriminator = start_iter, start_iter, start_iter
    source_reader = common_utils.DataReader(source_train_loader, source_sampler)
    source_reader.construct_iter()
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True,
                     leave=(rank == 0)) as tbar:
        if merge_all_iters_to_one_epoch:
            assert hasattr(source_train_loader.dataset, 'merge_all_iters_to_one_epoch')
            source_train_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
            total_iters_each_epoch = len(source_train_loader) // max(total_epochs, 1)
        dataloader_iter_src = iter(source_train_loader)
        dataloader_iter_tar = iter(target_train_loader) if target_train_loader is not None else None
        dataloader_iter_sample = iter(sample_loader)
        for cur_epoch in tbar:
            # Keep distributed samplers shuffling differently every epoch.
            if source_sampler is not None:
                source_sampler.set_epoch(cur_epoch)
            if target_sampler is not None:
                target_sampler.set_epoch(cur_epoch)
            # train one epoch
            if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            # NOTE(review): cur_scheduler is computed but the calls below pass
            # lr_scheduler[0]/lr_scheduler[1] directly, so the warmup scheduler
            # is never actually used -- confirm whether this is intentional.
            # Phase 1: discriminator update (detector frozen inside the helper).
            accumulated_iter_discriminator = train_discriminator(
                model,
                optimizer[1],
                lr_scheduler[1],
                source_train_loader,
                target_train_loader,
                dataloader_iter_src,
                dataloader_iter_tar,
                dist_train,
                len(target_train_loader),
                accumulated_iter_discriminator,
                optim_cfg.DISCRIMINATOR,
                tb_log, rank, tbar
            )
            # active evaluate and sample
            if cur_epoch in sample_epoch:
                # sample from source domain: score every source frame, pick the
                # top ``source_budget`` and extend the accumulated sample list.
                frame_score = active_learning_utils.active_evaluate_dual(model, source_train_loader, rank, domain='source')
                sampled_frame_id_source, _ = active_learning_utils.active_sample(frame_score, source_budget)
                sample_list_source, info_path_source = active_learning_utils.update_sample_list_dual(sample_list_source, source_list, sampled_frame_id_source,
                                                                                                    cur_epoch, sample_save_path, source_name, rank, domain='source')
                # Rebuild the source dataloader over only the sampled frames.
                source_sample_set, source_sample_loader, source_sample_sampler = build_dataloader_ada(
                    dataset_cfg=cfg.DATA_CONFIG,
                    class_names=cfg.CLASS_NAMES,
                    batch_size=batch_size,
                    dist=dist_train,
                    workers=workers,
                    logger=logger,
                    training=True,
                    info_path=info_path_source,
                    merge_all_iters_to_one_epoch=merge_all_iters_to_one_epoch,
                    total_epochs=total_epochs-cur_epoch
                )
            dataloader_iter_tar = iter(target_train_loader)
            # dataloader_iter_sample = iter(sample_train_loader)
            # NOTE(review): source_sample_loader only exists after the first
            # sample epoch; this assumes sample_epoch covers the starting epoch
            # -- confirm against callers.
            dataloader_iter_src_sample = iter(source_sample_loader) if source_sample_loader is not None else None
            # Phase 2: detector update on sampled source + target sample data.
            accumulated_iter_detector = train_detector(
                model,
                model_func,
                optimizer[0],
                lr_scheduler[0],
                source_sample_loader,
                sample_loader,
                dataloader_iter_src_sample,
                dataloader_iter_sample,
                dist_train,
                optim_cfg,
                rank,
                total_iters_each_epoch,
                accumulated_iter_detector,
                tb_log, tbar
            )
            # save trained model
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                # Rotate checkpoints: drop the oldest files beyond the cap.
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                if ckpt_list.__len__() >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                save_checkpoint(
                    checkpoint_state(model, epoch=trained_epoch, it=accumulated_iter_detector), filename=ckpt_name,
                )
def train_active_model_source_2(model, optimizer, source_train_loader, target_train_loader, sample_loader, model_func, lr_scheduler,
                                optim_cfg, total_iters_each_epoch, start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir,
                                sample_epoch, source_budget, source_file_path, sample_save_path, cfg, batch_size, workers, dist_train,
                                source_sampler=None, target_sampler=None, sample_sampler=None, lr_warmup_scheduler=None, ckpt_save_interval=1,
                                max_ckpt_save_num=50, merge_all_iters_to_one_epoch=False, logger=None, ema_model=None):
    """Variant of ``train_active_model_source`` whose only difference is the
    sampling strategy: it calls ``active_sample_source`` instead of
    ``active_sample`` when selecting source frames.

    Per epoch: train the discriminator, then (on ``sample_epoch`` epochs)
    actively select ``source_budget`` source frames and rebuild a dataloader
    over them, then train the detector on sampled source + sample data.
    ``optimizer``/``lr_scheduler`` are pairs: [0] detector, [1] discriminator.
    """
    # Full candidate list of source frames to sample from (read via OSS storage).
    source_list = active_learning_utils.get_dataset_list(source_file_path, oss=True)
    sample_list_source = []      # source frames selected so far
    sample_train_loader = None   # NOTE(review): assigned but never used
    target_name = cfg['DATA_CONFIG_TAR']['DATASET']   # NOTE(review): unused here
    source_name = cfg['DATA_CONFIG']['DATASET']
    accumulated_iter_detector, accumulated_iter_mul_cls, accumulated_iter_discriminator = start_iter, start_iter, start_iter
    source_reader = common_utils.DataReader(source_train_loader, source_sampler)
    source_reader.construct_iter()
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True,
                     leave=(rank == 0)) as tbar:
        if merge_all_iters_to_one_epoch:
            assert hasattr(source_train_loader.dataset, 'merge_all_iters_to_one_epoch')
            source_train_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
            total_iters_each_epoch = len(source_train_loader) // max(total_epochs, 1)
        dataloader_iter_src = iter(source_train_loader)
        dataloader_iter_tar = iter(target_train_loader) if target_train_loader is not None else None
        dataloader_iter_sample = iter(sample_loader)
        for cur_epoch in tbar:
            # Re-seed distributed samplers so shuffling differs per epoch.
            if source_sampler is not None:
                source_sampler.set_epoch(cur_epoch)
            if target_sampler is not None:
                target_sampler.set_epoch(cur_epoch)
            # train one epoch
            if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            # NOTE(review): cur_scheduler is never used below (lr_scheduler[0]/[1]
            # are passed directly); the warmup path appears dead -- confirm.
            # Phase 1: discriminator update.
            accumulated_iter_discriminator = train_discriminator(
                model,
                optimizer[1],
                lr_scheduler[1],
                source_train_loader,
                target_train_loader,
                dataloader_iter_src,
                dataloader_iter_tar,
                dist_train,
                len(target_train_loader),
                accumulated_iter_discriminator,
                optim_cfg.DISCRIMINATOR,
                tb_log, rank, tbar
            )
            # active evaluate and sample
            if cur_epoch in sample_epoch:
                # sample from source domain (active_sample_source strategy).
                frame_score = active_learning_utils.active_evaluate_dual(model, source_train_loader, rank, domain='source')
                sampled_frame_id_source, _ = active_learning_utils.active_sample_source(frame_score, source_budget)
                sample_list_source, info_path_source = active_learning_utils.update_sample_list_dual(sample_list_source, source_list, sampled_frame_id_source,
                                                                                                    cur_epoch, sample_save_path, source_name, rank, domain='source')
                # Rebuild the source dataloader over only the sampled frames.
                source_sample_set, source_sample_loader, source_sample_sampler = build_dataloader_ada(
                    dataset_cfg=cfg.DATA_CONFIG,
                    class_names=cfg.CLASS_NAMES,
                    batch_size=batch_size,
                    dist=dist_train,
                    workers=workers,
                    logger=logger,
                    training=True,
                    info_path=info_path_source,
                    merge_all_iters_to_one_epoch=merge_all_iters_to_one_epoch,
                    total_epochs=total_epochs-cur_epoch
                )
            dataloader_iter_tar = iter(target_train_loader)
            # dataloader_iter_sample = iter(sample_train_loader)
            # NOTE(review): assumes a sample epoch has already created
            # source_sample_loader -- confirm sample_epoch covers the start.
            dataloader_iter_src_sample = iter(source_sample_loader) if source_sample_loader is not None else None
            # Phase 2: detector update on sampled source + sample data.
            accumulated_iter_detector = train_detector(
                model,
                model_func,
                optimizer[0],
                lr_scheduler[0],
                source_sample_loader,
                sample_loader,
                dataloader_iter_src_sample,
                dataloader_iter_sample,
                dist_train,
                optim_cfg,
                rank,
                total_iters_each_epoch,
                accumulated_iter_detector,
                tb_log, tbar
            )
            # save trained model
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                # Rotate checkpoints: remove oldest files beyond the cap.
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                if ckpt_list.__len__() >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                save_checkpoint(
                    checkpoint_state(model, epoch=trained_epoch, it=accumulated_iter_detector), filename=ckpt_name,
                )
def train_active_model_source_only(model, optimizer, source_train_loader, target_train_loader, sample_loader, model_func, lr_scheduler,
                                   optim_cfg, total_iters_each_epoch, start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir,
                                   sample_epoch, source_budget, source_file_path, sample_save_path, cfg, batch_size, workers, dist_train,
                                   source_sampler=None, target_sampler=None, sample_sampler=None, lr_warmup_scheduler=None, ckpt_save_interval=5,
                                   max_ckpt_save_num=50, merge_all_iters_to_one_epoch=False, logger=None, ema_model=None):
    """Active-DA training driver that samples only from the source domain and
    supports Waymo's frame-subsampling interval when listing candidate frames.

    Differences from ``train_active_model_source``: the discriminator is only
    trained while active sampling is still scheduled (cur_epoch <=
    sample_epoch[-1]), and checkpoints default to every 5 epochs.
    ``optimizer``/``lr_scheduler`` are pairs: [0] detector, [1] discriminator.
    """
    sample_train_loader = None   # NOTE(review): assigned but never used
    target_name = cfg['DATA_CONFIG_TAR']['DATASET']   # NOTE(review): unused here
    source_name = cfg['DATA_CONFIG']['DATASET']
    # Waymo datasets are stored with a frame sampling interval; honour it when
    # enumerating candidate frames so indices line up with the dataloader.
    waymo_source = True if source_name == 'ActiveWaymoDataset' else False
    if waymo_source:
        sample_interval = cfg['DATA_CONFIG']['SAMPLED_INTERVAL'].get('train', 1)
    else:
        sample_interval = 1
    source_list = active_learning_utils.get_dataset_list(source_file_path, oss=True, waymo=waymo_source, sample_interval=sample_interval)
    print('source_list % d' % len(source_list))
    sample_list_source = []   # source frames selected so far across sample epochs
    accumulated_iter_detector, accumulated_iter_discriminator = start_iter, start_iter
    source_reader = common_utils.DataReader(source_train_loader, source_sampler)
    source_reader.construct_iter()
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True,
                     leave=(rank == 0)) as tbar:
        if merge_all_iters_to_one_epoch:
            assert hasattr(source_train_loader.dataset, 'merge_all_iters_to_one_epoch')
            source_train_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
            total_iters_each_epoch = len(source_train_loader) // max(total_epochs, 1)
        dataloader_iter_src = iter(source_train_loader)
        dataloader_iter_tar = iter(target_train_loader) if target_train_loader is not None else None
        dataloader_iter_sample = iter(sample_loader) if sample_loader is not None else None
        for cur_epoch in tbar:
            # Re-seed distributed samplers for per-epoch shuffling.
            if source_sampler is not None:
                source_sampler.set_epoch(cur_epoch)
            if target_sampler is not None:
                target_sampler.set_epoch(cur_epoch)
            # train one epoch
            if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            # NOTE(review): cur_scheduler is never used below -- the warmup
            # scheduler path appears dead; confirm.
            # Only train the discriminator while active sampling is ongoing.
            if cur_epoch <= sample_epoch[-1]:
                accumulated_iter_discriminator = train_discriminator(
                    model,
                    optimizer[1],
                    lr_scheduler[1],
                    source_train_loader,
                    target_train_loader,
                    dataloader_iter_src,
                    dataloader_iter_tar,
                    dist_train,
                    len(target_train_loader),
                    accumulated_iter_discriminator,
                    optim_cfg.DISCRIMINATOR,
                    tb_log, rank, tbar
                )
            # active evaluate and sample
            if cur_epoch in sample_epoch:
                # sample from source domain: score all frames, keep top budget.
                frame_score = active_learning_utils.active_evaluate_dual(model, source_train_loader, rank, domain='source')
                # sampled_frame_id_source, _ = active_learning_utils.active_sample_source(frame_score, source_budget)
                sampled_frame_id_source, _ = active_learning_utils.active_sample(frame_score, source_budget)
                sample_list_source, info_path_source = active_learning_utils.update_sample_list_dual(sample_list_source, source_list, sampled_frame_id_source,
                                                                                                    cur_epoch, sample_save_path, source_name, rank, domain='source')
                # sample_frame_id_target = active_learning_utils.active_sample_target_2(frame_score_tar)
                # sample_list_target = []
                # target_list = active_learning_utils.get_dataset_list('', oss=True)
                # active_learning_utils.save_sample(frame_score_tar, target_list, sample_save_path)
                # sample_list_target, info_path_target = active_learning_utils.update_sample_list_dual(sample_list_target, target_list, sample_frame_id_target,
                #                                        cur_epoch, sample_save_path, 'ActiveKittiDataset', rank, domain='target')
                # Rebuild the source dataloader over only the sampled frames.
                source_sample_set, source_sample_loader, source_sample_sampler = build_dataloader_ada(
                    dataset_cfg=cfg.DATA_CONFIG,
                    class_names=cfg.CLASS_NAMES,
                    batch_size=batch_size,
                    dist=dist_train,
                    workers=workers,
                    logger=logger,
                    training=True,
                    info_path=info_path_source,
                    merge_all_iters_to_one_epoch=merge_all_iters_to_one_epoch,
                    total_epochs=total_epochs-cur_epoch
                )
            dataloader_iter_tar = iter(target_train_loader)
            # dataloader_iter_sample = iter(sample_train_loader)
            # NOTE(review): source_sample_loader is first assigned inside the
            # sample-epoch branch; assumes sampling starts at the first epoch.
            dataloader_iter_src_sample = iter(source_sample_loader) if source_sample_loader is not None else None
            # Detector update on sampled source frames + sample data.
            accumulated_iter_detector = train_detector(
                model,
                model_func,
                optimizer[0],
                lr_scheduler[0],
                source_sample_loader,
                sample_loader,
                dataloader_iter_src_sample,
                dataloader_iter_sample,
                dist_train,
                optim_cfg,
                rank,
                total_iters_each_epoch,
                accumulated_iter_detector,
                tb_log, tbar
            )
            # save trained model
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                # Rotate checkpoints: delete the oldest beyond the cap.
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                if ckpt_list.__len__() >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                save_checkpoint(
                    checkpoint_state(model, epoch=trained_epoch, it=accumulated_iter_detector), filename=ckpt_name,
                )
def model_state_to_cpu(model_state):
    """Return a copy of *model_state* (preserving its mapping type, e.g.
    OrderedDict) with every tensor moved to the CPU."""
    return type(model_state)(
        (name, tensor.cpu()) for name, tensor in model_state.items()
    )
def checkpoint_state(model=None, optimizer=None, epoch=None, it=None):
    """Assemble a serializable checkpoint dict.

    Args:
        model: nn.Module or DistributedDataParallel wrapper (optional).
        optimizer: torch optimizer whose state_dict is captured (optional).
        epoch: epoch counter to record.
        it: accumulated iteration counter to record.

    Returns:
        dict with keys 'epoch', 'it', 'model_state', 'optimizer_state',
        'version'; missing pieces are None.
    """
    optim_state = optimizer.state_dict() if optimizer is not None else None
    if model is not None:
        if isinstance(model, torch.nn.parallel.DistributedDataParallel):
            # Unwrap DDP and move weights to CPU so the checkpoint loads anywhere.
            model_state = model_state_to_cpu(model.module.state_dict())
        else:
            model_state = model.state_dict()
    else:
        model_state = None
    try:
        import pcdet
        version = 'pcdet+' + pcdet.__version__
    except Exception:  # was a bare `except:`; narrowed so Ctrl-C is not swallowed
        version = 'none'
    return {'epoch': epoch, 'it': it, 'model_state': model_state, 'optimizer_state': optim_state, 'version': version}
def save_checkpoint(state, filename='checkpoint'):
    """Serialize *state* to '<filename>.pth' via torch.save.

    Args:
        state: checkpoint dict (see ``checkpoint_state``).
        filename: path WITHOUT extension; '.pth' is appended.
    """
    # Removed the dead `if False and 'optimizer_state' in state:` branch that
    # would have split the optimizer state into a separate '_optim.pth' file --
    # it could never execute.
    filename = '{}.pth'.format(filename)
    torch.save(state, filename)
| 29,688
| 43.31194
| 175
|
py
|
3DTrans
|
3DTrans-master/tools/train_utils/train_st_utils.py
|
import torch
import os
import glob
import tqdm
from torch.nn.utils import clip_grad_norm_
from pcdet.utils import common_utils
from pcdet.utils import self_training_utils
from pcdet.config import cfg
from .train_utils import save_checkpoint, checkpoint_state
def train_one_epoch_st(model, optimizer, source_reader, target_loader, model_func, lr_scheduler,
                       accumulated_iter, optim_cfg, rank, tbar, total_it_each_epoch,
                       dataloader_iter, tb_log=None, leave_pbar=False, ema_model=None):
    """Run one self-training epoch on pseudo-labelled target-domain batches.

    Tracks the self-training loss and the counts of positive/ignored pseudo
    boxes per batch; logs to the console progress bars and TensorBoard on
    rank 0.  Returns the updated accumulated iteration counter.
    NOTE(review): source_reader and ema_model are accepted but unused here.
    """
    # Restart the iterator when we are about to consume the full loader so the
    # epoch is aligned with the dataset start.
    if total_it_each_epoch == len(target_loader):
        dataloader_iter = iter(target_loader)
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train', dynamic_ncols=True)
    # Per-epoch meters: used / ignored pseudo boxes and the ST loss.
    ps_bbox_meter = common_utils.AverageMeter()
    ignore_ps_bbox_meter = common_utils.AverageMeter()
    st_loss_meter = common_utils.AverageMeter()
    disp_dict = {}
    for cur_it in range(total_it_each_epoch):
        # Step LR by global iteration (per-iteration schedule, not per-epoch).
        lr_scheduler.step(accumulated_iter)
        try:
            cur_lr = float(optimizer.lr)
        except:  # NOTE(review): bare except; falls back to param_groups lookup
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
        model.train()
        optimizer.zero_grad()
        try:
            target_batch = next(dataloader_iter)
        except StopIteration:
            # Loader exhausted mid-epoch: wrap around and keep going.
            dataloader_iter = iter(target_loader)
            target_batch = next(dataloader_iter)
            print('new iters')
        # parameters for save pseudo label on the fly
        st_loss, st_tb_dict, st_disp_dict = model_func(model, target_batch)
        st_loss.backward()
        st_loss_meter.update(st_loss.item())
        # count number of used ps bboxes in this batch
        pos_pseudo_bbox = target_batch['pos_ps_bbox'].mean().item()
        ign_pseudo_bbox = target_batch['ign_ps_bbox'].mean().item()
        ps_bbox_meter.update(pos_pseudo_bbox)
        ignore_ps_bbox_meter.update(ign_pseudo_bbox)
        # Prefix ST metrics so they are distinguishable in the shared logs.
        st_tb_dict = common_utils.add_prefix_to_dict(st_tb_dict, 'st_')
        disp_dict.update(common_utils.add_prefix_to_dict(st_disp_dict, 'st_'))
        disp_dict.update({'st_loss': "{:.3f}({:.3f})".format(st_loss_meter.val, st_loss_meter.avg),
                          'pos_ps_box': ps_bbox_meter.avg,
                          'ign_ps_box': ignore_ps_bbox_meter.avg})
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter += 1
        # log to console and tensorboard
        if rank == 0:
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter, pos_ps_box=ps_bbox_meter.val,
                                  ign_ps_box=ignore_ps_bbox_meter.val))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
                tb_log.add_scalar('train/st_loss', st_loss, accumulated_iter)
                tb_log.add_scalar('train/pos_ps_bbox', ps_bbox_meter.val, accumulated_iter)
                tb_log.add_scalar('train/ign_ps_bbox', ignore_ps_bbox_meter.val, accumulated_iter)
                for key, val in st_tb_dict.items():
                    tb_log.add_scalar('train/' + key, val, accumulated_iter)
    if rank == 0:
        pbar.close()
        # Epoch-level averages of pseudo-box statistics.
        tb_log.add_scalar('train/epoch_ign_ps_box', ignore_ps_bbox_meter.avg, accumulated_iter)
        tb_log.add_scalar('train/epoch_pos_ps_box', ps_bbox_meter.avg, accumulated_iter)
    return accumulated_iter
def train_model_st(model, optimizer, source_loader, target_loader, model_func, lr_scheduler, optim_cfg,
                   start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir, ps_label_dir,
                   source_sampler=None, target_sampler=None, lr_warmup_scheduler=None, ckpt_save_interval=1,
                   max_ckpt_save_num=50, merge_all_iters_to_one_epoch=False, logger=None, ema_model=None):
    """Self-training driver: periodically regenerates pseudo labels on the
    target domain, optionally intensifies data augmentation on a schedule
    (cfg.SELF_TRAIN.PROG_AUG), and trains one epoch at a time via
    ``train_one_epoch_st``.  Rank 0 writes a checkpoint every
    ``ckpt_save_interval`` epochs, keeping at most ``max_ckpt_save_num``.
    """
    accumulated_iter = start_iter
    source_reader = common_utils.DataReader(source_loader, source_sampler)
    source_reader.construct_iter()
    # for continue training.
    # if already exist generated pseudo label result
    ps_pkl = self_training_utils.check_already_exsit_pseudo_label(ps_label_dir, start_epoch)
    if ps_pkl is not None:
        logger.info('==> Loading pseudo labels from {}'.format(ps_pkl))
    # for continue training: replay the progressive-augmentation schedule so the
    # augmentor state matches what it would have been from epoch 0.
    if cfg.SELF_TRAIN.get('PROG_AUG', None) and cfg.SELF_TRAIN.PROG_AUG.ENABLED and \
            start_epoch > 0:
        for cur_epoch in range(start_epoch):
            if cur_epoch in cfg.SELF_TRAIN.PROG_AUG.UPDATE_AUG:
                aug_times = cfg.SELF_TRAIN.PROG_AUG.UPDATE_AUG.index(cur_epoch) + 1
                print ("***********update AUG times for continue training:**********", aug_times)
                target_loader.dataset.data_augmentor.re_prepare(
                    augmentor_configs=cfg.SELF_TRAIN.PROG_AUG.D_CFG if cfg.SELF_TRAIN.PROG_AUG.get('D_CFG', None) else None,
                    intensity=cfg.SELF_TRAIN.PROG_AUG.SCALE, aug_times=aug_times)
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True,
                     leave=(rank == 0)) as tbar:
        total_it_each_epoch = len(target_loader)
        if merge_all_iters_to_one_epoch:
            assert hasattr(target_loader.dataset, 'merge_all_iters_to_one_epoch')
            target_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
            total_it_each_epoch = len(target_loader) // max(total_epochs, 1)
        dataloader_iter = iter(target_loader)
        for cur_epoch in tbar:
            if target_sampler is not None:
                target_sampler.set_epoch(cur_epoch)
            source_reader.set_cur_epoch(cur_epoch)
            # train one epoch
            if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            # update pseudo label: on explicitly scheduled epochs, or on the
            # periodic interval (skipping epoch 0).
            if (cur_epoch in cfg.SELF_TRAIN.UPDATE_PSEUDO_LABEL) or \
                    ((cur_epoch % cfg.SELF_TRAIN.UPDATE_PSEUDO_LABEL_INTERVAL == 0)
                     and cur_epoch != 0):
                # Switch the dataset to inference mode while generating labels.
                target_loader.dataset.eval()
                print ("***********update pseudo label**********")
                self_training_utils.save_pseudo_label_epoch(
                    model, target_loader, rank,
                    leave_pbar=True, ps_label_dir=ps_label_dir, cur_epoch=cur_epoch
                )
                target_loader.dataset.train()
            # curriculum data augmentation
            if cfg.SELF_TRAIN.get('PROG_AUG', None) and cfg.SELF_TRAIN.PROG_AUG.ENABLED and \
                    (cur_epoch in cfg.SELF_TRAIN.PROG_AUG.UPDATE_AUG):
                aug_times = cfg.SELF_TRAIN.PROG_AUG.UPDATE_AUG.index(cur_epoch) + 1
                print ("***********update AUG times:**********", aug_times)
                target_loader.dataset.data_augmentor.re_prepare(
                    augmentor_configs=cfg.SELF_TRAIN.PROG_AUG.D_CFG if cfg.SELF_TRAIN.PROG_AUG.get('D_CFG', None) else None,
                    intensity=cfg.SELF_TRAIN.PROG_AUG.SCALE, aug_times=aug_times)
            accumulated_iter = train_one_epoch_st(
                model, optimizer, source_reader, target_loader, model_func,
                lr_scheduler=cur_scheduler,
                accumulated_iter=accumulated_iter, optim_cfg=optim_cfg,
                rank=rank, tbar=tbar, tb_log=tb_log,
                leave_pbar=(cur_epoch + 1 == total_epochs),
                total_it_each_epoch=total_it_each_epoch,
                dataloader_iter=dataloader_iter, ema_model=ema_model
            )
            # save trained model
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                # Rotate checkpoints: remove the oldest beyond the cap.
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                if ckpt_list.__len__() >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                state = checkpoint_state(model, optimizer, trained_epoch, accumulated_iter)
                save_checkpoint(state, filename=ckpt_name)
| 8,582
| 46.41989
| 125
|
py
|
3DTrans
|
3DTrans-master/tools/train_utils/train_utils.py
|
import glob
import os
import torch
import tqdm
import time
from torch.nn.utils import clip_grad_norm_
from pcdet.utils import common_utils, commu_utils
def train_one_epoch(model, optimizer, train_loader, model_func, lr_scheduler, accumulated_iter, optim_cfg,
                    rank, tbar, total_it_each_epoch, dataloader_iter, tb_log=None, leave_pbar=False):
    """Standard single-epoch training loop.

    Steps the per-iteration LR schedule, runs forward/backward with gradient
    clipping, and logs loss/lr/timings to the rank-0 progress bars and
    TensorBoard.  Returns the updated accumulated iteration counter.
    """
    # Restart the iterator when consuming the full loader this epoch.
    if total_it_each_epoch == len(train_loader):
        dataloader_iter = iter(train_loader)
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train', dynamic_ncols=True)
        # Timing meters: data loading / forward+backward / whole iteration.
        data_time = common_utils.AverageMeter()
        batch_time = common_utils.AverageMeter()
        forward_time = common_utils.AverageMeter()
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        try:
            batch = next(dataloader_iter)
        except StopIteration:
            # Loader exhausted mid-epoch: wrap around.
            dataloader_iter = iter(train_loader)
            batch = next(dataloader_iter)
            print('new iters')
        data_timer = time.time()
        cur_data_time = data_timer - end
        # LR is scheduled per global iteration, not per epoch.
        lr_scheduler.step(accumulated_iter)
        try:
            cur_lr = float(optimizer.lr)
        except:  # NOTE(review): bare except; falls back to param_groups lookup
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
        model.train()
        optimizer.zero_grad()
        loss, tb_dict, disp_dict = model_func(model, batch)
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter += 1
        cur_batch_time = time.time() - end
        # average reduce: mean the timings across distributed ranks
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr': cur_lr, 'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})', 'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss', loss, accumulated_iter)
                tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
                for key, val in tb_dict.items():
                    tb_log.add_scalar('train/' + key, val, accumulated_iter)
    if rank == 0:
        pbar.close()
    return accumulated_iter
def train_model(model, optimizer, train_loader, model_func, lr_scheduler, optim_cfg,
                start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir,
                source_sampler=None, lr_warmup_scheduler=None, ckpt_save_interval=1,
                max_ckpt_save_num=50, merge_all_iters_to_one_epoch=False):
    """Top-level multi-epoch training driver.

    Runs ``train_one_epoch`` per epoch (with optional LR warmup for the first
    ``optim_cfg.WARMUP_EPOCH`` epochs) and, on rank 0, writes a checkpoint
    every ``ckpt_save_interval`` epochs, keeping at most ``max_ckpt_save_num``
    files (oldest deleted first).
    """
    accumulated_iter = start_iter
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar:
        total_it_each_epoch = len(train_loader)
        if merge_all_iters_to_one_epoch:
            # Treat the whole run as one long epoch split evenly across epochs.
            assert hasattr(train_loader.dataset, 'merge_all_iters_to_one_epoch')
            train_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
            total_it_each_epoch = len(train_loader) // max(total_epochs, 1)
        dataloader_iter = iter(train_loader)
        for cur_epoch in tbar:
            if source_sampler is not None:
                # Re-seed the distributed sampler for per-epoch shuffling.
                source_sampler.set_epoch(cur_epoch)
            # train one epoch
            if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            accumulated_iter = train_one_epoch(
                model, optimizer, train_loader, model_func,
                lr_scheduler=cur_scheduler,
                accumulated_iter=accumulated_iter, optim_cfg=optim_cfg,
                rank=rank, tbar=tbar, tb_log=tb_log,
                leave_pbar=(cur_epoch + 1 == total_epochs),
                total_it_each_epoch=total_it_each_epoch,
                dataloader_iter=dataloader_iter
            )
            # save trained model
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                # Rotate checkpoints: delete the oldest beyond the cap.
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                if ckpt_list.__len__() >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                save_checkpoint(
                    checkpoint_state(model, optimizer, trained_epoch, accumulated_iter), filename=ckpt_name,
                )
def model_state_to_cpu(model_state):
    """Copy *model_state* into a new mapping of the same type (e.g.
    OrderedDict) with every tensor moved to the CPU."""
    cpu_copy = type(model_state)()
    for param_name in model_state:
        cpu_copy[param_name] = model_state[param_name].cpu()
    return cpu_copy
def checkpoint_state(model=None, optimizer=None, epoch=None, it=None):
    """Build a serializable checkpoint dict.

    Args:
        model: nn.Module or DDP wrapper; its state_dict is captured (optional).
        optimizer: torch optimizer; its state_dict is captured (optional).
        epoch: epoch counter to record.
        it: accumulated iteration counter to record.

    Returns:
        dict with keys 'epoch', 'it', 'model_state', 'optimizer_state',
        'version'; absent pieces are None.
    """
    optim_state = optimizer.state_dict() if optimizer is not None else None
    if model is not None:
        if isinstance(model, torch.nn.parallel.DistributedDataParallel):
            # Unwrap DDP and move weights to CPU so the file loads on any device.
            model_state = model_state_to_cpu(model.module.state_dict())
        else:
            model_state = model.state_dict()
    else:
        model_state = None
    try:
        import pcdet
        version = 'pcdet+' + pcdet.__version__
    except Exception:  # was a bare `except:`; narrowed so Ctrl-C is not swallowed
        version = 'none'
    return {'epoch': epoch, 'it': it, 'model_state': model_state, 'optimizer_state': optim_state, 'version': version}
def save_checkpoint(state, filename='checkpoint'):
    """Write *state* to '<filename>.pth' using torch.save.

    Args:
        state: checkpoint dict (see ``checkpoint_state``).
        filename: destination path WITHOUT the '.pth' extension (appended here).
    """
    # The unreachable `if False and 'optimizer_state' in state:` branch (which
    # would have written a separate '_optim.pth' file) was dead code; removed.
    filename = '{}.pth'.format(filename)
    torch.save(state, filename)
| 6,737
| 38.174419
| 132
|
py
|
3DTrans
|
3DTrans-master/tools/train_utils/train_active_target_utils.py
|
import glob
import os
import pickle
import torch
import tqdm
import time
from torch.nn.utils import clip_grad_norm_
from pcdet.utils import common_utils, commu_utils
from pcdet.utils import active_learning_utils
from pcdet.datasets import build_dataloader_ada
from pcdet.models import load_data_to_gpu
def train_detector(model, model_func, optimizer, lr_scheduler, source_loader, sample_loader, source_loader_iter, sample_loader_iter,
                   dist_train, optim_cfg, rank, total_it_each_epoch, accumulated_iter_detector, tb_log, tbar, leave_pbar=False):
    """Train the detector for ``total_it_each_epoch`` iterations with the domain
    discriminator frozen.

    Each iteration draws one batch from the sampled-source loader and one from
    the target-sample loader; the combined loss is
    ``loss_src + optim_cfg.SAMPLE_LOSS_SCALE * loss_sam``.  Returns the updated
    accumulated iteration counter.
    """
    model.train()
    # Enable gradients for the whole model, then re-freeze the discriminator
    # (order matters: the blanket True is applied first, then overridden).
    if dist_train:
        for p in model.module.parameters():
            p.requires_grad = True
        for p in model.module.discriminator.parameters():
            p.requires_grad = False
    else:
        for p in model.parameters():
            p.requires_grad = True
        for p in model.discriminator.parameters():
            p.requires_grad = False
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train_detector', dynamic_ncols=True)
        # Timing meters: data loading / forward+backward / whole iteration.
        data_time = common_utils.AverageMeter()
        batch_time = common_utils.AverageMeter()
        forward_time = common_utils.AverageMeter()
    forward_args = {
        'mode': 'train_detector'
    }
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        try:
            batch_src = next(source_loader_iter)
        except StopIteration:
            # Source loader exhausted: wrap around.
            source_loader_iter = iter(source_loader)
            batch_src = next(source_loader_iter)
            print('new source iter')
        try:
            batch_sample = next(sample_loader_iter)
        except StopIteration:
            # Sample loader exhausted: wrap around.
            sample_loader_iter = iter(sample_loader)
            batch_sample = next(sample_loader_iter)
            print('new sample iter')
        data_timer = time.time()
        cur_data_time = data_timer - end
        # Per-iteration LR schedule, indexed by the global iteration counter.
        lr_scheduler.step(accumulated_iter_detector)
        try:
            cur_lr = float(optimizer.lr)
        except:  # NOTE(review): bare except; falls back to param_groups lookup
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter_detector)
        optimizer.zero_grad()
        loss_sam, tb_dict_sam, disp_dict = model_func(model, batch_sample, **forward_args)
        loss_src, tb_dict_src, disp_dict = model_func(model, batch_src, **forward_args)
        # loss_sam, tb_dict_sam, disp_dict = model_func(model, batch_sample, forward_args)
        # Weighted sum: sampled-data loss scaled by SAMPLE_LOSS_SCALE.
        loss = loss_src + optim_cfg.SAMPLE_LOSS_SCALE * loss_sam
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter_detector += 1
        cur_batch_time = time.time() - end
        # Mean the timings across distributed ranks.
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr_detector': cur_lr,
                'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})',
                'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter_detector))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss_detector', loss, accumulated_iter_detector)
                tb_log.add_scalar('meta_data/learning_rate_detector', cur_lr, accumulated_iter_detector)
                for key, val in tb_dict_src.items():
                    tb_log.add_scalar('train/detector_src' + key, val, accumulated_iter_detector)
                for key, val in tb_dict_sam.items():
                    tb_log.add_scalar('train/detector_sam' + key, val, accumulated_iter_detector)
    if rank == 0:
        pbar.close()
    return accumulated_iter_detector
def train_discriminator(model, optimizer, lr_scheduler, source_loader, target_loader,
                        source_loader_iter, target_loader_iter, dist_train, total_it_each_epoch,
                        accumulated_iter_discriminator, optim_cfg, tb_log, rank, tbar, leave_pbar=False):
    """Train only the domain discriminator for ``total_it_each_epoch`` iterations.

    The detector backbone is frozen (requires_grad=False) and only
    ``model.discriminator`` parameters receive gradients.  Each iteration
    forwards one source batch (domain flag ``source=True``) and one target
    batch (``source=False``) in 'train_discriminator' mode and averages the
    two adversarial losses.

    Returns:
        int: the updated ``accumulated_iter_discriminator`` counter.
    """
    model.train()
    # Freeze everything except the discriminator head.  Under DDP the real
    # module lives at ``model.module``.
    if dist_train:
        for p in model.module.parameters():
            p.requires_grad = False
        for p in model.module.discriminator.parameters():
            p.requires_grad = True
    else:
        for p in model.parameters():
            p.requires_grad = False
        for p in model.discriminator.parameters():
            p.requires_grad = True
    # Progress bar and timing meters only on the main process.
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train_discriminator', dynamic_ncols=True)
        data_time = common_utils.AverageMeter()
        batch_time = common_utils.AverageMeter()
        forward_time = common_utils.AverageMeter()
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        # Draw the next source batch, restarting the iterator once exhausted.
        try:
            batch_src = next(source_loader_iter)
        except StopIteration:
            source_loader_iter = iter(source_loader)
            batch_src = next(source_loader_iter)
            print('new source iter')
        # Same for the target batch.
        try:
            batch_tar = next(target_loader_iter)
        except StopIteration:
            target_loader_iter = iter(target_loader)
            batch_tar = next(target_loader_iter)
            print('new target iter')
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter_discriminator)
        # Some scheduler-wrapped optimizers expose ``lr`` directly; otherwise
        # fall back to the first param group.
        try:
            cur_lr = float(optimizer.lr)
        except:
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter_discriminator)
        optimizer.zero_grad()
        # Source batch: discriminator should predict "source" (source=True).
        load_data_to_gpu(batch_src)
        forward_args = {
            'mode': 'train_discriminator',
            'source': True
        }
        loss_src = model(batch_src, **forward_args)
        # Target batch: discriminator should predict "target" (source=False).
        load_data_to_gpu(batch_tar)
        forward_args = {
            'mode': 'train_discriminator',
            'source': False
        }
        loss_tar = model(batch_tar, **forward_args)
        # Symmetric average over the two domains.
        loss = (loss_src + loss_tar) / 2
        tb_dict = {
            'discriminator_loss': loss.item()
        }
        disp_dict = {}
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter_discriminator += 1
        cur_batch_time = time.time() - end
        # Average per-iteration timings across distributed workers.
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard (rank 0 only)
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr_discriminator': cur_lr,
                'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})',
                'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter_discriminator))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss_discriminator', loss, accumulated_iter_discriminator)
                tb_log.add_scalar('meta_data/learning_rate_discriminator', cur_lr, accumulated_iter_discriminator)
                for key, val in tb_dict.items():
                    tb_log.add_scalar('train/' + key, val, accumulated_iter_discriminator)
    if rank == 0:
        pbar.close()
    return accumulated_iter_discriminator
def train_active_model_dual_tar(model, optimizer, source_train_loader, target_train_loader, source_sample_loader, model_func,
                                lr_scheduler, optim_cfg, start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir,
                                sample_epoch, annotation_budget, target_file_path, sample_save_path, cfg, batch_size, workers, dist_train,
                                source_sampler=None, target_sampler=None, source_sample_sampler=None, lr_warmup_scheduler=None, ckpt_save_interval=1,
                                max_ckpt_save_num=50, merge_all_iters_to_one_epoch=False, logger=None, ema_model=None):
    """Active-domain-adaptation training driver (dual discriminator, target sampling).

    Per epoch: on epochs listed in ``sample_epoch`` it (1) trains the domain
    discriminator, (2) scores target frames and actively samples
    ``annotation_budget`` of them, and (3) rebuilds the sample/target
    dataloaders from the updated frame lists.  Every epoch it then trains the
    detector on ``source_sample_loader`` + the actively-sampled frames and
    saves a checkpoint.

    Note: ``optimizer`` and ``lr_scheduler`` are 2-element sequences —
    index 0 for the detector, index 1 for the discriminator.
    """
    accumulated_iter_detector, accumulated_iter_discriminator = start_iter, start_iter
    target_name = cfg['DATA_CONFIG_TAR']['DATASET']
    # OSS (object-storage) paths need a different listing backend.
    oss = True if cfg.DATA_CONFIG_TAR.get('OSS_PATH', None) is not None else False
    target_list = active_learning_utils.get_target_list(target_file_path, oss=oss)
    sample_list = []
    sample_train_loader = None
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar:
        total_it_each_epoch = len(source_train_loader)
        if merge_all_iters_to_one_epoch:
            assert hasattr(source_train_loader.dataset, 'merge_all_iters_to_one_epoch')
            source_train_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
            total_it_each_epoch = len(source_train_loader) // max(total_epochs, 1)
        dataloader_iter_src = iter(source_train_loader)
        dataloader_iter_tar = iter(target_train_loader)
        for cur_epoch in tbar:
            if source_sampler is not None:
                source_sampler.set_epoch(cur_epoch)
            # train one epoch
            # NOTE(review): ``cur_scheduler`` is assigned but never used below;
            # the per-phase schedulers lr_scheduler[0]/[1] are used instead.
            if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            # Phase 1 (sampling epochs only): adversarially train the
            # discriminator so its scores are meaningful for frame selection.
            if cur_epoch in sample_epoch:
                accumulated_iter_discriminator = train_discriminator(
                    model,
                    optimizer[1],
                    lr_scheduler[1],
                    source_train_loader,
                    target_train_loader,
                    dataloader_iter_src,
                    dataloader_iter_tar,
                    dist_train,
                    len(target_train_loader),
                    accumulated_iter_discriminator,
                    optim_cfg.DISCRIMINATOR,
                    tb_log, rank, tbar
                )
            # Phase 2 (sampling epochs only): score target frames, pick the
            # annotation budget, and rebuild both dataloaders from the
            # updated sampled / remaining-target frame lists.
            if cur_epoch in sample_epoch:
                frame_score = active_learning_utils.active_evaluate_dual(model, target_train_loader, rank, domain='target')
                sampled_frame_id, _ = active_learning_utils.active_sample_tar(frame_score, budget=annotation_budget, logger=logger)
                sample_list, info_path = active_learning_utils.update_sample_list_dual(
                    sample_list, target_list, sampled_frame_id, cur_epoch, sample_save_path, target_name, rank, domain='target'
                )
                target_list, target_info_path = active_learning_utils.update_target_list(target_list, sampled_frame_id, cur_epoch, sample_save_path, target_name, rank)
                sample_train_set, sample_train_loader, sample_train_sampler = build_dataloader_ada(
                    dataset_cfg=cfg.DATA_CONFIG_SAMPLE,
                    class_names=cfg.DATA_CONFIG_SAMPLE.CLASS_NAMES,
                    batch_size=batch_size,
                    dist=dist_train, workers=workers,
                    logger=logger,
                    training=True,
                    info_path=info_path,
                    merge_all_iters_to_one_epoch=merge_all_iters_to_one_epoch,
                    total_epochs=total_epochs-cur_epoch
                )
                target_train_set, target_train_loader, target_train_sampler = build_dataloader_ada(
                    dataset_cfg=cfg.DATA_CONFIG_TAR,
                    class_names=cfg.DATA_CONFIG_TAR.CLASS_NAMES,
                    batch_size=batch_size,
                    dist=dist_train, workers=workers,
                    logger=logger,
                    training=True,
                    info_path=target_info_path,
                    merge_all_iters_to_one_epoch=merge_all_iters_to_one_epoch,
                    total_epochs=total_epochs-cur_epoch
                )
            # Refresh epoch-local iterators over the (possibly rebuilt) loaders.
            # NOTE(review): before the first sampling epoch sample_train_loader
            # is still None — presumably sample_epoch includes the first epoch;
            # verify against the caller's config.
            dataloader_iter_tar = iter(target_train_loader)
            dataloader_iter_sample = iter(sample_train_loader) if sample_train_loader is not None else None
            dataloader_iter_src_sample = iter(source_sample_loader)
            # Phase 3 (every epoch): train the detector on source samples plus
            # the actively-annotated target frames.
            accumulated_iter_detector = train_detector(
                model,
                model_func,
                optimizer[0],
                lr_scheduler[0],
                source_sample_loader,
                sample_train_loader,
                dataloader_iter_src_sample,
                dataloader_iter_sample,
                dist_train,
                optim_cfg,
                rank,
                len(source_sample_loader),
                accumulated_iter_detector,
                tb_log, tbar
            )
            # save trained model
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                # Keep at most max_ckpt_save_num checkpoints, dropping oldest.
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                if ckpt_list.__len__() >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                save_checkpoint(
                    checkpoint_state(model, epoch=trained_epoch, it=accumulated_iter_detector), filename=ckpt_name,
                )
def model_state_to_cpu(model_state):
    """Return a copy of *model_state* with every tensor moved to the CPU.

    The container type of the input (typically an OrderedDict) is preserved
    so the result can be fed straight back into ``load_state_dict``.
    """
    cpu_state = type(model_state)()  # keep the original mapping type
    for name, tensor in model_state.items():
        cpu_state[name] = tensor.cpu()
    return cpu_state
def checkpoint_state(model=None, optimizer=None, epoch=None, it=None):
    """Assemble a serializable checkpoint dictionary.

    Args:
        model: detector (plain module or DDP-wrapped) or None.  DDP models
            are unwrapped and their state moved to CPU so the checkpoint can
            be loaded without the original GPU placement.
        optimizer: optimizer whose ``state_dict`` should be recorded, or None.
        epoch: epoch number to record, or None.
        it: accumulated iteration counter to record, or None.

    Returns:
        dict with keys 'epoch', 'it', 'model_state', 'optimizer_state',
        'version'.
    """
    optim_state = optimizer.state_dict() if optimizer is not None else None
    if model is not None:
        if isinstance(model, torch.nn.parallel.DistributedDataParallel):
            model_state = model_state_to_cpu(model.module.state_dict())
        else:
            model_state = model.state_dict()
    else:
        model_state = None
    # Record the pcdet version for reproducibility; the bare ``except:`` was
    # narrowed so unrelated errors (e.g. KeyboardInterrupt) are not swallowed.
    try:
        import pcdet
        version = 'pcdet+' + pcdet.__version__
    except (ImportError, AttributeError):
        version = 'none'
    return {'epoch': epoch, 'it': it, 'model_state': model_state, 'optimizer_state': optim_state, 'version': version}
def save_checkpoint(state, filename='checkpoint'):
    """Serialize *state* to ``'<filename>.pth'`` with ``torch.save``.

    Args:
        state: checkpoint dict (see :func:`checkpoint_state`).
        filename: path prefix; '.pth' is appended automatically.

    An earlier variant split 'optimizer_state' into a separate
    '<filename>_optim.pth'; that path was disabled behind ``if False`` and
    has been removed as dead code — behavior is unchanged.
    """
    filename = '{}.pth'.format(filename)
    torch.save(state, filename)
| 15,815
| 40.952255
| 167
|
py
|
3DTrans
|
3DTrans-master/tools/train_utils/active_with_st3d_utils.py
|
import glob
import os
import torch
import tqdm
import time
from torch.nn.utils import clip_grad_norm_
from pcdet.utils import common_utils, commu_utils
from pcdet.utils import active_learning_utils, self_training_utils
from pcdet.datasets import build_dataloader_ada
from pcdet.models import load_data_to_gpu
def train_detector_st3d(model, model_func, optimizer, lr_scheduler, source_loader, sample_loader, target_loader, source_loader_iter, sample_loader_iter,
                        target_loader_iter, dist_train, optim_cfg, rank, total_it_each_epoch, accumulated_iter_detector, tb_log, tbar, leave_pbar=False):
    """Train the detector for one epoch over sampled + pseudo-labelled target data.

    Every iteration pairs one source batch with one "sample" batch: the first
    ``len(sample_loader)`` iterations draw actively-annotated frames, the
    remainder draw (pseudo-labelled) target frames.  Loss is
    ``loss_src + SAMPLE_LOSS_SCALE * loss_sam``.  The domain discriminator is
    kept frozen.  Returns the updated ``accumulated_iter_detector``.
    """
    # Override the caller-supplied count: one pass over the sampled frames
    # followed by one pass over the target frames.
    total_it_each_epoch = len(sample_loader) + len(target_loader)
    model.train()
    # Train the detector; freeze the discriminator head (DDP keeps the real
    # module at ``model.module``).
    if dist_train:
        for p in model.module.parameters():
            p.requires_grad = True
        for p in model.module.discriminator.parameters():
            p.requires_grad = False
    else:
        for p in model.parameters():
            p.requires_grad = True
        for p in model.discriminator.parameters():
            p.requires_grad = False
    # Progress bar and timing meters only on the main process.
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train_detector', dynamic_ncols=True)
        data_time = common_utils.AverageMeter()
        batch_time = common_utils.AverageMeter()
        forward_time = common_utils.AverageMeter()
    forward_args = {
        'mode': 'train_detector'
    }
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        # Source batch every iteration; restart the iterator when exhausted.
        try:
            batch_src = next(source_loader_iter)
        except StopIteration:
            source_loader_iter = iter(source_loader)
            batch_src = next(source_loader_iter)
            print('new source iter')
        # First len(sample_loader) iterations use annotated samples, the
        # rest use target frames — both feed the same 'batch_sample' slot.
        if cur_it < len(sample_loader):
            try:
                batch_sample = next(sample_loader_iter)
            except StopIteration:
                sample_loader_iter = iter(sample_loader)
                batch_sample = next(sample_loader_iter)
                print('new sample iter')
        else:
            try:
                batch_sample = next(target_loader_iter)
            except StopIteration:
                target_loader_iter = iter(target_loader)
                batch_sample = next(target_loader_iter)
                print('new target iter')
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter_detector)
        # Some optimizers expose ``lr`` directly; otherwise use param_groups.
        try:
            cur_lr = float(optimizer.lr)
        except:
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter_detector)
        optimizer.zero_grad()
        loss_sam, tb_dict_sam, disp_dict = model_func(model, batch_sample, **forward_args)
        loss_src, tb_dict_src, disp_dict = model_func(model, batch_src, **forward_args)
        # loss_sam, tb_dict_sam, disp_dict = model_func(model, batch_sample, forward_args)
        # Weighted mix of source supervision and sample/target supervision.
        loss = loss_src + optim_cfg.SAMPLE_LOSS_SCALE * loss_sam
        # loss = loss_sam
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter_detector += 1
        cur_batch_time = time.time() - end
        # Average per-iteration timings across distributed workers.
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard (rank 0 only)
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr_detector': cur_lr,
                'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})',
                'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter_detector))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss_detector', loss, accumulated_iter_detector)
                tb_log.add_scalar('meta_data/learning_rate_detector', cur_lr, accumulated_iter_detector)
                for key, val in tb_dict_src.items():
                    tb_log.add_scalar('train/detector_src' + key, val, accumulated_iter_detector)
                for key, val in tb_dict_sam.items():
                    tb_log.add_scalar('train/detector_sam' + key, val, accumulated_iter_detector)
    if rank == 0:
        pbar.close()
    return accumulated_iter_detector
def train_detector_st3d_1(model, model_func, optimizer, lr_scheduler, source_loader, sample_loader, target_loader, source_loader_iter, sample_loader_iter,
                          target_loader_iter, dist_train, optim_cfg, rank, total_it_each_epoch, accumulated_iter_detector, tb_log, tbar, leave_pbar=False):
    """Variant of :func:`train_detector_st3d` that drops the source domain.

    Every iteration pairs one (pseudo-labelled) target batch with one
    actively-annotated sample batch; loss is
    ``loss_tar + SAMPLE_LOSS_SCALE * loss_sam``.  ``source_loader`` /
    ``source_loader_iter`` are accepted for signature parity but unused.
    Returns the updated ``accumulated_iter_detector``.
    """
    # One epoch = one pass over the target loader.
    total_it_each_epoch = len(target_loader)
    model.train()
    # Train the detector; freeze the discriminator head.
    if dist_train:
        for p in model.module.parameters():
            p.requires_grad = True
        for p in model.module.discriminator.parameters():
            p.requires_grad = False
    else:
        for p in model.parameters():
            p.requires_grad = True
        for p in model.discriminator.parameters():
            p.requires_grad = False
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train_detector', dynamic_ncols=True)
        data_time = common_utils.AverageMeter()
        batch_time = common_utils.AverageMeter()
        forward_time = common_utils.AverageMeter()
    forward_args = {
        'mode': 'train_detector'
    }
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        # Target batch every iteration; restart the iterator when exhausted.
        try:
            batch_tar = next(target_loader_iter)
        except StopIteration:
            target_loader_iter = iter(target_loader)
            batch_tar = next(target_loader_iter)
            print('new target iter')
        # Annotated sample batch every iteration.
        try:
            batch_sample = next(sample_loader_iter)
        except StopIteration:
            sample_loader_iter = iter(sample_loader)
            batch_sample = next(sample_loader_iter)
            print('new sample iter')
        # else:
        #     try:
        #         batch_sample = next(target_loader_iter)
        #     except StopIteration:
        #         target_loader_iter = iter(target_loader)
        #         batch_sample = next(target_loader_iter)
        #         print('new target iter')
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter_detector)
        # Some optimizers expose ``lr`` directly; otherwise use param_groups.
        try:
            cur_lr = float(optimizer.lr)
        except:
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter_detector)
        optimizer.zero_grad()
        loss_sam, tb_dict_sam, disp_dict = model_func(model, batch_sample, **forward_args)
        loss_tar, tb_dict_tar, disp_dict = model_func(model, batch_tar, **forward_args)
        # loss_sam, tb_dict_sam, disp_dict = model_func(model, batch_sample, forward_args)
        # Weighted mix of target (pseudo-label) and sample supervision.
        loss = loss_tar + optim_cfg.SAMPLE_LOSS_SCALE * loss_sam
        # loss = loss_sam
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter_detector += 1
        cur_batch_time = time.time() - end
        # Average per-iteration timings across distributed workers.
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard (rank 0 only)
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr_detector': cur_lr,
                'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})',
                'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter_detector))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss_detector', loss, accumulated_iter_detector)
                tb_log.add_scalar('meta_data/learning_rate_detector', cur_lr, accumulated_iter_detector)
                # NOTE(review): target-loss stats are logged under the
                # 'detector_src' prefix — possibly a copy-paste leftover.
                for key, val in tb_dict_tar.items():
                    tb_log.add_scalar('train/detector_src' + key, val, accumulated_iter_detector)
                for key, val in tb_dict_sam.items():
                    tb_log.add_scalar('train/detector_sam' + key, val, accumulated_iter_detector)
    if rank == 0:
        pbar.close()
    return accumulated_iter_detector
def train_detector_st3d_2(model, model_func, optimizer, lr_scheduler, source_loader, sample_loader, target_loader, source_loader_iter, sample_loader_iter,
                          target_loader_iter, dist_train, optim_cfg, rank, total_it_each_epoch, accumulated_iter_detector, tb_log, tbar, leave_pbar=False):
    """Variant of :func:`train_detector_st3d` using all three domains per step.

    Every iteration draws one source, one actively-annotated sample, and one
    (pseudo-labelled) target batch; loss is
    ``loss_src + SAMPLE_LOSS_SCALE * loss_sam + loss_tar``.
    Returns the updated ``accumulated_iter_detector``.
    """
    # One epoch = sampled frames plus target frames worth of iterations.
    total_it_each_epoch = len(sample_loader) + len(target_loader)
    model.train()
    # Train the detector; freeze the discriminator head.
    if dist_train:
        for p in model.module.parameters():
            p.requires_grad = True
        for p in model.module.discriminator.parameters():
            p.requires_grad = False
    else:
        for p in model.parameters():
            p.requires_grad = True
        for p in model.discriminator.parameters():
            p.requires_grad = False
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train_detector', dynamic_ncols=True)
        data_time = common_utils.AverageMeter()
        batch_time = common_utils.AverageMeter()
        forward_time = common_utils.AverageMeter()
    forward_args = {
        'mode': 'train_detector'
    }
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        # Draw one batch per domain, restarting each iterator when exhausted.
        try:
            batch_src = next(source_loader_iter)
        except StopIteration:
            source_loader_iter = iter(source_loader)
            batch_src = next(source_loader_iter)
            print('new source iter')
        try:
            batch_sample = next(sample_loader_iter)
        except StopIteration:
            sample_loader_iter = iter(sample_loader)
            batch_sample = next(sample_loader_iter)
            print('new sample iter')
        try:
            batch_tar = next(target_loader_iter)
        except StopIteration:
            target_loader_iter = iter(target_loader)
            batch_tar = next(target_loader_iter)
            print('new target iter')
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter_detector)
        # Some optimizers expose ``lr`` directly; otherwise use param_groups.
        try:
            cur_lr = float(optimizer.lr)
        except:
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter_detector)
        optimizer.zero_grad()
        loss_sam, tb_dict_sam, disp_dict = model_func(model, batch_sample, **forward_args)
        loss_tar, tb_dict_tar, disp_dict = model_func(model, batch_tar, **forward_args)
        loss_src, tb_dict_src, disp_dict = model_func(model, batch_src, **forward_args)
        # loss_sam, tb_dict_sam, disp_dict = model_func(model, batch_sample, forward_args)
        # Three-way mix: only the sample term is re-weighted.
        loss = loss_src + optim_cfg.SAMPLE_LOSS_SCALE * loss_sam + loss_tar
        # loss = loss_sam
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter_detector += 1
        cur_batch_time = time.time() - end
        # Average per-iteration timings across distributed workers.
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard (rank 0 only)
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr_detector': cur_lr,
                'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})',
                'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter_detector))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss_detector', loss, accumulated_iter_detector)
                tb_log.add_scalar('meta_data/learning_rate_detector', cur_lr, accumulated_iter_detector)
                # NOTE(review): tb_dict_tar is computed but never logged here.
                for key, val in tb_dict_src.items():
                    tb_log.add_scalar('train/detector_src' + key, val, accumulated_iter_detector)
                for key, val in tb_dict_sam.items():
                    tb_log.add_scalar('train/detector_sam' + key, val, accumulated_iter_detector)
    if rank == 0:
        pbar.close()
    return accumulated_iter_detector
def train_discriminator(model, optimizer, lr_scheduler, source_loader, target_loader,
                        source_loader_iter, target_loader_iter, dist_train, total_it_each_epoch,
                        accumulated_iter_discriminator, optim_cfg, tb_log, rank, tbar, leave_pbar=False):
    """Train only the domain discriminator for ``total_it_each_epoch`` iterations.

    The detector backbone is frozen (requires_grad=False) and only
    ``model.discriminator`` parameters receive gradients.  Each iteration
    forwards one source batch (domain flag ``source=True``) and one target
    batch (``source=False``) in 'train_discriminator' mode and averages the
    two adversarial losses.

    Returns:
        int: the updated ``accumulated_iter_discriminator`` counter.
    """
    model.train()
    # Freeze everything except the discriminator head.  Under DDP the real
    # module lives at ``model.module``.
    if dist_train:
        for p in model.module.parameters():
            p.requires_grad = False
        for p in model.module.discriminator.parameters():
            p.requires_grad = True
    else:
        for p in model.parameters():
            p.requires_grad = False
        for p in model.discriminator.parameters():
            p.requires_grad = True
    # Progress bar and timing meters only on the main process.
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train_discriminator', dynamic_ncols=True)
        data_time = common_utils.AverageMeter()
        batch_time = common_utils.AverageMeter()
        forward_time = common_utils.AverageMeter()
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        # Draw the next source batch, restarting the iterator once exhausted.
        try:
            batch_src = next(source_loader_iter)
        except StopIteration:
            source_loader_iter = iter(source_loader)
            batch_src = next(source_loader_iter)
            print('new source iter')
        # Same for the target batch.
        try:
            batch_tar = next(target_loader_iter)
        except StopIteration:
            target_loader_iter = iter(target_loader)
            batch_tar = next(target_loader_iter)
            print('new target iter')
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter_discriminator)
        # Some scheduler-wrapped optimizers expose ``lr`` directly; otherwise
        # fall back to the first param group.
        try:
            cur_lr = float(optimizer.lr)
        except:
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter_discriminator)
        optimizer.zero_grad()
        # Source batch: discriminator should predict "source" (source=True).
        load_data_to_gpu(batch_src)
        forward_args = {
            'mode': 'train_discriminator',
            'source': True
        }
        loss_src = model(batch_src, **forward_args)
        # Target batch: discriminator should predict "target" (source=False).
        load_data_to_gpu(batch_tar)
        forward_args = {
            'mode': 'train_discriminator',
            'source': False
        }
        loss_tar = model(batch_tar, **forward_args)
        # Symmetric average over the two domains.
        loss = (loss_src + loss_tar) / 2
        tb_dict = {
            'discriminator_loss': loss.item()
        }
        disp_dict = {}
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter_discriminator += 1
        cur_batch_time = time.time() - end
        # Average per-iteration timings across distributed workers.
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard (rank 0 only)
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr_discriminator': cur_lr,
                'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})',
                'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter_discriminator))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss_discriminator', loss, accumulated_iter_discriminator)
                tb_log.add_scalar('meta_data/learning_rate_discriminator', cur_lr, accumulated_iter_discriminator)
                for key, val in tb_dict.items():
                    tb_log.add_scalar('train/' + key, val, accumulated_iter_discriminator)
    if rank == 0:
        pbar.close()
    return accumulated_iter_discriminator
def train_detector(model, model_func, optimizer, lr_scheduler, source_loader, sample_loader, source_loader_iter, sample_loader_iter,
                   dist_train, optim_cfg, rank, total_it_each_epoch, accumulated_iter_detector, tb_log, tbar, leave_pbar=False):
    """Train the detector on source + actively-annotated sample batches.

    Each of the ``total_it_each_epoch`` iterations draws one source batch and
    one sample batch and optimizes
    ``loss_src + SAMPLE_LOSS_SCALE * loss_sam``.  The domain discriminator is
    kept frozen.  Returns the updated ``accumulated_iter_detector``.
    """
    model.train()
    # Train the detector; freeze the discriminator head (DDP keeps the real
    # module at ``model.module``).
    if dist_train:
        for p in model.module.parameters():
            p.requires_grad = True
        for p in model.module.discriminator.parameters():
            p.requires_grad = False
    else:
        for p in model.parameters():
            p.requires_grad = True
        for p in model.discriminator.parameters():
            p.requires_grad = False
    # Progress bar and timing meters only on the main process.
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train_detector', dynamic_ncols=True)
        data_time = common_utils.AverageMeter()
        batch_time = common_utils.AverageMeter()
        forward_time = common_utils.AverageMeter()
    forward_args = {
        'mode': 'train_detector'
    }
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        # Draw one source and one sample batch, restarting iterators as needed.
        try:
            batch_src = next(source_loader_iter)
        except StopIteration:
            source_loader_iter = iter(source_loader)
            batch_src = next(source_loader_iter)
            print('new source iter')
        try:
            batch_sample = next(sample_loader_iter)
        except StopIteration:
            sample_loader_iter = iter(sample_loader)
            batch_sample = next(sample_loader_iter)
            print('new sample iter')
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter_detector)
        # Some optimizers expose ``lr`` directly; otherwise use param_groups.
        try:
            cur_lr = float(optimizer.lr)
        except:
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter_detector)
        optimizer.zero_grad()
        loss_sam, tb_dict_sam, disp_dict = model_func(model, batch_sample, **forward_args)
        loss_src, tb_dict_src, disp_dict = model_func(model, batch_src, **forward_args)
        # loss_sam, tb_dict_sam, disp_dict = model_func(model, batch_sample, forward_args)
        # Weighted mix of source supervision and sampled-frame supervision.
        loss = loss_src + optim_cfg.SAMPLE_LOSS_SCALE * loss_sam
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter_detector += 1
        cur_batch_time = time.time() - end
        # Average per-iteration timings across distributed workers.
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard (rank 0 only)
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr_detector': cur_lr,
                'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})',
                'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter_detector))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss_detector', loss, accumulated_iter_detector)
                tb_log.add_scalar('meta_data/learning_rate_detector', cur_lr, accumulated_iter_detector)
                for key, val in tb_dict_src.items():
                    tb_log.add_scalar('train/detector_src' + key, val, accumulated_iter_detector)
                for key, val in tb_dict_sam.items():
                    tb_log.add_scalar('train/detector_sam' + key, val, accumulated_iter_detector)
    if rank == 0:
        pbar.close()
    return accumulated_iter_detector
def train_active_with_st3d(model, optimizer, source_train_loader, target_train_loader, source_sample_loader, model_func,
                           lr_scheduler, optim_cfg, start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir,
                           sample_epoch, annotation_budget, target_file_path, sample_save_path, ps_label_dir, cfg, batch_size, workers, dist_train,
                           source_sampler=None, target_sampler=None, source_sample_sampler=None, lr_warmup_scheduler=None, ckpt_save_interval=1,
                           max_ckpt_save_num=50, merge_all_iters_to_one_epoch=False, logger=None, ema_model=None):
    """Active learning combined with ST3D-style self-training.

    Per epoch: pseudo labels for the target set are (re)generated at epochs
    20/30/40; on epochs in ``sample_epoch`` the discriminator is trained and
    target frames are actively sampled, rebuilding the sample/target loaders.
    Detector training uses only source+sample data before epoch 20 and the
    three-way source+sample+target objective afterwards.

    Note: ``optimizer`` and ``lr_scheduler`` are 2-element sequences —
    index 0 for the detector, index 1 for the discriminator.
    """
    accumulated_iter_detector, accumulated_iter_discriminator = start_iter, start_iter
    target_name = cfg['DATA_CONFIG_TAR']['DATASET']
    target_list = active_learning_utils.get_target_list(target_file_path, oss=True)
    sample_list = []
    sample_train_loader = None
    # Resume support: reuse pseudo labels already produced for this epoch.
    ps_pkl = self_training_utils.check_already_exsit_pseudo_label(ps_label_dir, start_epoch)
    if ps_pkl is not None:
        logger.info('==> Loading pseudo labels from {}'.format(ps_pkl))
    # Resume support: replay the progressive-augmentation schedule so the
    # augmentor intensity matches where a previous run left off.
    if cfg.SELF_TRAIN.get('PROG_AUG', None) and cfg.SELF_TRAIN.PROG_AUG.ENABLED and \
            start_epoch > 0:
        for cur_epoch in range(start_epoch):
            if cur_epoch in cfg.SELF_TRAIN.PROG_AUG.UPDATE_AUG:
                aug_times = cfg.SELF_TRAIN.PROG_AUG.UPDATE_AUG.index(cur_epoch) + 1
                print ("***********update AUG times for continue training:**********", aug_times)
                target_train_loader.dataset.data_augmentor.re_prepare(
                    augmentor_configs=cfg.SELF_TRAIN.PROG_AUG.D_CFG if cfg.SELF_TRAIN.PROG_AUG.get('D_CFG', None) else None,
                    intensity=cfg.SELF_TRAIN.PROG_AUG.SCALE, aug_times=aug_times)
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar:
        total_it_each_epoch = len(source_train_loader)
        if merge_all_iters_to_one_epoch:
            assert hasattr(source_train_loader.dataset, 'merge_all_iters_to_one_epoch')
            source_train_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
            total_it_each_epoch = len(source_train_loader) // max(total_epochs, 1)
        dataloader_iter_src = iter(source_train_loader)
        dataloader_iter_tar = iter(target_train_loader)
        for cur_epoch in tbar:
            if source_sampler is not None:
                source_sampler.set_epoch(cur_epoch)
            # train one epoch
            # NOTE(review): ``cur_scheduler`` is assigned but never used below;
            # the per-phase schedulers lr_scheduler[0]/[1] are used instead.
            if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            # Refresh pseudo labels at fixed epochs (hard-coded schedule).
            if cur_epoch in [20, 30, 40]:
                cfg.DATA_CONFIG_TAR.USE_PSEUDO_LABEL = True
                target_train_loader.dataset.eval()
                print("***********update pseudo label**********")
                self_training_utils.save_pseudo_label_epoch(
                    model, target_train_loader, rank,
                    leave_pbar=True, ps_label_dir=ps_label_dir, cur_epoch=cur_epoch
                )
                target_train_loader.dataset.train()
                # All ranks wait until pseudo labels are written.
                commu_utils.synchronize()
            # Sampling epochs: adversarially train the discriminator so its
            # scores are meaningful for frame selection.
            if cur_epoch in sample_epoch:
                accumulated_iter_discriminator = train_discriminator(
                    model,
                    optimizer[1],
                    lr_scheduler[1],
                    source_train_loader,
                    target_train_loader,
                    dataloader_iter_src,
                    dataloader_iter_tar,
                    dist_train,
                    len(target_train_loader),
                    accumulated_iter_discriminator,
                    optim_cfg.DISCRIMINATOR,
                    tb_log, rank, tbar
                )
            # Sampling epochs: score target frames, spend the annotation
            # budget, and rebuild both dataloaders from the updated lists.
            if cur_epoch in sample_epoch:
                frame_score = active_learning_utils.active_evaluate_dual(model, target_train_loader, rank, domain='target')
                sampled_frame_id, _ = active_learning_utils.active_sample_tar(frame_score, budget=annotation_budget, logger=logger)
                sample_list, info_path = active_learning_utils.update_sample_list_dual(
                    sample_list, target_list, sampled_frame_id, cur_epoch, sample_save_path, target_name, rank, domain='target'
                )
                target_list, target_info_path = active_learning_utils.update_target_list(target_list, sampled_frame_id, cur_epoch, sample_save_path, target_name, rank)
                sample_train_set, sample_train_loader, sample_train_sampler = build_dataloader_ada(
                    dataset_cfg=cfg.DATA_CONFIG_SAMPLE,
                    class_names=cfg.DATA_CONFIG_SAMPLE.CLASS_NAMES,
                    batch_size=batch_size,
                    dist=dist_train, workers=workers,
                    logger=logger,
                    training=True,
                    info_path=info_path,
                    merge_all_iters_to_one_epoch=merge_all_iters_to_one_epoch,
                    total_epochs=total_epochs-cur_epoch
                )
                target_train_set, target_train_loader, target_train_sampler = build_dataloader_ada(
                    dataset_cfg=cfg.DATA_CONFIG_TAR,
                    class_names=cfg.DATA_CONFIG_TAR.CLASS_NAMES,
                    batch_size=batch_size,
                    dist=dist_train, workers=workers,
                    logger=logger,
                    training=True,
                    info_path=target_info_path,
                    merge_all_iters_to_one_epoch=merge_all_iters_to_one_epoch,
                    total_epochs=total_epochs-cur_epoch
                )
            # Refresh epoch-local iterators over the (possibly rebuilt) loaders.
            # NOTE(review): before the first sampling epoch sample_train_loader
            # is still None — presumably sample_epoch includes the first epoch;
            # verify against the caller's config.
            dataloader_iter_tar = iter(target_train_loader)
            dataloader_iter_sample = iter(sample_train_loader) if sample_train_loader is not None else None
            dataloader_iter_src_sample = iter(source_sample_loader)
            # if cfg.SELF_TRAIN.get('PROG_AUG', None) and cfg.SELF_TRAIN.PROG_AUG.ENABLED and \
            #         (cur_epoch in cfg.SELF_TRAIN.PROG_AUG.UPDATE_AUG):
            #     aug_times = cfg.SELF_TRAIN.PROG_AUG.UPDATE_AUG.index(cur_epoch) + 1
            #     print ("***********update AUG times:**********", aug_times)
            #     target_train_loader.dataset.data_augmentor.re_prepare(
            #         augmentor_configs=cfg.SELF_TRAIN.PROG_AUG.D_CFG if cfg.SELF_TRAIN.PROG_AUG.get('D_CFG', None) else None,
            #         intensity=cfg.SELF_TRAIN.PROG_AUG.SCALE, aug_times=aug_times)
            # Before epoch 20: source + sampled frames only.  Afterwards the
            # pseudo-labelled target frames join via the three-way objective.
            if cur_epoch < 20:
                accumulated_iter_detector = train_detector(
                    model,
                    model_func,
                    optimizer[0],
                    lr_scheduler[0],
                    source_sample_loader,
                    sample_train_loader,
                    dataloader_iter_src_sample,
                    dataloader_iter_sample,
                    dist_train,
                    optim_cfg,
                    rank,
                    len(source_sample_loader),
                    accumulated_iter_detector,
                    tb_log, tbar
                )
            else:
                accumulated_iter_detector = train_detector_st3d_2(
                    model,
                    model_func,
                    optimizer[0],
                    lr_scheduler[0],
                    source_sample_loader,
                    sample_train_loader,
                    target_train_loader,
                    dataloader_iter_src_sample,
                    dataloader_iter_sample,
                    dataloader_iter_tar,
                    dist_train,
                    optim_cfg,
                    rank,
                    len(sample_train_loader) + len(target_train_loader),
                    accumulated_iter_detector,
                    tb_log, tbar
                )
            # save trained model
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                # Keep at most max_ckpt_save_num checkpoints, dropping oldest.
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                if ckpt_list.__len__() >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                save_checkpoint(
                    checkpoint_state(model, epoch=trained_epoch, it=accumulated_iter_detector), filename=ckpt_name,
                )
def model_state_to_cpu(model_state):
    """Return a copy of *model_state* (same mapping type, e.g. OrderedDict)
    with every tensor moved to the CPU, so the checkpoint loads on any device."""
    cpu_state = type(model_state)()  # preserve the original mapping type
    for name, tensor in model_state.items():
        cpu_state[name] = tensor.cpu()
    return cpu_state
def checkpoint_state(model=None, optimizer=None, epoch=None, it=None):
    """Assemble a serializable checkpoint dict.

    Args:
        model: network (plain module or DistributedDataParallel wrapper) or None.
        optimizer: optimizer whose state_dict is stored, or None.
        epoch: epoch number to record.
        it: global iteration count to record.

    Returns:
        dict with keys 'epoch', 'it', 'model_state', 'optimizer_state', 'version'.
    """
    optim_state = optimizer.state_dict() if optimizer is not None else None
    if model is not None:
        if isinstance(model, torch.nn.parallel.DistributedDataParallel):
            # unwrap DDP and move weights to CPU so the checkpoint loads anywhere
            model_state = model_state_to_cpu(model.module.state_dict())
        else:
            model_state = model.state_dict()
    else:
        model_state = None
    try:
        import pcdet
        version = 'pcdet+' + pcdet.__version__
    except (ImportError, AttributeError):
        # was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit
        version = 'none'
    return {'epoch': epoch, 'it': it, 'model_state': model_state, 'optimizer_state': optim_state, 'version': version}
def save_checkpoint(state, filename='checkpoint', save_optim_separately=False):
    """Serialize *state* to '<filename>.pth'.

    Args:
        state: checkpoint dict (as produced by checkpoint_state).
        filename: path prefix; the '.pth' suffix is appended here.
        save_optim_separately: if True, pop 'optimizer_state' out of *state*
            and write it to '<filename>_optim.pth' instead. Replaces the old
            permanently-disabled `if False and ...` branch with a real switch
            (default keeps the original behavior: everything in one file).
    """
    if save_optim_separately and 'optimizer_state' in state:
        optimizer_state = state.pop('optimizer_state')
        optimizer_filename = '{}_optim.pth'.format(filename)
        torch.save({'optimizer_state': optimizer_state}, optimizer_filename)
    filename = '{}.pth'.format(filename)
    torch.save(state, filename)
| 32,330
| 41.318063
| 167
|
py
|
3DTrans
|
3DTrans-master/tools/train_utils/train_semi_utils.py
|
import glob
import os
import math
import torch
import tqdm
from torch.nn.utils import clip_grad_norm_
def train_one_epoch(model, optimizer, train_loader, model_func, lr_scheduler, accumulated_iter, cur_epoch, optim_cfg,
                    rank, tbar, total_it_each_epoch, dataloader_iter, tb_log=None, leave_pbar=False):
    """Run one training epoch and return the updated global iteration counter.

    Args:
        model: network to train.
        optimizer: optimizer stepped once per batch.
        train_loader: training dataloader (used to re-create ``dataloader_iter``).
        model_func: callable ``(model, batch) -> (loss, tb_dict, disp_dict)``.
        lr_scheduler: per-iteration scheduler; stepped *before* the forward pass.
        accumulated_iter: global iteration counter carried across epochs.
        cur_epoch: current epoch index (used only for temperature decay).
        optim_cfg: optimization config (GRAD_NORM_CLIP, optional USE_TEMPERATURE).
        rank: distributed rank; only rank 0 draws progress bars and logs.
        tbar: outer epoch-level tqdm bar.
        total_it_each_epoch: number of iterations to run this epoch.
        dataloader_iter: iterator over ``train_loader`` (may persist across epochs).
        tb_log: optional tensorboard writer.
        leave_pbar: keep the inner progress bar after the epoch ends.

    Returns:
        int: updated ``accumulated_iter``.
    """
    # when this epoch consumes the whole loader, start from a fresh iterator
    if total_it_each_epoch == len(train_loader):
        dataloader_iter = iter(train_loader)
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train', dynamic_ncols=True)
    for cur_it in range(total_it_each_epoch):
        try:
            batch = next(dataloader_iter)
        except StopIteration:
            # loader exhausted mid-epoch (merged-iters mode): restart it
            dataloader_iter = iter(train_loader)
            batch = next(dataloader_iter)
            print('new iters')
        lr_scheduler.step(accumulated_iter)
        try:
            cur_lr = float(optimizer.lr)
        except:
            # standard torch optimizers expose the lr via param_groups instead
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
        model.train()
        optimizer.zero_grad()
        # use temperature for radius search
        if optim_cfg.get('USE_TEMPERATURE', False):
            # anneal the temperature over epochs; the model reads it from the batch
            cur_temperature = calculate_temperature_decay(cur_epoch, optim_cfg)
            batch['temperature'] = cur_temperature
        loss, tb_dict, disp_dict = model_func(model, batch)
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter += 1
        disp_dict.update({'loss': loss.item(), 'lr': cur_lr})
        # log to console and tensorboard
        if rank == 0:
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss', loss, accumulated_iter)
                tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
                for key, val in tb_dict.items():
                    tb_log.add_scalar('train/' + key, val, accumulated_iter)
    if rank == 0:
        pbar.close()
    return accumulated_iter
def train_model(model, optimizer, train_loader, model_func, lr_scheduler, optim_cfg,
                start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir, train_sampler=None,
                lr_warmup_scheduler=None, ckpt_save_interval=1, max_ckpt_save_num=50,
                merge_all_iters_to_one_epoch=False):
    """Top-level training loop: run epochs [start_epoch, total_epochs) via
    ``train_one_epoch`` and periodically save rank-0 checkpoints, keeping at
    most ``max_ckpt_save_num`` of them (oldest removed first by mtime)."""
    accumulated_iter = start_iter
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar:
        total_it_each_epoch = len(train_loader)
        if merge_all_iters_to_one_epoch:
            # dataset repeats all samples internally; split iterations evenly per epoch
            assert hasattr(train_loader.dataset, 'merge_all_iters_to_one_epoch')
            train_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
            total_it_each_epoch = len(train_loader) // max(total_epochs, 1)
        dataloader_iter = iter(train_loader)
        for cur_epoch in tbar:
            if train_sampler is not None:
                # reshuffle the distributed sampler deterministically per epoch
                train_sampler.set_epoch(cur_epoch)
            # train one epoch
            if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            accumulated_iter = train_one_epoch(
                model, optimizer, train_loader, model_func,
                lr_scheduler=cur_scheduler,
                accumulated_iter=accumulated_iter,
                cur_epoch = cur_epoch,
                optim_cfg=optim_cfg,
                rank=rank, tbar=tbar, tb_log=tb_log,
                leave_pbar=(cur_epoch + 1 == total_epochs),
                total_it_each_epoch=total_it_each_epoch,
                dataloader_iter=dataloader_iter
            )
            # save trained model
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                # prune oldest checkpoints so at most max_ckpt_save_num remain
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                if ckpt_list.__len__() >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                save_checkpoint(
                    checkpoint_state(model, optimizer, trained_epoch, accumulated_iter), filename=ckpt_name,
                )
def model_state_to_cpu(model_state):
    """Build a same-type copy of *model_state* whose tensors all live on the CPU."""
    cpu_copy = type(model_state)()  # keep the mapping type (ordered dict)
    for param_name in model_state:
        cpu_copy[param_name] = model_state[param_name].cpu()
    return cpu_copy
def checkpoint_state(model=None, optimizer=None, epoch=None, it=None):
    """Build a serializable checkpoint dict from the given training objects.

    Args:
        model: module (optionally DDP-wrapped) whose weights are stored, or None.
        optimizer: optimizer whose state_dict is stored, or None.
        epoch: epoch number to record.
        it: global iteration count to record.

    Returns:
        dict with 'epoch', 'it', 'model_state', 'optimizer_state' and 'version'.
    """
    optim_state = optimizer.state_dict() if optimizer is not None else None
    if model is not None:
        if isinstance(model, torch.nn.parallel.DistributedDataParallel):
            # strip the DDP wrapper and move tensors to CPU for portable loading
            model_state = model_state_to_cpu(model.module.state_dict())
        else:
            model_state = model.state_dict()
    else:
        model_state = None
    try:
        import pcdet
        version = 'pcdet+' + pcdet.__version__
    except (ImportError, AttributeError):
        # narrowed from a bare `except:` that also caught KeyboardInterrupt
        version = 'none'
    return {'epoch': epoch, 'it': it, 'model_state': model_state, 'optimizer_state': optim_state, 'version': version}
def save_checkpoint(state, filename='checkpoint', save_optim_separately=False):
    """Write *state* to '<filename>.pth'.

    Args:
        state: checkpoint dict (see checkpoint_state).
        filename: path prefix; '.pth' is appended.
        save_optim_separately: when True, the optimizer state is popped out and
            written to '<filename>_optim.pth'. This turns the original dead
            `if False and ...` branch into an opt-in flag; the default False
            preserves the old single-file behavior exactly.
    """
    if save_optim_separately and 'optimizer_state' in state:
        optimizer_state = state.pop('optimizer_state')
        optimizer_filename = '{}_optim.pth'.format(filename)
        torch.save({'optimizer_state': optimizer_state}, optimizer_filename)
    filename = '{}.pth'.format(filename)
    torch.save(state, filename)
def calculate_temperature_decay(cur_epoch, optim_cfg):
    """Exponentially decay a temperature across epochs.

    Reads ``DECAY_EPOCH = [start, end]`` and ``DECAY_TEMPERATURE =
    [start_temp, end_temp]`` from *optim_cfg*. Before ``start`` the start
    temperature is returned, after ``end`` the end temperature; in between the
    value follows a log-linear (exponential) interpolation. Only the 'exp'
    DECAY_MODE is implemented.
    """
    if optim_cfg.DECAY_MODE != 'exp':
        raise NotImplementedError
    first_epoch, last_epoch = optim_cfg.DECAY_EPOCH
    high_temp, low_temp = optim_cfg.DECAY_TEMPERATURE
    # clamp outside the decay window
    if cur_epoch < first_epoch:
        return high_temp
    if cur_epoch > last_epoch:
        return low_temp
    # exponential interpolation: rate is negative when low_temp < high_temp
    rate = math.log(low_temp / high_temp) / (last_epoch - first_epoch)
    return high_temp * math.exp(rate * (cur_epoch - first_epoch))
| 6,704
| 37.757225
| 117
|
py
|
3DTrans
|
3DTrans-master/tools/train_utils/train_active_utils.py
|
from dis import dis
import glob
import os
import pickle
import torch
import tqdm
import time
from torch.nn.utils import clip_grad_norm_
from pcdet.utils import common_utils, commu_utils, self_training_utils
from pcdet.models import load_data_to_gpu
from pcdet.datasets import build_dataloader, build_dataloader_ada
from pcdet.utils import active_learning_utils
def train_detector(model, model_func, optimizer, lr_scheduler, source_loader, sample_loader, source_loader_iter, sample_loader_iter,
                   dist_train, optim_cfg, rank, total_it_each_epoch, accumulated_iter_detector, tb_log, tbar, leave_pbar=False):
    """Train the detector backbone/heads for one epoch on source + sampled-target
    batches, with the auxiliary classifier heads (conv_cls1/2) and the domain
    discriminator frozen. Per iteration the combined loss is
    ``loss_src + SAMPLE_LOSS_SCALE * loss_sam``. Returns the updated iteration
    counter ``accumulated_iter_detector``."""
    model.train()
    # freeze the auxiliary heads and discriminator; train everything else
    if dist_train:
        for p in model.module.parameters():
            p.requires_grad = True
        for p in model.module.dense_head.conv_cls1.parameters():
            p.requires_grad = False
        for p in model.module.dense_head.conv_cls2.parameters():
            p.requires_grad = False
        for p in model.module.discriminator.parameters():
            p.requires_grad = False
    else:
        for p in model.parameters():
            p.requires_grad = True
        for p in model.dense_head.conv_cls1.parameters():
            p.requires_grad = False
        for p in model.dense_head.conv_cls2.parameters():
            p.requires_grad = False
        for p in model.discriminator.parameters():
            p.requires_grad = False
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train_detector', dynamic_ncols=True)
    data_time = common_utils.AverageMeter()
    batch_time = common_utils.AverageMeter()
    forward_time = common_utils.AverageMeter()
    forward_args = {
        'mode': 'train_detector'
    }
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        # pull next source batch, restarting the iterator when exhausted
        try:
            batch_src = next(source_loader_iter)
        except StopIteration:
            source_loader_iter = iter(source_loader)
            batch_src = next(source_loader_iter)
            print('new source iter')
        try:
            batch_sample = next(sample_loader_iter)
        except StopIteration:
            sample_loader_iter = iter(sample_loader)
            batch_sample = next(sample_loader_iter)
            print('new sample iter')
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter_detector)
        try:
            cur_lr = float(optimizer.lr)
        except:
            # standard torch optimizers expose lr via param_groups
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter_detector)
        optimizer.zero_grad()
        loss_sam, tb_dict_sam, disp_dict = model_func(model, batch_sample, **forward_args)
        loss_src, tb_dict_src, disp_dict = model_func(model, batch_src, **forward_args)
        # loss_sam, tb_dict_sam, disp_dict = model_func(model, batch_sample, forward_args)
        loss = loss_src + optim_cfg.SAMPLE_LOSS_SCALE * loss_sam
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter_detector += 1
        cur_batch_time = time.time() - end
        # average timings across ranks for display
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr_detector': cur_lr,
                'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})',
                'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter_detector))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss_detector', loss, accumulated_iter_detector)
                tb_log.add_scalar('meta_data/learning_rate_detector', cur_lr, accumulated_iter_detector)
                for key, val in tb_dict_src.items():
                    tb_log.add_scalar('train/detector_src' + key, val, accumulated_iter_detector)
                for key, val in tb_dict_sam.items():
                    tb_log.add_scalar('train/detector_sam' + key, val, accumulated_iter_detector)
    if rank == 0:
        pbar.close()
    return accumulated_iter_detector
def train_discriminator(model, optimizer, lr_scheduler, source_loader, target_loader,
                        source_loader_iter, target_loader_iter, dist_train, total_it_each_epoch,
                        accumulated_iter_discriminator, optim_cfg, tb_log, rank, tbar, leave_pbar=False):
    """Train only the domain discriminator for ``total_it_each_epoch`` iterations,
    averaging its loss on a source batch (source=True) and a target batch
    (source=False) each step; the rest of the model is frozen. Returns the
    updated ``accumulated_iter_discriminator``."""
    model.train()
    # freeze everything except the discriminator
    if dist_train:
        for p in model.module.parameters():
            p.requires_grad = False
        for p in model.module.discriminator.parameters():
            p.requires_grad = True
    else:
        for p in model.parameters():
            p.requires_grad = False
        for p in model.discriminator.parameters():
            p.requires_grad = True
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train_discriminator', dynamic_ncols=True)
    data_time = common_utils.AverageMeter()
    batch_time = common_utils.AverageMeter()
    forward_time = common_utils.AverageMeter()
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        try:
            batch_src = next(source_loader_iter)
        except StopIteration:
            source_loader_iter = iter(source_loader)
            batch_src = next(source_loader_iter)
            print('new source iter')
        try:
            batch_tar = next(target_loader_iter)
        except StopIteration:
            target_loader_iter = iter(target_loader)
            batch_tar = next(target_loader_iter)
            print('new target iter')
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter_discriminator)
        try:
            cur_lr = float(optimizer.lr)
        except:
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter_discriminator)
        optimizer.zero_grad()
        load_data_to_gpu(batch_src)
        forward_args = {
            'mode': 'train_discriminator',
            'source': True
        }
        loss_src = model(batch_src, **forward_args)
        load_data_to_gpu(batch_tar)
        forward_args = {
            'mode': 'train_discriminator',
            'source': False
        }
        loss_tar = model(batch_tar, **forward_args)
        # symmetric average of the source-domain and target-domain losses
        loss = (loss_src + loss_tar) / 2
        tb_dict = {
            'discriminator_loss': loss.item()
        }
        disp_dict = {}
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter_discriminator += 1
        cur_batch_time = time.time() - end
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr_discriminator': cur_lr,
                'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})',
                'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter_discriminator))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss_discriminator', loss, accumulated_iter_discriminator)
                tb_log.add_scalar('meta_data/learning_rate_discriminator', cur_lr, accumulated_iter_discriminator)
                for key, val in tb_dict.items():
                    tb_log.add_scalar('train/' + key, val, accumulated_iter_discriminator)
    if rank == 0:
        pbar.close()
    return accumulated_iter_discriminator
def train_multi_classifier(model, optimizer, lr_scheduler, source_loader, sample_loader,
                           source_loader_iter, sample_loader_iter, dist_train, total_it_each_epoch,
                           accumulated_iter_mul_cls, optim_cfg, tb_log, rank,
                           tbar, leave_pbar=False):
    """Train only the two auxiliary classifier heads (conv_cls1/conv_cls2) for
    ``total_it_each_epoch`` iterations, averaging their loss over one source
    batch and one sampled-target batch per step (mode 'train_mul_cls',
    distance 'max'); all other parameters are frozen. Returns the updated
    ``accumulated_iter_mul_cls``."""
    model.train()
    # freeze everything except the two classifier heads
    if dist_train:
        for p in model.module.parameters():
            p.requires_grad = False
        for p in model.module.dense_head.conv_cls1.parameters():
            p.requires_grad = True
        for p in model.module.dense_head.conv_cls2.parameters():
            p.requires_grad = True
    else:
        for p in model.parameters():
            p.requires_grad = False
        for p in model.dense_head.conv_cls1.parameters():
            p.requires_grad = True
        for p in model.dense_head.conv_cls2.parameters():
            p.requires_grad = True
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train_mul_cls', dynamic_ncols=True)
    data_time = common_utils.AverageMeter()
    batch_time = common_utils.AverageMeter()
    forward_time = common_utils.AverageMeter()
    forward_args = {
        'mode': 'train_mul_cls',
        'distance': 'max'
    }
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        try:
            batch_src = next(source_loader_iter)
        except StopIteration:
            source_loader_iter = iter(source_loader)
            batch_src = next(source_loader_iter)
            print('new source iter')
        try:
            batch_sam = next(sample_loader_iter)
        except StopIteration:
            sample_loader_iter = iter(sample_loader)
            batch_sam = next(sample_loader_iter)
            print('new sample iter')
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter_mul_cls)
        try:
            cur_lr = float(optimizer.lr)
        except:
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter_mul_cls)
        optimizer.zero_grad()
        load_data_to_gpu(batch_src)
        ret_src, tb_dict_src, disp_dict = model(batch_src, **forward_args)
        load_data_to_gpu(batch_sam)
        ret_sam, tb_dict_sam, disp_dict = model(batch_sam, **forward_args)
        loss_src = ret_src['loss'].mean()
        loss_sam = ret_sam['loss'].mean()
        # equally weight the source and sampled-target classifier losses
        loss = (loss_src + loss_sam) / 2
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter_mul_cls += 1
        cur_batch_time = time.time() - end
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr_mul_cls': cur_lr,
                'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})',
                'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter_mul_cls))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss_mul_cls', loss, accumulated_iter_mul_cls)
                tb_log.add_scalar('meta_data/learning_rate_mul_cls', cur_lr, accumulated_iter_mul_cls)
                for key, val in tb_dict_src.items():
                    tb_log.add_scalar('train/mul_cls_src_' + key, val, accumulated_iter_mul_cls)
                for key, val in tb_dict_sam.items():
                    tb_log.add_scalar('train/mul_cls_sam_' + key, val, accumulated_iter_mul_cls)
    if rank == 0:
        pbar.close()
    return accumulated_iter_mul_cls
def train_one_epoch(model, optimizer_detector, optimizer_mul_cls, optimizer_discriminator,
                    source_train_loader, target_train_loader, sample_train_loader, model_func,
                    lr_scheduler_detector, lr_scheduler_mul_cls, lr_scheduler_discriminator,
                    accumulated_iter_detector, accumulated_iter_mul_cls, accumulated_iter_discriminator,
                    optim_cfg, rank, tbar, dist_train, total_it_each_epoch,
                    dataloader_iter_src, dataloader_iter_tar, dataloader_iter_sample,
                    tb_log=None, leave_pbar=False, ema_model=None):
    """Run one adaptation epoch in three sequential phases: (1) the detector
    for ``total_it_each_epoch`` iterations, then (2) the auxiliary
    multi-classifier heads and (3) the domain discriminator for 2 iterations
    each. Returns the three updated iteration counters."""
    # phase 1: detector on source + sampled-target batches
    accumulated_iter_detector = train_detector(
        model, model_func, optimizer_detector, lr_scheduler_detector,
        source_train_loader, sample_train_loader,
        dataloader_iter_src, dataloader_iter_sample,
        dist_train, optim_cfg, rank, total_it_each_epoch,
        accumulated_iter_detector, tb_log, tbar
    )
    # phase 2: refresh the classifier heads (2 iterations per epoch)
    accumulated_iter_mul_cls = train_multi_classifier(
        model, optimizer_mul_cls, lr_scheduler_mul_cls,
        source_train_loader, sample_train_loader,
        dataloader_iter_src, dataloader_iter_sample,
        dist_train, 2, accumulated_iter_mul_cls,
        optim_cfg.MUL_CLS, tb_log, rank, tbar
    )
    # phase 3: refresh the domain discriminator (2 iterations per epoch)
    accumulated_iter_discriminator = train_discriminator(
        model, optimizer_discriminator, lr_scheduler_discriminator,
        source_train_loader, target_train_loader,
        dataloader_iter_src, dataloader_iter_tar,
        dist_train, 2, accumulated_iter_discriminator,
        optim_cfg.DISCRIMINATOR, tb_log, rank, tbar
    )
    return accumulated_iter_detector, accumulated_iter_mul_cls, accumulated_iter_discriminator
def train_active_model_ps_label(model, optimizer, source_train_loader, target_train_loader, model_func, lr_scheduler, optim_cfg,
                                start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir, ps_label_dir, sample_epoch,
                                annotation_budget, target_file_path, sample_save_path, cfg, batch_size, workers, dist_train,
                                source_sampler=None, target_sampler=None, lr_warmup_scheduler=None, ckpt_save_interval=1,
                                max_ckpt_save_num=50, merge_all_iters_to_one_epoch=False, logger=None, ema_model=None):
    """Active-learning self-training loop with pseudo labels.

    Per epoch it (a) optionally regenerates pseudo labels on the target set,
    (b) optionally ramps up curriculum data augmentation, (c) on epochs listed
    in ``sample_epoch`` scores target frames, selects ``annotation_budget``
    frames to annotate, and rebuilds the sample/target dataloaders, then
    (d) trains detector, multi-classifier and discriminator via
    ``train_one_epoch`` and saves a rank-0 checkpoint.

    ``optimizer``/``lr_scheduler`` are expected to be 3-element sequences
    (detector, mul_cls, discriminator).
    NOTE(review): ``dataloader_iter_sample`` is only assigned inside the
    ``cur_epoch in sample_epoch`` branch but is passed to ``train_one_epoch``
    unconditionally — if the first epoch is not a sampling epoch this raises a
    NameError; confirm ``start_epoch`` is always in ``sample_epoch``.
    NOTE(review): ``model_func`` and ``ema_model`` appear to be forwarded but
    otherwise unused here.
    """
    # whether target infos live on OSS remote storage
    oss = True if cfg['DATA_CONFIG_TAR'].get('OSS_PATH', None) is not None else False
    target_list = active_learning_utils.get_target_list(target_file_path, oss)
    sample_list = []
    sample_train_loader = None
    target_name = cfg['DATA_CONFIG_TAR']['DATASET']
    accumulated_iter_detector, accumulated_iter_mul_cls, accumulated_iter_discriminator = start_iter, start_iter, start_iter
    source_reader = common_utils.DataReader(source_train_loader, source_sampler)
    source_reader.construct_iter()
    # for continue training.
    # if already exist generated pseudo label result
    ps_pkl = self_training_utils.check_already_exsit_pseudo_label(ps_label_dir, start_epoch)
    if ps_pkl is not None:
        logger.info('==> Loading pseudo labels from {}'.format(ps_pkl))
    # for continue training
    if cfg.SELF_TRAIN.get('PROG_AUG', None) and cfg.SELF_TRAIN.PROG_AUG.ENABLED and \
            start_epoch > 0:
        # replay the progressive-augmentation schedule up to start_epoch
        for cur_epoch in range(start_epoch):
            if cur_epoch in cfg.SELF_TRAIN.PROG_AUG.UPDATE_AUG:
                aug_times = cfg.SELF_TRAIN.PROG_AUG.UPDATE_AUG.index(cur_epoch) + 1
                print("***********update AUG times for continue training:**********", aug_times)
                target_train_loader.dataset.data_augmentor.re_prepare(
                    augmentor_configs=cfg.SELF_TRAIN.PROG_AUG.D_CFG if cfg.SELF_TRAIN.PROG_AUG.get('D_CFG',
                                                                                                   None) else None,
                    intensity=cfg.SELF_TRAIN.PROG_AUG.SCALE, aug_times=aug_times)
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True,
                     leave=(rank == 0)) as tbar:
        total_it_each_epoch = len(source_train_loader)
        if merge_all_iters_to_one_epoch:
            assert hasattr(source_train_loader.dataset, 'merge_all_iters_to_one_epoch')
            source_train_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
            total_it_each_epoch = len(source_train_loader) // max(total_epochs, 1)
        dataloader_iter_src = iter(source_train_loader)
        dataloader_iter_tar = iter(target_train_loader) if target_train_loader is not None else None
        for cur_epoch in tbar:
            if source_sampler is not None:
                source_sampler.set_epoch(cur_epoch)
            if target_sampler is not None:
                target_sampler.set_epoch(cur_epoch)
            # train one epoch
            if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            # update pseudo label
            if (cur_epoch in cfg.SELF_TRAIN.UPDATE_PSEUDO_LABEL) or \
                    ((cur_epoch % cfg.SELF_TRAIN.UPDATE_PSEUDO_LABEL_INTERVAL == 0)
                     and cur_epoch != 0):
                # switch target dataset to eval transforms while generating labels
                target_train_loader.dataset.eval()
                print("***********update pseudo label**********")
                self_training_utils.save_pseudo_label_epoch(
                    model, target_train_loader, rank,
                    leave_pbar=True, ps_label_dir=ps_label_dir, cur_epoch=cur_epoch
                )
                target_train_loader.dataset.train()
            # curriculum data augmentation
            if cfg.SELF_TRAIN.get('PROG_AUG', None) and cfg.SELF_TRAIN.PROG_AUG.ENABLED and \
                    (cur_epoch in cfg.SELF_TRAIN.PROG_AUG.UPDATE_AUG):
                aug_times = cfg.SELF_TRAIN.PROG_AUG.UPDATE_AUG.index(cur_epoch) + 1
                print("***********update AUG times:**********", aug_times)
                target_train_loader.dataset.data_augmentor.re_prepare(
                    augmentor_configs=cfg.SELF_TRAIN.PROG_AUG.D_CFG if cfg.SELF_TRAIN.PROG_AUG.get('D_CFG',
                                                                                                   None) else None,
                    intensity=cfg.SELF_TRAIN.PROG_AUG.SCALE, aug_times=aug_times)
            # active evaluate and sample
            if cur_epoch in sample_epoch:
                # score target frames, pick a budget of frames to annotate, and
                # move them from the target pool into the labeled sample pool
                frame_score = active_learning_utils.active_evaluate(model, target_train_loader, rank)
                sampled_frame_id, _ = active_learning_utils.active_sample(frame_score, budget=annotation_budget)
                sample_list, info_path = active_learning_utils.update_sample_list(sample_list, target_list, sampled_frame_id, cur_epoch, sample_save_path, target_name)
                target_list, target_info_path = active_learning_utils.update_target_list(target_list, sampled_frame_id, cur_epoch, sample_save_path, target_name)
                sample_train_set, sample_train_loader, sample_train_sampler = build_dataloader_ada(
                    dataset_cfg=cfg.DATA_CONFIG_SAMPLE,
                    class_names=cfg.DATA_CONFIG_SAMPLE.CLASS_NAMES,
                    batch_size=batch_size,
                    dist=dist_train, workers=workers,
                    logger=logger,
                    training=True,
                    info_path=info_path,
                    merge_all_iters_to_one_epoch=merge_all_iters_to_one_epoch,
                    total_epochs=total_epochs-cur_epoch
                )
                target_train_set, target_train_loader, target_train_sampler = build_dataloader_ada(
                    dataset_cfg=cfg.DATA_CONFIG_TAR,
                    class_names=cfg.DATA_CONFIG_TAR.CLASS_NAMES,
                    batch_size=batch_size,
                    dist=dist_train, workers=workers,
                    logger=logger,
                    training=True,
                    info_path=target_info_path,
                    merge_all_iters_to_one_epoch=merge_all_iters_to_one_epoch,
                    total_epochs=total_epochs-cur_epoch
                )
                dataloader_iter_tar = iter(target_train_loader)
                dataloader_iter_sample = iter(sample_train_loader) if sample_train_loader is not None else None
            accumulated_iter_detector, accumulated_iter_mul_cls, accumulated_iter_discriminator = train_one_epoch(
                model=model,
                optimizer_detector=optimizer[0],
                optimizer_mul_cls=optimizer[1],
                optimizer_discriminator=optimizer[2],
                source_train_loader=source_train_loader,
                target_train_loader=target_train_loader,
                sample_train_loader=sample_train_loader,
                model_func=model_func,
                lr_scheduler_detector=cur_scheduler[0],
                lr_scheduler_mul_cls=cur_scheduler[1],
                lr_scheduler_discriminator=cur_scheduler[2],
                accumulated_iter_detector=accumulated_iter_detector,
                accumulated_iter_mul_cls=accumulated_iter_mul_cls,
                accumulated_iter_discriminator=accumulated_iter_discriminator,
                optim_cfg=optim_cfg,
                rank=rank, tbar=tbar,
                dist_train=dist_train,
                tb_log=tb_log,
                leave_pbar=(cur_epoch + 1 == total_epochs),
                total_it_each_epoch=total_it_each_epoch,
                dataloader_iter_src=dataloader_iter_src,
                dataloader_iter_tar=dataloader_iter_tar,
                dataloader_iter_sample=dataloader_iter_sample,
                ema_model=ema_model
            )
            # save trained model
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                # prune oldest checkpoints so at most max_ckpt_save_num remain
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                if ckpt_list.__len__() >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                save_checkpoint(
                    checkpoint_state(model, epoch=trained_epoch, it=accumulated_iter_detector), filename=ckpt_name,
                )
def fine_tune_dense_head(model, optimizer, lr_scheduler, source_loader, source_loader_iter, dist_train, total_it_each_epoch,
                         accumulated_iter_dense_head, optim_cfg, tb_log, rank, tbar, leave_pbar=False):
    """Fine-tune (part of) the dense head on source batches (mode 'finetune')
    with the rest of the model frozen; returns the updated
    ``accumulated_iter_dense_head``.

    NOTE(review): the dist branch unfreezes only conv_cls1/conv_cls2, while the
    non-dist branch unfreezes the entire dense_head — confirm this asymmetry is
    intentional.
    """
    model.train()
    if dist_train:
        for p in model.module.parameters():
            p.requires_grad = False
        for p in model.module.dense_head.conv_cls1.parameters():
            p.requires_grad = True
        for p in model.module.dense_head.conv_cls2.parameters():
            p.requires_grad = True
    else:
        for p in model.parameters():
            p.requires_grad = False
        for p in model.dense_head.parameters():
            p.requires_grad = True
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train_dense_head', dynamic_ncols=True)
    data_time = common_utils.AverageMeter()
    batch_time = common_utils.AverageMeter()
    forward_time = common_utils.AverageMeter()
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        try:
            batch_src = next(source_loader_iter)
        except StopIteration:
            source_loader_iter = iter(source_loader)
            batch_src = next(source_loader_iter)
            print('new source iter')
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter_dense_head)
        try:
            cur_lr = float(optimizer.lr)
        except:
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate_dense_head', cur_lr, accumulated_iter_dense_head)
        optimizer.zero_grad()
        load_data_to_gpu(batch_src)
        forward_args = {
            'mode': 'finetune'
        }
        ret_dict, tb_dict, disp_dict = model(batch_src, **forward_args)
        loss = ret_dict['loss'].mean()
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.DENSE_HEAD.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter_dense_head += 1
        cur_batch_time = time.time() - end
        # average timings across ranks for display
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr_dense_head': cur_lr,
                'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})',
                'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter_dense_head))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss_dense_head', loss, accumulated_iter_dense_head)
                tb_log.add_scalar('meta_data/learning_rate_dense_head', cur_lr, accumulated_iter_dense_head)
                for key, val in tb_dict.items():
                    tb_log.add_scalar('train/' + key, val, accumulated_iter_dense_head)
    if rank == 0:
        pbar.close()
    return accumulated_iter_dense_head
def fine_tune_discriminator(model, optimizer, lr_scheduler, source_loader, target_loader, source_loader_iter,
                            target_loader_iter, dist_train, total_it_each_epoch, accumulated_iter_discriminator,
                            optim_cfg, tb_log, rank, tbar, leave_pbar=False):
    """Fine-tune only the domain discriminator, averaging its loss over one
    source batch (source=True) and one target batch (source=False) per
    iteration; everything else is frozen. Returns the updated
    ``accumulated_iter_discriminator``."""
    model.train()
    # freeze everything except the discriminator
    if dist_train:
        for p in model.module.parameters():
            p.requires_grad = False
        for p in model.module.discriminator.parameters():
            p.requires_grad = True
    else:
        for p in model.parameters():
            p.requires_grad = False
        for p in model.discriminator.parameters():
            p.requires_grad = True
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train_discriminator', dynamic_ncols=True)
    data_time = common_utils.AverageMeter()
    batch_time = common_utils.AverageMeter()
    forward_time = common_utils.AverageMeter()
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        try:
            batch_src = next(source_loader_iter)
        except StopIteration:
            source_loader_iter = iter(source_loader)
            batch_src = next(source_loader_iter)
            print('new source iter')
        try:
            batch_tar = next(target_loader_iter)
        except StopIteration:
            target_loader_iter = iter(target_loader)
            batch_tar = next(target_loader_iter)
            print('new target iter')
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter_discriminator)
        try:
            cur_lr = float(optimizer.lr)
        except:
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter_discriminator)
        optimizer.zero_grad()
        load_data_to_gpu(batch_src)
        forward_args = {
            'mode': 'train_discriminator',
            'source': True
        }
        loss_src = model(batch_src, **forward_args)
        load_data_to_gpu(batch_tar)
        forward_args = {
            'mode': 'train_discriminator',
            'source': False
        }
        loss_tar = model(batch_tar, **forward_args)
        # symmetric average of the two domain losses
        loss = (loss_src + loss_tar) / 2
        forward_timer = time.time()
        tb_dict = {
            'discriminator_loss': loss.item()
        }
        disp_dict = {}
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.DISCRIMINATOR.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter_discriminator += 1
        cur_batch_time = time.time() - end
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr_discriminator': cur_lr,
                'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})',
                'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter_discriminator))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss_discriminator', loss, accumulated_iter_discriminator)
                tb_log.add_scalar('meta_data/learning_rate_discriminator', cur_lr, accumulated_iter_discriminator)
                for key, val in tb_dict.items():
                    tb_log.add_scalar('train/' + key, val, accumulated_iter_discriminator)
    if rank == 0:
        pbar.close()
    return accumulated_iter_discriminator
def fine_tune_one_epoch_1(model, optimizer_dense_head, optimizer_discriminator, source_train_loader, target_train_loader,
                          model_func, lr_scheduler_dense_head, lr_scheduler_discriminator, optim_cfg,
                          accumulated_iter_dense_head, accumulated_iter_discriminator, rank, tbar, dist_train, total_it_each_epoch, dataloader_iter_src,
                          dataloader_iter_tar, tb_log=None, leave_pbar=False):
    """Run one fine-tuning epoch: first the dense head on source data, then the
    domain discriminator on source + target data. Returns the two updated
    iteration counters."""
    # phase 1: dense head on source batches
    accumulated_iter_dense_head = fine_tune_dense_head(
        model, optimizer_dense_head, lr_scheduler_dense_head,
        source_train_loader, dataloader_iter_src,
        dist_train, total_it_each_epoch,
        accumulated_iter_dense_head, optim_cfg,
        tb_log, rank, tbar, leave_pbar
    )
    # phase 2: discriminator on paired source/target batches
    accumulated_iter_discriminator = fine_tune_discriminator(
        model, optimizer_discriminator, lr_scheduler_discriminator,
        source_train_loader, target_train_loader,
        dataloader_iter_src, dataloader_iter_tar,
        dist_train, total_it_each_epoch,
        accumulated_iter_discriminator, optim_cfg,
        tb_log, rank, tbar, leave_pbar
    )
    return accumulated_iter_dense_head, accumulated_iter_discriminator
def fine_tune_model(model, optimizer, source_train_loader, target_train_loader, model_func, lr_scheduler, optim_cfg,
                    start_epoch, total_finetune_epochs, start_iter, rank, tb_log, ckpt_save_dir,
                    dist_train, source_sampler=None, target_sampler=None, lr_warmup_scheduler=None, ckpt_save_interval=1,
                    max_ckpt_save_num=50, merge_all_iters_to_one_epoch=False, logger=None, ema_model=None):
    """Fine-tune driver: each epoch alternates dense-head and discriminator updates.

    `optimizer` and `lr_scheduler` are 2-element sequences: index 0 for the
    dense head, index 1 for the discriminator.  Rank 0 writes a checkpoint
    every `ckpt_save_interval` epochs, keeping at most `max_ckpt_save_num`.

    NOTE(review): `target_sampler`, `logger` and `ema_model` are accepted but
    never used here — presumably kept for signature parity with the other
    train loops; confirm before removing.
    """
    # Both counters resume from the same saved iteration.
    accumulated_iter_dense_head, accumulated_iter_discriminator = start_iter, start_iter
    with tqdm.trange(start_epoch, total_finetune_epochs, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar:
        total_it_each_epoch = len(source_train_loader)
        if merge_all_iters_to_one_epoch:
            assert hasattr(source_train_loader.dataset, 'merge_all_iters_to_one_epoch')
            source_train_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_finetune_epochs)
            total_it_each_epoch = len(source_train_loader) // max(total_finetune_epochs, 1)
        dataloader_iter_src = iter(source_train_loader)
        # The target iterator may be absent (pure source fine-tuning).
        dataloader_iter_tar = iter(target_train_loader) if target_train_loader is not None else None
        for cur_epoch in tbar:
            if source_sampler is not None:
                # Reshuffle the distributed sampler so each epoch sees a new order.
                source_sampler.set_epoch(cur_epoch)
            # train one epoch
            if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            accumulated_iter_dense_head, accumulated_iter_discriminator = fine_tune_one_epoch_1(
                model,
                optimizer[0],
                optimizer[1],
                source_train_loader,
                target_train_loader,
                model_func,
                cur_scheduler[0],
                cur_scheduler[1],
                optim_cfg,
                accumulated_iter_dense_head=accumulated_iter_dense_head,
                accumulated_iter_discriminator=accumulated_iter_discriminator,
                rank=rank, tbar=tbar, tb_log=tb_log,
                dist_train=dist_train,
                leave_pbar=(cur_epoch + 1 == total_finetune_epochs),
                total_it_each_epoch=total_it_each_epoch,
                dataloader_iter_src=dataloader_iter_src,
                dataloader_iter_tar=dataloader_iter_tar
            )
            # save trained model
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                # Evict oldest checkpoints so at most max_ckpt_save_num remain.
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                if ckpt_list.__len__() >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                save_checkpoint(
                    checkpoint_state(model, epoch=trained_epoch, it=accumulated_iter_dense_head), filename=ckpt_name,
                )
def model_state_to_cpu(model_state):
    """Return a same-typed copy of *model_state* with every tensor moved to CPU."""
    cpu_state = type(model_state)()  # preserve the container type (e.g. OrderedDict)
    for name, tensor in model_state.items():
        cpu_state[name] = tensor.cpu()
    return cpu_state
def checkpoint_state(model=None, optimizer=None, epoch=None, it=None):
    """Assemble a serializable checkpoint dict.

    Args:
        model: network (optionally DDP-wrapped); a DDP model's state_dict is
            moved to CPU so the checkpoint loads on any device.
        optimizer: optimizer whose state_dict is stored, or None.
        epoch: epoch counter to record.
        it: accumulated iteration counter to record.

    Returns:
        dict with keys 'epoch', 'it', 'model_state', 'optimizer_state', 'version'.
    """
    optim_state = optimizer.state_dict() if optimizer is not None else None
    if model is not None:
        if isinstance(model, torch.nn.parallel.DistributedDataParallel):
            model_state = model_state_to_cpu(model.module.state_dict())
        else:
            model_state = model.state_dict()
    else:
        model_state = None
    try:
        import pcdet
        version = 'pcdet+' + pcdet.__version__
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit propagate.
        version = 'none'
    return {'epoch': epoch, 'it': it, 'model_state': model_state, 'optimizer_state': optim_state, 'version': version}
def save_checkpoint(state, filename='checkpoint'):
    """Serialize *state* to '<filename>.pth' via torch.save.

    The original code carried an `if False and ...` branch that would have
    written the optimizer state to a separate *_optim.pth file; the branch
    could never execute (dead code) and has been removed.
    """
    torch.save(state, '{}.pth'.format(filename))
| 37,649
| 41.067039
| 167
|
py
|
3DTrans
|
3DTrans-master/tools/train_utils/train_pseudo_label_utils.py
|
import glob
import os
import torch
import tqdm
import time
from torch.nn.utils import clip_grad_norm_
from pcdet.utils import common_utils, commu_utils
from pcdet.utils import self_training_utils
def train_detector(model, model_func, optimizer, lr_scheduler, labeled_loader, unlabeled_loader, labeled_loader_iter,
                   unlabeled_loader_iter, dist_train, optim_cfg, rank, total_it_each_epoch, accumulated_iter_detector, tb_log, tbar, leave_pbar=False):
    """Train the detector for one epoch on mixed labeled + pseudo-labeled data.

    Each step draws one labeled and one unlabeled batch, sums the two losses
    and performs a single optimizer update.  Returns the updated global
    iteration counter.

    NOTE(review): the `total_it_each_epoch` argument is immediately overwritten
    by len(unlabeled_loader) below — confirm whether the parameter is still
    needed.  `dist_train` is accepted but unused in this body.
    """
    total_it_each_epoch = len(unlabeled_loader)
    model.train()
    if rank == 0:
        # Progress bar only on the main process; meters below are cheap everywhere.
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train_detector', dynamic_ncols=True)
    data_time = common_utils.AverageMeter()
    batch_time = common_utils.AverageMeter()
    forward_time = common_utils.AverageMeter()
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        # Restart either iterator transparently when it is exhausted.
        try:
            batch_labeled = next(labeled_loader_iter)
        except StopIteration:
            labeled_loader_iter = iter(labeled_loader)
            batch_labeled = next(labeled_loader_iter)
            print('new labeled iter')
        try:
            batch_unlabeled = next(unlabeled_loader_iter)
        except StopIteration:
            unlabeled_loader_iter = iter(unlabeled_loader)
            batch_unlabeled = next(unlabeled_loader_iter)
            print('new unlabeled iter')
        data_timer = time.time()
        cur_data_time = data_timer - end
        # LR schedule is stepped by global iteration, not by epoch.
        lr_scheduler.step(accumulated_iter_detector)
        try:
            # Some optimizer wrappers expose .lr directly; fall back to param_groups.
            cur_lr = float(optimizer.lr)
        except:
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter_detector)
        optimizer.zero_grad()
        # Forward on both streams; note the second call's disp_dict overwrites the first.
        loss_unlabeled, tb_dict_unlabeled, disp_dict = model_func(model, batch_unlabeled)
        loss_labeled, tb_dict_labeled, disp_dict = model_func(model, batch_labeled)
        loss = loss_labeled + loss_unlabeled
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter_detector += 1
        cur_batch_time = time.time() - end
        # Average timings across ranks for consistent logging.
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr_detector': cur_lr,
                'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})',
                'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter_detector))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss_detector', loss, accumulated_iter_detector)
                tb_log.add_scalar('meta_data/learning_rate_detector', cur_lr, accumulated_iter_detector)
                for key, val in tb_dict_labeled.items():
                    tb_log.add_scalar('train/detector_labeled' + key, val, accumulated_iter_detector)
                for key, val in tb_dict_unlabeled.items():
                    tb_log.add_scalar('train/detector_unlabeled' + key, val, accumulated_iter_detector)
    if rank == 0:
        pbar.close()
    return accumulated_iter_detector
def train_model(model, optimizer, labeled_train_loader, unlabeled_train_loader, model_func,
                lr_scheduler, optim_cfg, start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir,
                ps_label_dir, cfg, dist_train, labeled_sampler=None, unlabeled_sampler=None, lr_warmup_scheduler=None,
                ckpt_save_interval=1, max_ckpt_save_num=50, merge_all_iters_to_one_epoch=False, logger=None, ema_model=None):
    """Self-training driver: refreshes pseudo labels at epochs 0/5/10, then
    trains the detector each epoch on labeled + unlabeled data.

    Rank 0 writes a checkpoint every `ckpt_save_interval` epochs, pruning to
    `max_ckpt_save_num` files.

    NOTE(review): `unlabeled_sampler` and `ema_model` are accepted but unused.
    """
    accumulated_iter = start_iter
    # Resume pseudo labels from disk if a file for this epoch already exists.
    ps_pkl = self_training_utils.check_already_exsit_pseudo_label(ps_label_dir, start_epoch)
    if ps_pkl is not None:
        logger.info('==> Loading pseudo labels from {}'.format(ps_pkl))
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar:
        total_it_each_epoch = len(unlabeled_train_loader)
        if merge_all_iters_to_one_epoch:
            assert hasattr(labeled_train_loader.dataset, 'merge_all_iters_to_one_epoch')
            labeled_train_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
            total_it_each_epoch = len(labeled_train_loader) // max(total_epochs, 1)
        labeled_loader_iter = iter(labeled_train_loader)
        unlabeled_loader_iter = iter(unlabeled_train_loader)
        for cur_epoch in tbar:
            if labeled_sampler is not None:
                labeled_sampler.set_epoch(cur_epoch)
            # train one epoch
            if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            if cur_epoch in [0, 5, 10]:
                # Regenerate pseudo labels with the current model snapshot.
                cfg.DATA_CONFIG.USE_UNLABELED_PSEUDO_LABEL = True
                unlabeled_train_loader.dataset.eval()
                print("***********update pseudo label**********")
                self_training_utils.save_pseudo_label_epoch(
                    model, unlabeled_train_loader, rank,
                    leave_pbar=True, ps_label_dir=ps_label_dir, cur_epoch=cur_epoch
                )
                unlabeled_train_loader.dataset.train()
                # Barrier so every rank sees the fresh labels before training.
                commu_utils.synchronize()
            accumulated_iter = train_detector(
                model,
                model_func,
                optimizer,
                cur_scheduler,
                labeled_train_loader,
                unlabeled_train_loader,
                labeled_loader_iter,
                unlabeled_loader_iter,
                dist_train,
                optim_cfg,
                rank,
                total_it_each_epoch,
                accumulated_iter,
                tb_log, tbar
            )
            # save trained model
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                # Evict oldest checkpoints so at most max_ckpt_save_num remain.
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                if ckpt_list.__len__() >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                save_checkpoint(
                    checkpoint_state(model, optimizer=optimizer, epoch=trained_epoch, it=accumulated_iter), filename=ckpt_name,
                )
def model_state_to_cpu(model_state):
    """Return a same-typed copy of *model_state* with every tensor moved to CPU."""
    cpu_state = type(model_state)()  # preserve the container type (e.g. OrderedDict)
    for name, tensor in model_state.items():
        cpu_state[name] = tensor.cpu()
    return cpu_state
def checkpoint_state(model=None, optimizer=None, epoch=None, it=None):
    """Assemble a serializable checkpoint dict.

    Args:
        model: network (optionally DDP-wrapped); a DDP model's state_dict is
            moved to CPU so the checkpoint loads on any device.
        optimizer: optimizer whose state_dict is stored, or None.
        epoch: epoch counter to record.
        it: accumulated iteration counter to record.

    Returns:
        dict with keys 'epoch', 'it', 'model_state', 'optimizer_state', 'version'.
    """
    optim_state = optimizer.state_dict() if optimizer is not None else None
    if model is not None:
        if isinstance(model, torch.nn.parallel.DistributedDataParallel):
            model_state = model_state_to_cpu(model.module.state_dict())
        else:
            model_state = model.state_dict()
    else:
        model_state = None
    try:
        import pcdet
        version = 'pcdet+' + pcdet.__version__
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit propagate.
        version = 'none'
    return {'epoch': epoch, 'it': it, 'model_state': model_state, 'optimizer_state': optim_state, 'version': version}
def save_checkpoint(state, filename='checkpoint'):
    """Serialize *state* to '<filename>.pth' via torch.save.

    The original code carried an `if False and ...` branch that would have
    written the optimizer state to a separate *_optim.pth file; the branch
    could never execute (dead code) and has been removed.
    """
    torch.save(state, '{}.pth'.format(filename))
| 8,615
| 40.423077
| 151
|
py
|
3DTrans
|
3DTrans-master/tools/train_utils/train_multi_db_utils_3cls.py
|
import glob
import os
import torch
import tqdm
import time
import math
import copy
from torch.nn.utils import clip_grad_norm_
from pcdet.utils import common_utils, commu_utils
from pcdet.utils import self_training_utils
from pcdet.config import cfg
def train_one_epoch(model, optimizer, train_loader_1, train_loader_2, train_loader_3, model_func,
                    lr_scheduler, accumulated_iter, optim_cfg, rank, tbar, total_it_each_epoch,
                    dataloader_iter_1, dataloader_iter_2, dataloader_iter_3, tb_log=None, leave_pbar=False):
    """Train for one epoch on batches merged from three dataloaders.

    Each step draws one batch per loader, merges them into a single batch dict
    and performs one optimizer update.  Returns the updated iteration counter.
    """
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train', dynamic_ncols=True)
    # Running meters for per-step timing diagnostics.
    data_time = common_utils.AverageMeter()
    merge_time = common_utils.AverageMeter()
    batch_time = common_utils.AverageMeter()
    forward_time = common_utils.AverageMeter()
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        # Restart each iterator transparently when exhausted.
        try:
            batch_1 = next(dataloader_iter_1)
        except StopIteration:
            dataloader_iter_1 = iter(train_loader_1)
            batch_1 = next(dataloader_iter_1)
            #print('new iters')
        try:
            batch_2 = next(dataloader_iter_2)
        except StopIteration:
            dataloader_iter_2 = iter(train_loader_2)
            batch_2 = next(dataloader_iter_2)
        try:
            batch_3 = next(dataloader_iter_3)
        except StopIteration:
            dataloader_iter_3 = iter(train_loader_3)
            batch_3 = next(dataloader_iter_3)
        data_timer = time.time()
        cur_data_time = data_timer - end
        # LR schedule is stepped by global iteration, not by epoch.
        lr_scheduler.step(accumulated_iter)
        try:
            # Some optimizer wrappers expose .lr directly; fall back to param_groups.
            cur_lr = float(optimizer.lr)
        except:
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
        model.train()
        optimizer.zero_grad()
        # Merge the three batches pairwise into one combined batch dict.
        batch_pre = common_utils.merge_two_batch_dict(batch_1, batch_2)
        batch = common_utils.merge_two_batch_dict(batch_pre, batch_3)
        merge_timer = time.time()
        cur_merge_time = merge_timer - data_timer
        loss, tb_dict, disp_dict = model_func(model, batch)
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter += 1
        cur_batch_time = time.time() - end
        # average reduce
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_merge_time = commu_utils.average_reduce_value(cur_merge_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard
        if rank == 0:
            data_time.update(avg_data_time)
            merge_time.update(avg_merge_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr': cur_lr, 'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})', 'm_time': f'{merge_time.val:.2f}({merge_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})', 'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss', loss, accumulated_iter)
                tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
                for key, val in tb_dict.items():
                    tb_log.add_scalar('train/' + key, val, accumulated_iter)
    if rank == 0:
        pbar.close()
    return accumulated_iter
def train_model(model, optimizer, train_loader_1, train_loader_2, train_loader_3, model_func, lr_scheduler, optim_cfg,
                start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir, ps_label_dir,
                source_sampler=None, lr_warmup_scheduler=None, ckpt_save_interval=1,
                max_ckpt_save_num=50, merge_all_iters_to_one_epoch=False, logger=None):
    """Multi-epoch driver training jointly on three datasets.

    Optionally regenerates pseudo labels for loader 2 each epoch
    (optim_cfg.GENERATE_PSEUDO_LABEL) and rank 0 saves a checkpoint every
    `ckpt_save_interval` epochs, keeping at most `max_ckpt_save_num`.

    Fix vs. original: the per-epoch iteration count previously took the max
    of loaders 1 and 2 only, so when train_loader_3 was the longest some of
    its batches were never drawn within an epoch; it now covers all three.
    """
    accumulated_iter = start_iter
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar:
        # generate the pseudo-labeling for UDA training
        if cfg.get('SELF_TRAIN', None):
            ps_pkl = self_training_utils.check_already_exsit_pseudo_label(ps_label_dir, start_epoch)
            if ps_pkl is not None:
                logger.info('==> Loading pseudo labels from {}'.format(ps_pkl))
        # One epoch must be long enough to cover the longest of the three loaders.
        total_it_each_epoch = max(len(train_loader_1), len(train_loader_2), len(train_loader_3))
        if merge_all_iters_to_one_epoch:
            raise NotImplementedError
        dataloader_iter_1 = iter(train_loader_1)
        dataloader_iter_2 = iter(train_loader_2)
        dataloader_iter_3 = iter(train_loader_3)
        for cur_epoch in tbar:
            if source_sampler is not None:
                source_sampler.set_epoch(cur_epoch)
            # train one epoch
            if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            if optim_cfg.get('GENERATE_PSEUDO_LABEL', None):
                # Refresh pseudo labels for loader 2 with the current model.
                train_loader_2.dataset.eval()
                logger.info('***********update pseudo label**********')
                self_training_utils.save_pseudo_label_epoch(
                    model, train_loader_2, rank,
                    leave_pbar=True, ps_label_dir=ps_label_dir, cur_epoch=cur_epoch
                )
                train_loader_2.dataset.train()
            accumulated_iter = train_one_epoch(
                model, optimizer, train_loader_1, train_loader_2, train_loader_3, model_func,
                lr_scheduler=cur_scheduler,
                accumulated_iter=accumulated_iter, optim_cfg=optim_cfg,
                rank=rank, tbar=tbar, tb_log=tb_log,
                leave_pbar=(cur_epoch + 1 == total_epochs),
                total_it_each_epoch=total_it_each_epoch,
                dataloader_iter_1=dataloader_iter_1,
                dataloader_iter_2=dataloader_iter_2,
                dataloader_iter_3=dataloader_iter_3
            )
            # save trained model
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                # Evict oldest checkpoints so at most max_ckpt_save_num remain.
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                if ckpt_list.__len__() >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                save_checkpoint(
                    checkpoint_state(model, optimizer, trained_epoch, accumulated_iter), filename=ckpt_name,
                )
def model_state_to_cpu(model_state):
    """Return a same-typed copy of *model_state* with every tensor moved to CPU."""
    cpu_state = type(model_state)()  # preserve the container type (e.g. OrderedDict)
    for name, tensor in model_state.items():
        cpu_state[name] = tensor.cpu()
    return cpu_state
def checkpoint_state(model=None, optimizer=None, epoch=None, it=None):
    """Assemble a serializable checkpoint dict.

    Args:
        model: network (optionally DDP-wrapped); a DDP model's state_dict is
            moved to CPU so the checkpoint loads on any device.
        optimizer: optimizer whose state_dict is stored, or None.
        epoch: epoch counter to record.
        it: accumulated iteration counter to record.

    Returns:
        dict with keys 'epoch', 'it', 'model_state', 'optimizer_state', 'version'.
    """
    optim_state = optimizer.state_dict() if optimizer is not None else None
    if model is not None:
        if isinstance(model, torch.nn.parallel.DistributedDataParallel):
            model_state = model_state_to_cpu(model.module.state_dict())
        else:
            model_state = model.state_dict()
    else:
        model_state = None
    try:
        import pcdet
        version = 'pcdet+' + pcdet.__version__
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit propagate.
        version = 'none'
    return {'epoch': epoch, 'it': it, 'model_state': model_state, 'optimizer_state': optim_state, 'version': version}
def save_checkpoint(state, filename='checkpoint'):
    """Serialize *state* to '<filename>.pth' via torch.save.

    The original code carried an `if False and ...` branch that would have
    written the optimizer state to a separate *_optim.pth file; the branch
    could never execute (dead code) and has been removed.
    """
    torch.save(state, '{}.pth'.format(filename))
| 8,627
| 39.317757
| 163
|
py
|
3DTrans
|
3DTrans-master/tools/train_utils/train_multi_db_loss_merge.py
|
import torch
import os
import glob
import tqdm
from torch.nn.utils import clip_grad_norm_
def visualize_boxes_batch(batch):
    """Draw every scene in *batch* with its predicted (and optional debug GT) boxes."""
    import visualize_utils as vis
    import mayavi.mlab as mlab
    for scene_idx in range(batch['batch_size']):
        # points column 0 is the batch index; drop it for drawing.
        scene_mask = batch['points'][:, 0] == scene_idx
        scene_points = batch['points'][scene_mask][:, 1:]
        if 'debug' in batch:
            # Debug payload carries raw GT boxes for side-by-side comparison.
            vis.draw_scenes(scene_points, ref_boxes=batch['gt_boxes'][scene_idx, :, :7],
                            gt_boxes=batch['debug'][scene_idx]['gt_boxes_lidar'],
                            scores=batch['scores'][scene_idx])
        else:
            vis.draw_scenes(scene_points, ref_boxes=batch['gt_boxes'][scene_idx, :, :7],
                            scores=batch['scores'][scene_idx])
        mlab.show(stop=True)
def merge_two_batch_data(batch_1, batch_2):
    """Merge two collated batch dicts into one with doubled batch_size.

    'gt_boxes' from both batches are padded into a single zero-filled array;
    every other per-sample entry is concatenated along the batch axis.
    Assumes both batches share the same keys and equal batch_size.
    """
    import numpy as np
    merged = {}
    # Seed from batch_1: re-stack every per-sample entry along axis 0.
    for name, items in batch_1.items():
        if name == 'batch_size':
            continue
        merged[name] = np.stack(items, axis=0)
    # Fold in batch_2, overwriting the batch_1-only entries written above.
    for name, items in batch_2.items():
        if name == 'batch_size':
            continue
        if name == 'gt_boxes':
            assert batch_1[name][0].shape[-1] == items[0].shape[-1]
            # Pad length: longest GT list in batch_1 plus longest in batch_2.
            pad_len = max(len(b) for b in batch_1[name]) + max(len(b) for b in items)
            boxes = np.zeros((batch_1['batch_size'] * 2, pad_len, items[0].shape[-1]), dtype=np.float32)
            for idx in range(batch_1['batch_size']):
                boxes[idx, :len(batch_1[name][idx]), :] = batch_1[name][idx]
            for idx in range(batch_2['batch_size']):
                boxes[idx + batch_1['batch_size'], :len(items[idx]), :] = items[idx]
            merged[name] = boxes
        else:
            merged[name] = np.concatenate([batch_1[name], items], axis=0)
    merged['batch_size'] = batch_1['batch_size'] * 2
    return merged
def train_one_epoch_multi_db(model, optimizer, train_loader_1, train_loader_2, model_func, lr_scheduler, accumulated_iter, optim_cfg,
                             rank, tbar, total_it_each_epoch, dataloader_iter_1, dataloader_iter_2, tb_log=None, leave_pbar=False):
    """One epoch of joint training on two source domains.

    Per-domain losses are combined as loss_s1 + optim_cfg.DB_2_W * loss_s2
    before a single backward/step.  Returns the updated iteration counter.
    """
    # A full epoch over loader 1: restart its iterator up front.
    if total_it_each_epoch == len(train_loader_1):
        dataloader_iter_1 = iter(train_loader_1)
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train', dynamic_ncols=True)
    for cur_it in range(total_it_each_epoch):
        # Load the source domain ONE:
        try:
            batch_1 = next(dataloader_iter_1)
        except StopIteration:
            dataloader_iter_1 = iter(train_loader_1)
            batch_1 = next(dataloader_iter_1)
        # Load the source domain TWO:
        try:
            batch_2 = next(dataloader_iter_2)
        except StopIteration:
            dataloader_iter_2 = iter(train_loader_2)
            batch_2 = next(dataloader_iter_2)
        lr_scheduler.step(accumulated_iter)
        try:
            # Some optimizer wrappers expose .lr directly; fall back to param_groups.
            cur_lr = float(optimizer.lr)
        except:
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
        model.train()
        optimizer.zero_grad()
        # Summing loss_1 + loss_2 of a single fused forward can break
        # loss.backward() under PyTorch distributed training, so the two
        # forwards are performed one by one before a single backward.
        # Loss for source domain ONE:
        loss_s1, tb_dict_s1, disp_dict_s1 = model_func(model, batch_1)
        # Loss for source domain TWO:
        loss_s2, tb_dict_s2, _ = model_func(model, batch_2)
        # Merge the two losses; DB_2_W weights the second domain.
        loss = loss_s1 + optim_cfg.DB_2_W * loss_s2
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter += 1
        disp_dict_s1.update({'loss': loss_s1.item(), 'lr': cur_lr})
        # log to console and tensorboard
        # save the log of the source domain ONE
        if rank == 0:
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter))
            tbar.set_postfix(disp_dict_s1)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss_1', loss_s1, accumulated_iter)
                tb_log.add_scalar('train/loss_2', loss_s2, accumulated_iter)
                tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
                for key, val in tb_dict_s1.items():
                    tb_log.add_scalar('train/' + key, val, accumulated_iter)
                for key, val in tb_dict_s2.items():
                    tb_log.add_scalar('train/' + key, val, accumulated_iter)
    if rank == 0:
        pbar.close()
    return accumulated_iter
def train_multi_db_model(model, optimizer, train_src_loader, train_src_loader_2, model_func, lr_scheduler, optim_cfg,
                         start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir, ps_label_dir,
                         source_sampler=None, target_sampler=None, lr_warmup_scheduler=None, ckpt_save_interval=1,
                         max_ckpt_save_num=50, merge_all_iters_to_one_epoch=False, logger=None, ema_model=None):
    """Epoch driver for joint two-source-domain training.

    NOTE(review): `ps_label_dir`, `target_sampler`, `logger` and `ema_model`
    are accepted but unused in this function — likely kept for signature
    parity with the other train loops.
    """
    accumulated_iter = start_iter
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar:
        # An epoch spans the longer of the two loaders.
        total_it_each_epoch = len(train_src_loader) if len(train_src_loader) > len(train_src_loader_2) else len(train_src_loader_2)
        if merge_all_iters_to_one_epoch:
            raise NotImplementedError
        dataloader_iter_1 = iter(train_src_loader)
        dataloader_iter_2 = iter(train_src_loader_2)
        for cur_epoch in tbar:
            if source_sampler is not None:
                source_sampler.set_epoch(cur_epoch)
            # train one epoch
            if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            accumulated_iter = train_one_epoch_multi_db(
                model, optimizer, train_src_loader, train_src_loader_2, model_func,
                lr_scheduler=cur_scheduler,
                accumulated_iter=accumulated_iter, optim_cfg=optim_cfg,
                rank=rank, tbar=tbar, tb_log=tb_log,
                leave_pbar=(cur_epoch + 1 == total_epochs),
                total_it_each_epoch=total_it_each_epoch,
                dataloader_iter_1=dataloader_iter_1,
                dataloader_iter_2=dataloader_iter_2
            )
            # save trained model
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                # Evict oldest checkpoints so at most max_ckpt_save_num remain.
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                if ckpt_list.__len__() >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                save_checkpoint(
                    checkpoint_state(model, optimizer, trained_epoch, accumulated_iter), filename=ckpt_name,
                )
def model_state_to_cpu(model_state):
    """Return a same-typed copy of *model_state* with every tensor moved to CPU."""
    cpu_state = type(model_state)()  # preserve the container type (e.g. OrderedDict)
    for name, tensor in model_state.items():
        cpu_state[name] = tensor.cpu()
    return cpu_state
def checkpoint_state(model=None, optimizer=None, epoch=None, it=None):
    """Assemble a serializable checkpoint dict.

    Args:
        model: network (optionally DDP-wrapped); a DDP model's state_dict is
            moved to CPU so the checkpoint loads on any device.
        optimizer: optimizer whose state_dict is stored, or None.
        epoch: epoch counter to record.
        it: accumulated iteration counter to record.

    Returns:
        dict with keys 'epoch', 'it', 'model_state', 'optimizer_state', 'version'.
    """
    optim_state = optimizer.state_dict() if optimizer is not None else None
    if model is not None:
        if isinstance(model, torch.nn.parallel.DistributedDataParallel):
            model_state = model_state_to_cpu(model.module.state_dict())
        else:
            model_state = model.state_dict()
    else:
        model_state = None
    try:
        import pcdet
        version = 'pcdet+' + pcdet.__version__
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit propagate.
        version = 'none'
    return {'epoch': epoch, 'it': it, 'model_state': model_state, 'optimizer_state': optim_state, 'version': version}
def save_checkpoint(state, filename='checkpoint'):
    """Serialize *state* to '<filename>.pth' via torch.save.

    The original code carried an `if False and ...` branch that would have
    written the optimizer state to a separate *_optim.pth file; the branch
    could never execute (dead code) and has been removed.
    """
    torch.save(state, '{}.pth'.format(filename))
| 8,641
| 40.152381
| 133
|
py
|
3DTrans
|
3DTrans-master/tools/train_utils/train_multi_db_utils.py
|
import glob
import os
import torch
import tqdm
import time
import math
import copy
from torch.nn.utils import clip_grad_norm_
from pcdet.utils import common_utils, commu_utils
from pcdet.utils import self_training_utils
from pcdet.config import cfg
def train_one_epoch(model, optimizer, train_loader_1, train_loader_2, model_func, lr_scheduler, accumulated_iter, optim_cfg,
                    rank, tbar, total_it_each_epoch, dataloader_iter_1, dataloader_iter_2, tb_log=None, leave_pbar=False):
    """Train for one epoch on batches merged from two dataloaders.

    Each step draws one batch per loader, merges them into a single batch dict
    and performs one optimizer update.  Returns the updated iteration counter.
    """
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train', dynamic_ncols=True)
    # Running meters for per-step timing diagnostics.
    data_time = common_utils.AverageMeter()
    merge_time = common_utils.AverageMeter()
    batch_time = common_utils.AverageMeter()
    forward_time = common_utils.AverageMeter()
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        # Restart each iterator transparently when exhausted.
        try:
            batch_1 = next(dataloader_iter_1)
        except StopIteration:
            dataloader_iter_1 = iter(train_loader_1)
            batch_1 = next(dataloader_iter_1)
            #print('new iters')
        try:
            batch_2 = next(dataloader_iter_2)
        except StopIteration:
            dataloader_iter_2 = iter(train_loader_2)
            batch_2 = next(dataloader_iter_2)
        data_timer = time.time()
        cur_data_time = data_timer - end
        # LR schedule is stepped by global iteration, not by epoch.
        lr_scheduler.step(accumulated_iter)
        try:
            # Some optimizer wrappers expose .lr directly; fall back to param_groups.
            cur_lr = float(optimizer.lr)
        except:
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
        model.train()
        optimizer.zero_grad()
        # Merge the two batches into one combined batch dict.
        batch = common_utils.merge_two_batch_dict(batch_1, batch_2)
        merge_timer = time.time()
        cur_merge_time = merge_timer - data_timer
        loss, tb_dict, disp_dict = model_func(model, batch)
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter += 1
        cur_batch_time = time.time() - end
        # average reduce
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_merge_time = commu_utils.average_reduce_value(cur_merge_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard
        if rank == 0:
            data_time.update(avg_data_time)
            merge_time.update(avg_merge_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr': cur_lr, 'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})', 'm_time': f'{merge_time.val:.2f}({merge_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})', 'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss', loss, accumulated_iter)
                tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
                for key, val in tb_dict.items():
                    tb_log.add_scalar('train/' + key, val, accumulated_iter)
    if rank == 0:
        pbar.close()
    return accumulated_iter
def train_model(model, optimizer, train_loader_1, train_loader_2, model_func, lr_scheduler, optim_cfg,
                start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir, ps_label_dir,
                source_sampler=None, lr_warmup_scheduler=None, ckpt_save_interval=1,
                max_ckpt_save_num=50, merge_all_iters_to_one_epoch=False, logger=None):
    """Epoch driver for two-dataset joint training with progressive augmentation.

    Handles: (1) "PROG_AUG" fade-out of GT-sampling for both datasets, both on
    resume and when the configured epoch is reached; (2) optional per-epoch
    pseudo-label regeneration for loader 2; (3) rank-0 checkpointing with
    pruning to `max_ckpt_save_num` files.
    """
    accumulated_iter = start_iter
    # Change the Data Augmentation:
    # Cancel the GT-Sampling for the last 1/4 epoch for the training process
    # for resuming the ckpt: change the data_augmentator for dataset 1
    if cfg.DATA_CONFIG.get('PROG_AUG', None) and cfg.DATA_CONFIG.PROG_AUG.ENABLED and \
            (start_epoch == cfg.DATA_CONFIG.PROG_AUG.UPDATE_AUG_EPOCH):
        logger.info('**********************Starting the Fade GT-Sampling Operation**********************')
        train_loader_1.dataset.data_augmentor.re_prepare(
            augmentor_configs=cfg.DATA_CONFIG.PROG_AUG.D_CFG if cfg.DATA_CONFIG.PROG_AUG.get('D_CFG', None) else None)
    # for resuming the ckpt: change the data_augmentator for dataset 2
    if cfg.DATA_CONFIG_SRC_2.get('PROG_AUG', None) and cfg.DATA_CONFIG_SRC_2.PROG_AUG.ENABLED and \
            (start_epoch == cfg.DATA_CONFIG_SRC_2.PROG_AUG.UPDATE_AUG_EPOCH):
        logger.info('**********************Starting the Fade GT-Sampling Operation**********************')
        train_loader_2.dataset.data_augmentor.re_prepare(
            augmentor_configs=cfg.DATA_CONFIG_SRC_2.PROG_AUG.D_CFG if cfg.DATA_CONFIG_SRC_2.PROG_AUG.get('D_CFG', None) else None)
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar:
        # generate the pseudo-labeling for merge the labeled A dataset and unlabeled B dataset
        if cfg.get('SELF_TRAIN', None):
            ps_pkl = self_training_utils.check_already_exsit_pseudo_label(ps_label_dir, start_epoch)
            if ps_pkl is not None:
                logger.info('==> Loading pseudo labels from {}'.format(ps_pkl))
        # An epoch spans the longer of the two loaders.
        total_it_each_epoch = len(train_loader_1) if len(train_loader_1) > len(train_loader_2) else len(train_loader_2)
        if merge_all_iters_to_one_epoch:
            raise NotImplementedError
        dataloader_iter_1 = iter(train_loader_1)
        dataloader_iter_2 = iter(train_loader_2)
        for cur_epoch in tbar:
            if source_sampler is not None:
                source_sampler.set_epoch(cur_epoch)
            # train one epoch
            if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            if optim_cfg.get('GENERATE_PSEUDO_LABEL', None):
                # Refresh pseudo labels for dataset 2 with the current model.
                train_loader_2.dataset.eval()
                logger.info('***********update pseudo label**********')
                self_training_utils.save_pseudo_label_epoch(
                    model, train_loader_2, rank,
                    leave_pbar=True, ps_label_dir=ps_label_dir, cur_epoch=cur_epoch
                )
                train_loader_2.dataset.train()
            # for resuming the ckpt: change the data_augmentator for dataset 1
            if cfg.DATA_CONFIG.get('PROG_AUG', None) and cfg.DATA_CONFIG.PROG_AUG.ENABLED and \
                    (cur_epoch == cfg.DATA_CONFIG.PROG_AUG.UPDATE_AUG_EPOCH):
                logger.info('**********************Dataset ONE: Starting the Fade GT-Sampling Operation**********************')
                train_loader_1.dataset.data_augmentor.re_prepare(
                    augmentor_configs=cfg.DATA_CONFIG.PROG_AUG.D_CFG if cfg.DATA_CONFIG.PROG_AUG.get('D_CFG', None) else None)
            # for resuming the ckpt: change the data_augmentator for dataset 2
            if cfg.DATA_CONFIG_SRC_2.get('PROG_AUG', None) and cfg.DATA_CONFIG_SRC_2.PROG_AUG.ENABLED and \
                    (cur_epoch == cfg.DATA_CONFIG_SRC_2.PROG_AUG.UPDATE_AUG_EPOCH):
                logger.info('**********************Dataset TWO: Starting the Fade GT-Sampling Operation**********************')
                train_loader_2.dataset.data_augmentor.re_prepare(
                    augmentor_configs=cfg.DATA_CONFIG_SRC_2.PROG_AUG.D_CFG if cfg.DATA_CONFIG_SRC_2.PROG_AUG.get('D_CFG', None) else None)
            accumulated_iter = train_one_epoch(
                model, optimizer, train_loader_1, train_loader_2, model_func,
                lr_scheduler=cur_scheduler,
                accumulated_iter=accumulated_iter, optim_cfg=optim_cfg,
                rank=rank, tbar=tbar, tb_log=tb_log,
                leave_pbar=(cur_epoch + 1 == total_epochs),
                total_it_each_epoch=total_it_each_epoch,
                dataloader_iter_1=dataloader_iter_1,
                dataloader_iter_2=dataloader_iter_2
            )
            # save trained model
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                # Evict oldest checkpoints so at most max_ckpt_save_num remain.
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                if ckpt_list.__len__() >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                save_checkpoint(
                    checkpoint_state(model, optimizer, trained_epoch, accumulated_iter), filename=ckpt_name,
                )
def model_state_to_cpu(model_state):
    """Return a copy of *model_state* (same mapping type, e.g. OrderedDict)
    with every tensor moved to CPU memory."""
    cpu_state = type(model_state)()  # preserve mapping type (usually OrderedDict)
    for name, tensor in model_state.items():
        cpu_state[name] = tensor.cpu()
    return cpu_state
def checkpoint_state(model=None, optimizer=None, epoch=None, it=None):
    """Assemble a serializable checkpoint dict.

    Args:
        model: nn.Module or DistributedDataParallel wrapper (or None).
        optimizer: torch optimizer whose state_dict is embedded (or None).
        epoch: epoch counter to record.
        it: accumulated iteration counter to record.

    Returns:
        dict with keys 'epoch', 'it', 'model_state', 'optimizer_state', 'version'.
    """
    optim_state = optimizer.state_dict() if optimizer is not None else None
    if model is not None:
        if isinstance(model, torch.nn.parallel.DistributedDataParallel):
            # DDP wraps the real model in `.module`; move weights to CPU so the
            # checkpoint can be loaded regardless of GPU placement.
            model_state = model_state_to_cpu(model.module.state_dict())
        else:
            model_state = model.state_dict()
    else:
        model_state = None
    try:
        import pcdet
        version = 'pcdet+' + pcdet.__version__
    except (ImportError, AttributeError):
        # FIX: was a bare `except:` — narrow to the two failure modes we
        # actually expect (pcdet missing, or installed without __version__).
        version = 'none'
    return {'epoch': epoch, 'it': it, 'model_state': model_state, 'optimizer_state': optim_state, 'version': version}
def save_checkpoint(state, filename='checkpoint'):
    """Serialize *state* to '<filename>.pth' via torch.save.

    The original implementation carried an `if False and ...` branch that
    would have written the optimizer state to a separate '<filename>_optim.pth'
    file; that branch was dead code and has been removed.
    """
    torch.save(state, '{}.pth'.format(filename))
| 10,562
| 43.758475
| 163
|
py
|
3DTrans
|
3DTrans-master/tools/train_utils/train_active_TQS.py
|
import glob
import os
import pickle
from symbol import parameters
import torch
import tqdm
import time
from torch.nn.utils import clip_grad_norm_
from pcdet.utils import common_utils, commu_utils, self_training_utils
from pcdet.models import load_data_to_gpu
from pcdet.datasets import build_dataloader, build_dataloader_ada
from pcdet.utils import active_learning_2D_utils
def train_detector(model, model_func, optimizer, lr_scheduler, source_loader, sample_loader, source_loader_iter, sample_loader_iter,
                   dist_train, optim_cfg, rank, total_it_each_epoch, accumulated_iter_detector, tb_log, tbar, leave_pbar=False):
    """Train the detector for one epoch on source batches plus actively-sampled batches.

    All parameters are unfrozen except the domain discriminator, which stays
    frozen. The per-step loss is loss_src + SAMPLE_LOSS_SCALE * loss_sam.
    Returns the updated ``accumulated_iter_detector`` counter.
    """
    model.train()
    # Unfreeze the whole detector but freeze the domain discriminator branch;
    # under DDP the real module lives behind `model.module`.
    if dist_train:
        for p in model.module.parameters():
            p.requires_grad = True
        for p in model.module.discriminator.parameters():
            p.requires_grad = False
    else:
        for p in model.parameters():
            p.requires_grad = True
        for p in model.discriminator.parameters():
            p.requires_grad = False
    if rank == 0:
        # Progress bar and timing meters only exist on rank 0.
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train_detector', dynamic_ncols=True)
        data_time = common_utils.AverageMeter()
        batch_time = common_utils.AverageMeter()
        forward_time = common_utils.AverageMeter()
    forward_args = {
        'mode': 'train_detector'
    }
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        # Restart an exhausted iterator: the two loaders' epoch lengths need not align.
        try:
            batch_src = next(source_loader_iter)
        except StopIteration:
            source_loader_iter = iter(source_loader)
            batch_src = next(source_loader_iter)
            print('new source iter')
        try:
            batch_sample = next(sample_loader_iter)
        except StopIteration:
            sample_loader_iter = iter(sample_loader)
            batch_sample = next(sample_loader_iter)
            print('new sample iter')
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter_detector)
        # Some optimizer wrappers expose `.lr`; plain torch optimizers raise here
        # and we fall back to the first param group's learning rate.
        try:
            cur_lr = float(optimizer.lr)
        except:
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter_detector)
        optimizer.zero_grad()
        loss_sam, tb_dict_sam, disp_dict = model_func(model, batch_sample, **forward_args)
        loss_src, tb_dict_src, disp_dict = model_func(model, batch_src, **forward_args)
        # loss_sam, tb_dict_sam, disp_dict = model_func(model, batch_sample, forward_args)
        # Weighted sum: sampled target frames are scaled by SAMPLE_LOSS_SCALE.
        loss = loss_src + optim_cfg.SAMPLE_LOSS_SCALE * loss_sam
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter_detector += 1
        cur_batch_time = time.time() - end
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr_detector': cur_lr,
                'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})',
                'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter_detector))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss_detector', loss, accumulated_iter_detector)
                tb_log.add_scalar('meta_data/learning_rate_detector', cur_lr, accumulated_iter_detector)
                for key, val in tb_dict_src.items():
                    tb_log.add_scalar('train/detector_src' + key, val, accumulated_iter_detector)
                for key, val in tb_dict_sam.items():
                    tb_log.add_scalar('train/detector_sam' + key, val, accumulated_iter_detector)
    if rank == 0:
        pbar.close()
    return accumulated_iter_detector
def train_multi_classifier(model, optimizer, lr_scheduler, source_loader, sample_loader,
                           source_loader_iter, sample_loader_iter, dist_train, total_it_each_epoch,
                           accumulated_iter_mul_cls, optim_cfg, tb_log, rank,
                           tbar, leave_pbar=False):
    """Train only the two auxiliary classification heads (conv_cls1/conv_cls2).

    Everything else is frozen. When ``sample_loader`` is provided, the loss is
    the mean of source and sampled-target losses; otherwise source-only.
    Returns the updated ``accumulated_iter_mul_cls`` counter.
    """
    model.train()
    # Freeze the whole network except the two multi-classifier heads.
    if dist_train:
        for p in model.module.parameters():
            p.requires_grad = False
        for p in model.module.dense_head.conv_cls1.parameters():
            p.requires_grad = True
        for p in model.module.dense_head.conv_cls2.parameters():
            p.requires_grad = True
    else:
        for p in model.parameters():
            p.requires_grad = False
        for p in model.dense_head.conv_cls1.parameters():
            p.requires_grad = True
        for p in model.dense_head.conv_cls2.parameters():
            p.requires_grad = True
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train_mul_cls', dynamic_ncols=True)
        data_time = common_utils.AverageMeter()
        batch_time = common_utils.AverageMeter()
        forward_time = common_utils.AverageMeter()
    forward_args = {
        'mode': 'train_mul_cls'
    }
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        try:
            batch_src = next(source_loader_iter)
        except StopIteration:
            source_loader_iter = iter(source_loader)
            batch_src = next(source_loader_iter)
            print('new source iter')
        # Sample loader is optional: before the first active-sampling round there
        # are no annotated target frames yet.
        if sample_loader is not None:
            try:
                batch_sam = next(sample_loader_iter)
            except StopIteration:
                sample_loader_iter = iter(sample_loader)
                batch_sam = next(sample_loader_iter)
                print('new sample iter')
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter_mul_cls)
        try:
            cur_lr = float(optimizer.lr)
        except:
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter_mul_cls)
        optimizer.zero_grad()
        load_data_to_gpu(batch_src)
        ret_src, tb_dict_src, disp_dict = model(batch_src, **forward_args)
        loss_src = ret_src['loss'].mean()
        if sample_loader is not None:
            load_data_to_gpu(batch_sam)
            ret_sam, tb_dict_sam, disp_dict = model(batch_sam, **forward_args)
            loss_sam = ret_sam['loss'].mean()
            loss = (loss_src + loss_sam) / 2
        else:
            loss = loss_src
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter_mul_cls += 1
        cur_batch_time = time.time() - end
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr_mul_cls': cur_lr,
                'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})',
                'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter_mul_cls))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss_mul_cls', loss, accumulated_iter_mul_cls)
                tb_log.add_scalar('meta_data/learning_rate_mul_cls', cur_lr, accumulated_iter_mul_cls)
                for key, val in tb_dict_src.items():
                    tb_log.add_scalar('train/mul_cls_src_' + key, val, accumulated_iter_mul_cls)
                # tb_dict_sam only exists when sample_loader was provided, hence the guard.
                if sample_loader is not None:
                    for key, val in tb_dict_sam.items():
                        tb_log.add_scalar('train/mul_cls_sam_' + key, val, accumulated_iter_mul_cls)
    if rank == 0:
        pbar.close()
    return accumulated_iter_mul_cls
def train_discriminator(model, optimizer, lr_scheduler, source_loader, target_loader,
                        source_loader_iter, target_loader_iter, dist_train, total_it_each_epoch,
                        accumulated_iter_discriminator, optim_cfg, tb_log, rank, tbar, leave_pbar=False):
    """Train only the domain discriminator on paired source/target batches.

    The detector is frozen; the loss is the mean of the discriminator's
    source-domain and target-domain losses.
    Returns the updated ``accumulated_iter_discriminator`` counter.
    """
    model.train()
    # Freeze the detector; only the discriminator's parameters receive gradients.
    if dist_train:
        for p in model.module.parameters():
            p.requires_grad = False
        for p in model.module.discriminator.parameters():
            p.requires_grad = True
    else:
        for p in model.parameters():
            p.requires_grad = False
        for p in model.discriminator.parameters():
            p.requires_grad = True
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train_discriminator', dynamic_ncols=True)
        data_time = common_utils.AverageMeter()
        batch_time = common_utils.AverageMeter()
        forward_time = common_utils.AverageMeter()
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        try:
            batch_src = next(source_loader_iter)
        except StopIteration:
            source_loader_iter = iter(source_loader)
            batch_src = next(source_loader_iter)
            print('new source iter')
        try:
            batch_tar = next(target_loader_iter)
        except StopIteration:
            target_loader_iter = iter(target_loader)
            batch_tar = next(target_loader_iter)
            print('new target iter')
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter_discriminator)
        try:
            cur_lr = float(optimizer.lr)
        except:
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter_discriminator)
        optimizer.zero_grad()
        load_data_to_gpu(batch_src)
        # The `source` flag tells the model which domain label the discriminator
        # should be trained against for this batch.
        forward_args = {
            'mode': 'train_discriminator',
            'source': True
        }
        loss_src = model(batch_src, **forward_args)
        load_data_to_gpu(batch_tar)
        forward_args = {
            'mode': 'train_discriminator',
            'source': False
        }
        loss_tar = model(batch_tar, **forward_args)
        loss = (loss_src + loss_tar) / 2
        tb_dict = {
            'discriminator_loss': loss.item()
        }
        disp_dict = {}
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter_discriminator += 1
        cur_batch_time = time.time() - end
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr_discriminator': cur_lr,
                'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})',
                'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter_discriminator))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss_discriminator', loss, accumulated_iter_discriminator)
                tb_log.add_scalar('meta_data/learning_rate_discriminator', cur_lr, accumulated_iter_discriminator)
                for key, val in tb_dict.items():
                    tb_log.add_scalar('train/' + key, val, accumulated_iter_discriminator)
    if rank == 0:
        pbar.close()
    return accumulated_iter_discriminator
def train_one_epoch(model, optimizer_detector, optimizer_discriminator, optimizer_mul_cls, source_train_loader, target_train_loader,
                    sample_train_loader, model_func, lr_scheduler_detector, lr_scheduler_discriminator, lr_scheduler_mul_cls,
                    accumulated_iter_detector, accumulated_iter_discriminator, accumulated_iter_mul_cls, optim_cfg, rank, tbar,
                    dist_train, total_it_each_epoch, dataloader_iter_src, dataloader_iter_tar, dataloader_iter_sample,
                    tb_log=None, leave_pbar=False, ema_model=None):
    """Run one TQS epoch: detector pass, multi-classifier pass, discriminator pass.

    Returns:
        (accumulated_iter_detector, accumulated_iter_mul_cls,
         accumulated_iter_discriminator) — all three updated counters.
    """
    # assert total_it_each_epoch == len(source_train_loader)
    accumulated_iter_detector = train_detector(
        model,
        model_func,
        optimizer_detector,
        lr_scheduler_detector,
        source_train_loader,
        sample_train_loader,
        dataloader_iter_src,
        dataloader_iter_sample,
        dist_train,
        optim_cfg,
        rank,
        total_it_each_epoch,
        accumulated_iter_detector,
        tb_log, tbar
    )
    # BUG FIX: the result was previously assigned to a misspelled variable
    # (`accumulated_iter_nul_cls`), so the multi-classifier iteration counter
    # was never advanced and the stale input value was returned every epoch.
    accumulated_iter_mul_cls = train_multi_classifier(
        model,
        optimizer_mul_cls,
        lr_scheduler_mul_cls,
        source_train_loader,
        sample_train_loader,
        dataloader_iter_src,
        dataloader_iter_sample,
        dist_train,
        total_it_each_epoch,
        accumulated_iter_mul_cls,
        optim_cfg.MUL_CLS,
        tb_log,
        rank,
        tbar
    )
    accumulated_iter_discriminator = train_discriminator(
        model,
        optimizer_discriminator,
        lr_scheduler_discriminator,
        source_train_loader,
        target_train_loader,
        dataloader_iter_src,
        dataloader_iter_tar,
        dist_train,
        total_it_each_epoch,
        accumulated_iter_discriminator,
        optim_cfg.DISCRIMINATOR,
        tb_log, rank, tbar
    )
    return accumulated_iter_detector, accumulated_iter_mul_cls, accumulated_iter_discriminator
def train_active_model_target(model, optimizer, source_train_loader, target_train_loader, sample_loader, model_func, lr_scheduler, optim_cfg,
                              start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir, sample_epoch,
                              annotation_budget, target_file_path, sample_save_path, cfg, batch_size, workers, dist_train,
                              source_sampler=None, target_sampler=None, sample_sampler=None, lr_warmup_scheduler=None, ckpt_save_interval=1,
                              max_ckpt_save_num=50, merge_all_iters_to_one_epoch=False, logger=None, ema_model=None):
    """Active-learning (TQS) training driver for the target domain.

    On every epoch listed in ``sample_epoch`` this trains the discriminator and
    multi-classifier, scores all target frames, selects ``annotation_budget``
    frames to annotate, and rebuilds the sample/target dataloaders around the
    new split.  Every epoch ends with a detector training pass and a periodic
    checkpoint.

    NOTE(review): ``optimizer`` and ``lr_scheduler`` are indexable sequences —
    [0] detector, [1] discriminator, [2] multi-classifier (see usage below).
    """
    target_list = active_learning_2D_utils.get_dataset_list(target_file_path, oss=True)
    sample_list = []
    sample_train_loader = None
    dataloader_iter_sample = None
    target_name = cfg['DATA_CONFIG_TAR']['DATASET']
    accumulated_iter_detector, accumulated_iter_mul_cls, accumulated_iter_discriminator = start_iter, start_iter, start_iter
    # NOTE(review): source_reader is constructed but not referenced again in
    # this function body — possibly leftover from an earlier revision.
    source_reader = common_utils.DataReader(source_train_loader, source_sampler)
    source_reader.construct_iter()
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True,
                     leave=(rank == 0)) as tbar:
        if merge_all_iters_to_one_epoch:
            assert hasattr(source_train_loader.dataset, 'merge_all_iters_to_one_epoch')
            source_train_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
            total_iters_each_epoch = len(source_train_loader) // max(total_epochs, 1)
        dataloader_iter_src = iter(source_train_loader)
        dataloader_iter_tar = iter(target_train_loader) if target_train_loader is not None else None
        for cur_epoch in tbar:
            if source_sampler is not None:
                source_sampler.set_epoch(cur_epoch)
            if target_sampler is not None:
                target_sampler.set_epoch(cur_epoch)
            # train one epoch
            if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            # Sampling epoch, phase 1: refresh discriminator + multi-classifier so
            # their uncertainty/domain scores are up to date before selection.
            if cur_epoch in sample_epoch:
                accumulated_iter_discriminator = train_discriminator(
                    model,
                    optimizer[1],
                    lr_scheduler[1],
                    source_train_loader,
                    target_train_loader,
                    dataloader_iter_src,
                    dataloader_iter_tar,
                    dist_train,
                    len(target_train_loader),
                    accumulated_iter_discriminator,
                    optim_cfg.DISCRIMINATOR,
                    tb_log, rank, tbar
                )
                accumulated_iter_mul_cls = train_multi_classifier(
                    model,
                    optimizer[2],
                    lr_scheduler[2],
                    source_train_loader,
                    sample_train_loader,
                    dataloader_iter_src,
                    dataloader_iter_sample,
                    dist_train,
                    len(target_train_loader),
                    accumulated_iter_mul_cls,
                    optim_cfg.MUL_CLS,
                    tb_log,
                    rank,
                    tbar
                )
            # active evaluate and sample
            # NOTE(review): this repeats the `cur_epoch in sample_epoch` check from
            # above; the two blocks could be merged.
            if cur_epoch in sample_epoch:
                # sample from target_domain
                frame_score = active_learning_2D_utils.active_evaluate_dual(model, target_train_loader, rank, domain='target')
                sampled_frame_id, _ = active_learning_2D_utils.active_sample(frame_score, budget=annotation_budget)
                sample_list, info_path = active_learning_2D_utils.update_sample_list_dual(
                    sample_list, target_list, sampled_frame_id, cur_epoch, sample_save_path, target_name, rank, domain='target'
                )
                target_list, target_info_path = active_learning_2D_utils.update_target_list(target_list, sampled_frame_id, cur_epoch, sample_save_path, target_name, rank)
                # Rebuild the annotated-sample and remaining-target loaders around
                # the updated frame lists.
                sample_train_set, sample_train_loader, sample_train_sampler = build_dataloader_ada(
                    dataset_cfg=cfg.DATA_CONFIG_SAMPLE,
                    class_names=cfg.DATA_CONFIG_SAMPLE.CLASS_NAMES,
                    batch_size=batch_size,
                    dist=dist_train, workers=workers,
                    logger=logger,
                    training=True,
                    info_path=info_path,
                    merge_all_iters_to_one_epoch=merge_all_iters_to_one_epoch,
                    total_epochs=total_epochs-cur_epoch
                )
                target_train_set, target_train_loader, target_train_sampler = build_dataloader_ada(
                    dataset_cfg=cfg.DATA_CONFIG_TAR,
                    class_names=cfg.DATA_CONFIG_TAR.CLASS_NAMES,
                    batch_size=batch_size,
                    dist=dist_train, workers=workers,
                    logger=logger,
                    training=True,
                    info_path=target_info_path,
                    merge_all_iters_to_one_epoch=merge_all_iters_to_one_epoch,
                    total_epochs=total_epochs-cur_epoch
                )
                dataloader_iter_tar = iter(target_train_loader)
                dataloader_iter_sample = iter(sample_train_loader) if sample_train_loader is not None else None
            # Every epoch: train the detector on the labeled pool (`sample_loader`)
            # plus the actively-annotated target frames.
            dataloader_iter_src_sample = iter(sample_loader)
            accumulated_iter_detector = train_detector(
                model,
                model_func,
                optimizer[0],
                lr_scheduler[0],
                sample_loader,
                sample_train_loader,
                dataloader_iter_src_sample,
                dataloader_iter_sample,
                dist_train,
                optim_cfg,
                rank,
                len(sample_loader),
                accumulated_iter_detector,
                tb_log, tbar
            )
            # save trained model
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                # Drop the oldest checkpoints so at most max_ckpt_save_num remain.
                if ckpt_list.__len__() >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                save_checkpoint(
                    checkpoint_state(model, epoch=trained_epoch, it=accumulated_iter_detector), filename=ckpt_name,
                )
def model_state_to_cpu(model_state):
    """Return a copy of *model_state* (same mapping type, e.g. OrderedDict)
    with every tensor moved to CPU memory."""
    cpu_state = type(model_state)()  # preserve mapping type (usually OrderedDict)
    for name, tensor in model_state.items():
        cpu_state[name] = tensor.cpu()
    return cpu_state
def checkpoint_state(model=None, optimizer=None, epoch=None, it=None):
    """Assemble a serializable checkpoint dict.

    Args:
        model: nn.Module or DistributedDataParallel wrapper (or None).
        optimizer: torch optimizer whose state_dict is embedded (or None).
        epoch: epoch counter to record.
        it: accumulated iteration counter to record.

    Returns:
        dict with keys 'epoch', 'it', 'model_state', 'optimizer_state', 'version'.
    """
    optim_state = optimizer.state_dict() if optimizer is not None else None
    if model is not None:
        if isinstance(model, torch.nn.parallel.DistributedDataParallel):
            # DDP wraps the real model in `.module`; move weights to CPU so the
            # checkpoint can be loaded regardless of GPU placement.
            model_state = model_state_to_cpu(model.module.state_dict())
        else:
            model_state = model.state_dict()
    else:
        model_state = None
    try:
        import pcdet
        version = 'pcdet+' + pcdet.__version__
    except (ImportError, AttributeError):
        # FIX: was a bare `except:` — narrow to the two failure modes we
        # actually expect (pcdet missing, or installed without __version__).
        version = 'none'
    return {'epoch': epoch, 'it': it, 'model_state': model_state, 'optimizer_state': optim_state, 'version': version}
def save_checkpoint(state, filename='checkpoint'):
    """Serialize *state* to '<filename>.pth' via torch.save.

    The original implementation carried an `if False and ...` branch that
    would have written the optimizer state to a separate '<filename>_optim.pth'
    file; that branch was dead code and has been removed.
    """
    torch.save(state, '{}.pth'.format(filename))
| 23,288
| 39.362218
| 170
|
py
|
3DTrans
|
3DTrans-master/tools/train_utils/train_random_utils.py
|
import glob
import os
import torch
import tqdm
import time
from torch.nn.utils import clip_grad_norm_
from pcdet.utils import common_utils, commu_utils, active_learning_utils
def train_one_epoch(model, optimizer, train_source_loader, train_target_loader, model_func, lr_scheduler, accumulated_iter, optim_cfg,
                    rank, tbar, total_it_each_epoch, dataloader_iter_src, dataloader_iter_tar, tb_log=None, leave_pbar=False):
    """Train one epoch on source + target batches; loss is the mean of the two.

    Returns the updated ``accumulated_iter`` counter.
    """
    # When running full epochs (not merged iters), restart the source iterator fresh.
    if total_it_each_epoch == len(train_source_loader):
        dataloader_iter_src = iter(train_source_loader)
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train', dynamic_ncols=True)
        data_time = common_utils.AverageMeter()
        batch_time = common_utils.AverageMeter()
        forward_time = common_utils.AverageMeter()
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        try:
            batch_src = next(dataloader_iter_src)
        except StopIteration:
            dataloader_iter_src = iter(train_source_loader)
            batch_src = next(dataloader_iter_src)
            print('new iters')
        try:
            batch_tar = next(dataloader_iter_tar)
        except StopIteration:
            dataloader_iter_tar = iter(train_target_loader)
            batch_tar = next(dataloader_iter_tar)
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter)
        try:
            cur_lr = float(optimizer.lr)
        except:
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
        model.train()
        optimizer.zero_grad()
        # NOTE(review): tb_dict/disp_dict from the source pass are overwritten by
        # the target pass, so only target-batch scalars reach tensorboard below.
        loss_src, tb_dict, disp_dict = model_func(model, batch_src)
        loss_tar, tb_dict, disp_dict = model_func(model, batch_tar)
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss = (loss_src + loss_tar) / 2
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter += 1
        cur_batch_time = time.time() - end
        # average reduce
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr': cur_lr, 'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})', 'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss', loss, accumulated_iter)
                tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
                for key, val in tb_dict.items():
                    tb_log.add_scalar('train/' + key, val, accumulated_iter)
    if rank == 0:
        pbar.close()
    return accumulated_iter
def train_one_epoch_tar_only(model, optimizer, train_target_loader, model_func, lr_scheduler, accumulated_iter, optim_cfg,
                    rank, tbar, total_it_each_epoch, dataloader_iter_tar, tb_log=None, leave_pbar=False):
    """Train one epoch using only target-domain batches.

    Returns the updated ``accumulated_iter`` counter.
    """
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train', dynamic_ncols=True)
        data_time = common_utils.AverageMeter()
        batch_time = common_utils.AverageMeter()
        forward_time = common_utils.AverageMeter()
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        try:
            batch_tar = next(dataloader_iter_tar)
        except StopIteration:
            dataloader_iter_tar = iter(train_target_loader)
            batch_tar = next(dataloader_iter_tar)
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter)
        try:
            cur_lr = float(optimizer.lr)
        except:
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
        model.train()
        optimizer.zero_grad()
        loss_tar, tb_dict, disp_dict = model_func(model, batch_tar)
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss = loss_tar
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter += 1
        cur_batch_time = time.time() - end
        # average reduce
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr': cur_lr, 'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})', 'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss', loss, accumulated_iter)
                tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
                for key, val in tb_dict.items():
                    tb_log.add_scalar('train/' + key, val, accumulated_iter)
    if rank == 0:
        pbar.close()
    return accumulated_iter
def train_model(model, optimizer, train_source_loader, train_target_loader, model_func, lr_scheduler, optim_cfg,
                start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir,
                source_sampler=None, target_sampler=None, lr_warmup_scheduler=None, ckpt_save_interval=1,
                max_ckpt_save_num=50, merge_all_iters_to_one_epoch=False):
    """Train jointly on source and target loaders, checkpointing every
    ``ckpt_save_interval`` epochs and keeping at most ``max_ckpt_save_num``
    checkpoint files.
    """
    accumulated_iter = start_iter
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar:
        total_it_each_epoch = len(train_source_loader)
        if merge_all_iters_to_one_epoch:
            assert hasattr(train_source_loader.dataset, 'merge_all_iters_to_one_epoch')
            train_source_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
            total_it_each_epoch = len(train_source_loader) // max(total_epochs, 1)
        dataloader_iter_src = iter(train_source_loader)
        dataloader_iter_tar = iter(train_target_loader)
        for cur_epoch in tbar:
            if source_sampler is not None:
                source_sampler.set_epoch(cur_epoch)
            # BUG FIX: the target sampler's epoch was never advanced, so a
            # DistributedSampler on the target set replayed the same shuffling
            # order every epoch.
            if target_sampler is not None:
                target_sampler.set_epoch(cur_epoch)
            # train one epoch
            if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            accumulated_iter = train_one_epoch(
                model, optimizer, train_source_loader, train_target_loader, model_func,
                lr_scheduler=cur_scheduler,
                accumulated_iter=accumulated_iter, optim_cfg=optim_cfg,
                rank=rank, tbar=tbar, tb_log=tb_log,
                leave_pbar=(cur_epoch + 1 == total_epochs),
                total_it_each_epoch=total_it_each_epoch,
                dataloader_iter_src=dataloader_iter_src, dataloader_iter_tar=dataloader_iter_tar
            )
            # save trained model
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                # Remove oldest checkpoints so at most max_ckpt_save_num remain.
                if ckpt_list.__len__() >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                save_checkpoint(
                    checkpoint_state(model, optimizer, trained_epoch, accumulated_iter), filename=ckpt_name,
                )
def train_model_random(model, optimizer, train_source_loader, train_target_loader, model_func, lr_scheduler, optim_cfg,
                start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir,
                source_sampler=None, target_sampler=None, lr_warmup_scheduler=None, ckpt_save_interval=1,
                max_ckpt_save_num=50, merge_all_iters_to_one_epoch=False):
    """Random-sampling baseline trainer.

    NOTE(review): this body is identical to ``train_model`` — the random
    selection presumably happens in how the target loader is built by the
    caller; consider delegating to ``train_model`` to remove the duplication.
    """
    accumulated_iter = start_iter
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar:
        total_it_each_epoch = len(train_source_loader)
        if merge_all_iters_to_one_epoch:
            assert hasattr(train_source_loader.dataset, 'merge_all_iters_to_one_epoch')
            train_source_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
            total_it_each_epoch = len(train_source_loader) // max(total_epochs, 1)
        dataloader_iter_src = iter(train_source_loader)
        dataloader_iter_tar = iter(train_target_loader)
        for cur_epoch in tbar:
            if source_sampler is not None:
                source_sampler.set_epoch(cur_epoch)
            # BUG FIX: advance the target sampler's epoch too, so distributed
            # shuffling of the target set differs between epochs.
            if target_sampler is not None:
                target_sampler.set_epoch(cur_epoch)
            # train one epoch
            if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            accumulated_iter = train_one_epoch(
                model, optimizer, train_source_loader, train_target_loader, model_func,
                lr_scheduler=cur_scheduler,
                accumulated_iter=accumulated_iter, optim_cfg=optim_cfg,
                rank=rank, tbar=tbar, tb_log=tb_log,
                leave_pbar=(cur_epoch + 1 == total_epochs),
                total_it_each_epoch=total_it_each_epoch,
                dataloader_iter_src=dataloader_iter_src, dataloader_iter_tar=dataloader_iter_tar
            )
            # save trained model
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                if ckpt_list.__len__() >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                save_checkpoint(
                    checkpoint_state(model, optimizer, trained_epoch, accumulated_iter), filename=ckpt_name,
                )
def train_model_target_only(model, optimizer, train_target_loader, model_func, lr_scheduler, optim_cfg,
                start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir,
                target_sampler=None, lr_warmup_scheduler=None, ckpt_save_interval=1,
                max_ckpt_save_num=50, merge_all_iters_to_one_epoch=False):
    """Train on target-domain data only (no source loader), checkpointing
    every ``ckpt_save_interval`` epochs and keeping at most
    ``max_ckpt_save_num`` checkpoint files.
    """
    accumulated_iter = start_iter
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar:
        total_it_each_epoch = len(train_target_loader)
        if merge_all_iters_to_one_epoch:
            assert hasattr(train_target_loader.dataset, 'merge_all_iters_to_one_epoch')
            train_target_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
            total_it_each_epoch = len(train_target_loader) // max(total_epochs, 1)
        dataloader_iter_tar = iter(train_target_loader)
        for cur_epoch in tbar:
            if target_sampler is not None:
                target_sampler.set_epoch(cur_epoch)
            # train one epoch
            if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            accumulated_iter = train_one_epoch_tar_only(
                model, optimizer, train_target_loader, model_func,
                lr_scheduler=cur_scheduler,
                accumulated_iter=accumulated_iter, optim_cfg=optim_cfg,
                rank=rank, tbar=tbar, tb_log=tb_log,
                leave_pbar=(cur_epoch + 1 == total_epochs),
                total_it_each_epoch=total_it_each_epoch,
                dataloader_iter_tar=dataloader_iter_tar
            )
            # save trained model
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                # Remove oldest checkpoints so at most max_ckpt_save_num remain.
                if ckpt_list.__len__() >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                save_checkpoint(
                    checkpoint_state(model, optimizer, trained_epoch, accumulated_iter), filename=ckpt_name,
                )
def model_state_to_cpu(model_state):
    """Return a copy of *model_state* (same mapping type, e.g. OrderedDict)
    with every tensor moved to CPU memory."""
    cpu_state = type(model_state)()  # preserve mapping type (usually OrderedDict)
    for name, tensor in model_state.items():
        cpu_state[name] = tensor.cpu()
    return cpu_state
def checkpoint_state(model=None, optimizer=None, epoch=None, it=None):
    """Assemble a serializable checkpoint dict.

    Args:
        model: nn.Module or DistributedDataParallel wrapper (or None).
        optimizer: torch optimizer whose state_dict is embedded (or None).
        epoch: epoch counter to record.
        it: accumulated iteration counter to record.

    Returns:
        dict with keys 'epoch', 'it', 'model_state', 'optimizer_state', 'version'.
    """
    optim_state = optimizer.state_dict() if optimizer is not None else None
    if model is not None:
        if isinstance(model, torch.nn.parallel.DistributedDataParallel):
            # DDP wraps the real model in `.module`; move weights to CPU so the
            # checkpoint can be loaded regardless of GPU placement.
            model_state = model_state_to_cpu(model.module.state_dict())
        else:
            model_state = model.state_dict()
    else:
        model_state = None
    try:
        import pcdet
        version = 'pcdet+' + pcdet.__version__
    except (ImportError, AttributeError):
        # FIX: was a bare `except:` — narrow to the two failure modes we
        # actually expect (pcdet missing, or installed without __version__).
        version = 'none'
    return {'epoch': epoch, 'it': it, 'model_state': model_state, 'optimizer_state': optim_state, 'version': version}
def save_checkpoint(state, filename='checkpoint'):
    """Serialize *state* to '<filename>.pth' via torch.save.

    The original implementation carried an `if False and ...` branch that
    would have written the optimizer state to a separate '<filename>_optim.pth'
    file; that branch was dead code and has been removed.
    """
    torch.save(state, '{}.pth'.format(filename))
| 15,415
| 42.548023
| 134
|
py
|
3DTrans
|
3DTrans-master/tools/train_utils/optimization/fastai_optim.py
|
# This file is modified from https://github.com/traveller59/second.pytorch
try:
from collections.abc import Iterable
except:
from collections import Iterable
import torch
from torch import nn
from torch._utils import _unflatten_dense_tensors
from torch.nn.utils import parameters_to_vector
# BatchNorm layer classes recognized by split_bn_bias() when partitioning
# layer groups into norm / non-norm halves.
bn_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d, nn.SyncBatchNorm)
def split_bn_bias(layer_groups):
    """Partition each layer group into a (non-BN, BN) pair of Sequentials.

    For every group, children that are instances of ``bn_types`` go into one
    ``nn.Sequential`` and all remaining children into another.  The result has
    2 * len(layer_groups) entries, ordered [non-BN, BN, non-BN, BN, ...].
    """
    paired_groups = []
    for group in layer_groups:
        regular, norm = [], []
        for child in group.children():
            (norm if isinstance(child, bn_types) else regular).append(child)
        paired_groups.append(nn.Sequential(*regular))
        paired_groups.append(nn.Sequential(*norm))
    return paired_groups
def get_master(layer_groups, flat_master: bool = False):
    "Return two lists, one for the model parameters in FP16 and one for the master parameters in FP32."
    # Split each layer group into (non-BN, BN) halves so batchnorm params can be
    # handled separately by the optimizer wrapper.
    split_groups = split_bn_bias(layer_groups)
    model_params = [[param for param in lg.parameters() if param.requires_grad] for lg in split_groups]
    if flat_master:
        # One flat FP32 vector per group: faster copies, but per-param structure is lost.
        master_params = []
        for lg in model_params:
            if len(lg) != 0:
                mp = parameters_to_vector([param.data.float() for param in lg])
                mp = torch.nn.Parameter(mp, requires_grad=True)
                # Pre-allocate the grad buffer so later grad copies can write into it.
                if mp.grad is None: mp.grad = mp.new(*mp.size())
                master_params.append([mp])
            else:
                master_params.append([])
        return model_params, master_params
    else:
        # One detached FP32 clone per parameter, re-marked trainable.
        master_params = [[param.clone().float().detach() for param in lg] for lg in model_params]
        for mp in master_params:
            for param in mp: param.requires_grad = True
        return model_params, master_params
def model_g2master_g(model_params, master_params, flat_master: bool = False) -> None:
    "Copy the `model_params` gradients to `master_params` for the optimizer step."
    if flat_master:
        for model_group, master_group in zip(model_params, master_params):
            if len(master_group) != 0:
                # Flatten all model grads into the single master grad vector.
                master_group[0].grad.data.copy_(parameters_to_vector([p.grad.data.float() for p in model_group]))
    else:
        for model_group, master_group in zip(model_params, master_params):
            for model, master in zip(model_group, master_group):
                if model.grad is not None:
                    # Lazily allocate the master grad buffer on first use.
                    if master.grad is None: master.grad = master.data.new(*master.data.size())
                    master.grad.data.copy_(model.grad.data)
                else:
                    # Mirror "no gradient" so the optimizer skips this parameter.
                    master.grad = None
def master2model(model_params, master_params, flat_master: bool = False) -> None:
    "Copy `master_params` to `model_params`."
    if flat_master:
        for model_group, master_group in zip(model_params, master_params):
            if len(model_group) != 0:
                # Unflatten the single master vector back into per-parameter tensors.
                for model, master in zip(model_group, _unflatten_dense_tensors(master_group[0].data, model_group)):
                    model.data.copy_(master)
    else:
        for model_group, master_group in zip(model_params, master_params):
            for model, master in zip(model_group, master_group): model.data.copy_(master.data)
def listify(p=None, q=None):
    """Make `p` listy and the same length as `q`.

    `q` may be an int (target length), a sized iterable (its length is used),
    or None (length of `p` itself). A length-1 `p` is broadcast to length n.
    """
    if p is None:
        p = []
    elif isinstance(p, str):
        # A string is iterable but must count as a single element.
        p = [p]
    elif not isinstance(p, Iterable):
        p = [p]
    # isinstance instead of the original `type(q) == int` anti-pattern.
    n = q if isinstance(q, int) else len(p) if q is None else len(q)
    if len(p) == 1: p = p * n
    assert len(p) == n, f'List len mismatch ({len(p)} vs {n})'
    return list(p)
def trainable_params(m: nn.Module):
    """Return list of trainable params in `m`.

    The original returned a one-shot `filter` iterator even though the
    docstring promised a list; a list is re-iterable and safe to pass to
    multiple consumers, and remains backward-compatible for all callers.
    """
    return [p for p in m.parameters() if p.requires_grad]
def is_tuple(x) -> bool: return isinstance(x, tuple)
# copy from fastai.
class OptimWrapper():
    "Basic wrapper around `opt` to simplify hyper-parameters changes."
    def __init__(self, opt, wd, true_wd: bool = False, bn_wd: bool = True):
        # true_wd: apply decoupled (AdamW-style) weight decay manually in step();
        # bn_wd: whether weight decay also touches the batchnorm param groups.
        self.opt, self.true_wd, self.bn_wd = opt, true_wd, bn_wd
        self.opt_keys = list(self.opt.param_groups[0].keys())
        self.opt_keys.remove('params')
        self.read_defaults()
        self.wd = wd
    @classmethod
    def create(cls, opt_func, lr,
               layer_groups, **kwargs):
        "Create an `optim.Optimizer` from `opt_func` with `lr`. Set lr on `layer_groups`."
        # Each layer group is split into (non-BN, BN) halves -> two param groups per layer group.
        split_groups = split_bn_bias(layer_groups)
        opt = opt_func([{'params': trainable_params(l), 'lr': 0} for l in split_groups])
        opt = cls(opt, **kwargs)
        opt.lr, opt.opt_func = listify(lr, layer_groups), opt_func
        return opt
    def new(self, layer_groups):
        "Create a new `OptimWrapper` from `self` with another `layer_groups` but the same hyper-parameters."
        opt_func = getattr(self, 'opt_func', self.opt.__class__)
        # NOTE(review): the optimizer built here is discarded; only the
        # `self.create(...)` call below is actually used.
        split_groups = split_bn_bias(layer_groups)
        opt = opt_func([{'params': trainable_params(l), 'lr': 0} for l in split_groups])
        return self.create(opt_func, self.lr, layer_groups, wd=self.wd, true_wd=self.true_wd, bn_wd=self.bn_wd)
    def __repr__(self) -> str:
        return f'OptimWrapper over {repr(self.opt)}.\nTrue weight decay: {self.true_wd}'
    # Pytorch optimizer methods
    def step(self) -> None:
        "Set weight decay and step optimizer."
        # weight decay outside of optimizer step (AdamW)
        if self.true_wd:
            # param_groups alternate (non-BN, BN) pairs; decay each pair with its own lr/wd.
            for lr, wd, pg1, pg2 in zip(self._lr, self._wd, self.opt.param_groups[::2], self.opt.param_groups[1::2]):
                for p in pg1['params']:
                    # When some parameters are fixed: Shaoshuai Shi
                    if p.requires_grad is False:
                        continue
                    p.data.mul_(1 - wd * lr)
                if self.bn_wd:
                    for p in pg2['params']:
                        # When some parameters are fixed: Shaoshuai Shi
                        if p.requires_grad is False:
                            continue
                        p.data.mul_(1 - wd * lr)
            # Zero the inner optimizer's weight_decay so decay is not applied twice.
            self.set_val('weight_decay', listify(0, self._wd))
        self.opt.step()
    def zero_grad(self) -> None:
        "Clear optimizer gradients."
        self.opt.zero_grad()
    # Passthrough to the inner opt.
    def __getattr__(self, k: str):
        return getattr(self.opt, k, None)
    def clear(self):
        "Reset the state of the inner optimizer."
        sd = self.state_dict()
        sd['state'] = {}
        self.load_state_dict(sd)
    # Hyperparameters as properties
    @property
    def lr(self) -> float:
        return self._lr[-1]
    @lr.setter
    def lr(self, val: float) -> None:
        self._lr = self.set_val('lr', listify(val, self._lr))
    @property
    def mom(self) -> float:
        return self._mom[-1]
    @mom.setter
    def mom(self, val: float) -> None:
        # SGD-style optimizers expose 'momentum'; Adam-style store it as betas[0].
        if 'momentum' in self.opt_keys:
            self.set_val('momentum', listify(val, self._mom))
        elif 'betas' in self.opt_keys:
            self.set_val('betas', (listify(val, self._mom), self._beta))
        self._mom = listify(val, self._mom)
    @property
    def beta(self) -> float:
        return None if self._beta is None else self._beta[-1]
    @beta.setter
    def beta(self, val: float) -> None:
        "Set beta (or alpha as makes sense for given optimizer)."
        if val is None: return
        if 'betas' in self.opt_keys:
            self.set_val('betas', (self._mom, listify(val, self._beta)))
        elif 'alpha' in self.opt_keys:
            self.set_val('alpha', listify(val, self._beta))
        self._beta = listify(val, self._beta)
    @property
    def wd(self) -> float:
        return self._wd[-1]
    @wd.setter
    def wd(self, val: float) -> None:
        "Set weight decay."
        # With true_wd the decay is applied manually in step(), so the inner
        # optimizer's weight_decay is only written in the classic case.
        if not self.true_wd: self.set_val('weight_decay', listify(val, self._wd), bn_groups=self.bn_wd)
        self._wd = listify(val, self._wd)
    # Helper functions
    def read_defaults(self) -> None:
        "Read the values inside the optimizer for the hyper-parameters."
        self._beta = None
        if 'lr' in self.opt_keys: self._lr = self.read_val('lr')
        if 'momentum' in self.opt_keys: self._mom = self.read_val('momentum')
        if 'alpha' in self.opt_keys: self._beta = self.read_val('alpha')
        if 'betas' in self.opt_keys: self._mom, self._beta = self.read_val('betas')
        if 'weight_decay' in self.opt_keys: self._wd = self.read_val('weight_decay')
    def set_val(self, key: str, val, bn_groups: bool = True):
        "Set `val` inside the optimizer dictionary at `key`."
        # A (list, list) tuple (e.g. betas) is re-zipped into per-group pairs.
        if is_tuple(val): val = [(v1, v2) for v1, v2 in zip(*val)]
        # Write the non-BN group always; the BN group only when requested.
        for v, pg1, pg2 in zip(val, self.opt.param_groups[::2], self.opt.param_groups[1::2]):
            pg1[key] = v
            if bn_groups: pg2[key] = v
        return val
    def read_val(self, key: str):
        "Read a hyperparameter `key` in the optimizer dictionary."
        # Only the even (non-BN) groups are read; BN groups mirror them.
        val = [pg[key] for pg in self.opt.param_groups[::2]]
        if is_tuple(val[0]): val = [o[0] for o in val], [o[1] for o in val]
        return val
class FastAIMixedOptim(OptimWrapper):
    # Mixed-precision wrapper: keeps FP32 "master" copies of the model
    # parameters and performs the optimizer step on them.
    @classmethod
    def create(cls, opt_func, lr,
               layer_groups, model, flat_master=False, loss_scale=512.0, **kwargs):
        "Create an `optim.Optimizer` from `opt_func` with `lr`. Set lr on `layer_groups`."
        opt = OptimWrapper.create(opt_func, lr, layer_groups, **kwargs)
        # FP32 master copies of the model parameters (see get_master).
        opt.model_params, opt.master_params = get_master(layer_groups, flat_master)
        opt.flat_master = flat_master
        opt.loss_scale = loss_scale
        opt.model = model
        # Changes the optimizer so that the optimization step is done in FP32.
        # opt = self.learn.opt
        mom, wd, beta = opt.mom, opt.wd, opt.beta
        # Each layer group was split into two param groups, hence the doubling.
        lrs = [lr for lr in opt._lr for _ in range(2)]
        opt_params = [{'params': mp, 'lr': lr} for mp, lr in zip(opt.master_params, lrs)]
        opt.opt = opt_func(opt_params)
        # Re-apply the hyper-parameters to the freshly built inner optimizer.
        opt.mom, opt.wd, opt.beta = mom, wd, beta
        return opt
    def step(self):
        # Copy grads to the FP32 masters, undo loss scaling, step in FP32,
        # then write the updated weights back into the FP16 model.
        model_g2master_g(self.model_params, self.master_params, self.flat_master)
        for group in self.master_params:
            for param in group: param.grad.div_(self.loss_scale)
        super(FastAIMixedOptim, self).step()
        self.model.zero_grad()
        # Update the params from master to model.
        master2model(self.model_params, self.master_params, self.flat_master)
| 10,535
| 38.758491
| 117
|
py
|
3DTrans
|
3DTrans-master/tools/train_utils/optimization/learning_schedules_fastai.py
|
# This file is modified from https://github.com/traveller59/second.pytorch
import math
from functools import partial
import numpy as np
import torch.optim.lr_scheduler as lr_sched
from .fastai_optim import OptimWrapper
class LRSchedulerStep(object):
    """Piecewise step-based scheduler driving both lr and momentum of a fastai
    OptimWrapper-like object (anything exposing writable `.lr` / `.mom`).

    Each phase is (start_fraction, anneal_fn); fractions are converted to
    absolute step indices over `total_step`. `anneal_fn` may be a string,
    which is eval'd (trusted config input only).
    """
    def __init__(self, fai_optimizer, total_step, lr_phases,
                 mom_phases):
        # if not isinstance(fai_optimizer, OptimWrapper):
        #     raise TypeError('{} is not a fastai OptimWrapper'.format(
        #         type(fai_optimizer).__name__))
        self.optimizer = fai_optimizer
        self.total_step = total_step
        # The original duplicated this construction for lr and mom; it also
        # compared an absolute step index to a fractional start, which raised
        # a spurious AssertionError for schedules with 3+ phases. Fixed here.
        self.lr_phases = self._build_phases(lr_phases, total_step)
        self.mom_phases = self._build_phases(mom_phases, total_step)

    @staticmethod
    def _build_phases(phases, total_step):
        """Convert [(start_frac, fn_or_str), ...] to [(start_step, end_step, fn), ...]."""
        built = []
        prev_frac = None
        for i, (start, lambda_func) in enumerate(phases):
            if prev_frac is not None:
                # Phases must be listed in strictly increasing start order.
                assert prev_frac < start
            prev_frac = start
            if isinstance(lambda_func, str):
                lambda_func = eval(lambda_func)  # NOTE: trusted config strings only
            if i < len(phases) - 1:
                end = int(phases[i + 1][0] * total_step)
            else:
                end = total_step
            built.append((int(start * total_step), end, lambda_func))
        assert built[0][0] == 0
        return built

    def step(self, step):
        """Update optimizer lr/mom for global `step`; later phases overwrite earlier ones."""
        for start, end, func in self.lr_phases:
            if step >= start:
                self.optimizer.lr = func((step - start) / (end - start))
        for start, end, func in self.mom_phases:
            if step >= start:
                self.optimizer.mom = func((step - start) / (end - start))
def annealing_cos(start, end, pct):
    """Cosine anneal from `start` to `end` as `pct` goes from 0.0 to 1.0."""
    # At pct=0 this yields `start`, at pct=1 it yields `end`.
    return end + (start - end) * (1 + np.cos(np.pi * pct)) / 2
class OneCycle(LRSchedulerStep):
    """One-cycle schedule: lr anneals low -> lr_max over the first `pct_start`
    fraction of training, then lr_max -> low/1e4; momentum follows the reverse
    path between `moms[0]` and `moms[1]`.
    """
    def __init__(self, fai_optimizer, total_step, lr_max, moms, div_factor,
                 pct_start):
        self.lr_max = lr_max
        self.moms = moms
        self.div_factor = div_factor
        self.pct_start = pct_start
        # (The original computed unused locals a1/a2 here; removed.)
        low_lr = self.lr_max / self.div_factor
        lr_phases = ((0, partial(annealing_cos, low_lr, self.lr_max)),
                     (self.pct_start,
                      partial(annealing_cos, self.lr_max, low_lr / 1e4)))
        mom_phases = ((0, partial(annealing_cos, *self.moms)),
                      (self.pct_start, partial(annealing_cos,
                                               *self.moms[::-1])))
        # Start the optimizer at the schedule's initial values.
        fai_optimizer.lr, fai_optimizer.mom = low_lr, self.moms[0]
        super().__init__(fai_optimizer, total_step, lr_phases, mom_phases)
class CosineWarmupLR(lr_sched._LRScheduler):
    """Warm-up scheduler: lr rises from `eta_min` to each base lr following
    (1 - cos(pi * epoch / T_max)) / 2 over `T_max` epochs."""
    def __init__(self, optimizer, T_max, eta_min=0, last_epoch=-1):
        self.T_max = T_max
        self.eta_min = eta_min
        super(CosineWarmupLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        # Rises monotonically from 0 at epoch 0 to 1 at epoch T_max.
        scale = (1 - math.cos(math.pi * self.last_epoch / self.T_max)) / 2
        return [self.eta_min + (base_lr - self.eta_min) * scale
                for base_lr in self.base_lrs]
class FakeOptim:
    """Minimal stand-in exposing the writable `lr` / `mom` attributes that a
    scheduler drives; used for plotting schedules without a real optimizer."""
    def __init__(self):
        self.lr, self.mom = 0, 0
if __name__ == "__main__":
    # Demo: trace the OneCycle lr and momentum curves over 100 steps and plot them.
    import matplotlib.pyplot as plt
    opt = FakeOptim()  # 3e-3, wd=0.4, div_factor=10
    schd = OneCycle(opt, 100, 3e-3, (0.95, 0.85), 10.0, 0.1)
    lrs = []
    moms = []
    for i in range(100):
        schd.step(i)
        lrs.append(opt.lr)
        moms.append(opt.mom)
    # First window: learning-rate curve; second window: momentum curve.
    plt.plot(lrs)
    # plt.plot(moms)
    plt.show()
    plt.plot(moms)
    plt.show()
| 4,169
| 35.26087
| 118
|
py
|
3DTrans
|
3DTrans-master/tools/train_utils/optimization/__init__.py
|
from functools import partial
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_sched
from .fastai_optim import OptimWrapper
from .learning_schedules_fastai import CosineWarmupLR, OneCycle
def build_optimizer(model, optim_cfg):
    """Create an optimizer for `model` according to `optim_cfg.OPTIMIZER`.

    Supported: 'adam', 'sgd', and 'adam_onecycle' (a fastai OptimWrapper over
    Adam with all leaf modules flattened into a single layer group).
    Raises NotImplementedError for any other value.
    """
    opt_name = optim_cfg.OPTIMIZER
    if opt_name == 'adam':
        return optim.Adam(model.parameters(), lr=optim_cfg.LR, weight_decay=optim_cfg.WEIGHT_DECAY)
    if opt_name == 'sgd':
        return optim.SGD(
            model.parameters(), lr=optim_cfg.LR, weight_decay=optim_cfg.WEIGHT_DECAY,
            momentum=optim_cfg.MOMENTUM
        )
    if opt_name == 'adam_onecycle':
        def _flatten(module: nn.Module):
            # Depth-first list of leaf modules.
            kids = list(module.children())
            return sum((_flatten(c) for c in kids), []) if kids else [module]

        layer_groups = [nn.Sequential(*_flatten(model))]
        optimizer_func = partial(optim.Adam, betas=(0.9, 0.99))
        return OptimWrapper.create(
            optimizer_func, 3e-3, layer_groups, wd=optim_cfg.WEIGHT_DECAY, true_wd=True, bn_wd=True
        )
    raise NotImplementedError
def build_scheduler(optimizer, total_iters_each_epoch, total_epochs, last_epoch, optim_cfg):
    """Build (lr_scheduler, lr_warmup_scheduler) for `optimizer`.

    Args:
        optimizer: the optimizer to schedule.
        total_iters_each_epoch: int, iterations per epoch.
        total_epochs: int, total training epochs.
        last_epoch: last finished step index for LambdaLR resume (-1 for fresh).
        optim_cfg: config exposing OPTIMIZER, DECAY_STEP_LIST, LR_DECAY, LR_CLIP,
            LR, LR_WARMUP, WARMUP_EPOCH, DIV_FACTOR (plus MOMS/PCT_START for onecycle).

    Returns:
        (lr_scheduler, lr_warmup_scheduler); the warmup scheduler is None unless
        LR_WARMUP is enabled on a non-onecycle optimizer.
    """
    decay_steps = [x * total_iters_each_epoch for x in optim_cfg.DECAY_STEP_LIST]

    def lr_lbmd(cur_epoch):
        # Multiplicative decay at each passed decay step, floored at LR_CLIP / LR.
        cur_decay = 1
        for decay_step in decay_steps:
            if cur_epoch >= decay_step:
                cur_decay = cur_decay * optim_cfg.LR_DECAY
        return max(cur_decay, optim_cfg.LR_CLIP / optim_cfg.LR)

    lr_warmup_scheduler = None
    total_steps = total_iters_each_epoch * total_epochs
    if optim_cfg.OPTIMIZER == 'adam_onecycle':
        lr_scheduler = OneCycle(
            optimizer, total_steps, optim_cfg.LR, list(optim_cfg.MOMS), optim_cfg.DIV_FACTOR, optim_cfg.PCT_START
        )
    else:
        lr_scheduler = lr_sched.LambdaLR(optimizer, lr_lbmd, last_epoch=last_epoch)
        if optim_cfg.LR_WARMUP:
            # BUGFIX: total_iters_each_epoch is an int; the original called
            # len(total_iters_each_epoch), raising TypeError whenever LR_WARMUP
            # was enabled. Warmup spans WARMUP_EPOCH epochs worth of iterations.
            lr_warmup_scheduler = CosineWarmupLR(
                optimizer, T_max=optim_cfg.WARMUP_EPOCH * total_iters_each_epoch,
                eta_min=optim_cfg.LR / optim_cfg.DIV_FACTOR
            )
    return lr_scheduler, lr_warmup_scheduler
| 2,401
| 36.53125
| 113
|
py
|
3DTrans
|
3DTrans-master/tools/visual_utils/open3d_vis_utils.py
|
"""
Open3d visualization tool box
Written by Jihan YANG
All rights preserved from 2021 - present.
"""
import open3d
import torch
import matplotlib
import numpy as np
box_colormap = [
[1, 1, 1],
[0, 1, 0],
[0, 1, 1],
[1, 1, 0],
]
def get_coor_colors(obj_labels):
    """
    Args:
        obj_labels: 1 is ground, labels > 1 indicates different instance cluster
    Returns:
        rgb: [N, 3]. color for each point.
    """
    # One XKCD color per label id, indexed directly by label value.
    palette = list(matplotlib.colors.XKCD_COLORS.values())[:obj_labels.max() + 1]
    rgba_per_color = [matplotlib.colors.to_rgba_array(c) for c in palette]
    label_rgba = np.array(rgba_per_color)[obj_labels]
    # Drop the singleton dim and the alpha channel.
    return label_rgba.squeeze()[:, :3]
def draw_scenes(points, gt_boxes=None, ref_boxes=None, ref_labels=None, ref_scores=None, point_colors=None, draw_origin=True):
    """Render a point cloud with optional GT (blue) and reference (green) boxes in Open3D.

    Args:
        points: (N, 3+) point cloud; torch tensors are moved to CPU numpy first.
        gt_boxes: ground-truth boxes, drawn in blue via draw_box.
        ref_boxes: reference/predicted boxes, drawn green or per-label colored.
        ref_labels: per-box class ids forwarded to draw_box for color lookup.
        ref_scores: per-box scores forwarded to draw_box (score display is
            commented out there).
        point_colors: (N, 3) per-point RGB; white if None.
        draw_origin: if True, draw the coordinate frame at the origin.
    """
    if isinstance(points, torch.Tensor):
        points = points.cpu().numpy()
    if isinstance(gt_boxes, torch.Tensor):
        gt_boxes = gt_boxes.cpu().numpy()
    if isinstance(ref_boxes, torch.Tensor):
        ref_boxes = ref_boxes.cpu().numpy()
    vis = open3d.visualization.Visualizer()
    vis.create_window()
    vis.get_render_option().point_size = 1.0
    vis.get_render_option().background_color = np.zeros(3)
    # draw origin
    if draw_origin:
        axis_pcd = open3d.geometry.TriangleMesh.create_coordinate_frame(size=1.0, origin=[0, 0, 0])
        vis.add_geometry(axis_pcd)
    pts = open3d.geometry.PointCloud()
    pts.points = open3d.utility.Vector3dVector(points[:, :3])
    vis.add_geometry(pts)
    # NOTE(review): colors are assigned after add_geometry — presumably Open3D
    # keeps a live reference so this still takes effect before run(); confirm.
    if point_colors is None:
        pts.colors = open3d.utility.Vector3dVector(np.ones((points.shape[0], 3)))
    else:
        pts.colors = open3d.utility.Vector3dVector(point_colors)
    if gt_boxes is not None:
        vis = draw_box(vis, gt_boxes, (0, 0, 1))
    if ref_boxes is not None:
        vis = draw_box(vis, ref_boxes, (0, 1, 0), ref_labels, ref_scores)
    # Blocks until the window is closed, then tears it down.
    vis.run()
    vis.destroy_window()
def translate_boxes_to_open3d_instance(gt_boxes):
    """Convert one (7,) box [x, y, z, dx, dy, dz, heading] into Open3D geometry.

    Returns (line_set, box3d): the box's edge LineSet (with two extra lines
    marking one face, see diagram) and the OrientedBoundingBox itself.

             4-------- 6
            /|         /|
           5 -------- 3 .
           | |        | |
           . 7 -------- 1
           |/         |/
           2 -------- 0
    """
    center = gt_boxes[0:3]
    lwh = gt_boxes[3:6]
    # The 1e-10 epsilon presumably guards against a degenerate all-zero
    # axis-angle vector when heading == 0 — TODO confirm.
    axis_angles = np.array([0, 0, gt_boxes[6] + 1e-10])
    rot = open3d.geometry.get_rotation_matrix_from_axis_angle(axis_angles)
    box3d = open3d.geometry.OrientedBoundingBox(center, rot, lwh)
    line_set = open3d.geometry.LineSet.create_from_oriented_bounding_box(box3d)
    # import ipdb; ipdb.set_trace(context=20)
    lines = np.asarray(line_set.lines)
    # Add two extra segments (corner pairs 1-4 and 7-6) on top of the default edges.
    lines = np.concatenate([lines, np.array([[1, 4], [7, 6]])], axis=0)
    line_set.lines = open3d.utility.Vector2iVector(lines)
    return line_set, box3d
def draw_box(vis, gt_boxes, color=(0, 1, 0), ref_labels=None, score=None):
    """Add one LineSet per box in `gt_boxes` (N, 7) to the Open3D visualizer.

    Boxes are painted `color`, or looked up in `box_colormap` by label when
    `ref_labels` is given. `score` is accepted but currently unused.
    """
    for idx in range(gt_boxes.shape[0]):
        line_set, box3d = translate_boxes_to_open3d_instance(gt_boxes[idx])
        if ref_labels is None:
            line_set.paint_uniform_color(color)
        else:
            line_set.paint_uniform_color(box_colormap[ref_labels[idx]])
        vis.add_geometry(line_set)
        # if score is not None:
        #     corners = box3d.get_box_points()
        #     vis.add_3d_label(corners[5], '%.2f' % score[i])
    return vis
| 3,413
| 28.179487
| 126
|
py
|
3DTrans
|
3DTrans-master/tools/visual_utils/visualize_utils.py
|
import mayavi.mlab as mlab
import numpy as np
import torch
box_colormap = [
[1, 1, 1],
[0, 1, 0],
[0, 1, 1],
[1, 1, 0],
]
def check_numpy_to_torch(x):
    """Coerce `x` to a torch tensor.

    Returns:
        (tensor, is_numpy): `x` converted to a float32 tensor if it was an
        ndarray (is_numpy=True); otherwise `x` unchanged (is_numpy=False).
    """
    is_numpy = isinstance(x, np.ndarray)
    return (torch.from_numpy(x).float() if is_numpy else x), is_numpy
def rotate_points_along_z(points, angle):
    """
    Args:
        points: (B, N, 3 + C)
        angle: (B), angle along z-axis, angle increases x ==> y
    Returns:
        rotated points, numpy in -> numpy out
    """
    points, is_numpy = check_numpy_to_torch(points)
    angle, _ = check_numpy_to_torch(angle)
    cosa, sina = torch.cos(angle), torch.sin(angle)
    zeros = angle.new_zeros(points.shape[0])
    ones = angle.new_ones(points.shape[0])
    # Row-major per-batch rotation matrices about +z (applied as points @ R).
    rot_matrix = torch.stack((
        cosa, sina, zeros,
        -sina, cosa, zeros,
        zeros, zeros, ones
    ), dim=1).view(-1, 3, 3).float()
    rotated_xyz = torch.matmul(points[:, :, 0:3], rot_matrix)
    result = torch.cat((rotated_xyz, points[:, :, 3:]), dim=-1)
    return result.numpy() if is_numpy else result
def boxes_to_corners_3d(boxes3d):
    """
        7 -------- 4
       /|         /|
      6 -------- 5 .
      | |        | |
      . 3 -------- 0
      |/         |/
      2 -------- 1
    Args:
        boxes3d:  (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
    Returns:
        corners3d: (N, 8, 3)
    """
    boxes3d, is_numpy = check_numpy_to_torch(boxes3d)
    # Unit-cube corner template (centered at the origin), scaled by dx/dy/dz.
    template = boxes3d.new_tensor((
        [1, 1, -1], [1, -1, -1], [-1, -1, -1], [-1, 1, -1],
        [1, 1, 1], [1, -1, 1], [-1, -1, 1], [-1, 1, 1],
    )) / 2
    corners = boxes3d[:, None, 3:6].repeat(1, 8, 1) * template[None, :, :]
    corners = rotate_points_along_z(corners.view(-1, 8, 3), boxes3d[:, 6]).view(-1, 8, 3)
    corners = corners + boxes3d[:, None, 0:3]
    return corners.numpy() if is_numpy else corners
def visualize_pts(pts, fig=None, bgcolor=(0, 0, 0), fgcolor=(1.0, 1.0, 1.0),
                  show_intensity=False, size=(600, 600), draw_origin=True):
    """Scatter-plot a point cloud with mayavi and return the figure.

    Args:
        pts: (N, 3+) points; torch tensors are moved to CPU numpy first.
        fig: existing mayavi figure to draw into, or None to create one.
        bgcolor, fgcolor: figure background / foreground colors.
        show_intensity: if True, color points by their 4th column.
        size: window size in pixels.
        draw_origin: if True, draw a cube at the origin and the three axes.
    """
    if not isinstance(pts, np.ndarray):
        pts = pts.cpu().numpy()
    if fig is None:
        fig = mlab.figure(figure=None, bgcolor=bgcolor, fgcolor=fgcolor, engine=None, size=size)
    if show_intensity:
        G = mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], pts[:, 3], mode='point',
                          colormap='gnuplot', scale_factor=1, figure=fig)
    else:
        G = mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], mode='point',
                          colormap='gnuplot', scale_factor=1, figure=fig)
    if draw_origin:
        # Origin marker plus x (blue), y (green), z (red) axis segments.
        mlab.points3d(0, 0, 0, color=(1, 1, 1), mode='cube', scale_factor=0.2)
        mlab.plot3d([0, 3], [0, 0], [0, 0], color=(0, 0, 1), tube_radius=0.1)
        mlab.plot3d([0, 0], [0, 3], [0, 0], color=(0, 1, 0), tube_radius=0.1)
        mlab.plot3d([0, 0], [0, 0], [0, 3], color=(1, 0, 0), tube_radius=0.1)
    return fig
def draw_sphere_pts(pts, color=(0, 1, 0), fig=None, bgcolor=(0, 0, 0), scale_factor=0.2):
    """Draw points as spheres, with either a uniform color tuple or a per-point
    color array, plus an origin marker and the three axis lines.

    Args:
        pts: (N, 3+) points; torch tensors are moved to CPU numpy first.
        color: RGB tuple for uniform coloring, or an ndarray — a (1, 3) array
            of 0-255 values is collapsed to a single normalized tuple, while a
            larger (N, 3) array is used as a per-point color LUT.
        fig: existing mayavi figure or None to create one.
        bgcolor: background color for a newly created figure.
        scale_factor: sphere radius.
    """
    if not isinstance(pts, np.ndarray):
        pts = pts.cpu().numpy()
    if fig is None:
        fig = mlab.figure(figure=None, bgcolor=bgcolor, fgcolor=None, engine=None, size=(600, 600))
    if isinstance(color, np.ndarray) and color.shape[0] == 1:
        # Single-row array: treat as one uniform 0-255 color and normalize.
        color = color[0]
        color = (color[0] / 255.0, color[1] / 255.0, color[2] / 255.0)
    if isinstance(color, np.ndarray):
        # Per-point RGBA lookup table; scalars index into it.
        pts_color = np.zeros((pts.__len__(), 4), dtype=np.uint8)
        pts_color[:, 0:3] = color
        pts_color[:, 3] = 255
        G = mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], np.arange(0, pts_color.__len__()), mode='sphere',
                          scale_factor=scale_factor, figure=fig)
        G.glyph.color_mode = 'color_by_scalar'
        G.glyph.scale_mode = 'scale_by_vector'
        G.module_manager.scalar_lut_manager.lut.table = pts_color
    else:
        mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], mode='sphere', color=color,
                      colormap='gnuplot', scale_factor=scale_factor, figure=fig)
    # Origin marker plus x (blue), y (green), z (red) axis segments.
    mlab.points3d(0, 0, 0, color=(1, 1, 1), mode='cube', scale_factor=0.2)
    mlab.plot3d([0, 3], [0, 0], [0, 0], color=(0, 0, 1), line_width=3, tube_radius=None, figure=fig)
    mlab.plot3d([0, 0], [0, 3], [0, 0], color=(0, 1, 0), line_width=3, tube_radius=None, figure=fig)
    mlab.plot3d([0, 0], [0, 0], [0, 3], color=(1, 0, 0), line_width=3, tube_radius=None, figure=fig)
    return fig
def draw_grid(x1, y1, x2, y2, fig, tube_radius=None, color=(0.5, 0.5, 0.5)):
    """Draw the four edges of the axis-aligned rectangle (x1,y1)-(x2,y2) on z=0."""
    edges = (([x1, x1], [y1, y2]),
             ([x2, x2], [y1, y2]),
             ([x1, x2], [y1, y1]),
             ([x1, x2], [y2, y2]))
    for xs, ys in edges:
        mlab.plot3d(xs, ys, [0, 0], color=color, tube_radius=tube_radius, line_width=1, figure=fig)
    return fig
def draw_multi_grid_range(fig, grid_size=20, bv_range=(-60, -60, 60, 60)):
    """Tile the bird's-eye-view range with grid_size x grid_size cells."""
    x_min, y_min, x_max, y_max = bv_range
    for gx in range(x_min, x_max, grid_size):
        for gy in range(y_min, y_max, grid_size):
            fig = draw_grid(gx, gy, gx + grid_size, gy + grid_size, fig)
    return fig
def draw_scenes(points, gt_boxes=None, ref_boxes=None, ref_scores=None, ref_labels=None):
    """Render a point cloud with a BEV grid, GT boxes (blue) and reference
    boxes (green or colored per label) using mayavi; returns the figure.

    Args:
        points: (N, 3+) point cloud.
        gt_boxes: (M, 7) ground-truth boxes.
        ref_boxes: (K, 7) reference/predicted boxes.
        ref_scores: per-box scores, rendered as text next to each box.
        ref_labels: per-box class ids selecting colors from `box_colormap`.
    """
    if not isinstance(points, np.ndarray):
        points = points.cpu().numpy()
    if ref_boxes is not None and not isinstance(ref_boxes, np.ndarray):
        ref_boxes = ref_boxes.cpu().numpy()
    if gt_boxes is not None and not isinstance(gt_boxes, np.ndarray):
        gt_boxes = gt_boxes.cpu().numpy()
    if ref_scores is not None and not isinstance(ref_scores, np.ndarray):
        ref_scores = ref_scores.cpu().numpy()
    if ref_labels is not None and not isinstance(ref_labels, np.ndarray):
        ref_labels = ref_labels.cpu().numpy()
    fig = visualize_pts(points)
    # Forward-facing BEV range (x in [0, 80], y in [-40, 40]).
    fig = draw_multi_grid_range(fig, bv_range=(0, -40, 80, 40))
    if gt_boxes is not None:
        corners3d = boxes_to_corners_3d(gt_boxes)
        fig = draw_corners3d(corners3d, fig=fig, color=(0, 0, 1), max_num=100)
    if ref_boxes is not None and len(ref_boxes) > 0:
        ref_corners3d = boxes_to_corners_3d(ref_boxes)
        if ref_labels is None:
            fig = draw_corners3d(ref_corners3d, fig=fig, color=(0, 1, 0), cls=ref_scores, max_num=100)
        else:
            # One draw call per label so each class gets its own color.
            for k in range(ref_labels.min(), ref_labels.max() + 1):
                cur_color = tuple(box_colormap[k % len(box_colormap)])
                mask = (ref_labels == k)
                fig = draw_corners3d(ref_corners3d[mask], fig=fig, color=cur_color, cls=ref_scores[mask], max_num=100)
    mlab.view(azimuth=-179, elevation=54.0, distance=104.0, roll=90.0)
    return fig
def draw_corners3d(corners3d, fig, color=(1, 1, 1), line_width=2, cls=None, tag='', max_num=500, tube_radius=None):
    """
    Draw up to `max_num` boxes given their 8 corners each: the 4 bottom edges,
    4 top edges, 4 vertical edges, plus the 0-5 / 1-4 face diagonals marking
    the heading face. Optionally labels each box with `cls[n]`.
    :param corners3d: (N, 8, 3)
    :param fig: mayavi figure to draw into
    :param color: RGB tuple for all edges of these boxes
    :param line_width: edge width
    :param cls: per-box score array (rendered '%.2f') or label sequence ('%s')
    :param tag: unused
    :param max_num: cap on the number of boxes drawn
    :param tube_radius: forwarded to mlab.plot3d
    :return: the figure
    """
    import mayavi.mlab as mlab
    num = min(max_num, len(corners3d))
    for n in range(num):
        b = corners3d[n]  # (8, 3)
        if cls is not None:
            # Numeric scores get 2-decimal formatting; anything else is stringified.
            if isinstance(cls, np.ndarray):
                mlab.text3d(b[6, 0], b[6, 1], b[6, 2], '%.2f' % cls[n], scale=(0.3, 0.3, 0.3), color=color, figure=fig)
            else:
                mlab.text3d(b[6, 0], b[6, 1], b[6, 2], '%s' % cls[n], scale=(0.3, 0.3, 0.3), color=color, figure=fig)
        for k in range(0, 4):
            # Bottom edge (k -> k+1), top edge (k+4 -> k+5), vertical edge (k -> k+4).
            i, j = k, (k + 1) % 4
            mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
                        line_width=line_width, figure=fig)
            i, j = k + 4, (k + 1) % 4 + 4
            mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
                        line_width=line_width, figure=fig)
            i, j = k, k + 4
            mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
                        line_width=line_width, figure=fig)
        # Cross lines on the heading face so orientation is visible.
        i, j = 0, 5
        mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
                    line_width=line_width, figure=fig)
        i, j = 1, 4
        mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
                    line_width=line_width, figure=fig)
    return fig
| 8,540
| 38.541667
| 121
|
py
|
3DTrans
|
3DTrans-master/pcdet/config.py
|
from pathlib import Path
import yaml
from easydict import EasyDict
def log_config_to_file(cfg, pre='cfg', logger=None):
    """Recursively log every (key, value) pair of an EasyDict config via `logger`."""
    for key, val in cfg.items():
        if isinstance(val, EasyDict):
            # Announce the nested namespace, then recurse into it.
            logger.info('\n%s.%s = edict()' % (pre, key))
            log_config_to_file(val, pre=pre + '.' + key, logger=logger)
        else:
            logger.info('%s.%s: %s' % (pre, key, val))
def cfg_from_list(cfg_list, config):
    """Set config keys via list (e.g., from command line).

    `cfg_list` alternates dotted key paths and values, e.g.
    ['OPTIMIZATION.LR', '0.003', 'MODEL.NAME', 'PV-RCNN']. Values are parsed
    with ast.literal_eval; non-literal strings are kept verbatim. The parsed
    value must keep the original type unless the target is a nested EasyDict
    ('k1:v1,k2:v2' syntax) or a list ('v1,v2,...' syntax).
    """
    from ast import literal_eval
    assert len(cfg_list) % 2 == 0
    for full_key, v in zip(cfg_list[0::2], cfg_list[1::2]):
        key_list = full_key.split('.')
        d = config
        for subkey in key_list[:-1]:
            assert subkey in d, 'NotFoundKey: %s' % subkey
            d = d[subkey]
        subkey = key_list[-1]
        assert subkey in d, 'NotFoundKey: %s' % subkey
        try:
            value = literal_eval(v)
        except (ValueError, SyntaxError):
            # Not a Python literal: keep the raw string (narrowed from bare except).
            value = v
        if type(value) != type(d[subkey]) and isinstance(d[subkey], EasyDict):
            # 'k1:v1,k2:v2' updates individual sub-keys, preserving their types.
            key_val_list = value.split(',')
            for src in key_val_list:
                cur_key, cur_val = src.split(':')
                val_type = type(d[subkey][cur_key])
                d[subkey][cur_key] = val_type(cur_val)
        elif type(value) != type(d[subkey]) and isinstance(d[subkey], list):
            val_list = value.split(',')
            # BUGFIX: the original reused `k` here, shadowing the outer key variable.
            for idx, x in enumerate(val_list):
                val_list[idx] = type(d[subkey][0])(x)
            d[subkey] = val_list
        else:
            assert type(value) == type(d[subkey]), \
                'type {} does not match original type {}'.format(type(value), type(d[subkey]))
            d[subkey] = value
def merge_new_config(config, new_config):
    """Recursively merge `new_config` into `config` (modified in place, also returned).

    If `new_config` carries a '_BASE_CONFIG_' path, that YAML file is loaded
    into `config` first so the explicit keys override the base.
    """
    if '_BASE_CONFIG_' in new_config:
        with open(new_config['_BASE_CONFIG_'], 'r') as f:
            # yaml.safe_load never takes a Loader argument; the original call
            # with Loader=yaml.FullLoader always raised TypeError and fell
            # into its bare except — simplified to the correct single call.
            yaml_config = yaml.safe_load(f)
        config.update(EasyDict(yaml_config))
    for key, val in new_config.items():
        if not isinstance(val, dict):
            config[key] = val
            continue
        if key not in config:
            config[key] = EasyDict()
        merge_new_config(config[key], val)
    return config
def cfg_from_yaml_file(cfg_file, config):
    """Load a YAML config file and merge it into `config`; returns `config`."""
    with open(cfg_file, 'r') as f:
        # yaml.safe_load selects its own loader; the original passed
        # Loader=yaml.FullLoader (always a TypeError) and relied on a bare
        # except fallback — simplified to the correct single call.
        new_config = yaml.safe_load(f)
    merge_new_config(config=config, new_config=new_config)
    return config
# Global config namespace shared across the project; populated by
# cfg_from_yaml_file / cfg_from_list at startup.
cfg = EasyDict()
cfg.ROOT_DIR = (Path(__file__).resolve().parent / '../').resolve()  # repository root
cfg.LOCAL_RANK = 0  # default process rank; overwritten for distributed training
| 2,770
| 31.22093
| 94
|
py
|
3DTrans
|
3DTrans-master/pcdet/__init__.py
|
import subprocess
from pathlib import Path
from .version import __version__
__all__ = [
'__version__'
]
def get_git_commit_number():
    """Return the short (7-char) HEAD commit hash, or '0000000' outside a git checkout."""
    repo_git_dir = Path(__file__).parent / '../.git'
    if not repo_git_dir.exists():
        return '0000000'
    rev_parse = subprocess.run(['git', 'rev-parse', 'HEAD'], stdout=subprocess.PIPE)
    return rev_parse.stdout.decode('utf-8')[:7]
# Append the current git commit to the version string so checkpoints and logs
# record the exact code revision they were produced with.
script_version = get_git_commit_number()
if script_version not in __version__:
    __version__ = __version__ + '+py%s' % script_version
| 535
| 20.44
| 82
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/__init__.py
|
from collections import namedtuple
import numpy as np
import torch
from .detectors import build_detector, build_detector_multi_db, build_detector_multi_db_3
try:
import kornia
except:
pass
# print('Warning: kornia is not installed. This package is only required by CaDDN')
def build_network(model_cfg, num_class, dataset):
    """Thin wrapper over `build_detector` for the single-dataset case."""
    return build_detector(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
# def build_network_multi_db_v2(model_cfg, num_class, dataset):
# model = build_detector_multi_db_v2(
# model_cfg=model_cfg, num_class=num_class, dataset=dataset
# )
# return model
def build_network_multi_db(model_cfg, num_class, num_class_s2, dataset, dataset_s2, source_one_name):
    """Thin wrapper over `build_detector_multi_db` for two source datasets."""
    return build_detector_multi_db(
        model_cfg=model_cfg,
        num_class=num_class, num_class_s2=num_class_s2,
        dataset=dataset, dataset_s2=dataset_s2,
        source_one_name=source_one_name,
    )
def build_network_multi_db_3(model_cfg, num_class, num_class_s2, num_class_s3, dataset, dataset_s2, dataset_s3, source_one_name, source_1):
    """Thin wrapper over `build_detector_multi_db_3` for three source datasets."""
    return build_detector_multi_db_3(
        model_cfg=model_cfg,
        num_class=num_class, num_class_s2=num_class_s2, num_class_s3=num_class_s3,
        dataset=dataset, dataset_s2=dataset_s2, dataset_s3=dataset_s3,
        source_one_name=source_one_name, source_1=source_1,
    )
def load_data_to_gpu(batch_dict):
    """Move every ndarray entry of `batch_dict` onto the GPU, in place.

    Non-array values and metadata keys ('frame_id', 'metadata', 'calib',
    'db_flag') are left untouched; 'images' go through kornia, 'image_shape'
    becomes an int tensor, everything else a float tensor.
    """
    skip_keys = ('frame_id', 'metadata', 'calib', 'db_flag')
    for key, val in batch_dict.items():
        if not isinstance(val, np.ndarray) or key in skip_keys:
            continue
        if key == 'images':
            batch_dict[key] = kornia.image_to_tensor(val).float().cuda().contiguous()
        elif key == 'image_shape':
            batch_dict[key] = torch.from_numpy(val).int().cuda()
        else:
            batch_dict[key] = torch.from_numpy(val).float().cuda()
def model_fn_decorator():
    """Return a closure `model_func(model, batch_dict)` computing the mean
    training loss; the result is a ModelReturn(loss, tb_dict, disp_dict)."""
    ModelReturn = namedtuple('ModelReturn', ['loss', 'tb_dict', 'disp_dict'])

    def model_func(model, batch_dict, **forward_args):
        load_data_to_gpu(batch_dict)
        ret_dict, tb_dict, disp_dict = model(batch_dict, **forward_args)
        loss = ret_dict['loss'].mean()
        # DDP-wrapped models keep the step counter on `.module`.
        stepper = model if hasattr(model, 'update_global_step') else model.module
        stepper.update_global_step()
        return ModelReturn(loss, tb_dict, disp_dict)

    return model_func
| 2,525
| 33.135135
| 139
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/mdf_models/dense_2d_moe_add_wo_SE.py
|
import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.utils import common_utils
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution with padding (padding tracks dilation; no bias)."""
    return nn.Conv2d(
        in_planes, out_planes,
        kernel_size=3, stride=stride, padding=dilation,
        groups=groups, bias=False, dilation=dilation,
    )
def conv1x1(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """1x1 convolution (zero padding; no bias)."""
    return nn.Conv2d(
        in_planes, out_planes,
        kernel_size=1, stride=stride, padding=0,
        groups=groups, bias=False, dilation=dilation,
    )
class BasicBlock(nn.Module):
    """Residual block: conv3x3 -> BN -> (+identity/downsample) -> LeakyReLU.

    The second conv/BN pair of the standard ResNet BasicBlock is disabled
    upstream, so only one convolution runs before the residual add.
    """
    expansion = 1
    __constants__ = ['downsample']

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.LeakyReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.bn1(self.conv1(x))
        out += shortcut
        return self.relu(out)
class BasicBlock_2(nn.Module):
    """conv3x3 -> BN -> LeakyReLU -> conv1x1, with no residual connection."""
    expansion = 1
    __constants__ = ['downsample']

    def __init__(self, inplanes, planes, stride=1, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock_2, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.LeakyReLU(inplace=True)
        self.conv2 = conv1x1(planes, planes, stride)

    def forward(self, x):
        return self.conv2(self.relu(self.bn1(self.conv1(x))))
class BasicBlock_Rescale(nn.Module):
    """Channel-rescaling block: conv1x1 -> BN -> LeakyReLU."""
    expansion = 1
    __constants__ = ['downsample']

    def __init__(self, inplanes, planes, stride=1, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock_Rescale, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        self.conv1 = conv1x1(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.LeakyReLU(inplace=True)

    def forward(self, x):
        return self.relu(self.bn1(self.conv1(x)))
class SEBlock(nn.Module):
    """Squeeze-and-excitation: gate each channel by a sigmoid weight computed
    from the channel's global average (bottleneck ratio ``r``)."""

    def __init__(self, channels, r=16):
        super(SEBlock, self).__init__()
        self.r = r
        self.squeeze = nn.Sequential(
            nn.Linear(channels, channels // self.r),
            nn.ReLU(),
            nn.Linear(channels // self.r, channels),
            nn.Sigmoid(),
        )

    def forward(self, x):
        b, c = x.size(0), x.size(1)
        # Global average pool over H, W -> per-channel gate in (0, 1).
        pooled = torch.mean(x, dim=(2, 3))
        gate = self.squeeze(pooled).view(b, c, 1, 1)
        return x * gate
class DENSE_2D_MoE_ADD_wo_SE(nn.Module):
    """Mixture-of-experts BEV feature fusion for multi-dataset training,
    variant WITHOUT the per-dataset squeeze-and-excitation blocks.

    Training: features of the two source datasets are concatenated along
    channels, mixed by a non-linear function, gated by a per-dataset softmax
    attention mask, reduced back to the per-dataset channel count, and added
    residually to each dataset's original features.
    Inference: only one dataset is present, so its features are duplicated to
    fill both expert slots before the same fusion path is applied.
    """

    def __init__(self, model_cfg, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        # Number of datasets (experts) fused together.
        self.N = 2
        self.source_one_name = self.model_cfg.SOURCE_ONE_NAME
        self.per_task_channels = self.model_cfg.INPUT_CONV_CHANNEL
        self.shared_channels = int(self.N * self.model_cfg.INPUT_CONV_CHANNEL)
        # Which dataset is being evaluated at inference time (1 or 2).
        self.db_source = int(self.model_cfg.db_source)
        # Non-linear mixing function f over the concatenated features.
        downsample = nn.Sequential(nn.Conv2d(self.shared_channels, self.shared_channels//4, 1, bias=False),
                                   nn.BatchNorm2d(self.shared_channels//4))
        self.non_linear = nn.Sequential(BasicBlock(self.shared_channels, self.shared_channels//4, downsample=downsample),
                                        BasicBlock(self.shared_channels//4, self.shared_channels//4),
                                        nn.Conv2d(self.shared_channels//4, self.shared_channels, 1))
        # Dimensionality reduction back to the per-dataset channel count.
        downsample = nn.Sequential(nn.Conv2d(self.shared_channels, self.per_task_channels, 1, bias=False),
                                   nn.BatchNorm2d(self.per_task_channels))
        self.dimensionality_reduction = BasicBlock(self.shared_channels, self.per_task_channels,
                                                   downsample=downsample)

    def forward(self, data_dict):
        """Fuse dataset-specific BEV features in-place in
        ``data_dict['spatial_features_2d']`` and return ``data_dict``.

        Raises:
            ValueError: at inference time when ``db_source`` is neither 1 nor 2
                (previously this fell through to an opaque NameError).
        """
        # Split the mixed batch back into the two source datasets.
        split_tag_s1, split_tag_s2 = common_utils.split_batch_dict(self.source_one_name, data_dict)
        spatial_features_2d = data_dict['spatial_features_2d']
        spatial_features_2d_s1 = spatial_features_2d[split_tag_s1, :, :, :]
        spatial_features_2d_s2 = spatial_features_2d[split_tag_s2, :, :, :]
        if self.training:
            # Concat the task-specific features into the channel dimension.
            concat = torch.cat([spatial_features_2d_s1, spatial_features_2d_s2], 1)
            B, C, H, W = concat.size()
            shared = self.non_linear(concat)
            # Per-dataset attention mask: softmax over the expert axis.
            mask = F.softmax(shared.view(B, C//self.N, self.N, H, W), dim=2)
            shared = torch.mul(mask, concat.view(B, C//self.N, self.N, H, W)).view(B, -1, H, W)
            shared = self.dimensionality_reduction(shared)
            # Residual add per dataset (no SE block in this variant).
            out_s1 = shared + spatial_features_2d_s1
            out_s2 = shared + spatial_features_2d_s2
            data_dict['spatial_features_2d'] = torch.cat([out_s1, out_s2], 0)
        else:
            # Inference: duplicate the single present dataset's features so
            # the fusion path still sees both expert slots.
            if self.db_source == 1:
                features_used = spatial_features_2d_s1
            elif self.db_source == 2:
                features_used = spatial_features_2d_s2
            else:
                raise ValueError(f'db_source must be 1 or 2, got {self.db_source}')
            concat = torch.cat([features_used, features_used], 1)
            B, C, H, W = concat.size()
            shared = self.non_linear(concat)
            mask = F.softmax(shared.view(B, C//self.N, self.N, H, W), dim=2)
            shared = torch.mul(mask, concat.view(B, C//self.N, self.N, H, W)).view(B, -1, H, W)
            shared = self.dimensionality_reduction(shared)
            if self.db_source == 1:
                data_dict['spatial_features_2d'] = shared + spatial_features_2d_s1
            else:
                data_dict['spatial_features_2d'] = shared + spatial_features_2d_s2
        return data_dict
| 8,239
| 39
| 121
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/mdf_models/dense_2d_moe_add_wo_attention.py
|
import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.utils import common_utils
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution with padding"""
    # Padding equals the dilation so the spatial size is preserved at stride 1.
    conv = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=dilation, groups=groups, dilation=dilation,
                     bias=False)
    return conv
def conv1x1(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """1x1 convolution (padding is always zero)."""
    return nn.Conv2d(in_planes, out_planes,
                     kernel_size=1,
                     stride=stride,
                     padding=0,
                     groups=groups,
                     dilation=dilation,
                     bias=False)
class BasicBlock(nn.Module):
    """Simplified residual block: one 3x3 conv + BN added to the (optionally
    downsampled) identity, then LeakyReLU.

    Unlike the torchvision BasicBlock there is no second conv/BN pair; the
    dead commented-out remnants of it have been removed.
    """
    expansion = 1
    __constants__ = ['downsample']

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.LeakyReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        if self.downsample is not None:
            # Match the shortcut's shape to the main path.
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
class BasicBlock_2(nn.Module):
    """Plain conv stack: conv3x3 -> BN -> LeakyReLU -> conv1x1, no residual."""
    expansion = 1
    __constants__ = ['downsample']
    def __init__(self, inplanes, planes, stride=1, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock_2, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.LeakyReLU(inplace=True)
        # NOTE(review): the stride is applied a second time here — with
        # stride > 1 the input is downsampled twice; confirm this is intended.
        self.conv2 = conv1x1(planes, planes, stride)
    def forward(self, x):
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        return out
class BasicBlock_Rescale(nn.Module):
    """Channel-rescaling block: conv1x1 -> BN -> LeakyReLU, no residual path."""
    expansion = 1
    __constants__ = ['downsample']
    def __init__(self, inplanes, planes, stride=1, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock_Rescale, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # A single 1x1 conv performs both the channel change and any stride.
        self.conv1 = conv1x1(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.LeakyReLU(inplace=True)
    def forward(self, x):
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        return out
class SEBlock(nn.Module):
    """ Squeeze-and-excitation block """
    def __init__(self, channels, r=16):
        # r: channel-reduction ratio of the bottleneck MLP.
        super(SEBlock, self).__init__()
        self.r = r
        self.squeeze = nn.Sequential(nn.Linear(channels, channels//self.r),
                                    nn.ReLU(),
                                    nn.Linear(channels//self.r, channels),
                                    nn.Sigmoid())
    def forward(self, x):
        """Gate each channel of x (B, C, H, W) by a sigmoid weight derived
        from the channel's global average."""
        B, C, H, W = x.size()
        # Global average pool -> bottleneck MLP -> per-channel gate in (0, 1).
        squeeze = self.squeeze(torch.mean(x, dim=(2,3))).view(B,C,1,1)
        return torch.mul(x, squeeze)
class DENSE_2D_MoE_ADD_wo_AT(nn.Module):
    """Multi-dataset BEV feature fusion, variant WITHOUT the per-dataset
    attention mask (ablation of the MoE gating).

    Training: the two datasets' features are concatenated along channels,
    mixed by a non-linear function, reduced back to per-dataset channels,
    recalibrated by a per-dataset SE block and added residually.
    Inference: only one dataset is present, so its features are duplicated
    to fill both slots before the same path is applied.
    """

    def __init__(self, model_cfg, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        # Number of datasets fused together.
        self.N = 2
        self.source_one_name = self.model_cfg.SOURCE_ONE_NAME
        self.per_task_channels = self.model_cfg.INPUT_CONV_CHANNEL
        self.shared_channels = int(self.N * self.model_cfg.INPUT_CONV_CHANNEL)
        # Which dataset is being evaluated at inference time (1 or 2).
        self.db_source = int(self.model_cfg.db_source)
        # Non-linear mixing function f.
        downsample = nn.Sequential(nn.Conv2d(self.shared_channels, self.shared_channels//4, 1, bias=False),
                                   nn.BatchNorm2d(self.shared_channels//4))
        self.non_linear = nn.Sequential(BasicBlock(self.shared_channels, self.shared_channels//4, downsample=downsample),
                                        BasicBlock(self.shared_channels//4, self.shared_channels//4),
                                        nn.Conv2d(self.shared_channels//4, self.shared_channels, 1))
        # Dimensionality reduction back to per-dataset channels.
        downsample = nn.Sequential(nn.Conv2d(self.shared_channels, self.per_task_channels, 1, bias=False),
                                   nn.BatchNorm2d(self.per_task_channels))
        self.dimensionality_reduction = BasicBlock(self.shared_channels, self.per_task_channels,
                                                   downsample=downsample)
        # Per-dataset squeeze-and-excitation.
        self.se_s1 = SEBlock(self.per_task_channels)
        self.se_s2 = SEBlock(self.per_task_channels)

    def forward(self, data_dict):
        """Fuse dataset-specific BEV features in-place in
        ``data_dict['spatial_features_2d']`` and return ``data_dict``.

        Raises:
            ValueError: at inference time when ``db_source`` is neither 1 nor 2
                (previously this fell through to an opaque NameError).
        """
        # Split the mixed batch back into the two source datasets.
        split_tag_s1, split_tag_s2 = common_utils.split_batch_dict(self.source_one_name, data_dict)
        spatial_features_2d = data_dict['spatial_features_2d']
        spatial_features_2d_s1 = spatial_features_2d[split_tag_s1, :, :, :]
        spatial_features_2d_s2 = spatial_features_2d[split_tag_s2, :, :, :]
        if self.training:
            # Concat the dataset-specific features into the channel dimension.
            concat = torch.cat([spatial_features_2d_s1, spatial_features_2d_s2], 1)
            shared = self.non_linear(concat)
            shared = self.dimensionality_reduction(shared)
            # Dataset-specific squeeze-and-excitation + residual add.
            out_s1 = self.se_s1(shared) + spatial_features_2d_s1
            out_s2 = self.se_s2(shared) + spatial_features_2d_s2
            data_dict['spatial_features_2d'] = torch.cat([out_s1, out_s2], 0)
        else:
            # Inference: duplicate the single present dataset's BEV features.
            if self.db_source == 1:
                features_used = spatial_features_2d_s1
            elif self.db_source == 2:
                features_used = spatial_features_2d_s2
            else:
                raise ValueError(f'db_source must be 1 or 2, got {self.db_source}')
            concat = torch.cat([features_used, features_used], 1)
            shared = self.non_linear(concat)
            shared = self.dimensionality_reduction(shared)
            if self.db_source == 1:
                data_dict['spatial_features_2d'] = self.se_s1(shared) + spatial_features_2d_s1
            else:
                data_dict['spatial_features_2d'] = self.se_s2(shared) + spatial_features_2d_s2
        return data_dict
| 7,968
| 38.450495
| 121
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/mdf_models/dense_3d_cr.py
|
import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.utils import common_utils
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution with padding"""
    # Padding tracks the dilation so the effective receptive field stays centred.
    kwargs = dict(kernel_size=3, stride=stride, padding=dilation,
                  groups=groups, bias=False, dilation=dilation)
    return nn.Conv2d(in_planes, out_planes, **kwargs)
class BasicBlock(nn.Module):
    """Standard ResNet BasicBlock: two 3x3 convs with BN/ReLU and a residual
    (optionally downsampled) shortcut."""
    expansion = 1
    __constants__ = ['downsample']
    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # conv1 (and downsample, when given) carries the spatial stride.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            # Match the shortcut's shape to the main path.
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
class SEBlock(nn.Module):
    """ Squeeze-and-excitation block """
    def __init__(self, channels, r=16):
        # r: channel-reduction ratio of the bottleneck MLP.
        super(SEBlock, self).__init__()
        self.r = r
        self.squeeze = nn.Sequential(nn.Linear(channels, channels//self.r),
                                    nn.ReLU(),
                                    nn.Linear(channels//self.r, channels),
                                    nn.Sigmoid())
    def forward(self, x):
        """Gate each channel of x (B, C, H, W) by a sigmoid weight derived
        from the channel's global average."""
        B, C, H, W = x.size()
        # Global average pool -> bottleneck MLP -> per-channel gate in (0, 1).
        squeeze = self.squeeze(torch.mean(x, dim=(2,3))).view(B,C,1,1)
        return torch.mul(x, squeeze)
class DENSE_3D_DT(nn.Module):
    """Per-dataset squeeze-and-excitation on dense 3D BEV maps
    (``spatial_features``): each source dataset has its own SE block, and the
    recalibrated halves are re-concatenated along the batch dimension."""

    def __init__(self, model_cfg, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        self.source_one_name = self.model_cfg.SOURCE_ONE_NAME
        self.per_task_channels = self.model_cfg.INPUT_CONV_CHANNEL
        # One SE block per source dataset.
        self.se_s1 = SEBlock(self.per_task_channels)
        self.se_s2 = SEBlock(self.per_task_channels)

    def forward(self, data_dict):
        # Split the mixed batch into its two source datasets.
        tag_s1, tag_s2 = common_utils.split_batch_dict(self.source_one_name, data_dict)
        feats = data_dict['spatial_features']
        recalibrated = [
            self.se_s1(feats[tag_s1, :, :, :]),
            self.se_s2(feats[tag_s2, :, :, :]),
        ]
        data_dict['spatial_features'] = torch.cat(recalibrated, 0)
        return data_dict
class DENSE_3D_CR(nn.Module):
    """Channel-recalibration fusion on dense 3D BEV maps (``spatial_features``)
    with an extra spatial max-attention term.

    Training: features of the two datasets are concatenated along channels,
    mixed by a non-linear function, gated by (dataset softmax mask x spatial
    max-attention), reduced back to per-dataset channels, recalibrated by a
    per-dataset SE block and added residually.
    Inference: the single available dataset's features are duplicated to fill
    both slots before the same path is applied.
    """

    def __init__(self, model_cfg, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        self.source_one_name = self.model_cfg.SOURCE_ONE_NAME
        # Number of datasets fused together.
        self.N = self.model_cfg.NUM_OF_DB
        self.per_task_channels = self.model_cfg.INPUT_CONV_CHANNEL
        self.shared_channels = int(self.N*self.model_cfg.INPUT_CONV_CHANNEL)
        # Which dataset is being evaluated at inference time (1 or 2).
        self.db_source = int(self.model_cfg.db_source)
        # Non-linear mixing function f.
        downsample = nn.Sequential(nn.Conv2d(self.shared_channels, self.shared_channels//4, 1, bias=False),
                                   nn.BatchNorm2d(self.shared_channels//4))
        self.non_linear = nn.Sequential(BasicBlock(self.shared_channels, self.shared_channels//4, downsample=downsample),
                                        nn.Conv2d(self.shared_channels//4, self.shared_channels, 1))
        # Dimensionality reduction back to per-dataset channels.
        downsample = nn.Sequential(nn.Conv2d(self.shared_channels, self.per_task_channels, 1, bias=False),
                                   nn.BatchNorm2d(self.per_task_channels))
        self.dimensionality_reduction = BasicBlock(self.shared_channels, self.per_task_channels,
                                                   downsample=downsample)
        # Per-dataset squeeze-and-excitation.
        self.se_s1 = SEBlock(self.per_task_channels)
        self.se_s2 = SEBlock(self.per_task_channels)

    def forward(self, data_dict):
        """Fuse dataset-specific features in-place in
        ``data_dict['spatial_features']`` and return ``data_dict``.

        Raises:
            ValueError: at inference time when ``db_source`` is neither 1 nor 2
                (previously this fell through to an opaque NameError).
        """
        # Split the mixed batch back into the two source datasets.
        split_tag_s1, split_tag_s2 = common_utils.split_batch_dict(self.source_one_name, data_dict)
        spatial_features = data_dict['spatial_features']
        spatial_features_s1 = spatial_features[split_tag_s1, :, :, :]
        spatial_features_s2 = spatial_features[split_tag_s2, :, :, :]
        if self.training:
            # Concat the dataset-specific features into the channel dimension.
            concat = torch.cat([spatial_features_s1, spatial_features_s2], 1)
            B, C, H, W = concat.size()
            shared = self.non_linear(concat)
            # Spatial mask shared across datasets (max over all channels).
            spatial_att = torch.max(concat, dim=1).values.view(B, 1, 1, H, W)
            # Dataset attention mask, modulated by the spatial mask.
            mask = F.softmax(shared.view(B, C//self.N, self.N, H, W), dim=2)
            mask = mask * spatial_att
            shared = torch.mul(mask, concat.view(B, C//self.N, self.N, H, W)).view(B, -1, H, W)
            shared = self.dimensionality_reduction(shared)
            # Dataset-specific squeeze-and-excitation + residual add.
            out_s1 = self.se_s1(shared) + spatial_features_s1
            out_s2 = self.se_s2(shared) + spatial_features_s2
            data_dict['spatial_features'] = torch.cat([out_s1, out_s2], 0)
        else:
            # Inference: duplicate the single present dataset's features.
            if self.db_source == 1:
                features_used = spatial_features_s1
            elif self.db_source == 2:
                features_used = spatial_features_s2
            else:
                raise ValueError(f'db_source must be 1 or 2, got {self.db_source}')
            concat = torch.cat([features_used, features_used], 1)
            B, C, H, W = concat.size()
            shared = self.non_linear(concat)
            spatial_att = torch.max(concat, dim=1).values.view(B, 1, 1, H, W)
            mask = F.softmax(shared.view(B, C//self.N, self.N, H, W), dim=2)
            mask = mask * spatial_att
            shared = torch.mul(mask, concat.view(B, C//self.N, self.N, H, W)).view(B, -1, H, W)
            shared = self.dimensionality_reduction(shared)
            if self.db_source == 1:
                data_dict['spatial_features'] = self.se_s1(shared) + spatial_features_s1
            else:
                data_dict['spatial_features'] = self.se_s2(shared) + spatial_features_s2
        return data_dict
| 7,057
| 38.430168
| 121
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/mdf_models/dense_cr.py
|
import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.utils import common_utils
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution; padding equals the dilation so a stride-1 output keeps
    its spatial size."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """1x1 convolution (padding is always zero)."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                     padding=0, groups=groups, bias=False, dilation=dilation)
class BasicBlock(nn.Module):
    """Simplified residual block: one 3x3 conv + BN added to the (optionally
    downsampled) identity, then LeakyReLU (no second conv/BN pair)."""
    expansion = 1
    __constants__ = ['downsample']
    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # conv1 (and downsample, when given) carries the spatial stride.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.LeakyReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        if self.downsample is not None:
            # Match the shortcut's shape to the main path.
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
class BasicBlock_2(nn.Module):
    """Plain conv stack: conv3x3 -> BN -> LeakyReLU -> conv1x1, no residual."""
    expansion = 1
    __constants__ = ['downsample']
    def __init__(self, inplanes, planes, stride=1, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock_2, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.LeakyReLU(inplace=True)
        # NOTE(review): the stride is applied a second time here — with
        # stride > 1 the input is downsampled twice; confirm this is intended.
        self.conv2 = conv1x1(planes, planes, stride)
    def forward(self, x):
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        return out
class BasicBlock_Rescale(nn.Module):
    """1x1-conv rescaling block (conv -> BN -> LeakyReLU); no shortcut path."""
    expansion = 1
    __constants__ = ['downsample']

    def __init__(self, inplanes, planes, stride=1, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock_Rescale, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        self.conv1 = conv1x1(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.LeakyReLU(inplace=True)

    def forward(self, x):
        y = self.conv1(x)
        y = self.bn1(y)
        y = self.relu(y)
        return y
class SEBlock(nn.Module):
    """ Squeeze-and-excitation block """
    def __init__(self, channels, r=16):
        # r: channel-reduction ratio of the bottleneck MLP.
        super(SEBlock, self).__init__()
        self.r = r
        self.squeeze = nn.Sequential(nn.Linear(channels, channels//self.r),
                                    nn.ReLU(),
                                    nn.Linear(channels//self.r, channels),
                                    nn.Sigmoid())
    def forward(self, x):
        """Gate each channel of x (B, C, H, W) by a sigmoid weight derived
        from the channel's global average."""
        B, C, H, W = x.size()
        # Global average pool -> bottleneck MLP -> per-channel gate in (0, 1).
        squeeze = self.squeeze(torch.mean(x, dim=(2,3))).view(B,C,1,1)
        return torch.mul(x, squeeze)
class SA_SEBlock(nn.Module):
    """ Squeeze-and-excitation block """
    # Variant that first summarises the input with two strided convolutions
    # before computing the channel gate.
    def __init__(self, channels, r=16):
        super(SA_SEBlock, self).__init__()
        self.r = r
        # NOTE(review): kernel 8 / stride 3 / padding 1 aggressively shrinks
        # the spatial map before pooling — confirm these constants are intended.
        self.squeeze_1 = nn.Sequential(nn.Conv2d(channels, channels//self.r, 8, 3, 1, bias=True),
                                    nn.LeakyReLU(negative_slope=0.2, inplace=True))
        self.squeeze_2 = nn.Sequential(nn.Conv2d(channels//self.r, channels//self.r, 8, 3, 1, bias=True),
                                    nn.LeakyReLU(negative_slope=0.2, inplace=True))
        self.squeeze = nn.Sequential(nn.Linear(channels//self.r, channels//self.r),
                                    nn.ReLU(),
                                    nn.Linear(channels//self.r, channels),
                                    nn.Sigmoid())
    def forward(self, x):
        """Gate each channel of x (B, C, H, W) using features pooled from the
        conv summary of the input."""
        B, C, H, W = x.size()
        att = self.squeeze_1(x)
        att = self.squeeze_2(att)
        # Pool the conv summary, then produce a per-channel gate in (0, 1).
        squeeze = self.squeeze(torch.mean(att, dim=(2,3))).view(B,C,1,1)
        return torch.mul(x, squeeze)
class DENSE_2D_DT(nn.Module):
    """Per-dataset squeeze-and-excitation on 2D BEV features
    (``spatial_features_2d``): each source dataset has its own SE block, and
    the recalibrated halves are re-joined along the batch dimension."""

    def __init__(self, model_cfg, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        self.source_one_name = self.model_cfg.SOURCE_ONE_NAME
        self.per_task_channels = self.model_cfg.INPUT_CONV_CHANNEL
        # One SE block per source dataset.
        self.se_s1 = SEBlock(self.per_task_channels)
        self.se_s2 = SEBlock(self.per_task_channels)

    def forward(self, data_dict):
        # Split the mixed batch into its two source datasets.
        tag_s1, tag_s2 = common_utils.split_batch_dict(self.source_one_name, data_dict)
        feats = data_dict['spatial_features_2d']
        out_first = self.se_s1(feats[tag_s1, :, :, :])
        out_second = self.se_s2(feats[tag_s2, :, :, :])
        data_dict['spatial_features_2d'] = torch.cat([out_first, out_second], 0)
        return data_dict
class DENSE_CR(nn.Module):
    """Channel-recalibration fusion of 2D BEV features with an extra spatial
    max-attention term.

    Training: features of the two datasets are concatenated along channels,
    mixed by a non-linear function, gated by (dataset softmax mask x spatial
    max-attention), reduced back to per-dataset channels, recalibrated by a
    per-dataset SE block and added residually.
    Inference: the single available dataset's features are duplicated to fill
    both slots before the same path is applied.
    """

    def __init__(self, model_cfg, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        # Number of datasets fused together.
        self.N = self.model_cfg.NUM_OF_DB
        self.source_one_name = self.model_cfg.SOURCE_ONE_NAME
        self.per_task_channels = self.model_cfg.INPUT_CONV_CHANNEL
        self.shared_channels = int(self.N*self.model_cfg.INPUT_CONV_CHANNEL)
        # Which dataset is being evaluated at inference time (1 or 2).
        self.db_source = int(self.model_cfg.db_source)
        # Non-linear mixing function f.
        downsample = nn.Sequential(nn.Conv2d(self.shared_channels, self.shared_channels//8, 1, bias=False),
                                   nn.BatchNorm2d(self.shared_channels//8))
        self.non_linear = nn.Sequential(BasicBlock(self.shared_channels, self.shared_channels//8, downsample=downsample),
                                        nn.Conv2d(self.shared_channels//8, self.shared_channels, 1))
        # Dimensionality reduction back to per-dataset channels.
        self.dimensionality_reduction = BasicBlock_Rescale(self.shared_channels, self.per_task_channels)
        # Per-dataset squeeze-and-excitation.
        self.se_s1 = SEBlock(self.per_task_channels, r=32)
        self.se_s2 = SEBlock(self.per_task_channels, r=32)

    def _fuse(self, concat):
        """Shared fusion path: mix, gate by (dataset softmax x spatial max),
        then reduce back to the per-dataset channel count."""
        B, C, H, W = concat.size()
        shared = self.non_linear(concat)
        # Spatial mask shared across datasets (max over all channels).
        spatial_att = torch.max(concat, dim=1).values.view(B, 1, 1, H, W)
        # Dataset attention mask, modulated by the spatial mask.
        mask = F.softmax(shared.view(B, C//self.N, self.N, H, W), dim=2)
        mask = mask * spatial_att
        shared = torch.mul(mask, concat.view(B, C//self.N, self.N, H, W)).view(B, -1, H, W)
        return self.dimensionality_reduction(shared)

    def forward(self, data_dict):
        """Fuse dataset-specific features in-place in
        ``data_dict['spatial_features_2d']`` and return ``data_dict``.

        Raises:
            ValueError: at inference time when ``db_source`` is neither 1 nor 2
                (previously this fell through to an opaque NameError).
        """
        split_tag_s1, split_tag_s2 = common_utils.split_batch_dict(self.source_one_name, data_dict)
        spatial_features_2d = data_dict['spatial_features_2d']
        spatial_features_2d_s1 = spatial_features_2d[split_tag_s1, :, :, :]
        spatial_features_2d_s2 = spatial_features_2d[split_tag_s2, :, :, :]
        if self.training:
            shared = self._fuse(torch.cat([spatial_features_2d_s1, spatial_features_2d_s2], 1))
            # Dataset-specific squeeze-and-excitation + residual add.
            out_s1 = self.se_s1(shared) + spatial_features_2d_s1
            out_s2 = self.se_s2(shared) + spatial_features_2d_s2
            data_dict['spatial_features_2d'] = torch.cat([out_s1, out_s2], 0)
        else:
            # Inference: duplicate the single present dataset's BEV features.
            if self.db_source == 1:
                features_used = spatial_features_2d_s1
            elif self.db_source == 2:
                features_used = spatial_features_2d_s2
            else:
                raise ValueError(f'db_source must be 1 or 2, got {self.db_source}')
            shared = self._fuse(torch.cat([features_used, features_used], 1))
            if self.db_source == 1:
                data_dict['spatial_features_2d'] = self.se_s1(shared) + spatial_features_2d_s1
            else:
                data_dict['spatial_features_2d'] = self.se_s2(shared) + spatial_features_2d_s2
        return data_dict
class DENSE_2D_CR_ADD(nn.Module):
    """Channel-recalibration fusion of 2D BEV features (ADD variant):
    dataset-softmax gating, dimensionality reduction, then per-dataset SE
    recalibration with a residual add.

    Training fuses both datasets jointly; inference duplicates the single
    present dataset's features to fill both slots.
    """

    def __init__(self, model_cfg, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        # Number of datasets fused together.
        self.N = self.model_cfg.NUM_OF_DB
        self.source_one_name = self.model_cfg.SOURCE_ONE_NAME
        self.per_task_channels = self.model_cfg.INPUT_CONV_CHANNEL
        self.shared_channels = int(self.N*self.model_cfg.INPUT_CONV_CHANNEL)
        # Which dataset is being evaluated at inference time (1 or 2).
        self.db_source = int(self.model_cfg.db_source)
        # Non-linear mixing function f.
        downsample = nn.Sequential(nn.Conv2d(self.shared_channels, self.shared_channels//4, 1, bias=False),
                                   nn.BatchNorm2d(self.shared_channels//4))
        self.non_linear = nn.Sequential(BasicBlock(self.shared_channels, self.shared_channels//4, downsample=downsample),
                                        BasicBlock(self.shared_channels//4, self.shared_channels//4),
                                        nn.Conv2d(self.shared_channels//4, self.shared_channels, 1))
        # Dimensionality reduction back to per-dataset channels.
        downsample = nn.Sequential(nn.Conv2d(self.shared_channels, self.per_task_channels, 1, bias=False),
                                   nn.BatchNorm2d(self.per_task_channels))
        self.dimensionality_reduction = BasicBlock(self.shared_channels, self.per_task_channels,
                                                   downsample=downsample)
        # Per-dataset squeeze-and-excitation.
        self.se_s1 = SEBlock(self.per_task_channels)
        self.se_s2 = SEBlock(self.per_task_channels)

    def forward(self, data_dict):
        """Fuse dataset-specific features in-place in
        ``data_dict['spatial_features_2d']`` and return ``data_dict``.

        Raises:
            ValueError: at inference time when ``db_source`` is neither 1 nor 2
                (previously this fell through to an opaque NameError).
        """
        # Split the mixed batch back into the two source datasets.
        split_tag_s1, split_tag_s2 = common_utils.split_batch_dict(self.source_one_name, data_dict)
        spatial_features_2d = data_dict['spatial_features_2d']
        spatial_features_2d_s1 = spatial_features_2d[split_tag_s1, :, :, :]
        spatial_features_2d_s2 = spatial_features_2d[split_tag_s2, :, :, :]
        if self.training:
            # Concat the dataset-specific features into the channel dimension.
            concat = torch.cat([spatial_features_2d_s1, spatial_features_2d_s2], 1)
            B, C, H, W = concat.size()
            shared = self.non_linear(concat)
            # Dataset attention mask: softmax over the dataset axis.
            mask = F.softmax(shared.view(B, C//self.N, self.N, H, W), dim=2)
            shared = torch.mul(mask, concat.view(B, C//self.N, self.N, H, W)).view(B, -1, H, W)
            shared = self.dimensionality_reduction(shared)
            # Dataset-specific squeeze-and-excitation + residual add.
            out_s1 = self.se_s1(shared) + spatial_features_2d_s1
            out_s2 = self.se_s2(shared) + spatial_features_2d_s2
            data_dict['spatial_features_2d'] = torch.cat([out_s1, out_s2], 0)
        else:
            # Inference: duplicate the single present dataset's BEV features.
            if self.db_source == 1:
                features_used = spatial_features_2d_s1
            elif self.db_source == 2:
                features_used = spatial_features_2d_s2
            else:
                raise ValueError(f'db_source must be 1 or 2, got {self.db_source}')
            concat = torch.cat([features_used, features_used], 1)
            B, C, H, W = concat.size()
            shared = self.non_linear(concat)
            mask = F.softmax(shared.view(B, C//self.N, self.N, H, W), dim=2)
            shared = torch.mul(mask, concat.view(B, C//self.N, self.N, H, W)).view(B, -1, H, W)
            shared = self.dimensionality_reduction(shared)
            if self.db_source == 1:
                data_dict['spatial_features_2d'] = self.se_s1(shared) + spatial_features_2d_s1
            else:
                data_dict['spatial_features_2d'] = self.se_s2(shared) + spatial_features_2d_s2
        return data_dict
class DENSE_2D_CR_ADD_SIM(nn.Module):
    """Channel-recalibration fusion (ADD variant) that always runs the joint
    two-dataset path — unlike its siblings it has no separate inference
    branch."""

    def __init__(self, model_cfg, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        self.N = self.model_cfg.NUM_OF_DB
        self.source_one_name = self.model_cfg.SOURCE_ONE_NAME
        self.per_task_channels = self.model_cfg.INPUT_CONV_CHANNEL
        self.shared_channels = int(self.N * self.model_cfg.INPUT_CONV_CHANNEL)
        self.db_source = int(self.model_cfg.db_source)
        # Non-linear mixing function f.
        down = nn.Sequential(
            nn.Conv2d(self.shared_channels, self.shared_channels // 4, 1, bias=False),
            nn.BatchNorm2d(self.shared_channels // 4))
        self.non_linear = nn.Sequential(
            BasicBlock(self.shared_channels, self.shared_channels // 4, downsample=down),
            BasicBlock(self.shared_channels // 4, self.shared_channels // 4),
            nn.Conv2d(self.shared_channels // 4, self.shared_channels, 1))
        # Dimensionality reduction back to per-dataset channels.
        down = nn.Sequential(
            nn.Conv2d(self.shared_channels, self.per_task_channels, 1, bias=False),
            nn.BatchNorm2d(self.per_task_channels))
        self.dimensionality_reduction = BasicBlock(
            self.shared_channels, self.per_task_channels, downsample=down)
        # Per-dataset squeeze-and-excitation.
        self.se_s1 = SEBlock(self.per_task_channels)
        self.se_s2 = SEBlock(self.per_task_channels)

    def forward(self, data_dict):
        # Split the mixed batch into the two source datasets.
        tag_s1, tag_s2 = common_utils.split_batch_dict(self.source_one_name, data_dict)
        feats = data_dict['spatial_features_2d']
        feats_s1 = feats[tag_s1, :, :, :]
        feats_s2 = feats[tag_s2, :, :, :]
        # Stack both datasets along channels and mix them.
        merged = torch.cat([feats_s1, feats_s2], 1)
        b, c, h, w = merged.size()
        mixed = self.non_linear(merged)
        # Softmax over the dataset axis yields a per-dataset attention mask.
        att = F.softmax(mixed.view(b, c // self.N, self.N, h, w), dim=2)
        gated = torch.mul(att, merged.view(b, c // self.N, self.N, h, w)).view(b, -1, h, w)
        gated = self.dimensionality_reduction(gated)
        # Dataset-specific recalibration + residual, then re-join the batch.
        out_s1 = self.se_s1(gated) + feats_s1
        out_s2 = self.se_s2(gated) + feats_s2
        data_dict['spatial_features_2d'] = torch.cat([out_s1, out_s2], 0)
        return data_dict
| 16,634
| 40.175743
| 121
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/mdf_models/__init__.py
|
from .dense_3d_cr import DENSE_3D_CR
from .dense_3d_cr import DENSE_3D_DT
from .dense_cr import DENSE_2D_DT
from .dense_cr import DENSE_2D_CR_ADD
from .dense_cr import DENSE_CR
from .dense_cr import DENSE_2D_CR_ADD_SIM
from .dense_2d_moe_add_wo_SE import DENSE_2D_MoE_ADD_wo_SE
from .dense_2d_moe_add_wo_attention import DENSE_2D_MoE_ADD_wo_AT
# Registry mapping config NAME strings to multi-dataset fusion modules.
# NOTE(review): key 'DENSE_2D_MoE_CR_SIM' maps to class DENSE_2D_CR_ADD_SIM —
# the key and class names differ; confirm which spelling configs rely on.
__all__ = {
    'DENSE_3D_CR':DENSE_3D_CR,
    'DENSE_3D_DT':DENSE_3D_DT,
    'DENSE_2D_DT':DENSE_2D_DT,
    'DENSE_CR': DENSE_CR,
    'DENSE_2D_CR_ADD':DENSE_2D_CR_ADD,
    'DENSE_2D_MoE_CR_SIM':DENSE_2D_CR_ADD_SIM,
    'DENSE_2D_MoE_ADD_wo_SE':DENSE_2D_MoE_ADD_wo_SE,
    'DENSE_2D_MoE_ADD_wo_AT':DENSE_2D_MoE_ADD_wo_AT,
}
| 672
| 29.590909
| 65
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/detectors/point_rcnn.py
|
from .detector3d_template import Detector3DTemplate
class PointRCNN(Detector3DTemplate):
    """Two-stage point-based detector (PointRCNN) assembled from the
    template's module list."""

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()

    def forward(self, batch_dict):
        # Pipe the shared batch dict through every sub-module in order.
        for module in self.module_list:
            batch_dict = module(batch_dict)
        if not self.training:
            # Eval path: decode predictions and gather recall statistics.
            return self.post_processing(batch_dict)
        loss, tb_dict, disp_dict = self.get_training_loss()
        return {'loss': loss}, tb_dict, disp_dict

    def get_training_loss(self):
        """Return (total loss, tensorboard dict, display dict)."""
        disp_dict = {}
        # Stage-1 (point head) and stage-2 (RoI head) losses share tb_dict.
        loss_point, tb_dict = self.point_head.get_loss()
        loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)
        return loss_point + loss_rcnn, tb_dict, disp_dict
| 999
| 31.258065
| 83
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/detectors/pointpillar.py
|
from .detector3d_template import Detector3DTemplate
class PointPillar(Detector3DTemplate):
    """Single-stage pillar-based detector; modules are built by the template
    and executed sequentially on the batch dict."""
    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()
    def forward(self, batch_dict):
        # Each module reads from and writes back into the shared batch_dict.
        for cur_module in self.module_list:
            batch_dict = cur_module(batch_dict)
        if self.training:
            loss, tb_dict, disp_dict = self.get_training_loss()
            ret_dict = {
                'loss': loss
            }
            return ret_dict, tb_dict, disp_dict
        else:
            # Eval path: decode predictions and gather recall statistics.
            pred_dicts, recall_dicts = self.post_processing(batch_dict)
            return pred_dicts, recall_dicts
    def get_training_loss(self):
        """Return (total loss, tensorboard dict, display dict); only the
        dense head (RPN) contributes to the loss here."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head.get_loss()
        tb_dict = {
            'loss_rpn': loss_rpn.item(),
            **tb_dict
        }
        loss = loss_rpn
        return loss, tb_dict, disp_dict
| 1,018
| 28.114286
| 83
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/detectors/second_net.py
|
from .detector3d_template import Detector3DTemplate
class SECONDNet(Detector3DTemplate):
    """SECOND detector: single-stage voxel pipeline driven by the module list.

    Training mode returns (ret_dict, tb_dict, disp_dict); eval mode returns
    (pred_dicts, recall_dicts) from the shared post-processing.
    """

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()

    def forward(self, batch_dict):
        """Feed batch_dict through every built module, then branch on mode."""
        for module in self.module_list:
            batch_dict = module(batch_dict)

        if not self.training:
            # post_processing already yields the (pred_dicts, recall_dicts) pair
            return self.post_processing(batch_dict)

        loss, tb_dict, disp_dict = self.get_training_loss()
        return {'loss': loss}, tb_dict, disp_dict

    def get_training_loss(self):
        """Total loss is just the dense-head (RPN) loss; log its scalar too."""
        loss_rpn, tb_dict = self.dense_head.get_loss()
        tb_dict = dict(loss_rpn=loss_rpn.item(), **tb_dict)
        return loss_rpn, tb_dict, {}
| 1,016
| 28.057143
| 83
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/detectors/detector3d_template_IASSD.py
|
import os
import torch
import torch.nn as nn
from ...ops.iou3d_nms import iou3d_nms_utils
from ...utils.spconv_utils import find_all_spconv_keys
from .. import backbones_2d, backbones_3d, dense_heads, roi_heads
from ..backbones_2d import map_to_bev
from ..backbones_3d import pfe, vfe
from ..model_utils import model_nms_utils
class Detector3DTemplate_IASSD(nn.Module):
    """Base template for IA-SSD style detectors.

    Builds the sub-modules listed in ``module_topology`` from ``model_cfg``,
    and provides the shared post-processing, recall bookkeeping and
    checkpoint-loading logic. Note: ``build_backbone_3d`` here also forwards
    ``num_class`` to the 3D backbone (the IA-SSD backbone predicts classes).
    """
    def __init__(self, model_cfg, num_class, dataset):
        super().__init__()
        self.model_cfg = model_cfg
        self.num_class = num_class
        self.dataset = dataset
        self.class_names = dataset.class_names
        # persistent step counter: saved/restored with the model checkpoint
        self.register_buffer('global_step', torch.LongTensor(1).zero_())
        # build order matters: later modules consume features produced earlier
        self.module_topology = [
            'vfe', 'backbone_3d', 'map_to_bev_module', 'pfe',
            'backbone_2d', 'dense_head', 'point_head', 'roi_head'
        ]
    @property
    def mode(self):
        """Return 'TRAIN' or 'TEST' depending on ``self.training``."""
        return 'TRAIN' if self.training else 'TEST'
    def update_global_step(self):
        """Advance the persistent global step counter by one."""
        self.global_step += 1
    def build_networks(self):
        """Instantiate every module in ``module_topology`` via its ``build_*`` method.

        Returns:
            list: the built modules, in topology order (skipped modules omitted).
        """
        model_info_dict = {
            'module_list': [],
            'num_rawpoint_features': self.dataset.point_feature_encoder.num_point_features,
            'num_point_features': self.dataset.point_feature_encoder.num_point_features,
            'grid_size': self.dataset.grid_size,
            'point_cloud_range': self.dataset.point_cloud_range,
            'voxel_size': self.dataset.voxel_size,
            'depth_downsample_factor': self.dataset.depth_downsample_factor
        }
        for module_name in self.module_topology:
            module, model_info_dict = getattr(self, 'build_%s' % module_name)(
                model_info_dict=model_info_dict
            )
            self.add_module(module_name, module)
        return model_info_dict['module_list']
    def build_vfe(self, model_info_dict):
        """Build the voxel feature encoder (or return None if not configured)."""
        if self.model_cfg.get('VFE', None) is None:
            return None, model_info_dict
        vfe_module = vfe.__all__[self.model_cfg.VFE.NAME](
            model_cfg=self.model_cfg.VFE,
            num_point_features=model_info_dict['num_rawpoint_features'],
            point_cloud_range=model_info_dict['point_cloud_range'],
            voxel_size=model_info_dict['voxel_size'],
            grid_size=model_info_dict['grid_size'],
            depth_downsample_factor=model_info_dict['depth_downsample_factor']
        )
        # downstream modules see the VFE output width, not the raw point width
        model_info_dict['num_point_features'] = vfe_module.get_output_feature_dim()
        model_info_dict['module_list'].append(vfe_module)
        return vfe_module, model_info_dict
    def build_backbone_3d(self, model_info_dict):
        """Build the 3D backbone (or return None if not configured)."""
        if self.model_cfg.get('BACKBONE_3D', None) is None:
            return None, model_info_dict
        backbone_3d_module = backbones_3d.__all__[self.model_cfg.BACKBONE_3D.NAME](
            model_cfg=self.model_cfg.BACKBONE_3D,
            num_class=self.num_class,  # IA-SSD: the backbone itself needs the class count
            input_channels=model_info_dict['num_point_features'],
            grid_size=model_info_dict['grid_size'],
            voxel_size=model_info_dict['voxel_size'],
            point_cloud_range=model_info_dict['point_cloud_range']
        )
        model_info_dict['module_list'].append(backbone_3d_module)
        model_info_dict['num_point_features'] = backbone_3d_module.num_point_features
        # backbone_channels is optional; some backbones don't expose it
        model_info_dict['backbone_channels'] = backbone_3d_module.backbone_channels \
            if hasattr(backbone_3d_module, 'backbone_channels') else None
        return backbone_3d_module, model_info_dict
    def build_map_to_bev_module(self, model_info_dict):
        """Build the 3D-to-BEV projection module (or return None if not configured)."""
        if self.model_cfg.get('MAP_TO_BEV', None) is None:
            return None, model_info_dict
        map_to_bev_module = map_to_bev.__all__[self.model_cfg.MAP_TO_BEV.NAME](
            model_cfg=self.model_cfg.MAP_TO_BEV,
            grid_size=model_info_dict['grid_size']
        )
        model_info_dict['module_list'].append(map_to_bev_module)
        model_info_dict['num_bev_features'] = map_to_bev_module.num_bev_features
        return map_to_bev_module, model_info_dict
    def build_backbone_2d(self, model_info_dict):
        """Build the 2D BEV backbone (or return None if not configured)."""
        if self.model_cfg.get('BACKBONE_2D', None) is None:
            return None, model_info_dict
        backbone_2d_module = backbones_2d.__all__[self.model_cfg.BACKBONE_2D.NAME](
            model_cfg=self.model_cfg.BACKBONE_2D,
            input_channels=model_info_dict['num_bev_features']
        )
        model_info_dict['module_list'].append(backbone_2d_module)
        model_info_dict['num_bev_features'] = backbone_2d_module.num_bev_features
        return backbone_2d_module, model_info_dict
    def build_pfe(self, model_info_dict):
        """Build the point feature extractor (or return None if not configured)."""
        if self.model_cfg.get('PFE', None) is None:
            return None, model_info_dict
        pfe_module = pfe.__all__[self.model_cfg.PFE.NAME](
            model_cfg=self.model_cfg.PFE,
            voxel_size=model_info_dict['voxel_size'],
            point_cloud_range=model_info_dict['point_cloud_range'],
            num_bev_features=model_info_dict['num_bev_features'],
            num_rawpoint_features=model_info_dict['num_rawpoint_features']
        )
        model_info_dict['module_list'].append(pfe_module)
        model_info_dict['num_point_features'] = pfe_module.num_point_features
        model_info_dict['num_point_features_before_fusion'] = pfe_module.num_point_features_before_fusion
        return pfe_module, model_info_dict
    def build_dense_head(self, model_info_dict):
        """Build the dense (anchor/center) head (or return None if not configured)."""
        if self.model_cfg.get('DENSE_HEAD', None) is None:
            return None, model_info_dict
        dense_head_module = dense_heads.__all__[self.model_cfg.DENSE_HEAD.NAME](
            model_cfg=self.model_cfg.DENSE_HEAD,
            input_channels=model_info_dict['num_bev_features'],
            num_class=self.num_class if not self.model_cfg.DENSE_HEAD.CLASS_AGNOSTIC else 1,
            class_names=self.class_names,
            grid_size=model_info_dict['grid_size'],
            point_cloud_range=model_info_dict['point_cloud_range'],
            # a ROI head downstream needs proposals even during training
            predict_boxes_when_training=self.model_cfg.get('ROI_HEAD', False),
            voxel_size=model_info_dict.get('voxel_size', False)
        )
        model_info_dict['module_list'].append(dense_head_module)
        return dense_head_module, model_info_dict
    def build_point_head(self, model_info_dict):
        """Build the point head (or return None if not configured)."""
        if self.model_cfg.get('POINT_HEAD', None) is None:
            return None, model_info_dict
        if self.model_cfg.POINT_HEAD.get('USE_POINT_FEATURES_BEFORE_FUSION', False):
            num_point_features = model_info_dict['num_point_features_before_fusion']
        else:
            num_point_features = model_info_dict['num_point_features']
        point_head_module = dense_heads.__all__[self.model_cfg.POINT_HEAD.NAME](
            model_cfg=self.model_cfg.POINT_HEAD,
            input_channels=num_point_features,
            num_class=self.num_class if not self.model_cfg.POINT_HEAD.CLASS_AGNOSTIC else 1,
            predict_boxes_when_training=self.model_cfg.get('ROI_HEAD', False)
        )
        model_info_dict['module_list'].append(point_head_module)
        return point_head_module, model_info_dict
    def build_roi_head(self, model_info_dict):
        """Build the second-stage RoI head (or return None if not configured)."""
        if self.model_cfg.get('ROI_HEAD', None) is None:
            return None, model_info_dict
        point_head_module = roi_heads.__all__[self.model_cfg.ROI_HEAD.NAME](
            model_cfg=self.model_cfg.ROI_HEAD,
            input_channels=model_info_dict['num_point_features'],
            backbone_channels=model_info_dict['backbone_channels'],
            point_cloud_range=model_info_dict['point_cloud_range'],
            voxel_size=model_info_dict['voxel_size'],
            num_class=self.num_class if not self.model_cfg.ROI_HEAD.CLASS_AGNOSTIC else 1,
        )
        model_info_dict['module_list'].append(point_head_module)
        return point_head_module, model_info_dict
    def forward(self, **kwargs):
        # concrete detectors must implement their own forward pass
        raise NotImplementedError
    def post_processing(self, batch_dict):
        """Apply score thresholding and NMS to raw predictions; accumulate recall stats.

        Args:
            batch_dict:
                batch_size:
                batch_cls_preds: (B, num_boxes, num_classes | 1) or (N1+N2+..., num_classes | 1)
                    or [(B, num_boxes, num_class1), (B, num_boxes, num_class2) ...]
                multihead_label_mapping: [(num_class1), (num_class2), ...]
                batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C)
                cls_preds_normalized: indicate whether batch_cls_preds is normalized
                batch_index: optional (N1+N2+...)
                has_class_labels: True/False
                roi_labels: (B, num_rois) 1 .. num_classes
                batch_pred_labels: (B, num_boxes, 1)
        Returns:
            tuple: (pred_dicts, recall_dict) — one prediction dict per sample.
        """
        post_process_cfg = self.model_cfg.POST_PROCESSING
        batch_size = batch_dict['batch_size']
        recall_dict = {}
        pred_dicts = []
        for index in range(batch_size):
            # predictions may be flat (indexed by batch_index) or batched (B, N, ...)
            if batch_dict.get('batch_index', None) is not None:
                assert batch_dict['batch_box_preds'].shape.__len__() == 2
                batch_mask = (batch_dict['batch_index'] == index)
            else:
                assert batch_dict['batch_box_preds'].shape.__len__() == 3
                batch_mask = index
            box_preds = batch_dict['batch_box_preds'][batch_mask]
            src_box_preds = box_preds
            if not isinstance(batch_dict['batch_cls_preds'], list):
                cls_preds = batch_dict['batch_cls_preds'][batch_mask]
                src_cls_preds = cls_preds
                assert cls_preds.shape[1] in [1, self.num_class]
                if not batch_dict['cls_preds_normalized']:
                    cls_preds = torch.sigmoid(cls_preds)
            else:
                # multi-head case: one tensor of class scores per head
                cls_preds = [x[batch_mask] for x in batch_dict['batch_cls_preds']]
                src_cls_preds = cls_preds
                if not batch_dict['cls_preds_normalized']:
                    cls_preds = [torch.sigmoid(x) for x in cls_preds]
            if post_process_cfg.NMS_CONFIG.MULTI_CLASSES_NMS:
                if not isinstance(cls_preds, list):
                    cls_preds = [cls_preds]
                    multihead_label_mapping = [torch.arange(1, self.num_class, device=cls_preds[0].device)]
                else:
                    multihead_label_mapping = batch_dict['multihead_label_mapping']
                cur_start_idx = 0
                pred_scores, pred_labels, pred_boxes = [], [], []
                for cur_cls_preds, cur_label_mapping in zip(cls_preds, multihead_label_mapping):
                    assert cur_cls_preds.shape[1] == len(cur_label_mapping)
                    cur_box_preds = box_preds[cur_start_idx: cur_start_idx + cur_cls_preds.shape[0]]
                    cur_pred_scores, cur_pred_labels, cur_pred_boxes = model_nms_utils.multi_classes_nms(
                        cls_scores=cur_cls_preds, box_preds=cur_box_preds,
                        nms_config=post_process_cfg.NMS_CONFIG,
                        score_thresh=post_process_cfg.SCORE_THRESH
                    )
                    # map head-local label indices back to global class ids
                    cur_pred_labels = cur_label_mapping[cur_pred_labels]
                    pred_scores.append(cur_pred_scores)
                    pred_labels.append(cur_pred_labels)
                    pred_boxes.append(cur_pred_boxes)
                    cur_start_idx += cur_cls_preds.shape[0]
                final_scores = torch.cat(pred_scores, dim=0)
                final_labels = torch.cat(pred_labels, dim=0)
                final_boxes = torch.cat(pred_boxes, dim=0)
            else:
                cls_preds, label_preds = torch.max(cls_preds, dim=-1)
                if batch_dict.get('has_class_labels', False):
                    # labels were already decided upstream (e.g. by the ROI head)
                    label_key = 'roi_labels' if 'roi_labels' in batch_dict else 'batch_pred_labels'
                    label_preds = batch_dict[label_key][index]
                else:
                    label_preds = label_preds + 1
                selected, selected_scores = model_nms_utils.class_agnostic_nms(
                    box_scores=cls_preds, box_preds=box_preds,
                    nms_config=post_process_cfg.NMS_CONFIG,
                    score_thresh=post_process_cfg.SCORE_THRESH
                )
                if post_process_cfg.OUTPUT_RAW_SCORE:
                    # report pre-sigmoid logits instead of normalized scores
                    max_cls_preds, _ = torch.max(src_cls_preds, dim=-1)
                    selected_scores = max_cls_preds[selected]
                final_scores = selected_scores
                final_labels = label_preds[selected]
                final_boxes = box_preds[selected]
            if post_process_cfg.get('RECALL_MODE', 'normal') == 'normal':
                recall_dict = self.generate_recall_record(
                    box_preds=final_boxes if 'rois' not in batch_dict else src_box_preds,
                    recall_dict=recall_dict, batch_index=index, data_dict=batch_dict,
                    thresh_list=post_process_cfg.RECALL_THRESH_LIST
                )
            record_dict = {
                'pred_boxes': final_boxes,
                'pred_scores': final_scores,
                'pred_labels': final_labels
            }
            pred_dicts.append(record_dict)
        return pred_dicts, recall_dict
    @staticmethod
    def generate_recall_record(box_preds, recall_dict, batch_index, data_dict=None, thresh_list=None):
        """Accumulate per-threshold RoI/RCNN recall counts against GT boxes."""
        if 'gt_boxes' not in data_dict:
            return recall_dict
        rois = data_dict['rois'][batch_index] if 'rois' in data_dict else None
        gt_boxes = data_dict['gt_boxes'][batch_index]
        if recall_dict.__len__() == 0:
            recall_dict = {'gt': 0}
            for cur_thresh in thresh_list:
                recall_dict['roi_%s' % (str(cur_thresh))] = 0
                recall_dict['rcnn_%s' % (str(cur_thresh))] = 0
        cur_gt = gt_boxes
        k = cur_gt.__len__() - 1
        # trim trailing all-zero padding boxes
        # NOTE(review): the base template uses `k >= 0` here; with `k > 0` a
        # single all-zero gt row at index 0 is never trimmed — confirm intended.
        while k > 0 and cur_gt[k].sum() == 0:
            k -= 1
        cur_gt = cur_gt[:k + 1]
        if cur_gt.shape[0] > 0:
            if box_preds.shape[0] > 0:
                iou3d_rcnn = iou3d_nms_utils.boxes_iou3d_gpu(box_preds[:, 0:7], cur_gt[:, 0:7])
            else:
                iou3d_rcnn = torch.zeros((0, cur_gt.shape[0]))
            if rois is not None:
                iou3d_roi = iou3d_nms_utils.boxes_iou3d_gpu(rois[:, 0:7], cur_gt[:, 0:7])
            for cur_thresh in thresh_list:
                if iou3d_rcnn.shape[0] == 0:
                    recall_dict['rcnn_%s' % str(cur_thresh)] += 0
                else:
                    # a gt box is "recalled" if any prediction overlaps it above the threshold
                    rcnn_recalled = (iou3d_rcnn.max(dim=0)[0] > cur_thresh).sum().item()
                    recall_dict['rcnn_%s' % str(cur_thresh)] += rcnn_recalled
                if rois is not None:
                    roi_recalled = (iou3d_roi.max(dim=0)[0] > cur_thresh).sum().item()
                    recall_dict['roi_%s' % str(cur_thresh)] += roi_recalled
            recall_dict['gt'] += cur_gt.shape[0]
        else:
            gt_iou = box_preds.new_zeros(box_preds.shape[0])
        return recall_dict
    def _load_state_dict(self, model_state_disk, *, strict=True):
        """Load a checkpoint state dict, adapting spconv 1.x weight layouts to 2.x."""
        state_dict = self.state_dict()  # local cache of state_dict
        spconv_keys = find_all_spconv_keys(self)
        update_model_state = {}
        for key, val in model_state_disk.items():
            if key in spconv_keys and key in state_dict and state_dict[key].shape != val.shape:
                # with different spconv versions, we need to adapt weight shapes for spconv blocks
                # adapt spconv weights from version 1.x to version 2.x if you used weights from spconv 1.x
                val_native = val.transpose(-1, -2)  # (k1, k2, k3, c_in, c_out) to (k1, k2, k3, c_out, c_in)
                if val_native.shape == state_dict[key].shape:
                    val = val_native.contiguous()
                else:
                    assert val.shape.__len__() == 5, 'currently only spconv 3D is supported'
                    val_implicit = val.permute(4, 0, 1, 2, 3)  # (k1, k2, k3, c_in, c_out) to (c_out, k1, k2, k3, c_in)
                    if val_implicit.shape == state_dict[key].shape:
                        val = val_implicit.contiguous()
            # only accept weights whose (possibly adapted) shape matches the model
            if key in state_dict and state_dict[key].shape == val.shape:
                update_model_state[key] = val
                # logger.info('Update weight %s: %s' % (key, str(val.shape)))
        if strict:
            self.load_state_dict(update_model_state)
        else:
            # partial load: keep current weights for keys missing from the checkpoint
            state_dict.update(update_model_state)
            self.load_state_dict(state_dict)
        return state_dict, update_model_state
    def load_params_from_file(self, filename, logger, to_cpu=False):
        """Load model weights (non-strict) from a checkpoint file; log skipped keys."""
        if not os.path.isfile(filename):
            raise FileNotFoundError
        logger.info('==> Loading parameters from checkpoint %s to %s' % (filename, 'CPU' if to_cpu else 'GPU'))
        loc_type = torch.device('cpu') if to_cpu else None
        checkpoint = torch.load(filename, map_location=loc_type)
        model_state_disk = checkpoint['model_state']
        version = checkpoint.get("version", None)
        if version is not None:
            logger.info('==> Checkpoint trained from version: %s' % version)
        state_dict, update_model_state = self._load_state_dict(model_state_disk, strict=False)
        for key in state_dict:
            if key not in update_model_state:
                logger.info('Not updated weight %s: %s' % (key, str(state_dict[key].shape)))
        logger.info('==> Done (loaded %d/%d)' % (len(update_model_state), len(state_dict)))
    def load_params_with_optimizer(self, filename, to_cpu=False, optimizer=None, logger=None):
        """Resume training: load model weights (strict) plus optimizer state.

        Returns:
            tuple: (it, epoch) recorded in the checkpoint (defaults 0.0, -1).
        """
        if not os.path.isfile(filename):
            raise FileNotFoundError
        logger.info('==> Loading parameters from checkpoint %s to %s' % (filename, 'CPU' if to_cpu else 'GPU'))
        loc_type = torch.device('cpu') if to_cpu else None
        checkpoint = torch.load(filename, map_location=loc_type)
        epoch = checkpoint.get('epoch', -1)
        it = checkpoint.get('it', 0.0)
        self._load_state_dict(checkpoint['model_state'], strict=True)
        if optimizer is not None:
            if 'optimizer_state' in checkpoint and checkpoint['optimizer_state'] is not None:
                logger.info('==> Loading optimizer parameters from checkpoint %s to %s'
                            % (filename, 'CPU' if to_cpu else 'GPU'))
                optimizer.load_state_dict(checkpoint['optimizer_state'])
            else:
                # fall back to a sibling "<name>_optim.<ext>" optimizer checkpoint
                assert filename[-4] == '.', filename
                src_file, ext = filename[:-4], filename[-3:]
                optimizer_filename = '%s_optim.%s' % (src_file, ext)
                if os.path.exists(optimizer_filename):
                    optimizer_ckpt = torch.load(optimizer_filename, map_location=loc_type)
                    optimizer.load_state_dict(optimizer_ckpt['optimizer_state'])
        if 'version' in checkpoint:
            print('==> Checkpoint trained from version: %s' % checkpoint['version'])
        logger.info('==> Done')
        return it, epoch
| 19,155
| 45.382567
| 119
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/detectors/detector3d_template.py
|
import os
import torch
import torch.nn as nn
from ...ops.iou3d_nms import iou3d_nms_utils
from ...utils.spconv_utils import find_all_spconv_keys
from .. import backbones_2d, backbones_3d, dense_heads, roi_heads
from ..backbones_2d import map_to_bev
from ..backbones_3d import pfe, vfe
from ..model_utils import model_nms_utils
class Detector3DTemplate(nn.Module):
    """Generic base template for 3D detectors.

    Builds the sub-modules listed in ``module_topology`` from ``model_cfg``
    (this variant includes an optional ``unetscn`` stage), and provides shared
    post-processing, recall bookkeeping and checkpoint-loading logic.
    """
    def __init__(self, model_cfg, num_class, dataset):
        super().__init__()
        self.model_cfg = model_cfg
        self.num_class = num_class
        self.dataset = dataset
        self.class_names = dataset.class_names
        # persistent step counter: saved/restored with the model checkpoint
        self.register_buffer('global_step', torch.LongTensor(1).zero_())
        # build order matters: later modules consume features produced earlier
        self.module_topology = [
            'vfe', 'unetscn', 'backbone_3d', 'map_to_bev_module', 'pfe',
            'backbone_2d', 'dense_head', 'point_head', 'roi_head'
        ]
    @property
    def mode(self):
        """Return 'TRAIN' or 'TEST' depending on ``self.training``."""
        return 'TRAIN' if self.training else 'TEST'
    def update_global_step(self):
        """Advance the persistent global step counter by one."""
        self.global_step += 1
    def build_networks(self):
        """Instantiate every module in ``module_topology`` via its ``build_*`` method.

        Returns:
            list: the built modules, in topology order (skipped modules omitted).
        """
        model_info_dict = {
            'module_list': [],
            'num_rawpoint_features': self.dataset.point_feature_encoder.num_point_features,
            'num_point_features': self.dataset.point_feature_encoder.num_point_features,
            'grid_size': self.dataset.grid_size,
            'point_cloud_range': self.dataset.point_cloud_range,
            'voxel_size': self.dataset.voxel_size,
            'depth_downsample_factor': self.dataset.depth_downsample_factor
        }
        for module_name in self.module_topology:
            module, model_info_dict = getattr(self, 'build_%s' % module_name)(
                model_info_dict=model_info_dict
            )
            self.add_module(module_name, module)
        return model_info_dict['module_list']
    def build_unetscn(self, model_info_dict):
        """Build the optional UNet-style sparse conv stage (or return None)."""
        if self.model_cfg.get('UNETSCN', None) is None:
            return None, model_info_dict
        unetscn_module = pfe.__all__[self.model_cfg.UNETSCN.NAME](
            model_cfg=self.model_cfg.UNETSCN,
            input_channels=model_info_dict['num_point_features'],
            grid_size=model_info_dict['grid_size']
        )
        model_info_dict['module_list'].append(unetscn_module)
        return unetscn_module, model_info_dict
    def build_vfe(self, model_info_dict):
        """Build the voxel feature encoder (or return None if not configured)."""
        if self.model_cfg.get('VFE', None) is None:
            return None, model_info_dict
        vfe_module = vfe.__all__[self.model_cfg.VFE.NAME](
            model_cfg=self.model_cfg.VFE,
            num_point_features=model_info_dict['num_rawpoint_features'],
            point_cloud_range=model_info_dict['point_cloud_range'],
            voxel_size=model_info_dict['voxel_size'],
            grid_size=model_info_dict['grid_size'],
            depth_downsample_factor=model_info_dict['depth_downsample_factor']
        )
        # downstream modules see the VFE output width, not the raw point width
        model_info_dict['num_point_features'] = vfe_module.get_output_feature_dim()
        model_info_dict['module_list'].append(vfe_module)
        return vfe_module, model_info_dict
    def build_backbone_3d(self, model_info_dict):
        """Build the 3D backbone (or return None if not configured)."""
        if self.model_cfg.get('BACKBONE_3D', None) is None:
            return None, model_info_dict
        backbone_3d_module = backbones_3d.__all__[self.model_cfg.BACKBONE_3D.NAME](
            model_cfg=self.model_cfg.BACKBONE_3D,
            input_channels=model_info_dict['num_point_features'],
            grid_size=model_info_dict['grid_size'],
            voxel_size=model_info_dict['voxel_size'],
            point_cloud_range=model_info_dict['point_cloud_range']
        )
        model_info_dict['module_list'].append(backbone_3d_module)
        model_info_dict['num_point_features'] = backbone_3d_module.num_point_features
        # backbone_channels is optional; some backbones don't expose it
        model_info_dict['backbone_channels'] = backbone_3d_module.backbone_channels \
            if hasattr(backbone_3d_module, 'backbone_channels') else None
        return backbone_3d_module, model_info_dict
    def build_map_to_bev_module(self, model_info_dict):
        """Build the 3D-to-BEV projection module (or return None if not configured)."""
        if self.model_cfg.get('MAP_TO_BEV', None) is None:
            return None, model_info_dict
        map_to_bev_module = map_to_bev.__all__[self.model_cfg.MAP_TO_BEV.NAME](
            model_cfg=self.model_cfg.MAP_TO_BEV,
            grid_size=model_info_dict['grid_size']
        )
        model_info_dict['module_list'].append(map_to_bev_module)
        model_info_dict['num_bev_features'] = map_to_bev_module.num_bev_features
        return map_to_bev_module, model_info_dict
    def build_backbone_2d(self, model_info_dict):
        """Build the 2D BEV backbone (or return None if not configured)."""
        if self.model_cfg.get('BACKBONE_2D', None) is None:
            return None, model_info_dict
        backbone_2d_module = backbones_2d.__all__[self.model_cfg.BACKBONE_2D.NAME](
            model_cfg=self.model_cfg.BACKBONE_2D,
            input_channels=model_info_dict['num_bev_features']
        )
        model_info_dict['module_list'].append(backbone_2d_module)
        model_info_dict['num_bev_features'] = backbone_2d_module.num_bev_features
        return backbone_2d_module, model_info_dict
    def build_pfe(self, model_info_dict):
        """Build the point feature extractor (or return None if not configured)."""
        if self.model_cfg.get('PFE', None) is None:
            return None, model_info_dict
        pfe_module = pfe.__all__[self.model_cfg.PFE.NAME](
            model_cfg=self.model_cfg.PFE,
            voxel_size=model_info_dict['voxel_size'],
            point_cloud_range=model_info_dict['point_cloud_range'],
            num_bev_features=model_info_dict['num_bev_features'],
            num_rawpoint_features=model_info_dict['num_rawpoint_features']
        )
        model_info_dict['module_list'].append(pfe_module)
        model_info_dict['num_point_features'] = pfe_module.num_point_features
        model_info_dict['num_point_features_before_fusion'] = pfe_module.num_point_features_before_fusion
        return pfe_module, model_info_dict
    def build_dense_head(self, model_info_dict):
        """Build the dense (anchor/center) head (or return None if not configured)."""
        if self.model_cfg.get('DENSE_HEAD', None) is None:
            return None, model_info_dict
        dense_head_module = dense_heads.__all__[self.model_cfg.DENSE_HEAD.NAME](
            model_cfg=self.model_cfg.DENSE_HEAD,
            input_channels=model_info_dict['num_bev_features'],
            num_class=self.num_class if not self.model_cfg.DENSE_HEAD.CLASS_AGNOSTIC else 1,
            class_names=self.class_names,
            grid_size=model_info_dict['grid_size'],
            point_cloud_range=model_info_dict['point_cloud_range'],
            # a ROI head downstream needs proposals even during training
            predict_boxes_when_training=self.model_cfg.get('ROI_HEAD', False),
            voxel_size=model_info_dict.get('voxel_size', False)
        )
        model_info_dict['module_list'].append(dense_head_module)
        return dense_head_module, model_info_dict
    def build_point_head(self, model_info_dict):
        """Build the point head (or return None if not configured)."""
        if self.model_cfg.get('POINT_HEAD', None) is None:
            return None, model_info_dict
        if self.model_cfg.POINT_HEAD.get('USE_POINT_FEATURES_BEFORE_FUSION', False):
            num_point_features = model_info_dict['num_point_features_before_fusion']
        else:
            num_point_features = model_info_dict['num_point_features']
        point_head_module = dense_heads.__all__[self.model_cfg.POINT_HEAD.NAME](
            model_cfg=self.model_cfg.POINT_HEAD,
            input_channels=num_point_features,
            num_class=self.num_class if not self.model_cfg.POINT_HEAD.CLASS_AGNOSTIC else 1,
            predict_boxes_when_training=self.model_cfg.get('ROI_HEAD', False)
        )
        model_info_dict['module_list'].append(point_head_module)
        return point_head_module, model_info_dict
    def build_roi_head(self, model_info_dict):
        """Build the second-stage RoI head (or return None if not configured)."""
        if self.model_cfg.get('ROI_HEAD', None) is None:
            return None, model_info_dict
        point_head_module = roi_heads.__all__[self.model_cfg.ROI_HEAD.NAME](
            model_cfg=self.model_cfg.ROI_HEAD,
            input_channels=model_info_dict['num_point_features'],
            backbone_channels=model_info_dict['backbone_channels'],
            point_cloud_range=model_info_dict['point_cloud_range'],
            voxel_size=model_info_dict['voxel_size'],
            num_class=self.num_class if not self.model_cfg.ROI_HEAD.CLASS_AGNOSTIC else 1,
        )
        model_info_dict['module_list'].append(point_head_module)
        return point_head_module, model_info_dict
    def forward(self, **kwargs):
        # concrete detectors must implement their own forward pass
        raise NotImplementedError
    def post_processing(self, batch_dict):
        """Apply score thresholding and NMS to raw predictions; accumulate recall stats.

        Args:
            batch_dict:
                batch_size:
                batch_cls_preds: (B, num_boxes, num_classes | 1) or (N1+N2+..., num_classes | 1)
                    or [(B, num_boxes, num_class1), (B, num_boxes, num_class2) ...]
                multihead_label_mapping: [(num_class1), (num_class2), ...]
                batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C)
                cls_preds_normalized: indicate whether batch_cls_preds is normalized
                batch_index: optional (N1+N2+...)
                has_class_labels: True/False
                roi_labels: (B, num_rois) 1 .. num_classes
                batch_pred_labels: (B, num_boxes, 1)
        Returns:
            tuple: (pred_dicts, recall_dict) — one prediction dict per sample.
        """
        post_process_cfg = self.model_cfg.POST_PROCESSING
        batch_size = batch_dict['batch_size']
        recall_dict = {}
        pred_dicts = []
        for index in range(batch_size):
            # predictions may be flat (indexed by batch_index) or batched (B, N, ...)
            if batch_dict.get('batch_index', None) is not None:
                assert batch_dict['batch_box_preds'].shape.__len__() == 2
                batch_mask = (batch_dict['batch_index'] == index)
            else:
                assert batch_dict['batch_box_preds'].shape.__len__() == 3
                batch_mask = index
            box_preds = batch_dict['batch_box_preds'][batch_mask]
            src_box_preds = box_preds
            if not isinstance(batch_dict['batch_cls_preds'], list):
                cls_preds = batch_dict['batch_cls_preds'][batch_mask]
                src_cls_preds = cls_preds
                assert cls_preds.shape[1] in [1, self.num_class]
                if not batch_dict['cls_preds_normalized']:
                    cls_preds = torch.sigmoid(cls_preds)
            else:
                # multi-head case: one tensor of class scores per head
                cls_preds = [x[batch_mask] for x in batch_dict['batch_cls_preds']]
                src_cls_preds = cls_preds
                if not batch_dict['cls_preds_normalized']:
                    cls_preds = [torch.sigmoid(x) for x in cls_preds]
            if post_process_cfg.NMS_CONFIG.MULTI_CLASSES_NMS:
                if not isinstance(cls_preds, list):
                    cls_preds = [cls_preds]
                    multihead_label_mapping = [torch.arange(1, self.num_class, device=cls_preds[0].device)]
                else:
                    multihead_label_mapping = batch_dict['multihead_label_mapping']
                cur_start_idx = 0
                pred_scores, pred_labels, pred_boxes = [], [], []
                for cur_cls_preds, cur_label_mapping in zip(cls_preds, multihead_label_mapping):
                    assert cur_cls_preds.shape[1] == len(cur_label_mapping)
                    cur_box_preds = box_preds[cur_start_idx: cur_start_idx + cur_cls_preds.shape[0]]
                    cur_pred_scores, cur_pred_labels, cur_pred_boxes = model_nms_utils.multi_classes_nms(
                        cls_scores=cur_cls_preds, box_preds=cur_box_preds,
                        nms_config=post_process_cfg.NMS_CONFIG,
                        score_thresh=post_process_cfg.SCORE_THRESH
                    )
                    # map head-local label indices back to global class ids
                    cur_pred_labels = cur_label_mapping[cur_pred_labels]
                    pred_scores.append(cur_pred_scores)
                    pred_labels.append(cur_pred_labels)
                    pred_boxes.append(cur_pred_boxes)
                    cur_start_idx += cur_cls_preds.shape[0]
                final_scores = torch.cat(pred_scores, dim=0)
                final_labels = torch.cat(pred_labels, dim=0)
                final_boxes = torch.cat(pred_boxes, dim=0)
            else:
                cls_preds, label_preds = torch.max(cls_preds, dim=-1)
                if batch_dict.get('has_class_labels', False):
                    # labels were already decided upstream (e.g. by the ROI head)
                    label_key = 'roi_labels' if 'roi_labels' in batch_dict else 'batch_pred_labels'
                    label_preds = batch_dict[label_key][index]
                else:
                    label_preds = label_preds + 1
                selected, selected_scores = model_nms_utils.class_agnostic_nms(
                    box_scores=cls_preds, box_preds=box_preds,
                    nms_config=post_process_cfg.NMS_CONFIG,
                    score_thresh=post_process_cfg.SCORE_THRESH
                )
                if post_process_cfg.OUTPUT_RAW_SCORE:
                    # report pre-sigmoid logits instead of normalized scores
                    max_cls_preds, _ = torch.max(src_cls_preds, dim=-1)
                    selected_scores = max_cls_preds[selected]
                final_scores = selected_scores
                final_labels = label_preds[selected]
                final_boxes = box_preds[selected]
            recall_dict = self.generate_recall_record(
                box_preds=final_boxes if 'rois' not in batch_dict else src_box_preds,
                recall_dict=recall_dict, batch_index=index, data_dict=batch_dict,
                thresh_list=post_process_cfg.RECALL_THRESH_LIST
            )
            record_dict = {
                'pred_boxes': final_boxes,
                'pred_scores': final_scores,
                'pred_labels': final_labels
            }
            pred_dicts.append(record_dict)
        return pred_dicts, recall_dict
    @staticmethod
    def generate_recall_record(box_preds, recall_dict, batch_index, data_dict=None, thresh_list=None):
        """Accumulate per-threshold RoI/RCNN recall counts against GT boxes."""
        if 'gt_boxes' not in data_dict:
            return recall_dict
        rois = data_dict['rois'][batch_index] if 'rois' in data_dict else None
        gt_boxes = data_dict['gt_boxes'][batch_index]
        if recall_dict.__len__() == 0:
            recall_dict = {'gt': 0}
            for cur_thresh in thresh_list:
                recall_dict['roi_%s' % (str(cur_thresh))] = 0
                recall_dict['rcnn_%s' % (str(cur_thresh))] = 0
        cur_gt = gt_boxes
        k = cur_gt.__len__() - 1
        # trim trailing all-zero padding boxes
        while k >= 0 and cur_gt[k].sum() == 0:
            k -= 1
        cur_gt = cur_gt[:k + 1]
        if cur_gt.shape[0] > 0:
            if box_preds.shape[0] > 0:
                iou3d_rcnn = iou3d_nms_utils.boxes_iou3d_gpu(box_preds[:, 0:7], cur_gt[:, 0:7])
            else:
                iou3d_rcnn = torch.zeros((0, cur_gt.shape[0]))
            if rois is not None:
                iou3d_roi = iou3d_nms_utils.boxes_iou3d_gpu(rois[:, 0:7], cur_gt[:, 0:7])
            for cur_thresh in thresh_list:
                if iou3d_rcnn.shape[0] == 0:
                    recall_dict['rcnn_%s' % str(cur_thresh)] += 0
                else:
                    # a gt box is "recalled" if any prediction overlaps it above the threshold
                    rcnn_recalled = (iou3d_rcnn.max(dim=0)[0] > cur_thresh).sum().item()
                    recall_dict['rcnn_%s' % str(cur_thresh)] += rcnn_recalled
                if rois is not None:
                    roi_recalled = (iou3d_roi.max(dim=0)[0] > cur_thresh).sum().item()
                    recall_dict['roi_%s' % str(cur_thresh)] += roi_recalled
            recall_dict['gt'] += cur_gt.shape[0]
        else:
            gt_iou = box_preds.new_zeros(box_preds.shape[0])
        return recall_dict
    def _load_state_dict(self, model_state_disk, *, strict=True):
        """Load a checkpoint state dict, adapting spconv 1.x weight layouts to 2.x."""
        state_dict = self.state_dict()  # local cache of state_dict
        spconv_keys = find_all_spconv_keys(self)
        update_model_state = {}
        for key, val in model_state_disk.items():
            if key in spconv_keys and key in state_dict and state_dict[key].shape != val.shape:
                # with different spconv versions, we need to adapt weight shapes for spconv blocks
                # adapt spconv weights from version 1.x to version 2.x if you used weights from spconv 1.x
                val_native = val.transpose(-1, -2)  # (k1, k2, k3, c_in, c_out) to (k1, k2, k3, c_out, c_in)
                if val_native.shape == state_dict[key].shape:
                    val = val_native.contiguous()
                else:
                    assert val.shape.__len__() == 5, 'currently only spconv 3D is supported'
                    val_implicit = val.permute(4, 0, 1, 2, 3)  # (k1, k2, k3, c_in, c_out) to (c_out, k1, k2, k3, c_in)
                    if val_implicit.shape == state_dict[key].shape:
                        val = val_implicit.contiguous()
            # only accept weights whose (possibly adapted) shape matches the model
            if key in state_dict and state_dict[key].shape == val.shape:
                update_model_state[key] = val
                # logger.info('Update weight %s: %s' % (key, str(val.shape)))
        if strict:
            self.load_state_dict(update_model_state)
        else:
            # partial load: keep current weights for keys missing from the checkpoint
            state_dict.update(update_model_state)
            self.load_state_dict(state_dict)
        return state_dict, update_model_state
    def load_params_from_file(self, filename, logger, to_cpu=False):
        """Load model weights (non-strict) from a checkpoint file; log skipped keys."""
        if not os.path.isfile(filename):
            raise FileNotFoundError
        logger.info('==> Loading parameters from checkpoint %s to %s' % (filename, 'CPU' if to_cpu else 'GPU'))
        loc_type = torch.device('cpu') if to_cpu else None
        checkpoint = torch.load(filename, map_location=loc_type)
        model_state_disk = checkpoint['model_state']
        version = checkpoint.get("version", None)
        if version is not None:
            logger.info('==> Checkpoint trained from version: %s' % version)
        state_dict, update_model_state = self._load_state_dict(model_state_disk, strict=False)
        for key in state_dict:
            if key not in update_model_state:
                logger.info('Not updated weight %s: %s' % (key, str(state_dict[key].shape)))
        logger.info('==> Done (loaded %d/%d)' % (len(update_model_state), len(state_dict)))
    def load_params_with_optimizer(self, filename, to_cpu=False, optimizer=None, logger=None):
        """Resume training: load model weights (strict) plus optimizer state.

        Returns:
            tuple: (it, epoch) recorded in the checkpoint (defaults 0.0, -1).
        """
        if not os.path.isfile(filename):
            raise FileNotFoundError
        logger.info('==> Loading parameters from checkpoint %s to %s' % (filename, 'CPU' if to_cpu else 'GPU'))
        loc_type = torch.device('cpu') if to_cpu else None
        checkpoint = torch.load(filename, map_location=loc_type)
        epoch = checkpoint.get('epoch', -1)
        it = checkpoint.get('it', 0.0)
        self._load_state_dict(checkpoint['model_state'], strict=True)
        if optimizer is not None:
            if 'optimizer_state' in checkpoint and checkpoint['optimizer_state'] is not None:
                logger.info('==> Loading optimizer parameters from checkpoint %s to %s'
                            % (filename, 'CPU' if to_cpu else 'GPU'))
                optimizer.load_state_dict(checkpoint['optimizer_state'])
            else:
                # fall back to a sibling "<name>_optim.<ext>" optimizer checkpoint
                assert filename[-4] == '.', filename
                src_file, ext = filename[:-4], filename[-3:]
                optimizer_filename = '%s_optim.%s' % (src_file, ext)
                if os.path.exists(optimizer_filename):
                    optimizer_ckpt = torch.load(optimizer_filename, map_location=loc_type)
                    optimizer.load_state_dict(optimizer_ckpt['optimizer_state'])
        if 'version' in checkpoint:
            print('==> Checkpoint trained from version: %s' % checkpoint['version'])
        logger.info('==> Done')
        return it, epoch
| 19,470
| 45.030733
| 119
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/detectors/pv_rcnn_plusplus.py
|
from .detector3d_template import Detector3DTemplate
from .detector3d_template_multi_db import Detector3DTemplate_M_DB
from pcdet.utils import common_utils
import numpy as np
class PVRCNNPlusPlus(Detector3DTemplate):
    """PV-RCNN++ detector.

    The forward pass is written out explicitly (rather than looping over the
    module list) because RoI proposals and target assignment have to happen
    between the dense head and the point feature extraction.
    """

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()

    def forward(self, batch_dict):
        """Run the full PV-RCNN++ pipeline on one batch."""
        # voxel/point encoding and backbone stages
        batch_dict = self.vfe(batch_dict)
        if self.unetscn:
            batch_dict = self.unetscn(batch_dict)
        batch_dict = self.backbone_3d(batch_dict)
        batch_dict = self.map_to_bev_module(batch_dict)
        batch_dict = self.backbone_2d(batch_dict)
        batch_dict = self.dense_head(batch_dict)

        # generate RoI proposals with the mode-specific NMS config
        nms_cfg = self.roi_head.model_cfg.NMS_CONFIG['TRAIN' if self.training else 'TEST']
        batch_dict = self.roi_head.proposal_layer(batch_dict, nms_config=nms_cfg)

        if self.training:
            # assign training targets to the proposals before feature pooling
            targets = self.roi_head.assign_targets(batch_dict)
            batch_dict['rois'] = targets['rois']
            batch_dict['roi_labels'] = targets['roi_labels']
            batch_dict['roi_targets_dict'] = targets
            rois_per_scene = targets['rois'].shape[1]
            if 'roi_valid_num' in batch_dict:
                batch_dict['roi_valid_num'] = [rois_per_scene] * batch_dict['batch_size']

        batch_dict = self.pfe(batch_dict)
        batch_dict = self.point_head(batch_dict)
        batch_dict = self.roi_head(batch_dict)

        if not self.training:
            # post_processing already yields the (pred_dicts, recall_dicts) pair
            return self.post_processing(batch_dict)

        loss, tb_dict, disp_dict = self.get_training_loss()
        return {'loss': loss}, tb_dict, disp_dict

    def get_training_loss(self):
        """Sum RPN, (optional) point-head, and RoI-head losses."""
        loss_rpn, tb_dict = self.dense_head.get_loss()
        loss_point = 0
        if self.point_head is not None:
            loss_point, tb_dict = self.point_head.get_loss(tb_dict)
        loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)
        return loss_rpn + loss_point + loss_rcnn, tb_dict, {}
class SemiPVRCNNPlusPlus(Detector3DTemplate):
    """PV-RCNN++ wrapper for semi-supervised (teacher/student) training.

    forward() dispatches on `model_type`:
      * 'origin'  -- ordinary supervised train/test pass.
      * 'teacher' -- always runs a test-style pass and returns the raw
                     batch_dict (so pseudo labels can be derived outside).
      * 'student' -- labelled batches ('gt_boxes' present) return the loss
                     plus raw outputs; unlabelled batches return raw outputs
                     for consistency losses; at test time returns final boxes.

    The only behavioral change vs. the original is the corrected spelling of
    the unsupported-model-type error message.
    """

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()
        # Role is unset until set_model_type() is called.
        self.model_type = None

    def set_model_type(self, model_type):
        """Set this network's role and propagate it to the role-aware heads."""
        assert model_type in ['origin', 'teacher', 'student']
        self.model_type = model_type
        self.dense_head.model_type = model_type
        self.point_head.model_type = model_type
        self.roi_head.model_type = model_type

    def forward(self, batch_dict):
        if self.model_type == 'origin':
            batch_dict = self.vfe(batch_dict)
            batch_dict = self.backbone_3d(batch_dict)
            batch_dict = self.map_to_bev_module(batch_dict)
            batch_dict = self.backbone_2d(batch_dict)
            batch_dict = self.dense_head(batch_dict)
            batch_dict = self.roi_head.proposal_layer(
                batch_dict, nms_config=self.roi_head.model_cfg.NMS_CONFIG['TRAIN' if self.training else 'TEST']
            )
            if self.training:
                # Supervised target assignment for the RoI head.
                targets_dict = self.roi_head.assign_targets(batch_dict)
                batch_dict['rois'] = targets_dict['rois']
                batch_dict['roi_labels'] = targets_dict['roi_labels']
                batch_dict['roi_targets_dict'] = targets_dict
                num_rois_per_scene = targets_dict['rois'].shape[1]
                if 'roi_valid_num' in batch_dict:
                    batch_dict['roi_valid_num'] = [num_rois_per_scene for _ in range(batch_dict['batch_size'])]
            batch_dict = self.pfe(batch_dict)
            batch_dict = self.point_head(batch_dict)
            batch_dict = self.roi_head(batch_dict)
            if self.training:
                loss, tb_dict, disp_dict = self.get_training_loss()
                ret_dict = {
                    'loss': loss
                }
                return ret_dict, tb_dict, disp_dict
            else:
                pred_dicts, recall_dicts = self.post_processing(batch_dict)
                return pred_dicts, recall_dicts
        # teacher: (testing, return raw boxes)
        elif self.model_type == 'teacher':
            batch_dict = self.vfe(batch_dict)
            batch_dict = self.backbone_3d(batch_dict)
            batch_dict = self.map_to_bev_module(batch_dict)
            batch_dict = self.backbone_2d(batch_dict)
            batch_dict = self.dense_head(batch_dict)
            # Teacher always uses TEST-time NMS and skips target assignment.
            batch_dict = self.roi_head.proposal_layer(
                batch_dict, nms_config=self.roi_head.model_cfg.NMS_CONFIG['TEST']
            )
            batch_dict = self.pfe(batch_dict)
            batch_dict = self.point_head(batch_dict)
            batch_dict = self.roi_head(batch_dict)
            return batch_dict
        # student: (training, return (loss & raw boxes w/ gt_boxes) or raw boxes (w/o gt_boxes) for consistency)
        #          (testing, return final_boxes)
        elif self.model_type == 'student':
            batch_dict = self.vfe(batch_dict)
            batch_dict = self.backbone_3d(batch_dict)
            batch_dict = self.map_to_bev_module(batch_dict)
            batch_dict = self.backbone_2d(batch_dict)
            batch_dict = self.dense_head(batch_dict)
            # Labelled batches may use training NMS; unlabelled always TEST NMS.
            if 'gt_boxes' in batch_dict:
                batch_dict = self.roi_head.proposal_layer(
                    batch_dict, nms_config=self.roi_head.model_cfg.NMS_CONFIG['TRAIN' if self.training else 'TEST']
                )
            else:
                batch_dict = self.roi_head.proposal_layer(
                    batch_dict, nms_config=self.roi_head.model_cfg.NMS_CONFIG['TEST']
                )
            if self.training:
                if 'gt_boxes' in batch_dict:
                    targets_dict = self.roi_head.assign_targets(batch_dict)
                    batch_dict['rois'] = targets_dict['rois']
                    batch_dict['roi_labels'] = targets_dict['roi_labels']
                    batch_dict['roi_targets_dict'] = targets_dict
                    num_rois_per_scene = targets_dict['rois'].shape[1]
                    if 'roi_valid_num' in batch_dict:
                        batch_dict['roi_valid_num'] = [num_rois_per_scene for _ in range(batch_dict['batch_size'])]
            batch_dict = self.pfe(batch_dict)
            batch_dict = self.point_head(batch_dict)
            batch_dict = self.roi_head(batch_dict)
            if self.training:
                if 'gt_boxes' in batch_dict:
                    loss, tb_dict, disp_dict = self.get_training_loss()
                    ret_dict = {
                        'loss': loss
                    }
                    # Raw batch_dict is returned alongside so consistency
                    # terms can still be computed on labelled data.
                    return batch_dict, ret_dict, tb_dict, disp_dict
                else:
                    return batch_dict
            else:
                pred_dicts, recall_dicts = self.post_processing(batch_dict)
                return pred_dicts, recall_dicts
        else:
            # Fixed typo in the error message ('Unsupprted' -> 'Unsupported').
            raise Exception('Unsupported model type')

    def get_training_loss(self):
        """Sum RPN, point-head (if present) and RCNN losses."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head.get_loss()
        if self.point_head is not None:
            loss_point, tb_dict = self.point_head.get_loss(tb_dict)
        else:
            loss_point = 0
        loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)

        loss = loss_rpn + loss_point + loss_rcnn
        return loss, tb_dict, disp_dict
class PVRCNNPlusPlus_M_DB(Detector3DTemplate_M_DB):
    """PV-RCNN++ with dataset-specific heads for joint training on two datasets.

    A shared backbone processes the concatenated batch; dense/point/RoI heads
    are duplicated per source dataset (suffix _s1 / _s2) and selected by the
    batch composition returned by common_utils.split_batch_dict.
    """

    def __init__(self, model_cfg, num_class, num_class_s2, dataset, dataset_s2, source_one_name):
        super().__init__(model_cfg=model_cfg, num_class=num_class, num_class_s2=num_class_s2, dataset=dataset,
                         dataset_s2=dataset_s2, source_one_name=source_one_name)
        self.module_list = self.build_networks()
        # Tag identifying which frames belong to source dataset one.
        self.source_one_name = source_one_name

    def forward(self, batch_dict):
        # Split the Concat dataset batch into batch_1 and batch_2
        split_tag_s1, split_tag_s2 = common_utils.split_batch_dict(self.source_one_name, batch_dict)
        batch_s1 = {}
        batch_s2 = {}
        # Shared feature extraction for the whole (possibly mixed) batch.
        batch_dict = self.vfe(batch_dict)
        batch_dict = self.backbone_3d(batch_dict)
        batch_dict = self.map_to_bev_module(batch_dict)
        batch_dict = self.backbone_2d(batch_dict)
        # Dataset-specific DenseHead: three cases -- batch is pure s1,
        # pure s2, or a mix that must be split and run through both branches.
        if len(split_tag_s1) == batch_dict['batch_size']:
            batch_dict = self.dense_head_s1(batch_dict)
            batch_dict = self.roi_head_s1.proposal_layer(
                batch_dict, nms_config=self.roi_head_s1.model_cfg.NMS_CONFIG['TRAIN' if self.training else 'TEST']
            )
            if self.training:
                targets_dict = self.roi_head_s1.assign_targets(batch_dict)
                batch_dict['rois'] = targets_dict['rois']
                batch_dict['roi_labels'] = targets_dict['roi_labels']
                batch_dict['roi_targets_dict'] = targets_dict
                num_rois_per_scene = targets_dict['rois'].shape[1]
                if 'roi_valid_num' in batch_dict:
                    batch_dict['roi_valid_num'] = [num_rois_per_scene for _ in range(batch_dict['batch_size'])]
            batch_dict = self.pfe(batch_dict)
            batch_dict = self.point_head_s1(batch_dict)
            batch_dict = self.roi_head_s1(batch_dict)
        elif len(split_tag_s2) == batch_dict['batch_size']:
            batch_dict = self.dense_head_s2(batch_dict)
            batch_dict = self.roi_head_s2.proposal_layer(
                batch_dict, nms_config=self.roi_head_s2.model_cfg.NMS_CONFIG['TRAIN' if self.training else 'TEST']
            )
            if self.training:
                targets_dict = self.roi_head_s2.assign_targets(batch_dict)
                batch_dict['rois'] = targets_dict['rois']
                batch_dict['roi_labels'] = targets_dict['roi_labels']
                batch_dict['roi_targets_dict'] = targets_dict
                num_rois_per_scene = targets_dict['rois'].shape[1]
                if 'roi_valid_num' in batch_dict:
                    batch_dict['roi_valid_num'] = [num_rois_per_scene for _ in range(batch_dict['batch_size'])]
            batch_dict = self.pfe(batch_dict)
            batch_dict = self.point_head_s2(batch_dict)
            batch_dict = self.roi_head_s2(batch_dict)
        else:
            # Mixed batch: split once, then run each half through its branch.
            batch_s1, batch_s2 = common_utils.split_two_batch_dict_gpu(split_tag_s1, split_tag_s2, batch_dict)
            # Split branch One:
            batch_s1 = self.dense_head_s1(batch_s1)
            batch_s1 = self.roi_head_s1.proposal_layer(
                batch_s1, nms_config=self.roi_head_s1.model_cfg.NMS_CONFIG['TRAIN' if self.training else 'TEST']
            )
            if self.training:
                targets_dict = self.roi_head_s1.assign_targets(batch_s1)
                batch_s1['rois'] = targets_dict['rois']
                batch_s1['roi_labels'] = targets_dict['roi_labels']
                batch_s1['roi_targets_dict'] = targets_dict
                num_rois_per_scene = targets_dict['rois'].shape[1]
                if 'roi_valid_num' in batch_s1:
                    batch_s1['roi_valid_num'] = [num_rois_per_scene for _ in range(batch_s1['batch_size'])]
            batch_s1 = self.pfe(batch_s1)
            batch_s1 = self.point_head_s1(batch_s1)
            batch_s1 = self.roi_head_s1(batch_s1)
            # Split branch TWO:
            batch_s2 = self.dense_head_s2(batch_s2)
            batch_s2 = self.roi_head_s2.proposal_layer(
                batch_s2, nms_config=self.roi_head_s2.model_cfg.NMS_CONFIG['TRAIN' if self.training else 'TEST']
            )
            if self.training:
                targets_dict = self.roi_head_s2.assign_targets(batch_s2)
                batch_s2['rois'] = targets_dict['rois']
                batch_s2['roi_labels'] = targets_dict['roi_labels']
                batch_s2['roi_targets_dict'] = targets_dict
                num_rois_per_scene = targets_dict['rois'].shape[1]
                if 'roi_valid_num' in batch_s2:
                    batch_s2['roi_valid_num'] = [num_rois_per_scene for _ in range(batch_s2['batch_size'])]
            batch_s2 = self.pfe(batch_s2)
            batch_s2 = self.point_head_s2(batch_s2)
            batch_s2 = self.roi_head_s2(batch_s2)

        if self.training:
            split_tag_s1, split_tag_s2 = common_utils.split_batch_dict(self.source_one_name, batch_dict)
            if len(split_tag_s1) == batch_dict['batch_size']:
                loss, tb_dict, disp_dict = self.get_training_loss_s1()
                ret_dict = {
                    'loss': loss
                }
                return ret_dict, tb_dict, disp_dict
            elif len(split_tag_s2) == batch_dict['batch_size']:
                loss, tb_dict, disp_dict = self.get_training_loss_s2()
                ret_dict = {
                    'loss': loss
                }
                return ret_dict, tb_dict, disp_dict
            else:
                # Mixed batch: losses from both branches are summed; only
                # branch-one tb/disp dicts are logged.
                loss_1, tb_dict_1, disp_dict_1 = self.get_training_loss_s1()
                loss_2, tb_dict_2, disp_dict_2 = self.get_training_loss_s2()
                ret_dict = {
                    'loss': loss_1 + loss_2
                }
                return ret_dict, tb_dict_1, disp_dict_1
        else:
            # NOTE: When peform the inference, only one dataset can be accessed.
            pred_dicts, recall_dicts = self.post_processing(batch_dict)
            return pred_dicts, recall_dicts

    def get_training_loss_s1(self):
        """Loss for the dataset-one branch (RPN + optional point head + RCNN)."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head_s1.get_loss()
        if self.point_head_s1 is not None:
            loss_point, tb_dict = self.point_head_s1.get_loss(tb_dict)
        else:
            loss_point = 0
        loss_rcnn, tb_dict = self.roi_head_s1.get_loss(tb_dict)

        loss = loss_rpn + loss_point + loss_rcnn
        return loss, tb_dict, disp_dict

    def get_training_loss_s2(self):
        """Loss for the dataset-two branch (RPN + optional point head + RCNN)."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head_s2.get_loss()
        if self.point_head_s2 is not None:
            loss_point, tb_dict = self.point_head_s2.get_loss(tb_dict)
        else:
            loss_point = 0
        loss_rcnn, tb_dict = self.roi_head_s2.get_loss(tb_dict)

        loss = loss_rpn + loss_point + loss_rcnn
        return loss, tb_dict, disp_dict
| 14,722
| 44.301538
| 115
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/detectors/voxel_rcnn.py
|
from .detector3d_template import Detector3DTemplate
from .detector3d_template_ada import ActiveDetector3DTemplate
from .detector3d_template_multi_db import Detector3DTemplate_M_DB
from .detector3d_template_multi_db_3 import Detector3DTemplate_M_DB_3
from pcdet.utils import common_utils
from pcdet.config import cfg
class VoxelRCNN(Detector3DTemplate):
    """Voxel R-CNN detector: runs the configured module list end to end."""

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()

    def forward(self, batch_dict):
        """Feed the batch through every module; return loss dicts when
        training, otherwise post-processed predictions and recall stats."""
        for module in self.module_list:
            batch_dict = module(batch_dict)

        if not self.training:
            pred_dicts, recall_dicts = self.post_processing(batch_dict)
            return pred_dicts, recall_dicts

        loss, tb_dict, disp_dict = self.get_training_loss()
        return {'loss': loss}, tb_dict, disp_dict

    def get_training_loss(self):
        """Total loss = RPN + RCNN (+ 3D-backbone auxiliary loss if defined)."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head.get_loss()
        loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)
        total = loss_rpn + loss_rcnn
        if hasattr(self.backbone_3d, 'get_loss'):
            loss_backbone3d, tb_dict = self.backbone_3d.get_loss(tb_dict)
            total = total + loss_backbone3d
        return total, tb_dict, disp_dict
class VoxelRCNN_M_DB(Detector3DTemplate_M_DB):
    """Voxel R-CNN with per-dataset heads for joint two-dataset training.

    Branch selection is done by position in self.module_list: the shared
    modules come first, then (presumably) the s1 head pair followed by the
    s2 head pair as the last four entries -- verify against the template's
    build order before changing module topology.
    """

    def __init__(self, model_cfg, num_class, num_class_s2, dataset, dataset_s2, source_one_name):
        super().__init__(model_cfg=model_cfg, num_class=num_class, num_class_s2=num_class_s2, dataset=dataset,
                         dataset_s2=dataset_s2, source_one_name=source_one_name)
        self.module_list = self.build_networks()
        self.source_one_name = source_one_name

    def forward(self, batch_dict):
        # Split the Concat dataset batch into batch_1 and batch_2
        split_tag_s1, split_tag_s2 = common_utils.split_batch_dict(self.source_one_name, batch_dict)
        batch_s1 = {}
        batch_s2 = {}
        len_of_module = len(self.module_list)
        for k, cur_module in enumerate(self.module_list):
            if k < len_of_module-4:
                # Shared backbone modules process the full (mixed) batch.
                batch_dict = cur_module(batch_dict)
            if k == len_of_module-4 or k == len_of_module-3:
                # s1-specific heads: run on the whole batch if it is pure s1,
                # skip for pure s2, otherwise split and run on the s1 half.
                if len(split_tag_s1) == batch_dict['batch_size']:
                    batch_dict = cur_module(batch_dict)
                elif len(split_tag_s2) == batch_dict['batch_size']:
                    continue
                else:
                    if k == len_of_module-4:
                        # Split only once, at the first head module.
                        batch_s1, batch_s2 = common_utils.split_two_batch_dict_gpu(split_tag_s1, split_tag_s2, batch_dict)
                    batch_s1 = cur_module(batch_s1)
            if k == len_of_module-2 or k == len_of_module-1:
                # s2-specific heads, mirroring the logic above.
                if len(split_tag_s2) == batch_dict['batch_size']:
                    batch_dict = cur_module(batch_dict)
                elif len(split_tag_s1) == batch_dict['batch_size']:
                    continue
                else:
                    batch_s2 = cur_module(batch_s2)

        if self.training:
            split_tag_s1, split_tag_s2 = common_utils.split_batch_dict(self.source_one_name, batch_dict)
            if len(split_tag_s1) == batch_dict['batch_size']:
                loss, tb_dict, disp_dict = self.get_training_loss_s1()
                ret_dict = {
                    'loss': loss
                }
                return ret_dict, tb_dict, disp_dict
            elif len(split_tag_s2) == batch_dict['batch_size']:
                loss, tb_dict, disp_dict = self.get_training_loss_s2()
                ret_dict = {
                    'loss': loss
                }
                return ret_dict, tb_dict, disp_dict
            else:
                # Mixed batch: sum both branch losses, log only branch one.
                loss_1, tb_dict_1, disp_dict_1 = self.get_training_loss_s1()
                loss_2, tb_dict_2, disp_dict_2 = self.get_training_loss_s2()
                ret_dict = {
                    'loss': loss_1 + loss_2
                }
                return ret_dict, tb_dict_1, disp_dict_1
        else:
            # NOTE: When peform the inference, only one dataset can be accessed.
            if 'batch_box_preds' in batch_dict.keys():
                pred_dicts, recall_dicts = self.post_processing(batch_dict)
                return pred_dicts, recall_dicts
            elif 'batch_box_preds' in batch_s1.keys():
                # NOTE(review): mixed-batch inference returns a 4-tuple,
                # unlike the 2-tuple above -- callers must handle both.
                pred_dicts_s1, recall_dicts_s1 = self.post_processing(batch_s1)
                pred_dicts_s2, recall_dicts_s2 = self.post_processing(batch_s2)
                return pred_dicts_s1, recall_dicts_s1, pred_dicts_s2, recall_dicts_s2

    def get_training_loss_s1(self):
        """RPN + RCNN loss for the dataset-one head pair."""
        disp_dict = {}
        loss = 0
        loss_rpn, tb_dict = self.dense_head_s1.get_loss()
        loss_rcnn, tb_dict = self.roi_head_s1.get_loss(tb_dict)
        loss = loss + loss_rpn + loss_rcnn
        return loss, tb_dict, disp_dict

    def get_training_loss_s2(self):
        """RPN + RCNN loss for the dataset-two head pair."""
        disp_dict = {}
        loss = 0
        loss_rpn, tb_dict = self.dense_head_s2.get_loss()
        loss_rcnn, tb_dict = self.roi_head_s2.get_loss(tb_dict)
        loss = loss + loss_rpn + loss_rcnn
        return loss, tb_dict, disp_dict
class VoxelRCNN_M_DB_3(Detector3DTemplate_M_DB_3):
    """Voxel R-CNN with per-dataset heads for joint THREE-dataset training.

    As in VoxelRCNN_M_DB, branch selection is positional: the last six
    modules are assumed to be the three dataset-specific head pairs
    (s1, s2, s3 in that order) -- confirm against the template build order.
    """

    def __init__(self, model_cfg, num_class, num_class_s2, num_class_s3, dataset, dataset_s2, dataset_s3, source_one_name, source_1):
        super().__init__(model_cfg=model_cfg, num_class=num_class, num_class_s2=num_class_s2, num_class_s3=num_class_s3,
                         dataset=dataset, dataset_s2=dataset_s2, dataset_s3=dataset_s3, source_one_name=source_one_name, source_1=source_1)
        self.module_list = self.build_networks()
        self.source_one_name = source_one_name
        # At inference `source_1` (1, 2 or 3) selects which head pair to run.
        self.source_1 = source_1

    def forward(self, batch_dict):
        batch_s1 = {}
        batch_s2 = {}
        batch_s3 = {}
        if self.training:
            len_of_module = len(self.module_list)
            for k, cur_module in enumerate(self.module_list):
                if k < len_of_module-6:
                    # Shared modules process the concatenated batch.
                    batch_dict = cur_module(batch_dict)
                if k == len_of_module-6 or k == len_of_module-5:
                    # Split the Concat dataset batch into batch_1, batch_2, and batch_3
                    if k == len_of_module-6:
                        # First split off the 'waymo' frames, then split the
                        # remainder by source_one_name.
                        split_tag_s1, split_tag_s2_pre = common_utils.split_batch_dict('waymo', batch_dict)
                        batch_s1, batch_s2_pre = common_utils.split_two_batch_dict_gpu(split_tag_s1, split_tag_s2_pre, batch_dict)
                        split_tag_s2, split_tag_s3 = common_utils.split_batch_dict(self.source_one_name, batch_s2_pre)
                        batch_s2, batch_s3 = common_utils.split_two_batch_dict_gpu(split_tag_s2, split_tag_s3, batch_s2_pre)
                    batch_s1 = cur_module(batch_s1)
                if k == len_of_module-4 or k == len_of_module-3:
                    batch_s2 = cur_module(batch_s2)
                if k == len_of_module-2 or k == len_of_module-1:
                    batch_s3 = cur_module(batch_s3)
        else:
            # Inference: single-source batch; run only the selected head pair.
            len_of_module = len(self.module_list)
            for k, cur_module in enumerate(self.module_list):
                if k < len_of_module-6:
                    batch_dict = cur_module(batch_dict)
                if k == len_of_module-6 or k == len_of_module-5:
                    if self.source_1 == 1:
                        batch_dict = cur_module(batch_dict)
                    else:
                        continue
                if k == len_of_module-4 or k == len_of_module-3:
                    if self.source_1 == 2:
                        batch_dict = cur_module(batch_dict)
                    else:
                        continue
                if k == len_of_module-2 or k == len_of_module-1:
                    if self.source_1 == 3:
                        batch_dict = cur_module(batch_dict)
                    else:
                        continue

        if self.training:
            # Sum all three branch losses; log/display only branch one.
            loss_1, tb_dict_1, disp_dict_1 = self.get_training_loss_s1()
            loss_2, tb_dict_2, disp_dict_2 = self.get_training_loss_s2()
            loss_3, tb_dict_3, disp_dict_3 = self.get_training_loss_s3()
            ret_dict = {
                'loss': loss_1 + loss_2 + loss_3
            }
            return ret_dict, tb_dict_1, disp_dict_1
        else:
            pred_dicts, recall_dicts = self.post_processing(batch_dict)
            return pred_dicts, recall_dicts

    def get_training_loss_s1(self):
        """RPN + RCNN loss for the dataset-one head pair."""
        disp_dict = {}
        loss = 0
        loss_rpn, tb_dict = self.dense_head_s1.get_loss()
        loss_rcnn, tb_dict = self.roi_head_s1.get_loss(tb_dict)
        loss = loss + loss_rpn + loss_rcnn
        return loss, tb_dict, disp_dict

    def get_training_loss_s2(self):
        """RPN + RCNN loss for the dataset-two head pair."""
        disp_dict = {}
        loss = 0
        loss_rpn, tb_dict = self.dense_head_s2.get_loss()
        loss_rcnn, tb_dict = self.roi_head_s2.get_loss(tb_dict)
        loss = loss + loss_rpn + loss_rcnn
        return loss, tb_dict, disp_dict

    def get_training_loss_s3(self):
        """RPN + RCNN loss for the dataset-three head pair."""
        disp_dict = {}
        loss = 0
        loss_rpn, tb_dict = self.dense_head_s3.get_loss()
        loss_rcnn, tb_dict = self.roi_head_s3.get_loss(tb_dict)
        loss = loss + loss_rpn + loss_rcnn
        return loss, tb_dict, disp_dict
class ActiveDualVoxelRCNN(ActiveDetector3DTemplate):
    """Voxel R-CNN variant for active domain adaptation with a discriminator.

    forward() dispatches on forward_args['mode']:
      'train_discriminator' -> discriminator loss only,
      'train_detector'      -> detector loss dicts,
      'active_evaluate'     -> per-frame active-learning scores,
      None (eval)           -> ordinary predictions.
    """

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()

    def forward(self, batch_dict, **forward_args):
        batch_dict['mode'] = forward_args.get('mode', None) if forward_args is not None else None
        for cur_module in self.module_list:
            batch_dict = cur_module(batch_dict)
        if self.training and forward_args.get('mode', None) == 'train_discriminator':
            loss = self.discriminator.get_discriminator_loss(batch_dict, source=forward_args['source'])
            return loss

        if self.training and forward_args.get('mode', None) == 'train_detector':
            loss, tb_dict, disp_dict = self.get_detector_loss()
        elif not self.training and forward_args.get('mode', None) == 'active_evaluate':
            batch_dict = self.post_processing(batch_dict)
            sample_score = self.get_evaluate_score(batch_dict, forward_args['domain'])
            return sample_score
        elif not self.training and forward_args.get('mode', None) == None:
            pred_dicts, recall_dicts = self.post_processing(batch_dict)
            return pred_dicts, recall_dicts
        # NOTE(review): training with an unrecognized mode falls through to
        # here with `loss` unbound and raises NameError -- confirm callers
        # always pass one of the supported modes.
        ret_dict = {
            'loss': loss
        }
        return ret_dict, tb_dict, disp_dict

    def get_detector_loss(self):
        """Detector loss = RPN + RCNN."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head.get_loss()
        loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)
        loss = loss_rpn + loss_rcnn
        return loss, tb_dict, disp_dict

    def get_evaluate_score(self, batch_dict, domain):
        """Build one active-learning score entry per frame.

        BUG FIX: the original nested two identical `for i in range(batch_size)`
        loops, appending batch_size**2 duplicated entries; a single loop now
        yields exactly one entry per frame.
        """
        batch_dict = self.discriminator.domainness_evaluate(batch_dict)
        batch_size = batch_dict['batch_size']
        frame_id = [str(id) for id in batch_dict['frame_id']]
        # Already moved to CPU here, so per-element .cpu() calls are not needed.
        domainness_evaluate = batch_dict['domainness_evaluate'].cpu()
        reweight_roi = batch_dict['reweight_roi']
        sample_score = []
        for i in range(batch_size):
            frame_score = {
                'frame_id': frame_id[i],
                'domainness_evaluate': domainness_evaluate[i],
                'roi_feature': reweight_roi[i],
                'total_score': domainness_evaluate[i]
            }
            sample_score.append(frame_score)
        return sample_score
class VoxelRCNN_TQS(ActiveDetector3DTemplate):
    """Voxel R-CNN with TQS active-learning scoring.

    Frame score = committee score + uncertainty + domainness.
    forward() dispatches on forward_args['mode'] (discriminator training,
    detector training, multi-classifier training, active evaluation, or
    plain inference).
    """

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()

    def forward(self, batch_dict, **forward_args):
        batch_dict['mode'] = forward_args.get('mode', None) if forward_args is not None else None
        for cur_module in self.module_list:
            batch_dict = cur_module(batch_dict)
        if self.training and forward_args.get('mode', None) == 'train_discriminator':
            loss = self.discriminator.get_discriminator_loss(batch_dict, source=forward_args['source'])
            return loss

        if self.training and forward_args.get('mode', None) == 'train_detector':
            loss, tb_dict, disp_dict = self.get_detector_loss()
        elif self.training and forward_args.get('mode', 'train_mul_cls') == 'train_mul_cls':
            # BUG FIX: the '== train_mul_cls' comparison was missing, so this
            # branch fired for ANY truthy mode during training. A missing mode
            # still defaults to multi-classifier training, as before.
            loss, tb_dict, disp_dict = self.get_mul_cls_loss()
        elif not self.training and forward_args.get('mode', None) == 'active_evaluate':
            batch_dict = self.post_processing(batch_dict)
            sample_score = self.get_evaluate_score(batch_dict, forward_args['domain'])
            return sample_score
        elif not self.training and forward_args.get('mode', None) == None:
            pred_dicts, recall_dicts = self.post_processing(batch_dict)
            return pred_dicts, recall_dicts

        ret_dict = {
            'loss': loss
        }
        return ret_dict, tb_dict, disp_dict

    def get_mul_cls_loss(self, mode='train_mul_cls'):
        """Return only the multi-classifier loss; the combined loss returned
        by the RoI head is intentionally discarded here."""
        disp_dict = {}
        loss, loss_mul, tb_dict = self.roi_head.get_mul_cls_loss()
        return loss_mul, tb_dict, disp_dict

    def get_detector_loss(self):
        """Detector loss = RPN + RCNN."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head.get_loss()
        loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)
        loss = loss_rpn + loss_rcnn
        return loss, tb_dict, disp_dict

    def get_evaluate_score(self, batch_dict, domain):
        """Build one TQS score entry per frame (committee + uncertainty + domainness)."""
        batch_dict = self.discriminator.domainness_evaluate(batch_dict)
        batch_dict = self.roi_head.committee_evaluate(batch_dict)
        batch_dict = self.roi_head.uncertainty_evaluate(batch_dict)
        batch_size = batch_dict['batch_size']
        frame_id = [str(id) for id in batch_dict['frame_id']]
        domainness_evaluate = batch_dict['domainness_evaluate'].cpu()
        reweight_roi = batch_dict['reweight_roi']
        committee_evaluate = batch_dict['committee_score'].cpu()
        uncertainty_evaluate = batch_dict['uncertainty'].cpu()
        roi_score = batch_dict['cls_preds']
        sample_score = []
        for i in range(batch_size):
            frame_score = {
                'frame_id': frame_id[i],
                'committee_evaluate': committee_evaluate[i],
                'uncertainty_evaluate': uncertainty_evaluate[i],
                'domainness_evaluate': domainness_evaluate[i],
                'roi_feature': reweight_roi[i],
                'roi_score': roi_score[i],
                'total_score': committee_evaluate[i] + uncertainty_evaluate[i] + domainness_evaluate[i]
            }
            sample_score.append(frame_score)
        return sample_score
class VoxelRCNN_CLUE(ActiveDetector3DTemplate):
    """Voxel R-CNN with CLUE-style active-learning scoring (RoI features +
    classification scores per frame; domainness currently disabled)."""

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()

    def forward(self, batch_dict, **forward_args):
        batch_dict['mode'] = forward_args.get('mode', None) if forward_args is not None else None
        for cur_module in self.module_list:
            batch_dict = cur_module(batch_dict)
        if self.training and forward_args.get('mode', None) == 'train_discriminator':
            loss = self.discriminator.get_discriminator_loss(batch_dict, source=forward_args['source'])
            return loss

        if self.training and forward_args.get('mode', None) == 'train_detector':
            loss, tb_dict, disp_dict = self.get_detector_loss()
        elif self.training and forward_args.get('mode', 'train_mul_cls') == 'train_mul_cls':
            # BUG FIX: the '== train_mul_cls' comparison was missing, so this
            # branch fired for ANY truthy mode during training. A missing mode
            # still defaults to multi-classifier training, as before.
            loss, tb_dict, disp_dict = self.get_mul_cls_loss()
        elif not self.training and forward_args.get('mode', None) == 'active_evaluate':
            batch_dict = self.post_processing(batch_dict)
            sample_score = self.get_evaluate_score(batch_dict, forward_args['domain'])
            return sample_score
        elif not self.training and forward_args.get('mode', None) == None:
            pred_dicts, recall_dicts = self.post_processing(batch_dict)
            return pred_dicts, recall_dicts

        ret_dict = {
            'loss': loss
        }
        return ret_dict, tb_dict, disp_dict

    def get_mul_cls_loss(self, mode='train_mul_cls'):
        """Return only the multi-classifier loss; the combined loss returned
        by the RoI head is intentionally discarded here."""
        disp_dict = {}
        loss, mul_loss, tb_dict = self.roi_head.get_mul_cls_loss()
        return mul_loss, tb_dict, disp_dict

    def get_detector_loss(self):
        """Detector loss = RPN + RCNN."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head.get_loss()
        loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)
        loss = loss_rpn + loss_rcnn
        return loss, tb_dict, disp_dict

    def get_evaluate_score(self, batch_dict, domain):
        """Build one CLUE score entry per frame.

        BUG FIX: the original nested two identical `for i in range(batch_size)`
        loops, appending batch_size**2 duplicated entries; a single loop now
        yields exactly one entry per frame.
        """
        # batch_dict = self.discriminator.domainness_evaluate(batch_dict)
        batch_size = batch_dict['batch_size']
        frame_id = [str(id) for id in batch_dict['frame_id']]
        # domainness_evaluate = batch_dict['domainness_evaluate'].cpu()
        reweight_roi = batch_dict['reweight_roi']
        roi_score = batch_dict['cls_preds']
        sample_score = []
        for i in range(batch_size):
            frame_score = {
                'frame_id': frame_id[i],
                # 'domainness_evaluate': domainness_evaluate[i].cpu(),
                'roi_feature': reweight_roi[i],
                'roi_score': roi_score[i]
                # 'total_score': domainness_evaluate[i].cpu()
            }
            sample_score.append(frame_score)
        return sample_score
| 18,381
| 41.550926
| 138
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/detectors/caddn.py
|
from .detector3d_template import Detector3DTemplate
class CaDDN(Detector3DTemplate):
    """CaDDN detector: runs the configured module list end to end and
    combines the RPN loss with the VFE depth loss during training."""

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()

    def forward(self, batch_dict):
        """Return loss dicts while training, otherwise post-processed predictions."""
        for module in self.module_list:
            batch_dict = module(batch_dict)

        if not self.training:
            pred_dicts, recall_dicts = self.post_processing(batch_dict)
            return pred_dicts, recall_dicts

        loss, tb_dict, disp_dict = self.get_training_loss()
        return {'loss': loss}, tb_dict, disp_dict

    def get_training_loss(self):
        """Total loss = RPN loss + depth-estimation loss from the VFE."""
        disp_dict = {}
        loss_rpn, tb_dict_rpn = self.dense_head.get_loss()
        loss_depth, tb_dict_depth = self.vfe.get_loss()

        # Scalar summaries first; per-head dicts may override matching keys,
        # exactly as the original dict-unpacking order did.
        tb_dict = {
            'loss_rpn': loss_rpn.item(),
            'loss_depth': loss_depth.item(),
        }
        tb_dict.update(tb_dict_rpn)
        tb_dict.update(tb_dict_depth)

        return loss_rpn + loss_depth, tb_dict, disp_dict
| 1,164
| 28.871795
| 83
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/detectors/detector3d_template_ada.py
|
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from ...ops.iou3d_nms import iou3d_nms_utils
from ...utils.spconv_utils import find_all_spconv_keys
from .. import backbones_2d, backbones_3d, dense_heads, roi_heads, active_models
from ..backbones_2d import map_to_bev
from ..backbones_3d import pfe, vfe
from ..model_utils import model_nms_utils
class ActiveDetector3DTemplate(nn.Module):
def __init__(self, model_cfg, num_class, dataset):
    """Store config/dataset handles and define the module build order."""
    super().__init__()
    self.model_cfg = model_cfg
    self.num_class = num_class
    self.dataset = dataset
    self.class_names = dataset.class_names
    # Persistent (checkpointed) step counter; advanced by update_global_step().
    self.register_buffer('global_step', torch.LongTensor(1).zero_())

    # Build order matters: later modules consume feature sizes recorded in
    # model_info_dict by earlier builders (see build_networks()).
    self.module_topology = [
        'vfe', 'backbone_3d', 'map_to_bev_module', 'pfe',
        'backbone_2d', 'dense_head', 'point_head', 'discriminator', 'roi_head'
    ]
@property
def mode(self):
return 'TRAIN' if self.training else 'TEST'
def update_global_step(self):
    """Advance the persistent global-step counter in place (buffer tensor)."""
    self.global_step += 1
def build_networks(self):
    """Build every configured module in topological order.

    Each build_* method consumes and augments `model_info_dict` (feature
    dimensions, grid geometry) so downstream modules know their input sizes.

    Returns:
        The list of successfully built modules, in build order.
    """
    model_info_dict = {
        'module_list': [],
        'num_rawpoint_features': self.dataset.point_feature_encoder.num_point_features,
        'num_point_features': self.dataset.point_feature_encoder.num_point_features,
        'grid_size': self.dataset.grid_size,
        'point_cloud_range': self.dataset.point_cloud_range,
        'voxel_size': self.dataset.voxel_size,
        'depth_downsample_factor': self.dataset.depth_downsample_factor
    }
    for module_name in self.module_topology:
        builder = getattr(self, 'build_%s' % module_name)
        module, model_info_dict = builder(model_info_dict=model_info_dict)
        # Register under the topology name even when the builder returned
        # None (module not configured) -- matches nn.Module semantics.
        self.add_module(module_name, module)
    return model_info_dict['module_list']
def build_vfe(self, model_info_dict):
    """Instantiate the voxel feature encoder, if configured.

    Updates model_info_dict['num_point_features'] to the VFE output size.
    Returns (module_or_None, model_info_dict).
    """
    vfe_cfg = self.model_cfg.get('VFE', None)
    if vfe_cfg is None:
        return None, model_info_dict

    vfe_module = vfe.__all__[vfe_cfg.NAME](
        model_cfg=vfe_cfg,
        num_point_features=model_info_dict['num_rawpoint_features'],
        point_cloud_range=model_info_dict['point_cloud_range'],
        voxel_size=model_info_dict['voxel_size'],
        grid_size=model_info_dict['grid_size'],
        depth_downsample_factor=model_info_dict['depth_downsample_factor']
    )
    model_info_dict['num_point_features'] = vfe_module.get_output_feature_dim()
    model_info_dict['module_list'].append(vfe_module)
    return vfe_module, model_info_dict
def build_backbone_3d(self, model_info_dict):
    """Instantiate the sparse 3D backbone, if configured.

    Records the backbone's output point-feature size and (when the backbone
    exposes them) its per-stage channel counts for later RoI-head use.
    Returns (module_or_None, model_info_dict).
    """
    if self.model_cfg.get('BACKBONE_3D', None) is None:
        return None, model_info_dict

    backbone_3d_module = backbones_3d.__all__[self.model_cfg.BACKBONE_3D.NAME](
        model_cfg=self.model_cfg.BACKBONE_3D,
        input_channels=model_info_dict['num_point_features'],
        grid_size=model_info_dict['grid_size'],
        voxel_size=model_info_dict['voxel_size'],
        point_cloud_range=model_info_dict['point_cloud_range']
    )
    model_info_dict['module_list'].append(backbone_3d_module)
    model_info_dict['num_point_features'] = backbone_3d_module.num_point_features
    # Not every backbone defines backbone_channels; default to None.
    model_info_dict['backbone_channels'] = backbone_3d_module.backbone_channels \
        if hasattr(backbone_3d_module, 'backbone_channels') else None
    return backbone_3d_module, model_info_dict
def build_map_to_bev_module(self, model_info_dict):
    """Instantiate the 3D-to-BEV projection module, if configured.

    Records the number of BEV feature channels for the 2D backbone.
    Returns (module_or_None, model_info_dict).
    """
    bev_cfg = self.model_cfg.get('MAP_TO_BEV', None)
    if bev_cfg is None:
        return None, model_info_dict

    bev_module = map_to_bev.__all__[bev_cfg.NAME](
        model_cfg=bev_cfg,
        grid_size=model_info_dict['grid_size']
    )
    model_info_dict['module_list'].append(bev_module)
    model_info_dict['num_bev_features'] = bev_module.num_bev_features
    return bev_module, model_info_dict
def build_backbone_2d(self, model_info_dict):
    """Instantiate the BEV 2D backbone, if configured.

    Updates model_info_dict['num_bev_features'] to the backbone output size.
    Returns (module_or_None, model_info_dict).
    """
    bb2d_cfg = self.model_cfg.get('BACKBONE_2D', None)
    if bb2d_cfg is None:
        return None, model_info_dict

    bb2d_module = backbones_2d.__all__[bb2d_cfg.NAME](
        model_cfg=bb2d_cfg,
        input_channels=model_info_dict['num_bev_features']
    )
    model_info_dict['module_list'].append(bb2d_module)
    model_info_dict['num_bev_features'] = bb2d_module.num_bev_features
    return bb2d_module, model_info_dict
def build_pfe(self, model_info_dict):
    """Instantiate the point feature extractor (e.g. VSA), if configured.

    Records both the fused and pre-fusion point-feature sizes, which the
    point head may select between. Returns (module_or_None, model_info_dict).
    """
    if self.model_cfg.get('PFE', None) is None:
        return None, model_info_dict

    pfe_module = pfe.__all__[self.model_cfg.PFE.NAME](
        model_cfg=self.model_cfg.PFE,
        voxel_size=model_info_dict['voxel_size'],
        point_cloud_range=model_info_dict['point_cloud_range'],
        num_bev_features=model_info_dict['num_bev_features'],
        num_rawpoint_features=model_info_dict['num_rawpoint_features']
    )
    model_info_dict['module_list'].append(pfe_module)
    model_info_dict['num_point_features'] = pfe_module.num_point_features
    model_info_dict['num_point_features_before_fusion'] = pfe_module.num_point_features_before_fusion
    return pfe_module, model_info_dict
def build_dense_head(self, model_info_dict):
    """Instantiate the dense (RPN) head, if configured.

    The head predicts raw boxes during training only when an RoI head is
    also configured (predict_boxes_when_training).
    Returns (module_or_None, model_info_dict).
    """
    if self.model_cfg.get('DENSE_HEAD', None) is None:
        return None, model_info_dict
    dense_head_module = dense_heads.__all__[self.model_cfg.DENSE_HEAD.NAME](
        model_cfg=self.model_cfg.DENSE_HEAD,
        input_channels=model_info_dict['num_bev_features'],
        # Class-agnostic heads use a single foreground class.
        num_class=self.num_class if not self.model_cfg.DENSE_HEAD.CLASS_AGNOSTIC else 1,
        class_names=self.class_names,
        grid_size=model_info_dict['grid_size'],
        point_cloud_range=model_info_dict['point_cloud_range'],
        predict_boxes_when_training=self.model_cfg.get('ROI_HEAD', False),
        voxel_size=model_info_dict.get('voxel_size', False)
    )
    model_info_dict['module_list'].append(dense_head_module)
    return dense_head_module, model_info_dict
def build_point_head(self, model_info_dict):
    """Instantiate the point head, if configured.

    Chooses pre-fusion or fused point features as input depending on
    USE_POINT_FEATURES_BEFORE_FUSION. Returns (module_or_None, model_info_dict).
    """
    if self.model_cfg.get('POINT_HEAD', None) is None:
        return None, model_info_dict

    if self.model_cfg.POINT_HEAD.get('USE_POINT_FEATURES_BEFORE_FUSION', False):
        num_point_features = model_info_dict['num_point_features_before_fusion']
    else:
        num_point_features = model_info_dict['num_point_features']

    point_head_module = dense_heads.__all__[self.model_cfg.POINT_HEAD.NAME](
        model_cfg=self.model_cfg.POINT_HEAD,
        input_channels=num_point_features,
        num_class=self.num_class if not self.model_cfg.POINT_HEAD.CLASS_AGNOSTIC else 1,
        predict_boxes_when_training=self.model_cfg.get('ROI_HEAD', False)
    )
    model_info_dict['module_list'].append(point_head_module)
    return point_head_module, model_info_dict
def build_roi_head(self, model_info_dict):
if self.model_cfg.get('ROI_HEAD', None) is None:
return None, model_info_dict
point_head_module = roi_heads.__all__[self.model_cfg.ROI_HEAD.NAME](
model_cfg=self.model_cfg.ROI_HEAD,
input_channels=model_info_dict['num_point_features'],
backbone_channels=model_info_dict['backbone_channels'],
point_cloud_range=model_info_dict['point_cloud_range'],
voxel_size=model_info_dict['voxel_size'],
num_class=self.num_class if not self.model_cfg.ROI_HEAD.CLASS_AGNOSTIC else 1,
)
model_info_dict['module_list'].append(point_head_module)
return point_head_module, model_info_dict
def build_discriminator(self, model_info_dict):
if self.model_cfg.get('DISCRIMINATOR', None) is None:
return None, model_info_dict
discriminator_module = active_models.__all__[self.model_cfg.DISCRIMINATOR.NAME](
model_cfg=self.model_cfg.DISCRIMINATOR
)
model_info_dict['module_list'].append(discriminator_module)
return discriminator_module, model_info_dict
    def forward(self, **kwargs):
        # Concrete detector subclasses must implement the forward pass.
        raise NotImplementedError
    def post_processing(self, batch_dict):
        """
        Decode per-sample predictions from the batched network outputs,
        apply (multi-class or class-agnostic) NMS and accumulate recall stats.

        When ``batch_dict['mode'] == 'active_evaluate'`` the method instead
        collects per-frame RoI-feature statistics used by the active-learning
        selection strategy, and returns ``batch_dict`` early (see below).

        Args:
            batch_dict:
                batch_size:
                batch_cls_preds: (B, num_boxes, num_classes | 1) or (N1+N2+..., num_classes | 1)
                                or [(B, num_boxes, num_class1), (B, num_boxes, num_class2) ...]
                multihead_label_mapping: [(num_class1), (num_class2), ...]
                batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C)
                cls_preds_normalized: indicate whether batch_cls_preds is normalized
                batch_index: optional (N1+N2+...)
                has_class_labels: True/False
                roi_labels: (B, num_rois) 1 .. num_classes
                batch_pred_labels: (B, num_boxes, 1)
        Returns:
            (pred_dicts, recall_dict) in normal evaluation, or the augmented
            ``batch_dict`` itself in 'active_evaluate' mode.
        """
        post_process_cfg = self.model_cfg.POST_PROCESSING
        batch_size = batch_dict['batch_size']
        recall_dict = {}
        pred_dicts = []
        for index in range(batch_size):
            # Predictions may be stored flat (2-D, with an explicit batch index
            # per row) or batched (3-D, indexed by sample position).
            if batch_dict.get('batch_index', None) is not None:
                assert batch_dict['batch_box_preds'].shape.__len__() == 2
                batch_mask = (batch_dict['batch_index'] == index)
            else:
                assert batch_dict['batch_box_preds'].shape.__len__() == 3
                batch_mask = index
            box_preds = batch_dict['batch_box_preds'][batch_mask]
            src_box_preds = box_preds
            if batch_dict['mode'] == 'active_evaluate':
                # Shared RoI features are only needed for active-learning scoring.
                roi_feature = batch_dict['roi_shared_feature'][batch_mask]
            if not isinstance(batch_dict['batch_cls_preds'], list):
                cls_preds = batch_dict['batch_cls_preds'][batch_mask]
                src_cls_preds = cls_preds
                assert cls_preds.shape[1] in [1, self.num_class]
                if not batch_dict['cls_preds_normalized']:
                    cls_preds = torch.sigmoid(cls_preds)
            else:
                cls_preds = [x[batch_mask] for x in batch_dict['batch_cls_preds']]
                src_cls_preds = cls_preds
                if not batch_dict['cls_preds_normalized']:
                    cls_preds = [torch.sigmoid(x) for x in cls_preds]
            if post_process_cfg.NMS_CONFIG.MULTI_CLASSES_NMS:
                if not isinstance(cls_preds, list):
                    cls_preds = [cls_preds]
                    # NOTE(review): arange(1, num_class) yields num_class-1 labels;
                    # confirm whether the upper bound should be num_class + 1.
                    multihead_label_mapping = [torch.arange(1, self.num_class, device=cls_preds[0].device)]
                else:
                    multihead_label_mapping = batch_dict['multihead_label_mapping']
                cur_start_idx = 0
                pred_scores, pred_labels, pred_boxes = [], [], []
                # Each head owns a contiguous slice of box_preds.
                for cur_cls_preds, cur_label_mapping in zip(cls_preds, multihead_label_mapping):
                    assert cur_cls_preds.shape[1] == len(cur_label_mapping)
                    cur_box_preds = box_preds[cur_start_idx: cur_start_idx + cur_cls_preds.shape[0]]
                    cur_pred_scores, cur_pred_labels, cur_pred_boxes = model_nms_utils.multi_classes_nms(
                        cls_scores=cur_cls_preds, box_preds=cur_box_preds,
                        nms_config=post_process_cfg.NMS_CONFIG,
                        score_thresh=post_process_cfg.SCORE_THRESH
                    )
                    # Map head-local class indices back to global labels.
                    cur_pred_labels = cur_label_mapping[cur_pred_labels]
                    pred_scores.append(cur_pred_scores)
                    pred_labels.append(cur_pred_labels)
                    pred_boxes.append(cur_pred_boxes)
                    cur_start_idx += cur_cls_preds.shape[0]
                final_scores = torch.cat(pred_scores, dim=0)
                final_labels = torch.cat(pred_labels, dim=0)
                final_boxes = torch.cat(pred_boxes, dim=0)
            else:
                cls_preds, label_preds = torch.max(cls_preds, dim=-1)
                if batch_dict.get('has_class_labels', False):
                    # Prefer labels already assigned upstream (e.g. by the RoI head).
                    label_key = 'roi_labels' if 'roi_labels' in batch_dict else 'batch_pred_labels'
                    label_preds = batch_dict[label_key][index]
                else:
                    label_preds = label_preds + 1
                # selected, selected_scores = model_nms_utils.class_agnostic_nms(
                #     box_scores=cls_preds, box_preds=box_preds,
                #     nms_config=post_process_cfg.NMS_CONFIG,
                #     score_thresh=post_process_cfg.SCORE_THRESH
                # )
                if batch_dict['mode'] == 'active_evaluate':
                    # Lazily initialise the per-frame statistic accumulators.
                    if batch_dict.get('reweight_roi', None) is None:
                        batch_dict['post_roi_num'] = []
                        batch_dict['reweight_roi'] = []
                        batch_dict['reweight_roi_entropy'] = []
                        batch_dict['reweight_roi_entropy_score'] = []
                        batch_dict['reweight_roi_entropy_score_2'] = []
                        batch_dict['uncertainty_score'] = []
                        batch_dict['cls_preds'] = []
                        batch_dict['roi_feature'] = []
                    selected, selected_scores, selected_roi = model_nms_utils.class_agnostic_nms_with_roi(
                        box_scores=cls_preds, box_preds=box_preds, roi_feature=roi_feature,
                        nms_config=post_process_cfg.NMS_CONFIG,
                        score_thresh=post_process_cfg.SCORE_THRESH
                    )
                    batch_dict['cls_preds'].append(cls_preds.mean().view(1))
                    batch_dict['roi_feature'].append(roi_feature.mean(dim=0).view(1, -1))
                    # reweight roi feature based on score
                    # NOTE(review): F is presumably torch.nn.functional (imported
                    # at file top, not visible here) — confirm.
                    reweight = F.softmax(selected_scores)
                    reweight_roi = reweight.view(1, -1) @ selected_roi
                    batch_dict['reweight_roi'].append(reweight_roi)
                    # reweight roi feature base on entropy
                    entropy = -cls_preds * torch.log(cls_preds)
                    reweight_roi_entropy = entropy.view(-1,1) * roi_feature
                    reweight_roi_entropy = reweight_roi_entropy.mean(dim=0)
                    batch_dict['reweight_roi_entropy'].append(reweight_roi_entropy)
                    entropy_score = -selected_scores * torch.log(selected_scores)
                    reweight_roi_entropy_score = entropy_score.view(-1, 1) * selected_roi
                    reweight_roi_entropy_score = reweight_roi_entropy_score.mean(dim=0)
                    batch_dict['reweight_roi_entropy_score'].append(reweight_roi_entropy_score)
                    entropy_score_2 = entropy_score.view(-1, 1) + reweight.view(-1, 1)
                    reweight_roi_entropy_score_2 = entropy_score_2 * selected_roi
                    reweight_roi_entropy_score_2 = reweight_roi_entropy_score_2.mean(dim=0)
                    batch_dict['reweight_roi_entropy_score_2'].append(reweight_roi_entropy_score_2)
                    # use entropy to evaluate uncertainty
                    # NOTE(review): binary entropy is NaN when a score is exactly
                    # 0 or 1 — confirm scores are strictly inside (0, 1).
                    uncertainty = -selected_scores * torch.log2(selected_scores) - (1 - selected_scores) * torch.log2(1 - selected_scores)
                    uncertainty_score = uncertainty.view(1, -1).mean(dim=-1)
                    batch_dict['uncertainty_score'].append(uncertainty_score)
                    batch_dict['post_roi_num'].append(selected_roi.shape[0])
                    # In active mode no pred_dicts are built: return the
                    # augmented batch_dict after the last sample.
                    if index == (batch_size-1):
                        return batch_dict
                    else:
                        continue
                else:
                    selected, selected_scores = model_nms_utils.class_agnostic_nms(
                        box_scores=cls_preds, box_preds=box_preds,
                        nms_config=post_process_cfg.NMS_CONFIG,
                        score_thresh=post_process_cfg.SCORE_THRESH
                    )
                if post_process_cfg.OUTPUT_RAW_SCORE:
                    # Report pre-sigmoid maxima instead of normalised scores.
                    max_cls_preds, _ = torch.max(src_cls_preds, dim=-1)
                    selected_scores = max_cls_preds[selected]
                final_scores = selected_scores
                final_labels = label_preds[selected]
                final_boxes = box_preds[selected]
            # Recall is measured against the raw (pre-NMS) boxes when RoIs exist.
            recall_dict = self.generate_recall_record(
                box_preds=final_boxes if 'rois' not in batch_dict else src_box_preds,
                recall_dict=recall_dict, batch_index=index, data_dict=batch_dict,
                thresh_list=post_process_cfg.RECALL_THRESH_LIST
            )
            record_dict = {
                'pred_boxes': final_boxes,
                'pred_scores': final_scores,
                'pred_labels': final_labels
            }
            pred_dicts.append(record_dict)
        return pred_dicts, recall_dict
@staticmethod
def generate_recall_record(box_preds, recall_dict, batch_index, data_dict=None, thresh_list=None):
if 'gt_boxes' not in data_dict:
return recall_dict
rois = data_dict['rois'][batch_index] if 'rois' in data_dict else None
gt_boxes = data_dict['gt_boxes'][batch_index]
if recall_dict.__len__() == 0:
recall_dict = {'gt': 0}
for cur_thresh in thresh_list:
recall_dict['roi_%s' % (str(cur_thresh))] = 0
recall_dict['rcnn_%s' % (str(cur_thresh))] = 0
cur_gt = gt_boxes
k = cur_gt.__len__() - 1
while k >= 0 and cur_gt[k].sum() == 0:
k -= 1
cur_gt = cur_gt[:k + 1]
if cur_gt.shape[0] > 0:
if box_preds.shape[0] > 0:
iou3d_rcnn = iou3d_nms_utils.boxes_iou3d_gpu(box_preds[:, 0:7], cur_gt[:, 0:7])
else:
iou3d_rcnn = torch.zeros((0, cur_gt.shape[0]))
if rois is not None:
iou3d_roi = iou3d_nms_utils.boxes_iou3d_gpu(rois[:, 0:7], cur_gt[:, 0:7])
for cur_thresh in thresh_list:
if iou3d_rcnn.shape[0] == 0:
recall_dict['rcnn_%s' % str(cur_thresh)] += 0
else:
rcnn_recalled = (iou3d_rcnn.max(dim=0)[0] > cur_thresh).sum().item()
recall_dict['rcnn_%s' % str(cur_thresh)] += rcnn_recalled
if rois is not None:
roi_recalled = (iou3d_roi.max(dim=0)[0] > cur_thresh).sum().item()
recall_dict['roi_%s' % str(cur_thresh)] += roi_recalled
recall_dict['gt'] += cur_gt.shape[0]
else:
gt_iou = box_preds.new_zeros(box_preds.shape[0])
return recall_dict
    def _load_state_dict(self, model_state_disk, *, strict=True):
        """Merge a checkpoint's tensors into this model's state dict.

        Only tensors whose key exists locally and whose shape matches (after
        an optional spconv 1.x -> 2.x layout adaptation) are applied. With
        ``strict=True`` only the matched subset is loaded; otherwise the local
        state dict is updated in place and loaded as a whole.

        Returns:
            (state_dict, update_model_state): the model's local state dict and
            the subset of checkpoint tensors that was actually applied.
        """
        state_dict = self.state_dict()  # local cache of state_dict
        spconv_keys = find_all_spconv_keys(self)
        update_model_state = {}
        for key, val in model_state_disk.items():
            if key in spconv_keys and key in state_dict and state_dict[key].shape != val.shape:
                # with different spconv versions, we need to adapt weight shapes for spconv blocks
                # adapt spconv weights from version 1.x to version 2.x if you used weights from spconv 1.x
                val_native = val.transpose(-1, -2)  # (k1, k2, k3, c_in, c_out) to (k1, k2, k3, c_out, c_in)
                if val_native.shape == state_dict[key].shape:
                    val = val_native.contiguous()
                else:
                    assert val.shape.__len__() == 5, 'currently only spconv 3D is supported'
                    val_implicit = val.permute(4, 0, 1, 2, 3)  # (k1, k2, k3, c_in, c_out) to (c_out, k1, k2, k3, c_in)
                    if val_implicit.shape == state_dict[key].shape:
                        val = val_implicit.contiguous()
            # Silently skip keys that are missing locally or still shape-mismatched.
            if key in state_dict and state_dict[key].shape == val.shape:
                update_model_state[key] = val
                # logger.info('Update weight %s: %s' % (key, str(val.shape)))
        if strict:
            self.load_state_dict(update_model_state)
        else:
            state_dict.update(update_model_state)
            self.load_state_dict(state_dict)
        return state_dict, update_model_state
def load_params_from_file(self, filename, logger, to_cpu=False):
if not os.path.isfile(filename):
raise FileNotFoundError
logger.info('==> Loading parameters from checkpoint %s to %s' % (filename, 'CPU' if to_cpu else 'GPU'))
loc_type = torch.device('cpu') if to_cpu else None
checkpoint = torch.load(filename, map_location=loc_type)
model_state_disk = checkpoint['model_state']
version = checkpoint.get("version", None)
if version is not None:
logger.info('==> Checkpoint trained from version: %s' % version)
state_dict, update_model_state = self._load_state_dict(model_state_disk, strict=False)
for key in state_dict:
if key not in update_model_state:
logger.info('Not updated weight %s: %s' % (key, str(state_dict[key].shape)))
logger.info('==> Done (loaded %d/%d)' % (len(update_model_state), len(state_dict)))
def load_params_with_optimizer(self, filename, to_cpu=False, optimizer=None, logger=None):
if not os.path.isfile(filename):
raise FileNotFoundError
logger.info('==> Loading parameters from checkpoint %s to %s' % (filename, 'CPU' if to_cpu else 'GPU'))
loc_type = torch.device('cpu') if to_cpu else None
checkpoint = torch.load(filename, map_location=loc_type)
epoch = checkpoint.get('epoch', -1)
it = checkpoint.get('it', 0.0)
self._load_state_dict(checkpoint['model_state'], strict=True)
if optimizer is not None:
if 'optimizer_state' in checkpoint and checkpoint['optimizer_state'] is not None:
logger.info('==> Loading optimizer parameters from checkpoint %s to %s'
% (filename, 'CPU' if to_cpu else 'GPU'))
optimizer.load_state_dict(checkpoint['optimizer_state'])
else:
assert filename[-4] == '.', filename
src_file, ext = filename[:-4], filename[-3:]
optimizer_filename = '%s_optim.%s' % (src_file, ext)
if os.path.exists(optimizer_filename):
optimizer_ckpt = torch.load(optimizer_filename, map_location=loc_type)
optimizer.load_state_dict(optimizer_ckpt['optimizer_state'])
if 'version' in checkpoint:
print('==> Checkpoint trained from version: %s' % checkpoint['version'])
logger.info('==> Done')
return it, epoch
| 23,013
| 47.348739
| 138
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/detectors/detector3d_template_multi_db.py
|
import os
import torch
import torch.nn as nn
from ...ops.iou3d_nms import iou3d_nms_utils
from ...utils.spconv_utils import find_all_spconv_keys
from .. import backbones_2d, backbones_3d, dense_heads, roi_heads, mdf_models
from ..backbones_2d import map_to_bev
from ..backbones_3d import pfe, vfe
from ..model_utils import model_nms_utils
class Detector3DTemplate_M_DB(nn.Module):
    """Template detector for multi-dataset (two-source) joint training.

    Builds a pipeline of shared stages (VFE, 3D backbone, BEV projection,
    2D backbone) plus per-dataset heads (suffixes ``_s1`` / ``_s2``), with
    optional Mixture-of-Experts modules ('dense_3d_moe', 'dense_2d_moe')
    between the shared stages. Each ``build_*`` method is dispatched by name
    from ``module_topology`` and returns ``(module_or_None, model_info_dict)``.
    """
    def __init__(self, model_cfg, num_class, num_class_s2, dataset, dataset_s2, source_one_name):
        super().__init__()
        self.model_cfg = model_cfg
        self.num_class = num_class
        self.num_class_s2 = num_class_s2
        self.dataset = dataset
        self.dataset_s2 = dataset_s2
        self.class_names = dataset.class_names
        self.class_names_s2 = dataset_s2.class_names
        self.source_one_name = source_one_name
        # Persistent (checkpointed) counter of completed training steps.
        self.register_buffer('global_step', torch.LongTensor(1).zero_())
        # Build order: shared stages first, then the per-dataset heads.
        self.module_topology = [
            'point_t', 'vfe', 'backbone_3d', 'map_to_bev_module', 'dense_3d_moe', 'pfe',
            'backbone_2d', 'dense_2d_moe', 'dense_head_s1', 'point_head_s1', 'roi_head_s1',
            'dense_head_s2', 'point_head_s2', 'roi_head_s2',
        ]
    @property
    def mode(self):
        # Mirrors nn.Module.training as a string flag.
        return 'TRAIN' if self.training else 'TEST'
    def update_global_step(self):
        self.global_step += 1
    def build_networks(self):
        """Build every module listed in ``module_topology`` and return them in order."""
        model_info_dict = {
            'module_list': [],
            'num_rawpoint_features': self.dataset.point_feature_encoder.num_point_features,
            'num_point_features': self.dataset.point_feature_encoder.num_point_features,
            'grid_size': self.dataset.grid_size,
            'point_cloud_range': self.dataset.point_cloud_range,
            'voxel_size': self.dataset.voxel_size,
            'depth_downsample_factor': self.dataset.depth_downsample_factor
        }
        for module_name in self.module_topology:
            # Dispatch to build_<module_name>; each builder threads model_info_dict.
            module, model_info_dict = getattr(self, 'build_%s' % module_name)(
                model_info_dict=model_info_dict
            )
            self.add_module(module_name, module)
        return model_info_dict['module_list']
    def build_point_t(self, model_info_dict):
        # Optional point-transform stage (skipped when POINT_T is absent).
        if self.model_cfg.get('POINT_T', None) is None:
            return None, model_info_dict
        point_t_module = pfe.__all__[self.model_cfg.POINT_T.NAME](
            model_cfg=self.model_cfg.POINT_T
        )
        model_info_dict['module_list'].append(point_t_module)
        return point_t_module, model_info_dict
    def build_vfe(self, model_info_dict):
        # Voxel feature encoder; updates num_point_features to its output size.
        if self.model_cfg.get('VFE', None) is None:
            return None, model_info_dict
        vfe_module = vfe.__all__[self.model_cfg.VFE.NAME](
            model_cfg=self.model_cfg.VFE,
            num_point_features=model_info_dict['num_rawpoint_features'],
            point_cloud_range=model_info_dict['point_cloud_range'],
            voxel_size=model_info_dict['voxel_size'],
            grid_size=model_info_dict['grid_size'],
            depth_downsample_factor=model_info_dict['depth_downsample_factor']
        )
        model_info_dict['num_point_features'] = vfe_module.get_output_feature_dim()
        model_info_dict['module_list'].append(vfe_module)
        return vfe_module, model_info_dict
    def build_backbone_3d(self, model_info_dict):
        # Sparse 3D backbone; records per-level channel counts when available.
        if self.model_cfg.get('BACKBONE_3D', None) is None:
            return None, model_info_dict
        backbone_3d_module = backbones_3d.__all__[self.model_cfg.BACKBONE_3D.NAME](
            model_cfg=self.model_cfg.BACKBONE_3D,
            input_channels=model_info_dict['num_point_features'],
            grid_size=model_info_dict['grid_size'],
            voxel_size=model_info_dict['voxel_size'],
            point_cloud_range=model_info_dict['point_cloud_range']
        )
        model_info_dict['module_list'].append(backbone_3d_module)
        model_info_dict['num_point_features'] = backbone_3d_module.num_point_features
        model_info_dict['backbone_channels'] = backbone_3d_module.backbone_channels \
            if hasattr(backbone_3d_module, 'backbone_channels') else None
        return backbone_3d_module, model_info_dict
    def build_map_to_bev_module(self, model_info_dict):
        # Projects sparse 3D features to a dense BEV feature map.
        if self.model_cfg.get('MAP_TO_BEV', None) is None:
            return None, model_info_dict
        map_to_bev_module = map_to_bev.__all__[self.model_cfg.MAP_TO_BEV.NAME](
            model_cfg=self.model_cfg.MAP_TO_BEV,
            grid_size=model_info_dict['grid_size']
        )
        model_info_dict['module_list'].append(map_to_bev_module)
        model_info_dict['num_bev_features'] = map_to_bev_module.num_bev_features
        return map_to_bev_module, model_info_dict
    def build_dense_3d_moe(self, model_info_dict):
        # Optional Mixture-of-Experts module applied after the 3D backbone.
        if self.model_cfg.get('DENSE_3D_MoE', None) is None:
            return None, model_info_dict
        dense_moe_module = mdf_models.__all__[self.model_cfg.DENSE_3D_MoE.NAME](
            model_cfg=self.model_cfg.DENSE_3D_MoE
        )
        model_info_dict['module_list'].append(dense_moe_module)
        return dense_moe_module, model_info_dict
    def build_backbone_2d(self, model_info_dict):
        # 2D BEV backbone; updates num_bev_features to its output size.
        if self.model_cfg.get('BACKBONE_2D', None) is None:
            return None, model_info_dict
        backbone_2d_module = backbones_2d.__all__[self.model_cfg.BACKBONE_2D.NAME](
            model_cfg=self.model_cfg.BACKBONE_2D,
            input_channels=model_info_dict['num_bev_features']
        )
        model_info_dict['module_list'].append(backbone_2d_module)
        model_info_dict['num_bev_features'] = backbone_2d_module.num_bev_features
        return backbone_2d_module, model_info_dict
    def build_pfe(self, model_info_dict):
        # Point feature encoder (e.g. VSA); exposes pre-fusion feature size too.
        if self.model_cfg.get('PFE', None) is None:
            return None, model_info_dict
        pfe_module = pfe.__all__[self.model_cfg.PFE.NAME](
            model_cfg=self.model_cfg.PFE,
            voxel_size=model_info_dict['voxel_size'],
            point_cloud_range=model_info_dict['point_cloud_range'],
            num_bev_features=model_info_dict['num_bev_features'],
            num_rawpoint_features=model_info_dict['num_rawpoint_features']
        )
        model_info_dict['module_list'].append(pfe_module)
        model_info_dict['num_point_features'] = pfe_module.num_point_features
        model_info_dict['num_point_features_before_fusion'] = pfe_module.num_point_features_before_fusion
        return pfe_module, model_info_dict
    def build_dense_2d_moe(self, model_info_dict):
        # Optional Mixture-of-Experts module applied after the 2D backbone.
        if self.model_cfg.get('DENSE_2D_MoE', None) is None:
            return None, model_info_dict
        dense_moe_module = mdf_models.__all__[self.model_cfg.DENSE_2D_MoE.NAME](
            model_cfg=self.model_cfg.DENSE_2D_MoE
        )
        model_info_dict['module_list'].append(dense_moe_module)
        return dense_moe_module, model_info_dict
    def build_dense_head_s1(self, model_info_dict):
        # Dense (BEV) detection head for dataset source 1.
        if self.model_cfg.get('DENSE_HEAD_S1', None) is None:
            return None, model_info_dict
        dense_head_module_s1 = dense_heads.__all__[self.model_cfg.DENSE_HEAD_S1.NAME](
            model_cfg=self.model_cfg.DENSE_HEAD_S1,
            input_channels=model_info_dict['num_bev_features'],
            num_class=self.num_class if not self.model_cfg.DENSE_HEAD_S1.CLASS_AGNOSTIC else 1,
            class_names=self.class_names,
            grid_size=model_info_dict['grid_size'],
            point_cloud_range=model_info_dict['point_cloud_range'],
            predict_boxes_when_training=self.model_cfg.get('ROI_HEAD_S1', False),
            voxel_size=model_info_dict.get('voxel_size', False)
        )
        model_info_dict['module_list'].append(dense_head_module_s1)
        return dense_head_module_s1, model_info_dict
    def build_dense_head_s2(self, model_info_dict):
        # Dense (BEV) detection head for dataset source 2 (own classes/names).
        if self.model_cfg.get('DENSE_HEAD_S2', None) is None:
            return None, model_info_dict
        dense_head_module_s2 = dense_heads.__all__[self.model_cfg.DENSE_HEAD_S2.NAME](
            model_cfg=self.model_cfg.DENSE_HEAD_S2,
            input_channels=model_info_dict['num_bev_features'],
            num_class=self.num_class_s2 if not self.model_cfg.DENSE_HEAD_S2.CLASS_AGNOSTIC else 1,
            class_names=self.class_names_s2,
            grid_size=model_info_dict['grid_size'],
            point_cloud_range=model_info_dict['point_cloud_range'],
            predict_boxes_when_training=self.model_cfg.get('ROI_HEAD_S2', False),
            voxel_size=model_info_dict.get('voxel_size', False)
        )
        model_info_dict['module_list'].append(dense_head_module_s2)
        return dense_head_module_s2, model_info_dict
    def build_point_head_s1(self, model_info_dict):
        # Point-wise head for source 1; may use pre-fusion point features.
        if self.model_cfg.get('POINT_HEAD_S1', None) is None:
            return None, model_info_dict
        if self.model_cfg.POINT_HEAD_S1.get('USE_POINT_FEATURES_BEFORE_FUSION', False):
            num_point_features = model_info_dict['num_point_features_before_fusion']
        else:
            num_point_features = model_info_dict['num_point_features']
        point_head_module_s1 = dense_heads.__all__[self.model_cfg.POINT_HEAD_S1.NAME](
            model_cfg=self.model_cfg.POINT_HEAD_S1,
            input_channels=num_point_features,
            num_class=self.num_class if not self.model_cfg.POINT_HEAD_S1.CLASS_AGNOSTIC else 1,
            predict_boxes_when_training=self.model_cfg.get('ROI_HEAD_S1', False)
        )
        model_info_dict['module_list'].append(point_head_module_s1)
        return point_head_module_s1, model_info_dict
    def build_point_head_s2(self, model_info_dict):
        # Point-wise head for source 2; may use pre-fusion point features.
        if self.model_cfg.get('POINT_HEAD_S2', None) is None:
            return None, model_info_dict
        if self.model_cfg.POINT_HEAD_S2.get('USE_POINT_FEATURES_BEFORE_FUSION', False):
            num_point_features = model_info_dict['num_point_features_before_fusion']
        else:
            num_point_features = model_info_dict['num_point_features']
        point_head_module_s2 = dense_heads.__all__[self.model_cfg.POINT_HEAD_S2.NAME](
            model_cfg=self.model_cfg.POINT_HEAD_S2,
            input_channels=num_point_features,
            num_class=self.num_class_s2 if not self.model_cfg.POINT_HEAD_S2.CLASS_AGNOSTIC else 1,
            predict_boxes_when_training=self.model_cfg.get('ROI_HEAD_S2', False)
        )
        model_info_dict['module_list'].append(point_head_module_s2)
        return point_head_module_s2, model_info_dict
    def build_roi_head_s1(self, model_info_dict):
        # RoI refinement head for source 1.
        if self.model_cfg.get('ROI_HEAD_S1', None) is None:
            return None, model_info_dict
        point_head_module_s1 = roi_heads.__all__[self.model_cfg.ROI_HEAD_S1.NAME](
            model_cfg=self.model_cfg.ROI_HEAD_S1,
            input_channels=model_info_dict['num_point_features'],
            backbone_channels=model_info_dict['backbone_channels'],
            point_cloud_range=model_info_dict['point_cloud_range'],
            voxel_size=model_info_dict['voxel_size'],
            num_class=self.num_class if not self.model_cfg.ROI_HEAD_S1.CLASS_AGNOSTIC else 1,
        )
        model_info_dict['module_list'].append(point_head_module_s1)
        return point_head_module_s1, model_info_dict
    def build_roi_head_s2(self, model_info_dict):
        # RoI refinement head for source 2.
        if self.model_cfg.get('ROI_HEAD_S2', None) is None:
            return None, model_info_dict
        point_head_module_s2 = roi_heads.__all__[self.model_cfg.ROI_HEAD_S2.NAME](
            model_cfg=self.model_cfg.ROI_HEAD_S2,
            input_channels=model_info_dict['num_point_features'],
            backbone_channels=model_info_dict['backbone_channels'],
            point_cloud_range=model_info_dict['point_cloud_range'],
            voxel_size=model_info_dict['voxel_size'],
            num_class=self.num_class_s2 if not self.model_cfg.ROI_HEAD_S2.CLASS_AGNOSTIC else 1,
        )
        model_info_dict['module_list'].append(point_head_module_s2)
        return point_head_module_s2, model_info_dict
    def forward(self, **kwargs):
        # Concrete detector subclasses must implement the forward pass.
        raise NotImplementedError
    # add some multi-head operation
    def post_processing(self, batch_dict):
        """
        Decode per-sample predictions from the batched network outputs,
        apply (multi-class or class-agnostic) NMS and accumulate recall stats.

        Args:
            batch_dict:
                batch_size:
                batch_cls_preds: (B, num_boxes, num_classes | 1) or (N1+N2+..., num_classes | 1)
                                or [(B, num_boxes, num_class1), (B, num_boxes, num_class2) ...]
                multihead_label_mapping: [(num_class1), (num_class2), ...]
                batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C)
                cls_preds_normalized: indicate whether batch_cls_preds is normalized
                batch_index: optional (N1+N2+...)
                has_class_labels: True/False
                roi_labels: (B, num_rois) 1 .. num_classes
                batch_pred_labels: (B, num_boxes, 1)
        Returns:
            (pred_dicts, recall_dict)
        """
        post_process_cfg = self.model_cfg.POST_PROCESSING
        batch_size = batch_dict['batch_size']
        recall_dict = {}
        pred_dicts = []
        for index in range(batch_size):
            # Predictions may be stored flat (2-D with explicit batch index)
            # or batched (3-D, indexed by sample position).
            if batch_dict.get('batch_index', None) is not None:
                assert batch_dict['batch_box_preds'].shape.__len__() == 2
                batch_mask = (batch_dict['batch_index'] == index)
            else:
                assert batch_dict['batch_box_preds'].shape.__len__() == 3
                batch_mask = index
            box_preds = batch_dict['batch_box_preds'][batch_mask]
            src_box_preds = box_preds
            if not isinstance(batch_dict['batch_cls_preds'], list):
                cls_preds = batch_dict['batch_cls_preds'][batch_mask]
                src_cls_preds = cls_preds
                # NOTE(review): checks against self.num_class only; confirm the
                # S2 head's outputs never reach this assert with num_class_s2.
                assert cls_preds.shape[1] in [1, self.num_class]
                if not batch_dict['cls_preds_normalized']:
                    cls_preds = torch.sigmoid(cls_preds)
            else:
                cls_preds = [x[batch_mask] for x in batch_dict['batch_cls_preds']]
                src_cls_preds = cls_preds
                if not batch_dict['cls_preds_normalized']:
                    cls_preds = [torch.sigmoid(x) for x in cls_preds]
            if post_process_cfg.NMS_CONFIG.MULTI_CLASSES_NMS:
                if not isinstance(cls_preds, list):
                    cls_preds = [cls_preds]
                    # NOTE(review): arange(1, num_class) yields num_class-1 labels;
                    # confirm whether the upper bound should be num_class + 1.
                    multihead_label_mapping = [torch.arange(1, self.num_class, device=cls_preds[0].device)]
                else:
                    multihead_label_mapping = batch_dict['multihead_label_mapping']
                cur_start_idx = 0
                pred_scores, pred_labels, pred_boxes = [], [], []
                # Each head owns a contiguous slice of box_preds.
                for cur_cls_preds, cur_label_mapping in zip(cls_preds, multihead_label_mapping):
                    assert cur_cls_preds.shape[1] == len(cur_label_mapping)
                    cur_box_preds = box_preds[cur_start_idx: cur_start_idx + cur_cls_preds.shape[0]]
                    cur_pred_scores, cur_pred_labels, cur_pred_boxes = model_nms_utils.multi_classes_nms(
                        cls_scores=cur_cls_preds, box_preds=cur_box_preds,
                        nms_config=post_process_cfg.NMS_CONFIG,
                        score_thresh=post_process_cfg.SCORE_THRESH
                    )
                    # Map head-local class indices back to global labels.
                    cur_pred_labels = cur_label_mapping[cur_pred_labels]
                    pred_scores.append(cur_pred_scores)
                    pred_labels.append(cur_pred_labels)
                    pred_boxes.append(cur_pred_boxes)
                    cur_start_idx += cur_cls_preds.shape[0]
                final_scores = torch.cat(pred_scores, dim=0)
                final_labels = torch.cat(pred_labels, dim=0)
                final_boxes = torch.cat(pred_boxes, dim=0)
            else:
                cls_preds, label_preds = torch.max(cls_preds, dim=-1)
                if batch_dict.get('has_class_labels', False):
                    # Prefer labels already assigned upstream (e.g. by the RoI head).
                    label_key = 'roi_labels' if 'roi_labels' in batch_dict else 'batch_pred_labels'
                    label_preds = batch_dict[label_key][index]
                else:
                    label_preds = label_preds + 1
                selected, selected_scores = model_nms_utils.class_agnostic_nms(
                    box_scores=cls_preds, box_preds=box_preds,
                    nms_config=post_process_cfg.NMS_CONFIG,
                    score_thresh=post_process_cfg.SCORE_THRESH
                )
                if post_process_cfg.OUTPUT_RAW_SCORE:
                    # Report pre-sigmoid maxima instead of normalised scores.
                    max_cls_preds, _ = torch.max(src_cls_preds, dim=-1)
                    selected_scores = max_cls_preds[selected]
                final_scores = selected_scores
                final_labels = label_preds[selected]
                final_boxes = box_preds[selected]
            # Recall is measured against the raw (pre-NMS) boxes when RoIs exist.
            recall_dict = self.generate_recall_record(
                box_preds=final_boxes if 'rois' not in batch_dict else src_box_preds,
                recall_dict=recall_dict, batch_index=index, data_dict=batch_dict,
                thresh_list=post_process_cfg.RECALL_THRESH_LIST
            )
            record_dict = {
                'pred_boxes': final_boxes,
                'pred_scores': final_scores,
                'pred_labels': final_labels
            }
            pred_dicts.append(record_dict)
        return pred_dicts, recall_dict
    @staticmethod
    def generate_recall_record(box_preds, recall_dict, batch_index, data_dict=None, thresh_list=None):
        """Accumulate RoI / RCNN recall counters for one sample of a batch."""
        if 'gt_boxes' not in data_dict:
            return recall_dict
        rois = data_dict['rois'][batch_index] if 'rois' in data_dict else None
        gt_boxes = data_dict['gt_boxes'][batch_index]
        if recall_dict.__len__() == 0:
            recall_dict = {'gt': 0}
            for cur_thresh in thresh_list:
                recall_dict['roi_%s' % (str(cur_thresh))] = 0
                recall_dict['rcnn_%s' % (str(cur_thresh))] = 0
        cur_gt = gt_boxes
        # Ground-truth tensors are zero-padded at the tail; trim those rows.
        k = cur_gt.__len__() - 1
        while k >= 0 and cur_gt[k].sum() == 0:
            k -= 1
        cur_gt = cur_gt[:k + 1]
        if cur_gt.shape[0] > 0:
            if box_preds.shape[0] > 0:
                iou3d_rcnn = iou3d_nms_utils.boxes_iou3d_gpu(box_preds[:, 0:7], cur_gt[:, 0:7])
            else:
                iou3d_rcnn = torch.zeros((0, cur_gt.shape[0]))
            if rois is not None:
                iou3d_roi = iou3d_nms_utils.boxes_iou3d_gpu(rois[:, 0:7], cur_gt[:, 0:7])
            for cur_thresh in thresh_list:
                if iou3d_rcnn.shape[0] == 0:
                    recall_dict['rcnn_%s' % str(cur_thresh)] += 0
                else:
                    # A GT box is "recalled" when its best match exceeds the threshold.
                    rcnn_recalled = (iou3d_rcnn.max(dim=0)[0] > cur_thresh).sum().item()
                    recall_dict['rcnn_%s' % str(cur_thresh)] += rcnn_recalled
                if rois is not None:
                    roi_recalled = (iou3d_roi.max(dim=0)[0] > cur_thresh).sum().item()
                    recall_dict['roi_%s' % str(cur_thresh)] += roi_recalled
            recall_dict['gt'] += cur_gt.shape[0]
        else:
            gt_iou = box_preds.new_zeros(box_preds.shape[0])  # unused; kept as-is
        return recall_dict
    def _load_state_dict(self, model_state_disk, *, strict=True):
        """Merge a checkpoint's tensors into this model's state dict.

        Only keys that exist locally with a matching shape (after an optional
        spconv 1.x -> 2.x layout adaptation) are applied.
        """
        state_dict = self.state_dict()  # local cache of state_dict
        spconv_keys = find_all_spconv_keys(self)
        update_model_state = {}
        for key, val in model_state_disk.items():
            if key in spconv_keys and key in state_dict and state_dict[key].shape != val.shape:
                # with different spconv versions, we need to adapt weight shapes for spconv blocks
                # adapt spconv weights from version 1.x to version 2.x if you used weights from spconv 1.x
                val_native = val.transpose(-1, -2)  # (k1, k2, k3, c_in, c_out) to (k1, k2, k3, c_out, c_in)
                if val_native.shape == state_dict[key].shape:
                    val = val_native.contiguous()
                else:
                    assert val.shape.__len__() == 5, 'currently only spconv 3D is supported'
                    val_implicit = val.permute(4, 0, 1, 2, 3)  # (k1, k2, k3, c_in, c_out) to (c_out, k1, k2, k3, c_in)
                    if val_implicit.shape == state_dict[key].shape:
                        val = val_implicit.contiguous()
            if key in state_dict and state_dict[key].shape == val.shape:
                update_model_state[key] = val
                # logger.info('Update weight %s: %s' % (key, str(val.shape)))
        if strict:
            self.load_state_dict(update_model_state)
        else:
            state_dict.update(update_model_state)
            self.load_state_dict(state_dict)
        return state_dict, update_model_state
    def frozen_model(self, model):
        """Freeze the shared backbone stages of *model* (heads stay trainable)."""
        for p in model.vfe.parameters():
            p.requires_grad = False
        for p in model.backbone_3d.parameters():
            p.requires_grad = False
        for p in model.map_to_bev_module.parameters():
            p.requires_grad = False
        for p in model.backbone_2d.parameters():
            p.requires_grad = False
        return model
    def load_params_from_file(self, filename, logger, to_cpu=False):
        """Load model weights from a checkpoint file (non-strict); skipped
        weights are reported through *logger*."""
        if not os.path.isfile(filename):
            raise FileNotFoundError
        logger.info('==> Loading parameters from checkpoint %s to %s' % (filename, 'CPU' if to_cpu else 'GPU'))
        loc_type = torch.device('cpu') if to_cpu else None
        checkpoint = torch.load(filename, map_location=loc_type)
        model_state_disk = checkpoint['model_state']
        version = checkpoint.get("version", None)
        if version is not None:
            logger.info('==> Checkpoint trained from version: %s' % version)
        state_dict, update_model_state = self._load_state_dict(model_state_disk, strict=False)
        for key in state_dict:
            if key not in update_model_state:
                logger.info('Not updated weight %s: %s' % (key, str(state_dict[key].shape)))
        logger.info('==> Done (loaded %d/%d)' % (len(update_model_state), len(state_dict)))
    def load_params_with_optimizer(self, filename, to_cpu=False, optimizer=None, logger=None):
        """Load model weights and (optionally) optimizer state from a checkpoint.

        Falls back to a side-car ``<name>_optim.<ext>`` file when the
        checkpoint itself carries no optimizer state.

        Returns:
            (it, epoch): iteration counter and epoch stored in the checkpoint.
        """
        if not os.path.isfile(filename):
            raise FileNotFoundError
        logger.info('==> Loading parameters from checkpoint %s to %s' % (filename, 'CPU' if to_cpu else 'GPU'))
        loc_type = torch.device('cpu') if to_cpu else None
        checkpoint = torch.load(filename, map_location=loc_type)
        epoch = checkpoint.get('epoch', -1)
        it = checkpoint.get('it', 0.0)
        self._load_state_dict(checkpoint['model_state'], strict=True)
        if optimizer is not None:
            if 'optimizer_state' in checkpoint and checkpoint['optimizer_state'] is not None:
                logger.info('==> Loading optimizer parameters from checkpoint %s to %s'
                            % (filename, 'CPU' if to_cpu else 'GPU'))
                optimizer.load_state_dict(checkpoint['optimizer_state'])
            else:
                # Fall back to a side-car optimizer checkpoint: name_optim.ext
                assert filename[-4] == '.', filename
                src_file, ext = filename[:-4], filename[-3:]
                optimizer_filename = '%s_optim.%s' % (src_file, ext)
                if os.path.exists(optimizer_filename):
                    optimizer_ckpt = torch.load(optimizer_filename, map_location=loc_type)
                    optimizer.load_state_dict(optimizer_ckpt['optimizer_state'])
        if 'version' in checkpoint:
            print('==> Checkpoint trained from version: %s' % checkpoint['version'])
        logger.info('==> Done')
        return it, epoch
| 23,645
| 45.455796
| 119
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/detectors/PartA2_net.py
|
from .detector3d_template import Detector3DTemplate
class PartA2Net(Detector3DTemplate):
    """Part-A2 detector: runs its module pipeline, then either computes
    the training losses or post-processes predictions for inference."""

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()

    def forward(self, batch_dict):
        # Apply every sub-module in topology order on the shared batch dict.
        for module in self.module_list:
            batch_dict = module(batch_dict)
        if not self.training:
            # Inference: (pred_dicts, recall_dicts) from post-processing.
            return self.post_processing(batch_dict)
        loss, tb_dict, disp_dict = self.get_training_loss()
        return {'loss': loss}, tb_dict, disp_dict

    def get_training_loss(self):
        """Sum the RPN, point-head and RCNN losses."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head.get_loss()
        loss_point, tb_dict = self.point_head.get_loss(tb_dict)
        loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)
        return loss_rpn + loss_point + loss_rcnn, tb_dict, disp_dict
| 1,072
| 32.53125
| 83
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/detectors/IASSD.py
|
from .detector3d_template_IASSD import Detector3DTemplate_IASSD
class IASSD(Detector3DTemplate_IASSD):
    """IA-SSD detector built on the IA-SSD template."""

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()

    def forward(self, batch_dict):
        # Apply every sub-module in topology order on the shared batch dict.
        for module in self.module_list:
            batch_dict = module(batch_dict)
        if not self.training:
            # Inference: (pred_dicts, recall_dicts) from post-processing.
            return self.post_processing(batch_dict)
        loss, tb_dict, disp_dict = self.get_training_loss()
        return {'loss': loss}, tb_dict, disp_dict

    def get_training_loss(self):
        """All supervision in IA-SSD comes from the point head."""
        loss_point, tb_dict = self.point_head.get_loss()
        return loss_point, tb_dict, {}
| 946
| 32.821429
| 83
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/detectors/pv_rcnn.py
|
import torch
from .detector3d_template import Detector3DTemplate
from .detector3d_template_multi_db import Detector3DTemplate_M_DB
from .detector3d_template_multi_db_3 import Detector3DTemplate_M_DB_3
from .detector3d_template_ada import ActiveDetector3DTemplate
from pcdet.utils import common_utils
class PVRCNN(Detector3DTemplate):
    """PV-RCNN detector built from the standard template module topology."""

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()

    def forward(self, batch_dict):
        """Run all sub-modules; return losses in train mode, final boxes otherwise."""
        for module in self.module_list:
            batch_dict = module(batch_dict)

        if not self.training:
            pred_dicts, recall_dicts = self.post_processing(batch_dict)
            return pred_dicts, recall_dicts

        loss, tb_dict, disp_dict = self.get_training_loss()
        ret_dict = {'loss': loss}
        return ret_dict, tb_dict, disp_dict

    def get_training_loss(self):
        """Sum RPN + point-head + RCNN losses, plus the 3D-backbone loss when it exposes one."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head.get_loss()
        loss_point, tb_dict = self.point_head.get_loss(tb_dict)
        loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)
        total_loss = loss_rpn + loss_point + loss_rcnn

        # Some 3D backbones contribute an auxiliary loss (duck-typed via get_loss).
        if hasattr(self.backbone_3d, 'get_loss'):
            loss_backbone3d, tb_dict = self.backbone_3d.get_loss(tb_dict)
            total_loss += loss_backbone3d

        return total_loss, tb_dict, disp_dict
class SemiPVRCNN(Detector3DTemplate):
    """PV-RCNN for semi-supervised (teacher/student) training.

    ``forward`` dispatches on ``self.model_type``:
      * 'origin'  - plain supervised behaviour (loss in train, boxes in eval).
      * 'teacher' - always returns the raw ``batch_dict`` so the caller can
        derive pseudo labels from its predictions.
      * 'student' - in training, returns losses for (pseudo-)labeled batches
        (those carrying 'gt_boxes') or the raw ``batch_dict`` for unlabeled
        ones; in eval, returns final boxes.
    """

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()
        self.model_type = None  # must be set via set_model_type() before forward()

    def set_model_type(self, model_type):
        """Set the role and propagate it to the heads that branch on it."""
        assert model_type in ['origin', 'teacher', 'student']
        self.model_type = model_type
        self.dense_head.model_type = model_type
        self.point_head.model_type = model_type
        self.roi_head.model_type = model_type

    def forward(self, batch_dict):
        # origin: (training, return loss) (testing, return final boxes)
        if self.model_type == 'origin':
            for cur_module in self.module_list:
                batch_dict = cur_module(batch_dict)
            if self.training:
                loss, tb_dict, disp_dict = self.get_training_loss()
                ret_dict = {
                    'loss': loss
                }
                return ret_dict, tb_dict, disp_dict
            else:
                pred_dicts, recall_dicts = self.post_processing(batch_dict)
                return pred_dicts, recall_dicts
        # teacher: (testing, return raw boxes)
        elif self.model_type == 'teacher':
            # assert not self.training
            for cur_module in self.module_list:
                batch_dict = cur_module(batch_dict)
            return batch_dict
        # student:
        elif self.model_type == 'student':
            for cur_module in self.module_list:
                batch_dict = cur_module(batch_dict)
            if self.training:
                if 'gt_boxes' in batch_dict:  # for (pseudo-)labeled data
                    loss, tb_dict, disp_dict = self.get_training_loss()
                    ret_dict = {
                        'loss': loss
                    }
                    return batch_dict, ret_dict, tb_dict, disp_dict
                else:
                    return batch_dict  # unlabeled data: raw predictions only
            else:
                pred_dicts, recall_dicts = self.post_processing(batch_dict)
                return pred_dicts, recall_dicts
        else:
            # Fixed typo in the error message (was 'Unsupprted model type')
            # and included the offending value for easier debugging.
            raise Exception('Unsupported model type: %s' % self.model_type)

    def get_training_loss(self):
        """Sum RPN + point-head + RCNN losses, plus the optional 3D-backbone loss."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head.get_loss()
        loss_point, tb_dict = self.point_head.get_loss(tb_dict)
        loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)
        loss = loss_rpn + loss_point + loss_rcnn
        if hasattr(self.backbone_3d, 'get_loss'):
            loss_backbone3d, tb_dict = self.backbone_3d.get_loss(tb_dict)
            loss += loss_backbone3d
        return loss, tb_dict, disp_dict
class PVRCNN_M_DB(Detector3DTemplate_M_DB):
    """PV-RCNN with two dataset-specific head groups for joint training on a
    concatenation of two datasets.

    The shared trunk (all but the last six modules) runs on the merged batch;
    the last six modules appear to be two head triplets — (dense, point, roi)
    for source 1 followed by the same for source 2 (see the per-source loss
    methods below) — TODO confirm against build_networks topology.
    """

    def __init__(self, model_cfg, num_class, num_class_s2, dataset, dataset_s2, source_one_name):
        super().__init__(model_cfg=model_cfg, num_class=num_class, num_class_s2=num_class_s2, dataset=dataset,
                         dataset_s2=dataset_s2, source_one_name=source_one_name)
        self.module_list = self.build_networks()
        # Tag used by common_utils.split_batch_dict to separate the two sources.
        self.source_one_name = source_one_name

    def forward(self, batch_dict):
        # Split the Concat dataset batch into batch_1 and batch_2
        split_tag_s1, split_tag_s2 = common_utils.split_batch_dict(self.source_one_name, batch_dict)
        batch_s1 = {}
        batch_s2 = {}
        len_of_module = len(self.module_list)
        for k, cur_module in enumerate(self.module_list):
            if k < len_of_module-6:
                # Shared trunk: run on the (possibly mixed) merged batch.
                batch_dict = cur_module(batch_dict)
            if k == len_of_module-6 or k == len_of_module-5 or k == len_of_module-4:
                # Head group for source 1.
                if len(split_tag_s1) == batch_dict['batch_size']:
                    # Pure source-1 batch: feed it through directly.
                    batch_dict = cur_module(batch_dict)
                elif len(split_tag_s2) == batch_dict['batch_size']:
                    # Pure source-2 batch: source-1 heads are skipped entirely.
                    continue
                else:
                    if k == len_of_module-6:
                        # Mixed batch: split once, at the first source-1 head.
                        batch_s1, batch_s2 = common_utils.split_two_batch_dict_gpu(split_tag_s1, split_tag_s2, batch_dict)
                    batch_s1 = cur_module(batch_s1)
            if k == len_of_module-3 or k == len_of_module-2 or k == len_of_module-1:
                # Head group for source 2 (mirror of the block above).
                if len(split_tag_s2) == batch_dict['batch_size']:
                    batch_dict = cur_module(batch_dict)
                elif len(split_tag_s1) == batch_dict['batch_size']:
                    continue
                else:
                    batch_s2 = cur_module(batch_s2)
        if self.training:
            split_tag_s1, split_tag_s2 = common_utils.split_batch_dict(self.source_one_name, batch_dict)
            if len(split_tag_s1) == batch_dict['batch_size']:
                # Pure source-1 batch: only the source-1 loss applies.
                loss, tb_dict, disp_dict = self.get_training_loss_s1()
                ret_dict = {
                    'loss': loss
                }
                return ret_dict, tb_dict, disp_dict
            elif len(split_tag_s2) == batch_dict['batch_size']:
                loss, tb_dict, disp_dict = self.get_training_loss_s2()
                ret_dict = {
                    'loss': loss
                }
                return ret_dict, tb_dict, disp_dict
            else:
                # Mixed batch: sum both losses; tb/disp dicts come from source 1 only.
                loss_1, tb_dict_1, disp_dict_1 = self.get_training_loss_s1()
                loss_2, tb_dict_2, disp_dict_2 = self.get_training_loss_s2()
                ret_dict = {
                    'loss': loss_1 + loss_2
                }
                return ret_dict, tb_dict_1, disp_dict_1
        else:
            # NOTE: When perform the inference, only one dataset can be accessed.
            pred_dicts, recall_dicts = self.post_processing(batch_dict)
            return pred_dicts, recall_dicts

    def get_training_loss_s1(self):
        """RPN + point + RCNN loss from the source-1 head group."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head_s1.get_loss()
        loss_point, tb_dict = self.point_head_s1.get_loss(tb_dict)
        loss_rcnn, tb_dict = self.roi_head_s1.get_loss(tb_dict)
        loss = loss_rpn + loss_point + loss_rcnn
        return loss, tb_dict, disp_dict

    def get_training_loss_s2(self):
        """RPN + point + RCNN loss from the source-2 head group."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head_s2.get_loss()
        loss_point, tb_dict = self.point_head_s2.get_loss(tb_dict)
        loss_rcnn, tb_dict = self.roi_head_s2.get_loss(tb_dict)
        loss = loss_rpn + loss_point + loss_rcnn
        return loss, tb_dict, disp_dict
class PVRCNN_M_DB_3(Detector3DTemplate_M_DB_3):
    """PV-RCNN with three dataset-specific head groups (three-source joint training).

    In training, the merged batch is split into three per-source batches and
    each is routed through its own head triplet (the last nine modules, three
    per source). At inference, ``self.source_1`` (1/2/3) selects which head
    group to run on the single-source batch.
    """

    def __init__(self, model_cfg, num_class, num_class_s2, num_class_s3, dataset, dataset_s2, dataset_s3, source_one_name, source_1):
        super().__init__(model_cfg=model_cfg, num_class=num_class, num_class_s2=num_class_s2, num_class_s3=num_class_s3,
                         dataset=dataset, dataset_s2=dataset_s2, dataset_s3=dataset_s3, source_one_name=source_one_name, source_1=source_1)
        self.module_list = self.build_networks()
        self.source_one_name = source_one_name
        # Which head group (1/2/3) to use at inference time.
        self.source_1 = source_1

    def forward(self, batch_dict):
        batch_s1 = {}
        batch_s2 = {}
        batch_s3 = {}
        if self.training:
            len_of_module = len(self.module_list)
            for k, cur_module in enumerate(self.module_list):
                if k < len_of_module-9:
                    # Shared trunk: run on the merged three-source batch.
                    batch_dict = cur_module(batch_dict)
                if k == len_of_module-9 or k == len_of_module-8 or k == len_of_module-7:
                    if k == len_of_module-9:
                        # Split the Concat dataset batch into batch_1, batch_2, and batch_3.
                        # First peel off the 'waymo' frames, then split the rest by
                        # source_one_name.
                        split_tag_s1, split_tag_s2_pre = common_utils.split_batch_dict('waymo', batch_dict)
                        batch_s1, batch_s2_pre = common_utils.split_two_batch_dict_gpu(split_tag_s1, split_tag_s2_pre, batch_dict)
                        split_tag_s2, split_tag_s3 = common_utils.split_batch_dict(self.source_one_name, batch_s2_pre)
                        batch_s2, batch_s3 = common_utils.split_two_batch_dict_gpu(split_tag_s2, split_tag_s3, batch_s2_pre)
                    batch_s1 = cur_module(batch_s1)
                if k == len_of_module-6 or k == len_of_module-5 or k == len_of_module-4:
                    # Head group for source 2.
                    batch_s2 = cur_module(batch_s2)
                if k == len_of_module-3 or k == len_of_module-2 or k == len_of_module-1:
                    # Head group for source 3.
                    batch_s3 = cur_module(batch_s3)
        else:
            # Inference: single-source batch; only the selected head group runs.
            len_of_module = len(self.module_list)
            for k, cur_module in enumerate(self.module_list):
                if k < len_of_module-9:
                    batch_dict = cur_module(batch_dict)
                if k == len_of_module-9 or k == len_of_module-8 or k == len_of_module-7:
                    if self.source_1 == 1:
                        batch_dict = cur_module(batch_dict)
                    else:
                        continue
                if k == len_of_module-6 or k == len_of_module-5 or k == len_of_module-4:
                    if self.source_1 == 2:
                        batch_dict = cur_module(batch_dict)
                    else:
                        continue
                if k == len_of_module-3 or k == len_of_module-2 or k == len_of_module-1:
                    if self.source_1 == 3:
                        batch_dict = cur_module(batch_dict)
                    else:
                        continue
        if self.training:
            # Sum all three per-source losses; tb/disp dicts from source 1 only.
            loss_1, tb_dict_1, disp_dict_1 = self.get_training_loss_s1()
            loss_2, tb_dict_2, disp_dict_2 = self.get_training_loss_s2()
            loss_3, tb_dict_3, disp_dict_3 = self.get_training_loss_s3()
            ret_dict = {
                'loss': loss_1 + loss_2 + loss_3
            }
            return ret_dict, tb_dict_1, disp_dict_1
        else:
            pred_dicts, recall_dicts = self.post_processing(batch_dict)
            return pred_dicts, recall_dicts

    def get_training_loss_s1(self):
        """RPN + point + RCNN loss from the source-1 head group."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head_s1.get_loss()
        loss_point, tb_dict = self.point_head_s1.get_loss(tb_dict)
        loss_rcnn, tb_dict = self.roi_head_s1.get_loss(tb_dict)
        loss = loss_rpn + loss_point + loss_rcnn
        return loss, tb_dict, disp_dict

    def get_training_loss_s2(self):
        """RPN + point + RCNN loss from the source-2 head group."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head_s2.get_loss()
        loss_point, tb_dict = self.point_head_s2.get_loss(tb_dict)
        loss_rcnn, tb_dict = self.roi_head_s2.get_loss(tb_dict)
        loss = loss_rpn + loss_point + loss_rcnn
        return loss, tb_dict, disp_dict

    def get_training_loss_s3(self):
        """RPN + point + RCNN loss from the source-3 head group."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head_s3.get_loss()
        loss_point, tb_dict = self.point_head_s3.get_loss(tb_dict)
        loss_rcnn, tb_dict = self.roi_head_s3.get_loss(tb_dict)
        loss = loss_rpn + loss_point + loss_rcnn
        return loss, tb_dict, disp_dict
class ActivePVRCNN_DUAL(ActiveDetector3DTemplate):
    """PV-RCNN with a domain discriminator for active domain adaptation.

    ``forward`` dispatches on ``forward_args['mode']``:
      * 'train_discriminator' - run the trunk (first six modules) plus the
        discriminator; return the discriminator loss only.
      * 'train_detector'      - full forward; return detection losses.
      * 'active_evaluate'     - score frames for active-learning selection.
      * None (eval)           - standard inference.
    """

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()

    def forward(self, batch_dict, **forward_args):
        batch_dict['mode'] = forward_args.get('mode', None) if forward_args is not None else None
        if self.training and forward_args.get('mode', None) == 'train_discriminator':
            # Trunk only — heads are not needed for the discriminator loss.
            batch_dict = self.module_list[0](batch_dict)  # MeanVFE
            batch_dict = self.module_list[1](batch_dict)  # VoxelBackBone8x
            batch_dict = self.module_list[2](batch_dict)  # HeightCompression
            batch_dict = self.module_list[3](batch_dict)  # VoxelSetAbstraction
            batch_dict = self.module_list[4](batch_dict)
            batch_dict = self.module_list[5](batch_dict)
            batch_dict = self.discriminator(batch_dict)
            loss = self.discriminator.get_discriminator_loss(batch_dict, source=forward_args['source'])
            return loss
        for cur_module in self.module_list:
            batch_dict = cur_module(batch_dict)
        if self.training and forward_args.get('mode', None) == 'train_detector':
            loss, tb_dict, disp_dict = self.get_detector_loss()
        elif not self.training and forward_args.get('mode', None) == 'active_evaluate':
            # NOTE(review): post_processing elsewhere returns a tuple
            # (pred_dicts, recall_dicts); here its result is reused as
            # batch_dict — confirm the ActiveDetector3DTemplate override
            # returns a dict in this mode.
            batch_dict = self.post_processing(batch_dict)
            sample_score = self.get_evaluate_score(batch_dict, forward_args['domain'])
            return sample_score
        elif not self.training and forward_args.get('mode', None) is None:
            pred_dicts, recall_dicts = self.post_processing(batch_dict)
            return pred_dicts, recall_dicts
        # NOTE(review): if training with a mode other than the two handled
        # above, 'loss' is unbound here and this raises NameError — confirm
        # callers always pass a supported mode while training.
        ret_dict={
            'loss': loss
        }
        return ret_dict, tb_dict, disp_dict

    def get_detector_loss(self):
        """RPN + point + RCNN detection losses."""
        disp_dict= {}
        loss_rpn, tb_dict = self.dense_head.get_loss()
        loss_point, tb_dict = self.point_head.get_loss(tb_dict)
        loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)
        loss = loss_rpn + loss_point + loss_rcnn
        return loss, tb_dict, disp_dict

    def get_mul_classifier_loss(self, mode=None):
        """Auxiliary multi-classifier loss delegated to the dense head."""
        disp_dict = {}
        loss, tb_dict = self.dense_head.get_active_loss(mode)
        return loss, tb_dict, disp_dict

    def get_evaluate_score(self, batch_dict, domain):
        """Build one score record per frame for active-learning selection.

        NOTE(review): the ``domain`` parameter is currently unused in this
        body; ``domainness_evaluate`` is already moved to CPU above, so the
        extra ``.cpu()`` calls below are harmless no-ops.
        """
        batch_dict = self.discriminator.domainness_evaluate(batch_dict)
        batch_size = batch_dict['batch_size']
        frame_id = [str(id) for id in batch_dict['frame_id']]
        domainness_evaluate = batch_dict['domainness_evaluate'].cpu()
        reweight_roi = batch_dict['reweight_roi']
        sample_score = []
        for i in range(batch_size):
            frame_score = {
                'frame_id': frame_id[i],
                'domainness_evaluate': domainness_evaluate[i].cpu(),
                'roi_feature': reweight_roi[i],
                'total_score': domainness_evaluate[i].cpu(),
            }
            sample_score.append(frame_score)
        return sample_score

    def get_discriminator_result(self, batch_dict):
        """Return the discriminator's accuracy on the current batch."""
        acc = self.discriminator.get_accuracy(batch_dict)
        return acc
class PVRCNN_TQS(ActiveDetector3DTemplate):
    """PV-RCNN for TQS-style active learning: frames are scored by committee,
    uncertainty and domainness criteria.

    Bug fix: all string-valued mode checks previously used ``is 'literal'``,
    which compares object identity and only worked through CPython string
    interning (and emits SyntaxWarning on 3.8+); they now use ``==``.
    """

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()

    def forward(self, batch_dict, **forward_args):
        batch_dict['mode'] = forward_args.get('mode', None) if forward_args is not None else None
        if self.training and forward_args.get('mode', None) == 'train_discriminator':
            # Trunk only — heads are not needed for the discriminator loss.
            batch_dict = self.module_list[0](batch_dict)  # MeanVFE
            batch_dict = self.module_list[1](batch_dict)  # VoxelBackBone8x
            batch_dict = self.module_list[2](batch_dict)  # HeightCompression
            batch_dict = self.module_list[3](batch_dict)  # VoxelSetAbstraction
            batch_dict = self.module_list[4](batch_dict)
            batch_dict = self.module_list[5](batch_dict)
            batch_dict = self.point_head.get_point_score(batch_dict)
            batch_dict = self.discriminator(batch_dict)
            loss = self.discriminator.get_discriminator_loss(batch_dict, source=forward_args['source'])
            return loss
        for cur_module in self.module_list:
            batch_dict = cur_module(batch_dict)
        if self.training and forward_args.get('mode', None) == 'finetune':
            loss, tb_dict, disp_dict = self.get_finetune_loss()
        elif self.training and forward_args.get('mode', None) == 'train_detector':
            loss, tb_dict, disp_dict = self.get_detector_loss()
        elif self.training and forward_args.get('mode', None) == 'train_mul_cls':
            loss, tb_dict, disp_dict = self.get_mul_classifier_loss()
        elif not self.training and forward_args.get('mode', None) == 'active_evaluate':
            sample_score = self.get_evaluate_score(batch_dict)
            return sample_score
        elif not self.training and forward_args.get('mode', None) is None:
            pred_dicts, recall_dicts = self.post_processing(batch_dict)
            # discriminator_acc = self.get_discriminator_result(batch_dict, forward_args['source'])
            return pred_dicts, recall_dicts
        ret_dict={
            'loss': loss
        }
        return ret_dict, tb_dict, disp_dict

    def get_mul_cls_loss(self, mode='train_mul_cls'):
        """Multi-classifier (committee) loss delegated to the dense head."""
        disp_dict = {}
        loss, tb_dict = self.dense_head.get_active_loss(mode)
        return loss, tb_dict, disp_dict

    def get_detector_loss(self):
        """Detection losses with the dense head in 'train_detector' mode."""
        disp_dict= {}
        loss_rpn, tb_dict = self.dense_head.get_active_loss(mode='train_detector')
        loss_point, tb_dict = self.point_head.get_loss(tb_dict)
        loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)
        loss = loss_rpn + loss_point + loss_rcnn
        return loss, tb_dict, disp_dict

    def get_mul_classifier_loss(self, mode=None):
        """Committee-classifier loss; always uses the 'train_mul_cls' branch."""
        disp_dict = {}
        loss, tb_dict = self.dense_head.get_active_loss('train_mul_cls')
        return loss, tb_dict, disp_dict

    def get_evaluate_score(self, batch_dict):
        """Score each frame as committee + uncertainty + domainness (TQS total)."""
        batch_dict = self.dense_head.committee_evaluate(batch_dict)
        batch_dict = self.dense_head.uncertainty_evaluate(batch_dict)
        batch_dict = self.discriminator.domainness_evaluate(batch_dict)
        batch_size = batch_dict['batch_size']
        frame_id = batch_dict['frame_id']
        committee_evaluate = batch_dict['committee_evaluate']
        uncertainty_evaluate = batch_dict['uncertainty_evaluate']
        domainness_evaluate = batch_dict['domainness_evaluate'].cpu()
        sample_score = []
        for i in range(batch_size):
            frame_score = {
                'frame_id': frame_id[i],
                'committee_evaluate': committee_evaluate[i],
                'uncertainty_evaluate': uncertainty_evaluate[i],
                'domainness_evaluate': domainness_evaluate[i],
                'total_score': committee_evaluate[i] + uncertainty_evaluate[i] + domainness_evaluate[i]
            }
            sample_score.append(frame_score)
        return sample_score
class PVRCNN_CLUE(ActiveDetector3DTemplate):
    """PV-RCNN for CLUE-style active learning: frames are described by their
    ROI scores and features for clustering-based selection.

    Bug fix: all string-valued mode checks previously used ``is 'literal'``,
    which compares object identity and only worked through CPython string
    interning (and emits SyntaxWarning on 3.8+); they now use ``==``.
    """

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()

    def forward(self, batch_dict, **forward_args):
        batch_dict['mode'] = forward_args.get('mode', None) if forward_args is not None else None
        if self.training and forward_args.get('mode', None) == 'train_discriminator':
            # Trunk only — heads are not needed for the discriminator loss.
            batch_dict = self.module_list[0](batch_dict)  # MeanVFE
            batch_dict = self.module_list[1](batch_dict)  # VoxelBackBone8x
            batch_dict = self.module_list[2](batch_dict)  # HeightCompression
            batch_dict = self.module_list[3](batch_dict)  # VoxelSetAbstraction
            batch_dict = self.module_list[4](batch_dict)
            batch_dict = self.module_list[5](batch_dict)
            batch_dict = self.point_head.get_point_score(batch_dict)
            batch_dict = self.discriminator(batch_dict)
            loss = self.discriminator.get_discriminator_loss(batch_dict, source=forward_args['source'])
            return loss
        for cur_module in self.module_list:
            batch_dict = cur_module(batch_dict)
        if self.training and forward_args.get('mode', None) == 'finetune':
            loss, tb_dict, disp_dict = self.get_finetune_loss()
        elif self.training and forward_args.get('mode', None) == 'train_detector':
            loss, tb_dict, disp_dict = self.get_detector_loss()
        elif self.training and forward_args.get('mode', None) == 'train_mul_cls':
            loss, tb_dict, disp_dict = self.get_mul_cls_loss()
        elif not self.training and forward_args.get('mode', None) == 'active_evaluate':
            batch_dict = self.post_processing(batch_dict)
            sample_score = self.get_evaluate_score(batch_dict)
            return sample_score
        elif not self.training and forward_args.get('mode', None) is None:
            pred_dicts, recall_dicts = self.post_processing(batch_dict)
            # discriminator_acc = self.get_discriminator_result(batch_dict, forward_args['source'])
            return pred_dicts, recall_dicts
        ret_dict={
            'loss': loss
        }
        return ret_dict, tb_dict, disp_dict

    def get_mul_cls_loss(self, mode='train_mul_cls'):
        """Multi-classifier loss delegated to the dense head."""
        disp_dict = {}
        loss, tb_dict = self.dense_head.get_active_loss(mode)
        return loss, tb_dict, disp_dict

    def get_detector_loss(self):
        """RPN + point + RCNN detection losses."""
        disp_dict= {}
        loss_rpn, tb_dict = self.dense_head.get_loss()
        loss_point, tb_dict = self.point_head.get_loss(tb_dict)
        loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)
        loss = loss_rpn + loss_point + loss_rcnn
        return loss, tb_dict, disp_dict

    def get_mul_classifier_loss(self, mode=None):
        """Committee-classifier loss; always uses the 'train_mul_cls' branch."""
        disp_dict = {}
        loss, tb_dict = self.dense_head.get_active_loss('train_mul_cls')
        return loss, tb_dict, disp_dict

    def get_evaluate_score(self, batch_dict):
        """Collect per-frame ROI scores and features for CLUE clustering.

        (Removed a dead ``sample_score = {}`` that was immediately shadowed
        by the list below.)
        """
        batch_size = batch_dict['batch_size']
        frame_id = batch_dict['frame_id']
        roi_score = batch_dict['cls_preds']
        roi_feature = batch_dict['roi_feature']
        sample_score = []
        for i in range(batch_size):
            frame_score = {
                'frame_id': frame_id[i],
                'roi_score': roi_score[i],
                'roi_feature': roi_feature[i]
            }
            sample_score.append(frame_score)
        return sample_score
| 23,337
| 43.880769
| 138
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/detectors/__init__.py
|
from .detector3d_template import Detector3DTemplate
from .detector3d_template_ada import ActiveDetector3DTemplate
from .detector3d_template_multi_db import Detector3DTemplate_M_DB
from .PartA2_net import PartA2Net
from .point_rcnn import PointRCNN
from .pointpillar import PointPillar
from .pv_rcnn import PVRCNN
from .pv_rcnn import PVRCNN_M_DB
from .pv_rcnn import PVRCNN_M_DB_3
from .pv_rcnn import SemiPVRCNN
from .pv_rcnn import ActivePVRCNN_DUAL
from .pv_rcnn import PVRCNN_TQS
from .pv_rcnn import PVRCNN_CLUE
from .second_net import SECONDNet
from .second_net_iou import SECONDNetIoU
from .second_net_iou import ActiveSECONDNetIoU
from .caddn import CaDDN
from .voxel_rcnn import VoxelRCNN
from .voxel_rcnn import VoxelRCNN_M_DB
from .voxel_rcnn import VoxelRCNN_M_DB_3
from .voxel_rcnn import ActiveDualVoxelRCNN
from .voxel_rcnn import VoxelRCNN_CLUE
from .voxel_rcnn import VoxelRCNN_TQS
from .centerpoint import CenterPoint
from .centerpoint import CenterPoint_M_DB
from .centerpoint import SemiCenterPoint
from .pv_rcnn_plusplus import PVRCNNPlusPlus
from .pv_rcnn_plusplus import PVRCNNPlusPlus_M_DB
from .pv_rcnn_plusplus import SemiPVRCNNPlusPlus
from .IASSD import IASSD
from .semi_second import SemiSECOND, SemiSECONDIoU
from .unsupervised_model.pvrcnn_plus_backbone import PVRCNN_PLUS_BACKBONE
# Registry mapping config NAME strings to detector classes.
# Bug fix: the dict previously listed 'ActiveDualPVRCNN' twice; a duplicate
# dict key is silently overwritten by the later (identical) entry, so the
# second occurrence has been removed.
__all__ = {
    'Detector3DTemplate': Detector3DTemplate,
    'Detector3DTemplate_ADA': ActiveDetector3DTemplate,
    'Detector3DTemplate_M_DB': Detector3DTemplate_M_DB,
    'SECONDNet': SECONDNet,
    'PartA2Net': PartA2Net,
    'PVRCNN': PVRCNN,
    'PVRCNN_M_DB': PVRCNN_M_DB,
    'PVRCNN_M_DB_3': PVRCNN_M_DB_3,
    'SemiPVRCNN': SemiPVRCNN,
    'ActiveDualPVRCNN': ActivePVRCNN_DUAL,
    'PVRCNN_TQS': PVRCNN_TQS,
    'PVRCNN_CLUE': PVRCNN_CLUE,
    'PointPillar': PointPillar,
    'PointRCNN': PointRCNN,
    'SECONDNetIoU': SECONDNetIoU,
    'ActiveSECONDNetIoU': ActiveSECONDNetIoU,
    'CaDDN': CaDDN,
    'VoxelRCNN': VoxelRCNN,
    'VoxelRCNN_M_DB': VoxelRCNN_M_DB,
    'VoxelRCNN_M_DB_3': VoxelRCNN_M_DB_3,
    'ActiveDualVoxelRCNN': ActiveDualVoxelRCNN,
    'VoxelRCNN_CLUE': VoxelRCNN_CLUE,
    'VoxelRCNN_TQS': VoxelRCNN_TQS,
    'CenterPoint': CenterPoint,
    'CenterPoint_M_DB': CenterPoint_M_DB,
    'SemiCenterPoint': SemiCenterPoint,
    'PVRCNNPlusPlus': PVRCNNPlusPlus,
    'PVRCNNPlusPlus_M_DB': PVRCNNPlusPlus_M_DB,
    'SemiPVRCNNPlusPlus': SemiPVRCNNPlusPlus,
    'IASSD': IASSD,
    'SemiSECOND': SemiSECOND,
    'SemiSECONDIoU': SemiSECONDIoU,
    'PVRCNN_PLUS_BACKBONE': PVRCNN_PLUS_BACKBONE
}
def build_detector(model_cfg, num_class, dataset):
    """Instantiate a single-dataset detector by its config NAME."""
    detector_cls = __all__[model_cfg.NAME]
    return detector_cls(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
# def build_detector_multi_db_v2(model_cfg, num_class, dataset):
# model = __all__[model_cfg.NAME](
# model_cfg=model_cfg, num_class=num_class, dataset=dataset
# )
# return model
def build_detector_multi_db(model_cfg, num_class, num_class_s2, dataset, dataset_s2, source_one_name):
    """Instantiate a two-dataset (multi-db) detector by its config NAME."""
    detector_cls = __all__[model_cfg.NAME]
    return detector_cls(
        model_cfg=model_cfg,
        num_class=num_class,
        num_class_s2=num_class_s2,
        dataset=dataset,
        dataset_s2=dataset_s2,
        source_one_name=source_one_name,
    )
def build_detector_multi_db_3(model_cfg, num_class, num_class_s2, num_class_s3, dataset, dataset_s2, dataset_s3, source_one_name, source_1):
    """Instantiate a three-dataset (multi-db) detector by its config NAME."""
    detector_cls = __all__[model_cfg.NAME]
    return detector_cls(
        model_cfg=model_cfg,
        num_class=num_class,
        num_class_s2=num_class_s2,
        num_class_s3=num_class_s3,
        dataset=dataset,
        dataset_s2=dataset_s2,
        dataset_s3=dataset_s3,
        source_one_name=source_one_name,
        source_1=source_1,
    )
| 3,713
| 36.14
| 140
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/detectors/detector3d_template_multi_db_3.py
|
import os
import torch
import torch.nn as nn
from ...ops.iou3d_nms import iou3d_nms_utils
from ...utils.spconv_utils import find_all_spconv_keys
from .. import backbones_2d, backbones_3d, dense_heads, roi_heads, mdf_models
from ..backbones_2d import map_to_bev
from ..backbones_3d import pfe, vfe
from ..model_utils import model_nms_utils
class Detector3DTemplate_M_DB_3(nn.Module):
def __init__(self, model_cfg, num_class, num_class_s2, num_class_s3, dataset, dataset_s2, dataset_s3, source_one_name, source_1):
    """Base template for a detector trained jointly on three datasets.

    Args:
        model_cfg: full model config; per-source head groups use the
            _S1/_S2/_S3 suffixed keys consumed by the build_* methods.
        num_class, num_class_s2, num_class_s3: class count per source.
        dataset, dataset_s2, dataset_s3: the three dataset objects; shared
            voxelization metadata is taken from the first one in
            build_networks().
        source_one_name: tag used elsewhere to split merged batches.
        source_1: selector for which head group to use (see subclass forward).
    """
    super().__init__()
    self.model_cfg = model_cfg
    self.num_class = num_class
    self.num_class_s2 = num_class_s2
    self.num_class_s3 = num_class_s3
    self.dataset = dataset
    self.dataset_s2 = dataset_s2
    self.dataset_s3 = dataset_s3
    self.class_names = dataset.class_names
    self.class_names_s2 = dataset_s2.class_names
    self.class_names_s3 = dataset_s3.class_names
    self.source_one_name = source_one_name
    self.source_1 = source_1
    # Persistent (checkpointed) training-step counter.
    self.register_buffer('global_step', torch.LongTensor(1).zero_())
    # Build order; every entry must have a matching build_<name>() method.
    self.module_topology = [
        'point_t', 'vfe', 'backbone_3d', 'map_to_bev_module', 'dense_3d_moe', 'pfe',
        'backbone_2d', 'dense_2d_moe', 'dense_head_s1', 'point_head_s1', 'roi_head_s1',
        'dense_head_s2', 'point_head_s2', 'roi_head_s2', 'dense_head_s3', 'point_head_s3', 'roi_head_s3',
    ]
@property
def mode(self):
    """'TRAIN' while the module is in training mode, 'TEST' otherwise."""
    if self.training:
        return 'TRAIN'
    return 'TEST'
def update_global_step(self):
    """Advance the persistent global-step buffer by one."""
    self.global_step += 1
def build_networks(self):
    """Construct all sub-modules listed in ``self.module_topology`` in order.

    Each ``build_*`` helper appends its module (when configured) to
    ``model_info_dict['module_list']`` and may update the shared feature
    metadata consumed by later builders. Voxelization metadata comes from
    ``self.dataset`` (the first source).

    Returns:
        The ordered list of instantiated sub-modules.
    """
    model_info_dict = {
        'module_list': [],
        'num_rawpoint_features': self.dataset.point_feature_encoder.num_point_features,
        'num_point_features': self.dataset.point_feature_encoder.num_point_features,
        'grid_size': self.dataset.grid_size,
        'point_cloud_range': self.dataset.point_cloud_range,
        'voxel_size': self.dataset.voxel_size,
        'depth_downsample_factor': self.dataset.depth_downsample_factor
    }
    for module_name in self.module_topology:
        module, model_info_dict = getattr(self, 'build_%s' % module_name)(
            model_info_dict=model_info_dict
        )
        # Register even None placeholders so attribute access stays uniform.
        self.add_module(module_name, module)
    return model_info_dict['module_list']
def build_point_t(self, model_info_dict):
    """Instantiate the optional point-transform module (cfg key 'POINT_T')."""
    cfg = self.model_cfg.get('POINT_T', None)
    if cfg is None:
        return None, model_info_dict
    module = pfe.__all__[cfg.NAME](model_cfg=cfg)
    model_info_dict['module_list'].append(module)
    return module, model_info_dict
def build_vfe(self, model_info_dict):
    """Instantiate the voxel feature encoder (cfg key 'VFE'); updates
    ``num_point_features`` to the VFE's output dimension."""
    if self.model_cfg.get('VFE', None) is None:
        return None, model_info_dict
    vfe_module = vfe.__all__[self.model_cfg.VFE.NAME](
        model_cfg=self.model_cfg.VFE,
        num_point_features=model_info_dict['num_rawpoint_features'],
        point_cloud_range=model_info_dict['point_cloud_range'],
        voxel_size=model_info_dict['voxel_size'],
        grid_size=model_info_dict['grid_size'],
        depth_downsample_factor=model_info_dict['depth_downsample_factor']
    )
    model_info_dict['num_point_features'] = vfe_module.get_output_feature_dim()
    model_info_dict['module_list'].append(vfe_module)
    return vfe_module, model_info_dict
def build_backbone_3d(self, model_info_dict):
    """Instantiate the sparse 3D backbone (cfg key 'BACKBONE_3D'); records its
    output feature dimension and (when present) per-level channel dict."""
    if self.model_cfg.get('BACKBONE_3D', None) is None:
        return None, model_info_dict
    backbone_3d_module = backbones_3d.__all__[self.model_cfg.BACKBONE_3D.NAME](
        model_cfg=self.model_cfg.BACKBONE_3D,
        input_channels=model_info_dict['num_point_features'],
        grid_size=model_info_dict['grid_size'],
        voxel_size=model_info_dict['voxel_size'],
        point_cloud_range=model_info_dict['point_cloud_range']
    )
    model_info_dict['module_list'].append(backbone_3d_module)
    model_info_dict['num_point_features'] = backbone_3d_module.num_point_features
    # Not all backbones expose per-level channels; later ROI heads read this.
    model_info_dict['backbone_channels'] = backbone_3d_module.backbone_channels \
        if hasattr(backbone_3d_module, 'backbone_channels') else None
    return backbone_3d_module, model_info_dict
def build_map_to_bev_module(self, model_info_dict):
    """Instantiate the 3D-to-BEV projection module (cfg key 'MAP_TO_BEV')."""
    bev_cfg = self.model_cfg.get('MAP_TO_BEV', None)
    if bev_cfg is None:
        return None, model_info_dict
    module = map_to_bev.__all__[bev_cfg.NAME](
        model_cfg=bev_cfg,
        grid_size=model_info_dict['grid_size']
    )
    model_info_dict['module_list'].append(module)
    model_info_dict['num_bev_features'] = module.num_bev_features
    return module, model_info_dict
def build_dense_3d_moe(self, model_info_dict):
    """Instantiate the optional 3D mixture-of-experts module (cfg key 'DENSE_3D_MoE')."""
    moe_cfg = self.model_cfg.get('DENSE_3D_MoE', None)
    if moe_cfg is None:
        return None, model_info_dict
    module = mdf_models.__all__[moe_cfg.NAME](model_cfg=moe_cfg)
    model_info_dict['module_list'].append(module)
    return module, model_info_dict
def build_backbone_2d(self, model_info_dict):
    """Instantiate the BEV 2D backbone (cfg key 'BACKBONE_2D')."""
    bb_cfg = self.model_cfg.get('BACKBONE_2D', None)
    if bb_cfg is None:
        return None, model_info_dict
    module = backbones_2d.__all__[bb_cfg.NAME](
        model_cfg=bb_cfg,
        input_channels=model_info_dict['num_bev_features']
    )
    model_info_dict['module_list'].append(module)
    model_info_dict['num_bev_features'] = module.num_bev_features
    return module, model_info_dict
def build_pfe(self, model_info_dict):
    """Instantiate the point feature encoder (cfg key 'PFE', e.g. VoxelSetAbstraction);
    records the fused and pre-fusion point feature dimensions for the heads."""
    if self.model_cfg.get('PFE', None) is None:
        return None, model_info_dict
    pfe_module = pfe.__all__[self.model_cfg.PFE.NAME](
        model_cfg=self.model_cfg.PFE,
        voxel_size=model_info_dict['voxel_size'],
        point_cloud_range=model_info_dict['point_cloud_range'],
        num_bev_features=model_info_dict['num_bev_features'],
        num_rawpoint_features=model_info_dict['num_rawpoint_features']
    )
    model_info_dict['module_list'].append(pfe_module)
    model_info_dict['num_point_features'] = pfe_module.num_point_features
    model_info_dict['num_point_features_before_fusion'] = pfe_module.num_point_features_before_fusion
    return pfe_module, model_info_dict
def build_dense_2d_moe(self, model_info_dict):
    """Instantiate the optional 2D mixture-of-experts module (cfg key 'DENSE_2D_MoE')."""
    moe_cfg = self.model_cfg.get('DENSE_2D_MoE', None)
    if moe_cfg is None:
        return None, model_info_dict
    module = mdf_models.__all__[moe_cfg.NAME](model_cfg=moe_cfg)
    model_info_dict['module_list'].append(module)
    return module, model_info_dict
def build_dense_head_s1(self, model_info_dict):
    """Instantiate the dense (RPN) head for source dataset 1 (cfg 'DENSE_HEAD_S1').

    ``predict_boxes_when_training`` is keyed off the presence of the matching
    ROI head config, since a second stage needs first-stage boxes as ROIs.
    """
    if self.model_cfg.get('DENSE_HEAD_S1', None) is None:
        return None, model_info_dict
    dense_head_module_s1 = dense_heads.__all__[self.model_cfg.DENSE_HEAD_S1.NAME](
        model_cfg=self.model_cfg.DENSE_HEAD_S1,
        input_channels=model_info_dict['num_bev_features'],
        num_class=self.num_class if not self.model_cfg.DENSE_HEAD_S1.CLASS_AGNOSTIC else 1,
        class_names=self.class_names,
        grid_size=model_info_dict['grid_size'],
        point_cloud_range=model_info_dict['point_cloud_range'],
        predict_boxes_when_training=self.model_cfg.get('ROI_HEAD_S1', False),
        voxel_size=model_info_dict.get('voxel_size', False)
    )
    model_info_dict['module_list'].append(dense_head_module_s1)
    return dense_head_module_s1, model_info_dict
def build_dense_head_s2(self, model_info_dict):
    """Instantiate the dense (RPN) head for source dataset 2 (cfg 'DENSE_HEAD_S2')."""
    head_cfg = self.model_cfg.get('DENSE_HEAD_S2', None)
    if head_cfg is None:
        return None, model_info_dict
    module = dense_heads.__all__[head_cfg.NAME](
        model_cfg=head_cfg,
        input_channels=model_info_dict['num_bev_features'],
        num_class=1 if head_cfg.CLASS_AGNOSTIC else self.num_class_s2,
        class_names=self.class_names_s2,
        grid_size=model_info_dict['grid_size'],
        point_cloud_range=model_info_dict['point_cloud_range'],
        # First-stage boxes are only needed when a second stage follows.
        predict_boxes_when_training=self.model_cfg.get('ROI_HEAD_S2', False),
        voxel_size=model_info_dict.get('voxel_size', False)
    )
    model_info_dict['module_list'].append(module)
    return module, model_info_dict
def build_dense_head_s3(self, model_info_dict):
    """Instantiate the dense (RPN) head for source dataset 3 (cfg 'DENSE_HEAD_S3')."""
    head_cfg = self.model_cfg.get('DENSE_HEAD_S3', None)
    if head_cfg is None:
        return None, model_info_dict
    module = dense_heads.__all__[head_cfg.NAME](
        model_cfg=head_cfg,
        input_channels=model_info_dict['num_bev_features'],
        num_class=1 if head_cfg.CLASS_AGNOSTIC else self.num_class_s3,
        class_names=self.class_names_s3,
        grid_size=model_info_dict['grid_size'],
        point_cloud_range=model_info_dict['point_cloud_range'],
        # First-stage boxes are only needed when a second stage follows.
        predict_boxes_when_training=self.model_cfg.get('ROI_HEAD_S3', False),
        voxel_size=model_info_dict.get('voxel_size', False)
    )
    model_info_dict['module_list'].append(module)
    return module, model_info_dict
def build_point_head_s1(self, model_info_dict):
    """Instantiate the point head for source dataset 1 (cfg 'POINT_HEAD_S1').

    Input width is either the fused or pre-fusion point feature dimension,
    selected by USE_POINT_FEATURES_BEFORE_FUSION.
    """
    if self.model_cfg.get('POINT_HEAD_S1', None) is None:
        return None, model_info_dict
    if self.model_cfg.POINT_HEAD_S1.get('USE_POINT_FEATURES_BEFORE_FUSION', False):
        num_point_features = model_info_dict['num_point_features_before_fusion']
    else:
        num_point_features = model_info_dict['num_point_features']
    point_head_module_s1 = dense_heads.__all__[self.model_cfg.POINT_HEAD_S1.NAME](
        model_cfg=self.model_cfg.POINT_HEAD_S1,
        input_channels=num_point_features,
        num_class=self.num_class if not self.model_cfg.POINT_HEAD_S1.CLASS_AGNOSTIC else 1,
        predict_boxes_when_training=self.model_cfg.get('ROI_HEAD_S1', False)
    )
    model_info_dict['module_list'].append(point_head_module_s1)
    return point_head_module_s1, model_info_dict
def build_point_head_s2(self, model_info_dict):
    """Instantiate the point head for source dataset 2 (cfg 'POINT_HEAD_S2')."""
    head_cfg = self.model_cfg.get('POINT_HEAD_S2', None)
    if head_cfg is None:
        return None, model_info_dict
    # Pick fused vs. pre-fusion point features as the input width.
    if head_cfg.get('USE_POINT_FEATURES_BEFORE_FUSION', False):
        in_channels = model_info_dict['num_point_features_before_fusion']
    else:
        in_channels = model_info_dict['num_point_features']
    module = dense_heads.__all__[head_cfg.NAME](
        model_cfg=head_cfg,
        input_channels=in_channels,
        num_class=1 if head_cfg.CLASS_AGNOSTIC else self.num_class_s2,
        predict_boxes_when_training=self.model_cfg.get('ROI_HEAD_S2', False)
    )
    model_info_dict['module_list'].append(module)
    return module, model_info_dict
def build_point_head_s3(self, model_info_dict):
if self.model_cfg.get('POINT_HEAD_S3', None) is None:
return None, model_info_dict
if self.model_cfg.POINT_HEAD_S3.get('USE_POINT_FEATURES_BEFORE_FUSION', False):
num_point_features = model_info_dict['num_point_features_before_fusion']
else:
num_point_features = model_info_dict['num_point_features']
point_head_module_s3 = dense_heads.__all__[self.model_cfg.POINT_HEAD_S3.NAME](
model_cfg=self.model_cfg.POINT_HEAD_S3,
input_channels=num_point_features,
num_class=self.num_class_s3 if not self.model_cfg.POINT_HEAD_S3.CLASS_AGNOSTIC else 1,
predict_boxes_when_training=self.model_cfg.get('ROI_HEAD_S3', False)
)
model_info_dict['module_list'].append(point_head_module_s3)
return point_head_module_s3, model_info_dict
def build_roi_head_s1(self, model_info_dict):
if self.model_cfg.get('ROI_HEAD_S1', None) is None:
return None, model_info_dict
point_head_module_s1 = roi_heads.__all__[self.model_cfg.ROI_HEAD_S1.NAME](
model_cfg=self.model_cfg.ROI_HEAD_S1,
input_channels=model_info_dict['num_point_features'],
backbone_channels=model_info_dict['backbone_channels'],
point_cloud_range=model_info_dict['point_cloud_range'],
voxel_size=model_info_dict['voxel_size'],
num_class=self.num_class if not self.model_cfg.ROI_HEAD_S1.CLASS_AGNOSTIC else 1,
)
model_info_dict['module_list'].append(point_head_module_s1)
return point_head_module_s1, model_info_dict
def build_roi_head_s2(self, model_info_dict):
if self.model_cfg.get('ROI_HEAD_S2', None) is None:
return None, model_info_dict
point_head_module_s2 = roi_heads.__all__[self.model_cfg.ROI_HEAD_S2.NAME](
model_cfg=self.model_cfg.ROI_HEAD_S2,
input_channels=model_info_dict['num_point_features'],
backbone_channels=model_info_dict['backbone_channels'],
point_cloud_range=model_info_dict['point_cloud_range'],
voxel_size=model_info_dict['voxel_size'],
num_class=self.num_class_s2 if not self.model_cfg.ROI_HEAD_S2.CLASS_AGNOSTIC else 1,
)
model_info_dict['module_list'].append(point_head_module_s2)
return point_head_module_s2, model_info_dict
def build_roi_head_s3(self, model_info_dict):
if self.model_cfg.get('ROI_HEAD_S3', None) is None:
return None, model_info_dict
point_head_module_s3 = roi_heads.__all__[self.model_cfg.ROI_HEAD_S3.NAME](
model_cfg=self.model_cfg.ROI_HEAD_S3,
input_channels=model_info_dict['num_point_features'],
backbone_channels=model_info_dict['backbone_channels'],
point_cloud_range=model_info_dict['point_cloud_range'],
voxel_size=model_info_dict['voxel_size'],
num_class=self.num_class_s3 if not self.model_cfg.ROI_HEAD_S3.CLASS_AGNOSTIC else 1,
)
model_info_dict['module_list'].append(point_head_module_s3)
return point_head_module_s3, model_info_dict
def forward(self, **kwargs):
raise NotImplementedError
    # Post-processing shared by the multi-head (multi-database) detectors:
    # handles both single-tensor and list-of-tensors (multi-head) cls preds.
    def post_processing(self, batch_dict):
        """
        Apply (multi-class or class-agnostic) NMS to the raw predictions and
        build the final per-sample detection dicts plus recall statistics.

        Args:
            batch_dict:
                batch_size:
                batch_cls_preds: (B, num_boxes, num_classes | 1) or (N1+N2+..., num_classes | 1)
                                or [(B, num_boxes, num_class1), (B, num_boxes, num_class2) ...]
                multihead_label_mapping: [(num_class1), (num_class2), ...]
                batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C)
                cls_preds_normalized: indicate whether batch_cls_preds is normalized
                batch_index: optional (N1+N2+...)
                has_class_labels: True/False
                roi_labels: (B, num_rois)  1 .. num_classes
                batch_pred_labels: (B, num_boxes, 1)
        Returns:
            pred_dicts: list (len batch_size) of dicts with
                'pred_boxes'/'pred_scores'/'pred_labels'
            recall_dict: accumulated recall counters (see generate_recall_record)
        """
        post_process_cfg = self.model_cfg.POST_PROCESSING
        batch_size = batch_dict['batch_size']
        recall_dict = {}
        pred_dicts = []
        for index in range(batch_size):
            # Flattened predictions carry an explicit per-box batch index;
            # otherwise preds are batched along dim 0 and we index directly.
            if batch_dict.get('batch_index', None) is not None:
                assert batch_dict['batch_box_preds'].shape.__len__() == 2
                batch_mask = (batch_dict['batch_index'] == index)
            else:
                assert batch_dict['batch_box_preds'].shape.__len__() == 3
                batch_mask = index
            box_preds = batch_dict['batch_box_preds'][batch_mask]
            src_box_preds = box_preds
            if not isinstance(batch_dict['batch_cls_preds'], list):
                cls_preds = batch_dict['batch_cls_preds'][batch_mask]
                src_cls_preds = cls_preds
                assert cls_preds.shape[1] in [1, self.num_class]
                if not batch_dict['cls_preds_normalized']:
                    cls_preds = torch.sigmoid(cls_preds)
            else:
                # Multi-head case: one cls tensor per head.
                cls_preds = [x[batch_mask] for x in batch_dict['batch_cls_preds']]
                src_cls_preds = cls_preds
                if not batch_dict['cls_preds_normalized']:
                    cls_preds = [torch.sigmoid(x) for x in cls_preds]
            if post_process_cfg.NMS_CONFIG.MULTI_CLASSES_NMS:
                if not isinstance(cls_preds, list):
                    cls_preds = [cls_preds]
                    multihead_label_mapping = [torch.arange(1, self.num_class, device=cls_preds[0].device)]
                else:
                    multihead_label_mapping = batch_dict['multihead_label_mapping']
                cur_start_idx = 0
                pred_scores, pred_labels, pred_boxes = [], [], []
                # Run per-head NMS; each head owns a contiguous slice of box_preds.
                for cur_cls_preds, cur_label_mapping in zip(cls_preds, multihead_label_mapping):
                    assert cur_cls_preds.shape[1] == len(cur_label_mapping)
                    cur_box_preds = box_preds[cur_start_idx: cur_start_idx + cur_cls_preds.shape[0]]
                    cur_pred_scores, cur_pred_labels, cur_pred_boxes = model_nms_utils.multi_classes_nms(
                        cls_scores=cur_cls_preds, box_preds=cur_box_preds,
                        nms_config=post_process_cfg.NMS_CONFIG,
                        score_thresh=post_process_cfg.SCORE_THRESH
                    )
                    # Map head-local class indices back to global labels.
                    cur_pred_labels = cur_label_mapping[cur_pred_labels]
                    pred_scores.append(cur_pred_scores)
                    pred_labels.append(cur_pred_labels)
                    pred_boxes.append(cur_pred_boxes)
                    cur_start_idx += cur_cls_preds.shape[0]
                final_scores = torch.cat(pred_scores, dim=0)
                final_labels = torch.cat(pred_labels, dim=0)
                final_boxes = torch.cat(pred_boxes, dim=0)
            else:
                cls_preds, label_preds = torch.max(cls_preds, dim=-1)
                if batch_dict.get('has_class_labels', False):
                    # Two-stage pipeline: labels come from the RoI stage.
                    label_key = 'roi_labels' if 'roi_labels' in batch_dict else 'batch_pred_labels'
                    label_preds = batch_dict[label_key][index]
                else:
                    # argmax is 0-based; detection labels are 1-based.
                    label_preds = label_preds + 1
                selected, selected_scores = model_nms_utils.class_agnostic_nms(
                    box_scores=cls_preds, box_preds=box_preds,
                    nms_config=post_process_cfg.NMS_CONFIG,
                    score_thresh=post_process_cfg.SCORE_THRESH
                )
                if post_process_cfg.OUTPUT_RAW_SCORE:
                    # Report pre-sigmoid scores for the selected boxes.
                    max_cls_preds, _ = torch.max(src_cls_preds, dim=-1)
                    selected_scores = max_cls_preds[selected]
                final_scores = selected_scores
                final_labels = label_preds[selected]
                final_boxes = box_preds[selected]
            # When RoIs exist, recall is measured on the pre-NMS boxes.
            recall_dict = self.generate_recall_record(
                box_preds=final_boxes if 'rois' not in batch_dict else src_box_preds,
                recall_dict=recall_dict, batch_index=index, data_dict=batch_dict,
                thresh_list=post_process_cfg.RECALL_THRESH_LIST
            )
            record_dict = {
                'pred_boxes': final_boxes,
                'pred_scores': final_scores,
                'pred_labels': final_labels
            }
            pred_dicts.append(record_dict)
        return pred_dicts, recall_dict
@staticmethod
def generate_recall_record(box_preds, recall_dict, batch_index, data_dict=None, thresh_list=None):
if 'gt_boxes' not in data_dict:
return recall_dict
rois = data_dict['rois'][batch_index] if 'rois' in data_dict else None
gt_boxes = data_dict['gt_boxes'][batch_index]
if recall_dict.__len__() == 0:
recall_dict = {'gt': 0}
for cur_thresh in thresh_list:
recall_dict['roi_%s' % (str(cur_thresh))] = 0
recall_dict['rcnn_%s' % (str(cur_thresh))] = 0
cur_gt = gt_boxes
k = cur_gt.__len__() - 1
while k >= 0 and cur_gt[k].sum() == 0:
k -= 1
cur_gt = cur_gt[:k + 1]
if cur_gt.shape[0] > 0:
if box_preds.shape[0] > 0:
iou3d_rcnn = iou3d_nms_utils.boxes_iou3d_gpu(box_preds[:, 0:7], cur_gt[:, 0:7])
else:
iou3d_rcnn = torch.zeros((0, cur_gt.shape[0]))
if rois is not None:
iou3d_roi = iou3d_nms_utils.boxes_iou3d_gpu(rois[:, 0:7], cur_gt[:, 0:7])
for cur_thresh in thresh_list:
if iou3d_rcnn.shape[0] == 0:
recall_dict['rcnn_%s' % str(cur_thresh)] += 0
else:
rcnn_recalled = (iou3d_rcnn.max(dim=0)[0] > cur_thresh).sum().item()
recall_dict['rcnn_%s' % str(cur_thresh)] += rcnn_recalled
if rois is not None:
roi_recalled = (iou3d_roi.max(dim=0)[0] > cur_thresh).sum().item()
recall_dict['roi_%s' % str(cur_thresh)] += roi_recalled
recall_dict['gt'] += cur_gt.shape[0]
else:
gt_iou = box_preds.new_zeros(box_preds.shape[0])
return recall_dict
    def _load_state_dict(self, model_state_disk, *, strict=True):
        """Load a checkpoint state dict into this model, adapting spconv weights.

        Checkpoint tensors whose shapes mismatch the model are first run
        through spconv 1.x -> 2.x layout adaptation; keys that still mismatch
        are skipped. With strict=True only the matched subset is passed to
        load_state_dict (presumably relying on all keys matching — confirm);
        with strict=False unmatched keys keep their current in-model values.

        Args:
            model_state_disk: state dict loaded from a checkpoint file
            strict: whether to hand load_state_dict only the matched subset
        Returns:
            (state_dict, update_model_state): the model's local state dict
            (snapshot taken before loading) and the subset of checkpoint
            tensors that were actually applied.
        """
        state_dict = self.state_dict()  # local cache of state_dict
        spconv_keys = find_all_spconv_keys(self)
        update_model_state = {}
        for key, val in model_state_disk.items():
            if key in spconv_keys and key in state_dict and state_dict[key].shape != val.shape:
                # with different spconv versions, we need to adapt weight shapes for spconv blocks
                # adapt spconv weights from version 1.x to version 2.x if you used weights from spconv 1.x
                val_native = val.transpose(-1, -2)  # (k1, k2, k3, c_in, c_out) to (k1, k2, k3, c_out, c_in)
                if val_native.shape == state_dict[key].shape:
                    val = val_native.contiguous()
                else:
                    assert val.shape.__len__() == 5, 'currently only spconv 3D is supported'
                    # fall back to the implicit-gemm layout used by newer spconv
                    val_implicit = val.permute(4, 0, 1, 2, 3)  # (k1, k2, k3, c_in, c_out) to (c_out, k1, k2, k3, c_in)
                    if val_implicit.shape == state_dict[key].shape:
                        val = val_implicit.contiguous()
            # only accept tensors whose (possibly adapted) shape matches exactly
            if key in state_dict and state_dict[key].shape == val.shape:
                update_model_state[key] = val
                # logger.info('Update weight %s: %s' % (key, str(val.shape)))
        if strict:
            self.load_state_dict(update_model_state)
        else:
            state_dict.update(update_model_state)
            self.load_state_dict(state_dict)
        return state_dict, update_model_state
def frozen_model(self, model):
for p in model.vfe.parameters():
p.requires_grad = False
for p in model.backbone_3d.parameters():
p.requires_grad = False
for p in model.map_to_bev_module.parameters():
p.requires_grad = False
for p in model.backbone_2d.parameters():
p.requires_grad = False
return model
def load_params_from_file(self, filename, logger, to_cpu=False):
if not os.path.isfile(filename):
raise FileNotFoundError
logger.info('==> Loading parameters from checkpoint %s to %s' % (filename, 'CPU' if to_cpu else 'GPU'))
loc_type = torch.device('cpu') if to_cpu else None
checkpoint = torch.load(filename, map_location=loc_type)
model_state_disk = checkpoint['model_state']
version = checkpoint.get("version", None)
if version is not None:
logger.info('==> Checkpoint trained from version: %s' % version)
state_dict, update_model_state = self._load_state_dict(model_state_disk, strict=False)
for key in state_dict:
if key not in update_model_state:
logger.info('Not updated weight %s: %s' % (key, str(state_dict[key].shape)))
logger.info('==> Done (loaded %d/%d)' % (len(update_model_state), len(state_dict)))
def load_params_with_optimizer(self, filename, to_cpu=False, optimizer=None, logger=None):
if not os.path.isfile(filename):
raise FileNotFoundError
logger.info('==> Loading parameters from checkpoint %s to %s' % (filename, 'CPU' if to_cpu else 'GPU'))
loc_type = torch.device('cpu') if to_cpu else None
checkpoint = torch.load(filename, map_location=loc_type)
epoch = checkpoint.get('epoch', -1)
it = checkpoint.get('it', 0.0)
self._load_state_dict(checkpoint['model_state'], strict=True)
if optimizer is not None:
if 'optimizer_state' in checkpoint and checkpoint['optimizer_state'] is not None:
logger.info('==> Loading optimizer parameters from checkpoint %s to %s'
% (filename, 'CPU' if to_cpu else 'GPU'))
optimizer.load_state_dict(checkpoint['optimizer_state'])
else:
assert filename[-4] == '.', filename
src_file, ext = filename[:-4], filename[-3:]
optimizer_filename = '%s_optim.%s' % (src_file, ext)
if os.path.exists(optimizer_filename):
optimizer_ckpt = torch.load(optimizer_filename, map_location=loc_type)
optimizer.load_state_dict(optimizer_ckpt['optimizer_state'])
if 'version' in checkpoint:
print('==> Checkpoint trained from version: %s' % checkpoint['version'])
logger.info('==> Done')
return it, epoch
| 26,507
| 45.916814
| 133
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/detectors/second_net_iou.py
|
import torch
from .detector3d_template import Detector3DTemplate
from .detector3d_template_ada import ActiveDetector3DTemplate
from ..model_utils.model_nms_utils import class_agnostic_nms, class_agnostic_nms_with_roi
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
import torch.nn.functional as F
class SECONDNetIoU(Detector3DTemplate):
    """SECOND detector with an IoU-regression RoI head.

    The RoI head estimates an IoU per box which is used at test time as the
    NMS confidence — directly, mixed with the RPN classification score, or
    chosen per class / per point count (NMS_CONFIG.SCORE_TYPE).
    """

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()

    def forward(self, batch_dict):
        """Run all modules; return losses (training) or final boxes (eval)."""
        batch_dict['dataset_cfg'] = self.dataset.dataset_cfg
        for cur_module in self.module_list:
            batch_dict = cur_module(batch_dict)

        if self.training:
            loss, tb_dict, disp_dict = self.get_training_loss()
            ret_dict = {
                'loss': loss
            }
            return ret_dict, tb_dict, disp_dict
        else:
            pred_dicts, recall_dicts = self.post_processing(batch_dict)
            return pred_dicts, recall_dicts

    def get_training_loss(self):
        """Sum RPN and RCNN losses; tb_dict collects per-term scalars."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head.get_loss()
        loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)
        loss = loss_rpn + loss_rcnn
        return loss, tb_dict, disp_dict

    @staticmethod
    def cal_scores_by_npoints(cls_scores, iou_scores, num_points_in_gt, cls_thresh=10, iou_thresh=100):
        """
        Blend classification and IoU scores per box based on point count:
        sparse boxes (<= cls_thresh points) trust the cls score, dense boxes
        (>= iou_thresh) trust the IoU score, and the region in between is
        linearly interpolated.

        Args:
            cls_scores: (N)
            iou_scores: (N)
            num_points_in_gt: (N, 7+c)
            cls_thresh: scalar
            iou_thresh: scalar
        """
        assert iou_thresh >= cls_thresh
        alpha = torch.zeros(cls_scores.shape, dtype=torch.float32).cuda()
        alpha[num_points_in_gt <= cls_thresh] = 0
        alpha[num_points_in_gt >= iou_thresh] = 1

        mask = ((num_points_in_gt > cls_thresh) & (num_points_in_gt < iou_thresh))
        # BUGFIX: the ramp's lower bound was hard-coded as 10; use cls_thresh
        # so non-default thresholds still map the transition onto [0, 1].
        # (Identical behavior for the default cls_thresh=10.)
        alpha[mask] = (num_points_in_gt[mask] - cls_thresh) / (iou_thresh - cls_thresh)

        scores = (1 - alpha) * cls_scores + alpha * iou_scores
        return scores

    def set_nms_score_by_class(self, iou_preds, cls_preds, label_preds, score_by_class):
        """Pick 'iou' or 'cls' as the NMS score per class (SCORE_BY_CLASS cfg).

        NOTE(review): iterates over the number of *distinct* labels and maps
        i -> class_names[i]; presumably assumes labels 1..K are all present —
        confirm against callers.
        """
        n_classes = torch.unique(label_preds).shape[0]
        nms_scores = torch.zeros(iou_preds.shape, dtype=torch.float32).cuda()
        for i in range(n_classes):
            mask = label_preds == (i + 1)
            class_name = self.class_names[i]
            score_type = score_by_class[class_name]
            if score_type == 'iou':
                nms_scores[mask] = iou_preds[mask]
            elif score_type == 'cls':
                nms_scores[mask] = cls_preds[mask]
            else:
                raise NotImplementedError
        return nms_scores

    def post_processing(self, batch_dict):
        """
        Class-agnostic NMS over RoI boxes, scored according to
        NMS_CONFIG.SCORE_TYPE (iou / cls / weighted / per-class / by npoints).

        Args:
            batch_dict:
                batch_size:
                batch_cls_preds: (B, num_boxes, num_classes | 1) or (N1+N2+..., num_classes | 1)
                batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C)
                cls_preds_normalized: indicate whether batch_cls_preds is normalized
                batch_index: optional (N1+N2+...)
                roi_labels: (B, num_rois)  1 .. num_classes
        Returns:
            pred_dicts, recall_dict
        """
        post_process_cfg = self.model_cfg.POST_PROCESSING
        batch_size = batch_dict['batch_size']
        recall_dict = {}
        pred_dicts = []
        for index in range(batch_size):
            if batch_dict.get('batch_index', None) is not None:
                assert batch_dict['batch_cls_preds'].shape.__len__() == 2
                batch_mask = (batch_dict['batch_index'] == index)
            else:
                assert batch_dict['batch_cls_preds'].shape.__len__() == 3
                batch_mask = index
            box_preds = batch_dict['batch_box_preds'][batch_mask]
            # Here 'batch_cls_preds' holds the RoI head's IoU estimates;
            # the RPN classification scores come in as 'roi_scores'.
            iou_preds = batch_dict['batch_cls_preds'][batch_mask]
            cls_preds = batch_dict['roi_scores'][batch_mask]

            src_iou_preds = iou_preds
            src_box_preds = box_preds
            src_cls_preds = cls_preds
            assert iou_preds.shape[1] in [1, self.num_class]

            if not batch_dict['cls_preds_normalized']:
                iou_preds = torch.sigmoid(iou_preds)
                cls_preds = torch.sigmoid(cls_preds)

            if post_process_cfg.NMS_CONFIG.MULTI_CLASSES_NMS:
                raise NotImplementedError
            else:
                iou_preds, label_preds = torch.max(iou_preds, dim=-1)
                label_preds = batch_dict['roi_labels'][index] if batch_dict.get('has_class_labels', False) else label_preds + 1

                if post_process_cfg.NMS_CONFIG.get('SCORE_BY_CLASS', None) and \
                        post_process_cfg.NMS_CONFIG.SCORE_TYPE == 'score_by_class':
                    nms_scores = self.set_nms_score_by_class(
                        iou_preds, cls_preds, label_preds, post_process_cfg.NMS_CONFIG.SCORE_BY_CLASS
                    )
                elif post_process_cfg.NMS_CONFIG.get('SCORE_TYPE', None) == 'iou' or \
                        post_process_cfg.NMS_CONFIG.get('SCORE_TYPE', None) is None:
                    nms_scores = iou_preds
                elif post_process_cfg.NMS_CONFIG.SCORE_TYPE == 'cls':
                    nms_scores = cls_preds
                elif post_process_cfg.NMS_CONFIG.SCORE_TYPE == 'weighted_iou_cls':
                    nms_scores = post_process_cfg.NMS_CONFIG.SCORE_WEIGHTS.iou * iou_preds + \
                                 post_process_cfg.NMS_CONFIG.SCORE_WEIGHTS.cls * cls_preds
                elif post_process_cfg.NMS_CONFIG.SCORE_TYPE == 'num_pts_iou_cls':
                    # Count points inside each box to mix cls/IoU per box.
                    point_mask = (batch_dict['points'][:, 0] == batch_mask)
                    batch_points = batch_dict['points'][point_mask][:, 1:4]
                    num_pts_in_gt = roiaware_pool3d_utils.points_in_boxes_cpu(
                        batch_points.cpu(), box_preds[:, 0:7].cpu()
                    ).sum(dim=1).float().cuda()

                    score_thresh_cfg = post_process_cfg.NMS_CONFIG.SCORE_THRESH
                    nms_scores = self.cal_scores_by_npoints(
                        cls_preds, iou_preds, num_pts_in_gt,
                        score_thresh_cfg.cls, score_thresh_cfg.iou
                    )
                else:
                    raise NotImplementedError

                selected, selected_scores = class_agnostic_nms(
                    box_scores=nms_scores, box_preds=box_preds,
                    nms_config=post_process_cfg.NMS_CONFIG,
                    score_thresh=post_process_cfg.SCORE_THRESH
                )

                if post_process_cfg.OUTPUT_RAW_SCORE:
                    raise NotImplementedError

                final_scores = selected_scores
                final_labels = label_preds[selected]
                final_boxes = box_preds[selected]

            # When RoIs exist, recall is measured on the pre-NMS boxes.
            recall_dict = self.generate_recall_record(
                box_preds=final_boxes if 'rois' not in batch_dict else src_box_preds,
                recall_dict=recall_dict, batch_index=index, data_dict=batch_dict,
                thresh_list=post_process_cfg.RECALL_THRESH_LIST
            )

            record_dict = {
                'pred_boxes': final_boxes,
                'pred_scores': final_scores,
                'pred_labels': final_labels,
                'pred_cls_scores': cls_preds[selected],
                'pred_iou_scores': iou_preds[selected]
            }
            pred_dicts.append(record_dict)

        return pred_dicts, recall_dict
class ActiveSECONDNetIoU(ActiveDetector3DTemplate):
    """SECOND-IoU detector for active domain adaptation.

    forward() is multiplexed by forward_args['mode']:
      * 'train_discriminator' — returns the domain-discriminator loss
      * 'train_detector'      — returns the detection loss dicts
      * 'active_evaluate'     — returns per-frame domainness scores for
                                active-learning sample selection
      * None (eval)           — standard inference (boxes + recall)
    """

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()

    def forward(self, batch_dict, **forward_args):
        batch_dict['dataset_cfg'] = self.dataset.dataset_cfg
        batch_dict['mode'] = forward_args.get('mode', None) if forward_args is not None else None
        for cur_module in self.module_list:
            batch_dict = cur_module(batch_dict)
        # BUGFIX: mode comparisons used string *identity* (`is 'x'`), which is
        # interning-dependent undefined behavior; compare with == instead.
        mode = forward_args.get('mode', None)
        if self.training and mode == 'train_discriminator':
            loss = self.discriminator.get_discriminator_loss(batch_dict, source=forward_args['source'])
            return loss
        if self.training and mode == 'train_detector':
            loss, tb_dict, disp_dict = self.get_detector_loss()
        elif not self.training and mode == 'active_evaluate':
            batch_dict = self.post_processing(batch_dict)
            sample_score = self.get_evaluate_score(batch_dict, forward_args['domain'])
            return sample_score
        elif not self.training and mode is None:
            pred_dicts, recall_dicts = self.post_processing(batch_dict)
            return pred_dicts, recall_dicts
        # NOTE(review): reaching here in any other combination would raise
        # NameError on `loss` — presumably callers only use the modes above.
        ret_dict = {
            'loss': loss
        }
        return ret_dict, tb_dict, disp_dict

    def get_detector_loss(self):
        """Sum RPN and RCNN losses; tb_dict collects per-term scalars."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head.get_loss()
        loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)
        loss = loss_rpn + loss_rcnn
        return loss, tb_dict, disp_dict

    def get_evaluate_score(self, batch_dict, domain):
        """Build per-frame score dicts (domainness + reweighted RoI feature)
        used to rank frames for active-learning selection."""
        batch_dict = self.discriminator.domainness_evaluate(batch_dict)
        batch_size = batch_dict['batch_size']
        # renamed loop variable: `id` shadowed the builtin
        frame_id = [str(fid) for fid in batch_dict['frame_id']]
        reweight_roi = batch_dict['reweight_roi']
        domainness_evaluate = batch_dict['domainness_evaluate'].cpu()
        sample_score = []
        for i in range(batch_size):
            frame_score = {
                'frame_id': frame_id[i],
                'domainness_evaluate': domainness_evaluate[i].cpu(),
                'roi_feature': reweight_roi[i],
                'total_score': domainness_evaluate[i].cpu()
            }
            sample_score.append(frame_score)
        return sample_score

    @staticmethod
    def cal_scores_by_npoints(cls_scores, iou_scores, num_points_in_gt, cls_thresh=10, iou_thresh=100):
        """
        Blend classification and IoU scores per box based on point count:
        sparse boxes (<= cls_thresh points) trust the cls score, dense boxes
        (>= iou_thresh) trust the IoU score, and the region in between is
        linearly interpolated.

        Args:
            cls_scores: (N)
            iou_scores: (N)
            num_points_in_gt: (N, 7+c)
            cls_thresh: scalar
            iou_thresh: scalar
        """
        assert iou_thresh >= cls_thresh
        alpha = torch.zeros(cls_scores.shape, dtype=torch.float32).cuda()
        alpha[num_points_in_gt <= cls_thresh] = 0
        alpha[num_points_in_gt >= iou_thresh] = 1

        mask = ((num_points_in_gt > cls_thresh) & (num_points_in_gt < iou_thresh))
        # BUGFIX: the ramp's lower bound was hard-coded as 10; use cls_thresh
        # so non-default thresholds still map the transition onto [0, 1].
        alpha[mask] = (num_points_in_gt[mask] - cls_thresh) / (iou_thresh - cls_thresh)

        scores = (1 - alpha) * cls_scores + alpha * iou_scores
        return scores

    def set_nms_score_by_class(self, iou_preds, cls_preds, label_preds, score_by_class):
        """Pick 'iou' or 'cls' as the NMS score per class (SCORE_BY_CLASS cfg)."""
        n_classes = torch.unique(label_preds).shape[0]
        nms_scores = torch.zeros(iou_preds.shape, dtype=torch.float32).cuda()
        for i in range(n_classes):
            mask = label_preds == (i + 1)
            class_name = self.class_names[i]
            score_type = score_by_class[class_name]
            if score_type == 'iou':
                nms_scores[mask] = iou_preds[mask]
            elif score_type == 'cls':
                nms_scores[mask] = cls_preds[mask]
            else:
                raise NotImplementedError
        return nms_scores

    def post_processing(self, batch_dict):
        """
        Class-agnostic NMS over RoI boxes; in 'active_evaluate' mode it
        additionally aggregates a score-softmax-reweighted RoI feature per
        frame and returns the batch_dict instead of detections.

        Args:
            batch_dict:
                batch_size:
                batch_cls_preds: (B, num_boxes, num_classes | 1) or (N1+N2+..., num_classes | 1)
                batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C)
                cls_preds_normalized: indicate whether batch_cls_preds is normalized
                batch_index: optional (N1+N2+...)
                roi_labels: (B, num_rois)  1 .. num_classes
        Returns:
            (pred_dicts, recall_dict), or batch_dict in 'active_evaluate' mode.
        """
        post_process_cfg = self.model_cfg.POST_PROCESSING
        batch_size = batch_dict['batch_size']
        recall_dict = {}
        pred_dicts = []
        for index in range(batch_size):
            if batch_dict.get('batch_index', None) is not None:
                assert batch_dict['batch_cls_preds'].shape.__len__() == 2
                batch_mask = (batch_dict['batch_index'] == index)
            else:
                assert batch_dict['batch_cls_preds'].shape.__len__() == 3
                batch_mask = index
            box_preds = batch_dict['batch_box_preds'][batch_mask]
            # 'batch_cls_preds' holds the RoI head's IoU estimates here;
            # the RPN classification scores come in as 'roi_scores'.
            iou_preds = batch_dict['batch_cls_preds'][batch_mask]
            cls_preds = batch_dict['roi_scores'][batch_mask]

            src_iou_preds = iou_preds
            src_box_preds = box_preds
            src_cls_preds = cls_preds
            assert iou_preds.shape[1] in [1, self.num_class]

            if batch_dict['mode'] == 'active_evaluate':
                roi_feature = batch_dict['roi_shared_feature'][batch_mask]

            if not batch_dict['cls_preds_normalized']:
                iou_preds = torch.sigmoid(iou_preds)
                cls_preds = torch.sigmoid(cls_preds)

            if post_process_cfg.NMS_CONFIG.MULTI_CLASSES_NMS:
                raise NotImplementedError
            else:
                iou_preds, label_preds = torch.max(iou_preds, dim=-1)
                label_preds = batch_dict['roi_labels'][index] if batch_dict.get('has_class_labels', False) else label_preds + 1

                if post_process_cfg.NMS_CONFIG.get('SCORE_BY_CLASS', None) and \
                        post_process_cfg.NMS_CONFIG.SCORE_TYPE == 'score_by_class':
                    nms_scores = self.set_nms_score_by_class(
                        iou_preds, cls_preds, label_preds, post_process_cfg.NMS_CONFIG.SCORE_BY_CLASS
                    )
                elif post_process_cfg.NMS_CONFIG.get('SCORE_TYPE', None) == 'iou' or \
                        post_process_cfg.NMS_CONFIG.get('SCORE_TYPE', None) is None:
                    nms_scores = iou_preds
                elif post_process_cfg.NMS_CONFIG.SCORE_TYPE == 'cls':
                    nms_scores = cls_preds
                elif post_process_cfg.NMS_CONFIG.SCORE_TYPE == 'weighted_iou_cls':
                    nms_scores = post_process_cfg.NMS_CONFIG.SCORE_WEIGHTS.iou * iou_preds + \
                                 post_process_cfg.NMS_CONFIG.SCORE_WEIGHTS.cls * cls_preds
                elif post_process_cfg.NMS_CONFIG.SCORE_TYPE == 'num_pts_iou_cls':
                    # Count points inside each box to mix cls/IoU per box.
                    point_mask = (batch_dict['points'][:, 0] == batch_mask)
                    batch_points = batch_dict['points'][point_mask][:, 1:4]
                    num_pts_in_gt = roiaware_pool3d_utils.points_in_boxes_cpu(
                        batch_points.cpu(), box_preds[:, 0:7].cpu()
                    ).sum(dim=1).float().cuda()

                    score_thresh_cfg = post_process_cfg.NMS_CONFIG.SCORE_THRESH
                    nms_scores = self.cal_scores_by_npoints(
                        cls_preds, iou_preds, num_pts_in_gt,
                        score_thresh_cfg.cls, score_thresh_cfg.iou
                    )
                else:
                    raise NotImplementedError

                if batch_dict['mode'] == 'active_evaluate':
                    if batch_dict.get('reweight_roi', None) is None:
                        batch_dict['reweight_roi'] = []
                    selected, selected_scores, selected_roi = class_agnostic_nms_with_roi(
                        box_scores=nms_scores, box_preds=box_preds, roi_feature=roi_feature,
                        nms_config=post_process_cfg.NMS_CONFIG,
                        score_thresh=post_process_cfg.SCORE_THRESH
                    )
                    # BUGFIX: softmax previously relied on the deprecated
                    # implicit-dim behavior; selected_scores is 1-D so dim=0
                    # is the explicit equivalent.
                    reweight = F.softmax(selected_scores, dim=0)
                    # Score-weighted average of the kept RoI features.
                    reweight_roi = reweight.view(1, -1) @ selected_roi
                    batch_dict['reweight_roi'].append(reweight_roi)
                    if index == (batch_size - 1):
                        return batch_dict
                    else:
                        continue

                selected, selected_scores = class_agnostic_nms(
                    box_scores=nms_scores, box_preds=box_preds,
                    nms_config=post_process_cfg.NMS_CONFIG,
                    score_thresh=post_process_cfg.SCORE_THRESH
                )

                if post_process_cfg.OUTPUT_RAW_SCORE:
                    raise NotImplementedError

                final_scores = selected_scores
                final_labels = label_preds[selected]
                final_boxes = box_preds[selected]

            recall_dict = self.generate_recall_record(
                box_preds=final_boxes if 'rois' not in batch_dict else src_box_preds,
                recall_dict=recall_dict, batch_index=index, data_dict=batch_dict,
                thresh_list=post_process_cfg.RECALL_THRESH_LIST
            )

            record_dict = {
                'pred_boxes': final_boxes,
                'pred_scores': final_scores,
                'pred_labels': final_labels,
                'pred_cls_scores': cls_preds[selected],
                'pred_iou_scores': iou_preds[selected]
            }
            pred_dicts.append(record_dict)

        return pred_dicts, recall_dict
| 17,487
| 43.161616
| 127
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/detectors/semi_second.py
|
import torch
from .detector3d_template import Detector3DTemplate
from ..model_utils.model_nms_utils import class_agnostic_nms
class SemiSECOND(Detector3DTemplate):
    """SECOND wrapper for semi-supervised (teacher/student) training.

    model_type controls the forward contract:
      * 'origin'  — supervised training (loss) / inference (final boxes)
      * 'teacher' — always returns the raw batch_dict for pseudo-labeling
      * 'student' — training returns loss dicts on (pseudo-)labeled batches,
        the raw batch_dict on unlabeled ones (consistency); eval returns
        final boxes
    """

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()
        self.model_type = None  # assigned later via set_model_type()

    def set_model_type(self, model_type):
        """Assign the semi-supervised role and propagate it to the dense head."""
        assert model_type in ['origin', 'teacher', 'student']
        self.model_type = model_type
        self.dense_head.model_type = model_type

    def forward(self, batch_dict):
        # origin: (training, return loss) (testing, return final boxes)
        if self.model_type == 'origin':
            for cur_module in self.module_list:
                batch_dict = cur_module(batch_dict)
            if self.training:
                loss, tb_dict, disp_dict = self.get_training_loss()
                ret_dict = {
                    'loss': loss
                }
                return ret_dict, tb_dict, disp_dict
            else:
                pred_dicts, recall_dicts = self.post_processing(batch_dict)
                return pred_dicts, recall_dicts
        # teacher: (testing, return raw boxes)
        elif self.model_type == 'teacher':
            # assert not self.training
            for cur_module in self.module_list:
                batch_dict = cur_module(batch_dict)
            return batch_dict
        # student: (training, return (loss & raw boxes w/ gt_boxes) or raw boxes (w/o gt_boxes) for consistency)
        #          (testing, return final_boxes)
        elif self.model_type == 'student':
            for cur_module in self.module_list:
                batch_dict = cur_module(batch_dict)
            if self.training:
                if 'gt_boxes' in batch_dict:  # for (pseudo-)labeled data
                    loss, tb_dict, disp_dict = self.get_training_loss()
                    ret_dict = {
                        'loss': loss
                    }
                    return batch_dict, ret_dict, tb_dict, disp_dict
                else:
                    return batch_dict
            else:
                pred_dicts, recall_dicts = self.post_processing(batch_dict)
                return pred_dicts, recall_dicts
        else:
            # BUGFIX: corrected typo in the error message ('Unsupprted').
            raise Exception('Unsupported model type')

    def get_training_loss(self):
        """RPN-only loss (single-stage detector)."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head.get_loss()
        tb_dict = {
            'loss_rpn': loss_rpn.item(),
            **tb_dict
        }
        loss = loss_rpn
        return loss, tb_dict, disp_dict
class SemiSECONDIoU(Detector3DTemplate):
    """SECOND-IoU wrapper for semi-supervised (teacher/student) training.

    model_type controls the forward contract:
      * 'origin'  — supervised training (loss) / inference (final boxes)
      * 'teacher' — always returns the raw batch_dict (filtered RoIs and IoU
        scores) for pseudo-labeling
      * 'student' — training returns loss dicts on (pseudo-)labeled batches,
        the raw batch_dict on unlabeled ones (consistency); eval returns
        final boxes
    """

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()
        self.model_type = None  # assigned later via set_model_type()

    def set_model_type(self, model_type):
        """Assign the semi-supervised role and propagate it to both heads."""
        assert model_type in ['origin', 'teacher', 'student']
        self.model_type = model_type
        self.dense_head.model_type = model_type
        self.roi_head.model_type = model_type

    def forward(self, batch_dict):
        # origin: (training, return loss) (testing, return final boxes)
        if self.model_type == 'origin':
            for cur_module in self.module_list:
                batch_dict = cur_module(batch_dict)
            if self.training:
                loss, tb_dict, disp_dict = self.get_training_loss()
                ret_dict = {
                    'loss': loss
                }
                return ret_dict, tb_dict, disp_dict
            else:
                pred_dicts, recall_dicts = self.post_processing(batch_dict)
                return pred_dicts, recall_dicts
        # teacher: (testing, return initial filtered boxes and iou_scores)
        elif self.model_type == 'teacher':
            # assert not self.training
            for cur_module in self.module_list:
                batch_dict = cur_module(batch_dict)
            return batch_dict
        # student: (training, return (loss & raw boxes w/ gt_boxes) or raw boxes (w/o gt_boxes) for consistency)
        #          (testing, return final_boxes)
        elif self.model_type == 'student':
            for cur_module in self.module_list:
                batch_dict = cur_module(batch_dict)
            if self.training:
                if 'gt_boxes' in batch_dict:
                    loss, tb_dict, disp_dict = self.get_training_loss()
                    ret_dict = {
                        'loss': loss
                    }
                    return batch_dict, ret_dict, tb_dict, disp_dict
                else:
                    return batch_dict
            else:
                pred_dicts, recall_dicts = self.post_processing(batch_dict)
                return pred_dicts, recall_dicts
        else:
            # BUGFIX: corrected typo in the error message ('Unsupprted').
            raise Exception('Unsupported model type')

    def get_training_loss(self):
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head.get_loss()
        tb_dict = {
            'loss_rpn': loss_rpn.item(),
            **tb_dict
        }
        # The RCNN (IoU) loss is trained on 'origin' and 'student' roles;
        # the teacher only contributes the RPN loss.
        if self.model_type in ['origin', 'student']:
            loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)
            loss = loss_rpn + loss_rcnn
        elif self.model_type in ['teacher']:
            loss = loss_rpn
        else:
            # BUGFIX: corrected typo in the error message ('Unsupprted').
            raise Exception('Unsupported model type')
        return loss, tb_dict, disp_dict

    def post_processing(self, batch_dict):
        """
        NMS over the RoI boxes using the RPN score.

        We found NMS with IoU-guided filtering to be bad (probably bugs in
        the head), thus only the original RPN score is used for NMS.
        """
        post_process_cfg = self.model_cfg.POST_PROCESSING
        batch_size = batch_dict['batch_size']
        recall_dict = {}
        pred_dicts = []
        for index in range(batch_size):
            box_preds = batch_dict['rois'][index]
            iou_preds = batch_dict['roi_ious'][index]
            cls_preds = batch_dict['roi_scores'][index]
            label_preds = batch_dict['roi_labels'][index]

            assert iou_preds.shape[1] in [1, self.num_class]

            if not batch_dict['cls_preds_normalized']:
                iou_preds = torch.sigmoid(iou_preds)
                cls_preds = torch.sigmoid(cls_preds)

            if post_process_cfg.NMS_CONFIG.MULTI_CLASSES_NMS:
                raise NotImplementedError
            else:
                nms_scores = cls_preds  # iou_preds
                nms_scores = nms_scores.squeeze(-1)
                selected, selected_scores = class_agnostic_nms(
                    box_scores=nms_scores, box_preds=box_preds,
                    nms_config=post_process_cfg.NMS_CONFIG,
                    score_thresh=post_process_cfg.SCORE_THRESH
                )
                final_scores = selected_scores
                final_labels = label_preds[selected]
                final_boxes = box_preds[selected]

            # added filtering boxes with size 0 (zero-padded RoIs)
            zero_mask = (final_boxes[:, 3:6] != 0).all(1)
            final_boxes = final_boxes[zero_mask]
            final_labels = final_labels[zero_mask]
            final_scores = final_scores[zero_mask]

            recall_dict = self.generate_recall_record(
                box_preds=final_boxes,
                recall_dict=recall_dict, batch_index=index, data_dict=batch_dict,
                thresh_list=post_process_cfg.RECALL_THRESH_LIST
            )

            record_dict = {
                'pred_boxes': final_boxes,
                'pred_scores': final_scores,
                'pred_labels': final_labels,
            }
            pred_dicts.append(record_dict)

        return pred_dicts, recall_dict
| 8,060
| 37.385714
| 112
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/detectors/centerpoint.py
|
from .detector3d_template import Detector3DTemplate
from .detector3d_template_multi_db import Detector3DTemplate_M_DB
from pcdet.utils import common_utils
class CenterPoint(Detector3DTemplate):
    """Anchor-free CenterPoint detector.

    The dense head already decodes final boxes into 'final_box_dicts', so
    post_processing only accumulates recall statistics.
    """

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()

    def forward(self, batch_dict):
        """Run all modules; return losses (training) or final boxes (eval)."""
        for module in self.module_list:
            batch_dict = module(batch_dict)

        if not self.training:
            pred_dicts, recall_dicts = self.post_processing(batch_dict)
            return pred_dicts, recall_dicts

        loss, tb_dict, disp_dict = self.get_training_loss()
        return {'loss': loss}, tb_dict, disp_dict

    def get_training_loss(self):
        """RPN-only loss (single-stage detector)."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head.get_loss()
        tb_dict = {
            'loss_rpn': loss_rpn.item(),
            **tb_dict
        }
        return loss_rpn, tb_dict, disp_dict

    def post_processing(self, batch_dict):
        """Pass through the head's decoded boxes; compute recall per sample."""
        post_process_cfg = self.model_cfg.POST_PROCESSING
        final_pred_dict = batch_dict['final_box_dicts']
        recall_dict = {}
        for sample_idx in range(batch_dict['batch_size']):
            recall_dict = self.generate_recall_record(
                box_preds=final_pred_dict[sample_idx]['pred_boxes'],
                recall_dict=recall_dict, batch_index=sample_idx, data_dict=batch_dict,
                thresh_list=post_process_cfg.RECALL_THRESH_LIST
            )
        return final_pred_dict, recall_dict
class SemiCenterPoint(Detector3DTemplate):
    """CenterPoint wrapper for semi-supervised (teacher/student) training.

    model_type controls the forward contract:
      * 'origin'  — supervised training (loss) / inference (final boxes)
      * 'teacher' — always returns the raw batch_dict for pseudo-labeling
      * 'student' — training returns loss dicts on (pseudo-)labeled batches,
        the raw batch_dict on unlabeled ones (consistency); eval returns
        final boxes
    """

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()
        self.model_type = None  # assigned later via set_model_type()

    def set_model_type(self, model_type):
        """Assign the semi-supervised role and propagate it to the dense head."""
        assert model_type in ['origin', 'teacher', 'student']
        self.model_type = model_type
        self.dense_head.model_type = model_type

    def forward(self, batch_dict):
        if self.model_type == 'origin':
            for cur_module in self.module_list:
                batch_dict = cur_module(batch_dict)
            if self.training:
                loss, tb_dict, disp_dict = self.get_training_loss()
                ret_dict = {
                    'loss': loss
                }
                return ret_dict, tb_dict, disp_dict
            else:
                pred_dicts, recall_dicts = self.post_processing(batch_dict)
                return pred_dicts, recall_dicts
        elif self.model_type == 'teacher':
            # assert not self.training
            for cur_module in self.module_list:
                batch_dict = cur_module(batch_dict)
            return batch_dict
        # student: (training, return (loss & raw boxes w/ gt_boxes) or raw boxes (w/o gt_boxes) for consistency)
        #          (testing, return final_boxes)
        elif self.model_type == 'student':
            for cur_module in self.module_list:
                batch_dict = cur_module(batch_dict)
            if self.training:
                if 'gt_boxes' in batch_dict:  # for (pseudo-)labeled data
                    loss, tb_dict, disp_dict = self.get_training_loss()
                    ret_dict = {
                        'loss': loss
                    }
                    return batch_dict, ret_dict, tb_dict, disp_dict
                else:
                    return batch_dict
            else:
                pred_dicts, recall_dicts = self.post_processing(batch_dict)
                return pred_dicts, recall_dicts
        else:
            # BUGFIX: corrected typo in the error message ('Unsupprted').
            raise Exception('Unsupported model type')

    def get_training_loss(self):
        """RPN-only loss (single-stage detector)."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head.get_loss()
        tb_dict = {
            'loss_rpn': loss_rpn.item(),
            **tb_dict
        }
        loss = loss_rpn
        return loss, tb_dict, disp_dict

    def post_processing(self, batch_dict):
        """Pass through the head's decoded boxes; compute recall per sample."""
        post_process_cfg = self.model_cfg.POST_PROCESSING
        batch_size = batch_dict['batch_size']
        final_pred_dict = batch_dict['final_box_dicts']
        recall_dict = {}
        for index in range(batch_size):
            pred_boxes = final_pred_dict[index]['pred_boxes']
            recall_dict = self.generate_recall_record(
                box_preds=pred_boxes,
                recall_dict=recall_dict, batch_index=index, data_dict=batch_dict,
                thresh_list=post_process_cfg.RECALL_THRESH_LIST
            )
        return final_pred_dict, recall_dict
class CenterPoint_M_DB(Detector3DTemplate_M_DB):
    """CenterPoint trained jointly on two datasets (multi-DB).

    Modules 0-3 form a shared backbone run on the concatenated batch; modules
    4 and 5 are the dataset-specific dense heads for source 1 and source 2
    respectively.  Which head(s) run depends on which sources appear in the
    current batch (pure source-1, pure source-2, or mixed).
    """
    def __init__(self, model_cfg, num_class, num_class_s2, dataset, dataset_s2, source_one_name):
        super().__init__(model_cfg=model_cfg, num_class=num_class, num_class_s2=num_class_s2, dataset=dataset,
                         dataset_s2=dataset_s2, source_one_name=source_one_name)
        self.module_list = self.build_networks()
        # tag used to tell source-1 samples apart from source-2 samples
        self.source_one_name = source_one_name
    def forward(self, batch_dict):
        # Split the Concat dataset batch into batch_1 and batch_2 (index lists)
        split_tag_s1, split_tag_s2 = common_utils.split_batch_dict(self.source_one_name, batch_dict)
        batch_s1 = {}
        batch_s2 = {}
        module_num = -1
        for cur_module in self.module_list:
            module_num += 1
            # modules 0..3: shared backbone, always run on the joint batch
            if module_num < 4:
                batch_dict = cur_module(batch_dict)
            # module 4: dense head of source 1
            if module_num == 4:
                if len(split_tag_s1) == batch_dict['batch_size']:
                    # pure source-1 batch: run head 1 on the whole batch
                    batch_dict = cur_module(batch_dict)
                elif len(split_tag_s2) == batch_dict['batch_size']:
                    # pure source-2 batch: head 1 is skipped
                    continue
                else:
                    # mixed batch: split backbone outputs per source, run head 1
                    if module_num == 4:
                        batch_s1, batch_s2 = common_utils.split_two_batch_dict_gpu(split_tag_s1, split_tag_s2, batch_dict)
                        batch_s1 = cur_module(batch_s1)
            # module 5: dense head of source 2 (mirror of the block above)
            if module_num == 5:
                if len(split_tag_s2) == batch_dict['batch_size']:
                    batch_dict = cur_module(batch_dict)
                elif len(split_tag_s1) == batch_dict['batch_size']:
                    continue
                else:
                    batch_s2 = cur_module(batch_s2)
        if self.training:
            # Re-split to decide which loss(es) apply to this batch.
            split_tag_s1, split_tag_s2 = common_utils.split_batch_dict(self.source_one_name, batch_dict)
            if len(split_tag_s1) == batch_dict['batch_size']:
                loss, tb_dict, disp_dict = self.get_training_loss_s1()
                ret_dict = {
                    'loss': loss
                }
                return ret_dict, tb_dict, disp_dict
            elif len(split_tag_s2) == batch_dict['batch_size']:
                loss, tb_dict, disp_dict = self.get_training_loss_s2()
                ret_dict = {
                    'loss': loss
                }
                return ret_dict, tb_dict, disp_dict
            else:
                # mixed batch: sum both losses; NOTE(review): only the
                # source-1 tb/disp dicts are returned for logging
                loss_1, tb_dict_1, disp_dict_1 = self.get_training_loss_s1()
                loss_2, tb_dict_2, disp_dict_2 = self.get_training_loss_s2()
                ret_dict = {
                    'loss': loss_1 + loss_2
                }
                return ret_dict, tb_dict_1, disp_dict_1
        else:
            pred_dicts, recall_dicts = self.post_processing(batch_dict)
            return pred_dicts, recall_dicts
    def get_training_loss_s1(self):
        """Dense-head loss for dataset/source 1."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head_s1.get_loss()
        tb_dict = {
            'loss_rpn': loss_rpn.item(),
            **tb_dict
        }
        loss = loss_rpn
        return loss, tb_dict, disp_dict
    def get_training_loss_s2(self):
        """Dense-head loss for dataset/source 2."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head_s2.get_loss()
        tb_dict = {
            'loss_rpn': loss_rpn.item(),
            **tb_dict
        }
        loss = loss_rpn
        return loss, tb_dict, disp_dict
    def post_processing(self, batch_dict):
        """Final boxes come from the head(s); only recall statistics are computed."""
        post_process_cfg = self.model_cfg.POST_PROCESSING
        batch_size = batch_dict['batch_size']
        final_pred_dict = batch_dict['final_box_dicts']
        recall_dict = {}
        for index in range(batch_size):
            pred_boxes = final_pred_dict[index]['pred_boxes']
            recall_dict = self.generate_recall_record(
                box_preds=pred_boxes,
                recall_dict=recall_dict, batch_index=index, data_dict=batch_dict,
                thresh_list=post_process_cfg.RECALL_THRESH_LIST
            )
        return final_pred_dict, recall_dict
| 8,824
| 35.770833
| 122
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/detectors/unsupervised_model/pvrcnn_plus_backbone.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from ..detector3d_template import Detector3DTemplate
from ....ops.pointnet2.pointnet2_stack import pointnet2_modules as pointnet2_stack_modules
from ....ops.pointnet2.pointnet2_stack import pointnet2_utils as pointnet2_stack_utils
from ....utils import common_utils
# copy from voxel set abstraction module
def bilinear_interpolate_torch(im, x, y):
"""
Args:
im: (H, W, C) [y, x]
x: (N)
y: (N)
Returns:
"""
x0 = torch.floor(x).long()
x1 = x0 + 1
y0 = torch.floor(y).long()
y1 = y0 + 1
x0 = torch.clamp(x0, 0, im.shape[1] - 1)
x1 = torch.clamp(x1, 0, im.shape[1] - 1)
y0 = torch.clamp(y0, 0, im.shape[0] - 1)
y1 = torch.clamp(y1, 0, im.shape[0] - 1)
Ia = im[y0, x0]
Ib = im[y1, x0]
Ic = im[y0, x1]
Id = im[y1, x1]
wa = (x1.type_as(x) - x) * (y1.type_as(y) - y)
wb = (x1.type_as(x) - x) * (y - y0.type_as(y))
wc = (x - x0.type_as(x)) * (y1.type_as(y) - y)
wd = (x - x0.type_as(x)) * (y - y0.type_as(y))
ans = torch.t((torch.t(Ia) * wa)) + torch.t(torch.t(Ib) * wb) + torch.t(torch.t(Ic) * wc) + torch.t(torch.t(Id) * wd)
return ans
def sample_points_with_roi(rois, points, sample_radius_with_roi, num_max_points_of_part=200000):
    """Keep only the points lying near at least one ROI box.

    A point survives when its distance to the nearest ROI center is below
    that ROI's half-diagonal plus ``sample_radius_with_roi``.

    Args:
        rois: (M, 7 + C) boxes, [x, y, z, dx, dy, dz, ...]
        points: (N, 3)
        sample_radius_with_roi: extra radius added around each box
        num_max_points_of_part: chunk size to bound the (N, M) distance matrix

    Returns:
        sampled_points: (N_out, 3); falls back to points[:1] when nothing matches
        point_mask: (N,) boolean keep-mask
    """
    def _near_roi_mask(pts):
        # (P, M) distances from every point to every roi center
        dist = (pts[:, None, :] - rois[None, :, 0:3]).norm(dim=-1)
        nearest_dist, nearest_idx = dist.min(dim=-1)
        half_diag = (rois[nearest_idx, 3:6] / 2).norm(dim=-1)
        return nearest_dist < half_diag + sample_radius_with_roi

    if points.shape[0] < num_max_points_of_part:
        point_mask = _near_roi_mask(points)
    else:
        # chunk the points so the pairwise distance matrix stays bounded
        chunk_masks = [
            _near_roi_mask(points[start:start + num_max_points_of_part])
            for start in range(0, points.shape[0], num_max_points_of_part)
        ]
        point_mask = torch.cat(chunk_masks, dim=0)

    sampled_points = points[:1] if point_mask.sum() == 0 else points[point_mask, :]
    return sampled_points, point_mask
# TODO add LOSS_CFG TO MODEL_CFG
class PVRCNN_PLUS_BACKBONE(Detector3DTemplate):
    """Feature-extraction trunk of PV-RCNN++: VFE + 3D backbone + BEV mapping only."""

    def __init__(self, model_cfg, dataset, num_class=None):
        super().__init__(model_cfg=model_cfg, num_class=None, dataset=dataset)
        self.module_list = self.build_networks()

    def forward(self, batch_dict):
        """Run only the backbone stages; detection heads are intentionally skipped."""
        for stage in (self.vfe, self.backbone_3d, self.map_to_bev_module):
            batch_dict = stage(batch_dict)
        # batch_dict = self.backbone_2d(batch_dict)
        return batch_dict
# TODO add POS_THRESH, NEG_THRESH
class HardestContrastiveLoss():
    """Hardest-negative contrastive loss between two augmented views of a scene.

    Positive pairs are the SAME sampled source points seen through two
    batch_dicts (two augmentations); negatives are independently sampled
    points whose hardest (closest-in-feature-space) cross-view match is
    pushed apart.

    NOTE(review): this is a plain object that owns an nn.ModuleList of SA
    layers -- the layers are not registered with a parent nn.Module, so their
    parameters must be handed to the optimizer explicitly; verify at call site.
    """

    def __init__(self, loss_cfg, voxel_size, point_cloud_range, num_bev_features=None,
                 num_rawpoint_features=None, **kwargs):
        super().__init__()
        self.loss_cfg = loss_cfg
        self.point_feature_names = []
        # margins: positive pairs pulled below pos_thresh, hardest negatives
        # pushed beyond neg_thresh
        self.pos_thresh = loss_cfg.POS_THRESH
        self.neg_thresh = loss_cfg.NEG_THRESH
        self.voxel_size = voxel_size
        self.point_cloud_range = point_cloud_range
        SA_cfg = self.loss_cfg.SA_LAYER
        self.downsample_times_map = {}
        self.SA_layers = nn.ModuleList()
        # one aggregation layer per multi-scale 3D feature source
        # ('bev' / 'raw_points' are handled separately and need no layer)
        for src_name in self.loss_cfg.FEATURES_SOURCE:
            if src_name in ['bev', 'raw_points']:
                continue
            self.downsample_times_map[src_name] = SA_cfg[src_name].DOWNSAMPLE_FACTOR
            self.point_feature_names.append(src_name)
            self.SA_layers.append(build_feature_aggregation_module(config=SA_cfg[src_name]))

    def pdist(self, point_features_1, point_features_2):
        """Pairwise Euclidean distance matrix between two feature sets: (N1, N2)."""
        D = torch.sum((point_features_1.unsqueeze(1) - point_features_2.unsqueeze(0)).pow(2), 2)
        return torch.sqrt(D + 1e-7)

    def get_hardest_contrastive_loss(self, batch_dict_1, batch_dict_2):
        """Compute the loss over both views.

        Returns:
            (pos_loss, neg_loss): scalars averaged over the batch
        """
        batch_dict_1, batch_dict_2, keypoints_inds = self.get_point_features(batch_dict_1, batch_dict_2, tag='positive')
        batch_dict_1, batch_dict_2, (keypoints_inds_1, keypoints_inds_2) = self.get_point_features(batch_dict_1, batch_dict_2, tag='negative')
        pos_features_1, pos_features_2 = batch_dict_1['point_features_positive'], batch_dict_2['point_features_positive']
        neg_features_1, neg_features_2 = batch_dict_1['point_features_negative'], batch_dict_2['point_features_negative']
        batch_size = batch_dict_1['batch_size']
        pos_loss_all = None
        neg_loss_all = None
        for bs_idx in range(batch_size):
            mask_pos = batch_dict_1['points_coords_positive'][:, 0] == bs_idx
            cur_pos_features_1, cur_pos_features_2 = pos_features_1[mask_pos], pos_features_2[mask_pos]
            # positive pair: the same keypoint in both views should have close features
            pos_loss = torch.relu((cur_pos_features_1 - cur_pos_features_2).pow(2).sum(1) - self.pos_thresh)
            mask_neg = batch_dict_1['points_coords_negative'][:, 0] == bs_idx
            cur_neg_features_1, cur_neg_features_2 = neg_features_1[mask_neg], neg_features_2[mask_neg]
            # hardest negative: nearest negative feature in the *other* view
            distance_1 = self.pdist(cur_pos_features_1, cur_neg_features_2)
            distance_2 = self.pdist(cur_pos_features_2, cur_neg_features_1)
            distance_1_min, distance_1_ind = distance_1.min(1)
            distance_2_min, distance_2_ind = distance_2.min(1)
            # discard "negatives" that are actually the same source point as the positive
            mask_1 = keypoints_inds[mask_pos] != keypoints_inds_2[distance_1_ind].to(keypoints_inds.device)
            mask_2 = keypoints_inds[mask_pos] != keypoints_inds_1[distance_2_ind].to(keypoints_inds.device)
            neg_loss_1 = torch.relu(self.neg_thresh - distance_1_min[mask_1]).pow(2)
            neg_loss_2 = torch.relu(self.neg_thresh - distance_2_min[mask_2]).pow(2)
            pos_loss = pos_loss.mean()
            neg_loss = (neg_loss_1.mean() + neg_loss_2.mean()) / 2
            if pos_loss_all is None and neg_loss_all is None:
                pos_loss_all = pos_loss
                neg_loss_all = neg_loss
            else:
                pos_loss_all += pos_loss
                neg_loss_all += neg_loss
        pos_loss_all = pos_loss_all / batch_size
        neg_loss_all = neg_loss_all / batch_size
        return pos_loss_all, neg_loss_all

    def get_point_features(self, batch_dict_1, batch_dict_2, tag='positive'):
        """Aggregate multi-source features at sampled keypoints for both views.

        Stores 'point_features_<tag>' and 'points_coords_<tag>' into both
        batch dicts and returns the keypoint indices used for sampling.
        """
        if tag == 'positive':
            keypoints_1, keypoints_2, keypoints_inds = self.get_positive_sampled_points(batch_dict_1, batch_dict_2)
        else:
            keypoints_1, keypoints_2, keypoints_inds_1, keypoints_inds_2 = self.get_negative_sampled_points(batch_dict_1, batch_dict_2, method='random')
            keypoints_inds = (keypoints_inds_1, keypoints_inds_2)
        point_feature_list_1 = []
        point_feature_list_2 = []
        if 'bev' in self.loss_cfg.FEATURES_SOURCE:
            point_bev_features_1 = self.interpolate_from_bev_features(
                keypoints_1, batch_dict_1['spatial_features'], batch_dict_1['batch_size'],
                bev_stride=batch_dict_1['spatial_features_stride']
            )
            point_bev_features_2 = self.interpolate_from_bev_features(
                keypoints_2, batch_dict_2['spatial_features'], batch_dict_2['batch_size'],
                bev_stride=batch_dict_2['spatial_features_stride']
            )
            point_feature_list_1.append(point_bev_features_1)
            point_feature_list_2.append(point_bev_features_2)
        batch_size = batch_dict_1['batch_size']
        new_xyz_1 = keypoints_1[:, 1:4].contiguous()
        new_xyz_2 = keypoints_2[:, 1:4].contiguous()
        new_xyz_batch_cnt_1 = new_xyz_1.new_zeros(batch_size).int()
        new_xyz_batch_cnt_2 = new_xyz_2.new_zeros(batch_size).int()
        for k in range(batch_size):
            new_xyz_batch_cnt_1[k] = (keypoints_1[:, 0] == k).sum()
            new_xyz_batch_cnt_2[k] = (keypoints_2[:, 0] == k).sum()
        for k, src_name in enumerate(self.point_feature_names):
            cur_coords_1 = batch_dict_1['multi_scale_3d_features'][src_name].indices
            cur_features_1 = batch_dict_1['multi_scale_3d_features'][src_name].features.contiguous()
            xyz_1 = common_utils.get_voxel_centers(
                cur_coords_1[:, 1:4], downsample_times=self.downsample_times_map[src_name],
                voxel_size=self.voxel_size, point_cloud_range=self.point_cloud_range
            )
            cur_coords_2 = batch_dict_2['multi_scale_3d_features'][src_name].indices
            cur_features_2 = batch_dict_2['multi_scale_3d_features'][src_name].features.contiguous()
            xyz_2 = common_utils.get_voxel_centers(
                cur_coords_2[:, 1:4], downsample_times=self.downsample_times_map[src_name],
                voxel_size=self.voxel_size, point_cloud_range=self.point_cloud_range
            )
            # BUG fix: was self.model_cfg.COVER_FEAT -- this class only stores
            # loss_cfg, so that path raised AttributeError whenever COVER_FEAT set
            cover_feat_4 = self.loss_cfg.COVER_FEAT if self.loss_cfg.get('COVER_FEAT', None) else None
            pooled_features_1 = self.aggregate_keypoint_features_from_one_source(
                batch_size=batch_size, aggregate_func=self.SA_layers[k],
                xyz=xyz_1.contiguous(), xyz_features=cur_features_1, xyz_bs_idxs=cur_coords_1[:, 0],
                new_xyz=new_xyz_1, new_xyz_batch_cnt=new_xyz_batch_cnt_1,
                filter_neighbors_with_roi=self.loss_cfg.SA_LAYER[src_name].get('FILTER_NEIGHBOR_WITH_ROI', False),
                radius_of_neighbor=self.loss_cfg.SA_LAYER[src_name].get('RADIUS_OF_NEIGHBOR_WITH_ROI', None),
                rois=batch_dict_1.get('rois', None),
                cover_feat_4=cover_feat_4
            )
            pooled_features_2 = self.aggregate_keypoint_features_from_one_source(
                batch_size=batch_size, aggregate_func=self.SA_layers[k],
                xyz=xyz_2.contiguous(), xyz_features=cur_features_2, xyz_bs_idxs=cur_coords_2[:, 0],
                new_xyz=new_xyz_2, new_xyz_batch_cnt=new_xyz_batch_cnt_2,
                filter_neighbors_with_roi=self.loss_cfg.SA_LAYER[src_name].get('FILTER_NEIGHBOR_WITH_ROI', False),
                radius_of_neighbor=self.loss_cfg.SA_LAYER[src_name].get('RADIUS_OF_NEIGHBOR_WITH_ROI', None),
                rois=batch_dict_2.get('rois', None),
                cover_feat_4=cover_feat_4
            )
            point_feature_list_1.append(pooled_features_1)
            point_feature_list_2.append(pooled_features_2)
        point_features_1 = torch.cat(point_feature_list_1, dim=-1)
        point_features_2 = torch.cat(point_feature_list_2, dim=-1)
        save_name = 'point_features_' + tag
        save_name_coords = 'points_coords_' + tag
        batch_dict_1[save_name] = point_features_1.view(-1, point_features_1.shape[-1])
        batch_dict_2[save_name] = point_features_2.view(-1, point_features_2.shape[-1])
        batch_dict_1[save_name_coords] = keypoints_1
        batch_dict_2[save_name_coords] = keypoints_2
        return batch_dict_1, batch_dict_2, keypoints_inds

    @staticmethod
    def aggregate_keypoint_features_from_one_source(
            batch_size, aggregate_func, xyz, xyz_features, xyz_bs_idxs, new_xyz, new_xyz_batch_cnt,
            filter_neighbors_with_roi=False, radius_of_neighbor=None, num_max_points_of_part=200000, rois=None, cover_feat_4=False
    ):
        """Pool source features at the keypoints, optionally restricted to ROIs.

        Args:
            aggregate_func: SA layer used for grouping/pooling
            xyz: (N, 3)
            xyz_features: (N, C) or None
            xyz_bs_idxs: (N) batch index per source point
            new_xyz: (M, 3) keypoints
            new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
            filter_neighbors_with_roi: drop source points far from every ROI
            radius_of_neighbor: float, extra radius around each ROI
            num_max_points_of_part: chunk size for the ROI distance test
            rois: (batch_size, num_rois, 7 + C)
            cover_feat_4: when no features exist, use z as a 1-dim feature
        Returns:
            pooled_features: (M, C_out)
        """
        xyz_batch_cnt = xyz.new_zeros(batch_size).int()
        if filter_neighbors_with_roi:
            point_features = torch.cat((xyz, xyz_features), dim=-1) if xyz_features is not None else xyz
            point_features_list = []
            for bs_idx in range(batch_size):
                bs_mask = (xyz_bs_idxs == bs_idx)
                _, valid_mask = sample_points_with_roi(
                    rois=rois[bs_idx], points=xyz[bs_mask],
                    sample_radius_with_roi=radius_of_neighbor, num_max_points_of_part=num_max_points_of_part,
                )
                point_features_list.append(point_features[bs_mask][valid_mask])
                xyz_batch_cnt[bs_idx] = valid_mask.sum()
            valid_point_features = torch.cat(point_features_list, dim=0)
            xyz = valid_point_features[:, 0:3]
            xyz_features = valid_point_features[:, 3:] if xyz_features is not None else None
        else:
            for bs_idx in range(batch_size):
                xyz_batch_cnt[bs_idx] = (xyz_bs_idxs == bs_idx).sum()
        # z-axis as the 4th-dimension feature when no features are available
        if xyz_features is None and cover_feat_4:
            xyz_features = xyz[:, 2].view(-1, 1)
        pooled_points, pooled_features = aggregate_func(
            xyz=xyz.contiguous(),
            xyz_batch_cnt=xyz_batch_cnt,
            new_xyz=new_xyz,
            new_xyz_batch_cnt=new_xyz_batch_cnt,
            features=xyz_features.contiguous() if xyz_features is not None else None,
        )
        return pooled_features

    # copy from voxel set abstraction module
    def interpolate_from_bev_features(self, keypoints, bev_features, batch_size, bev_stride):
        """Bilinearly sample per-keypoint features from the BEV map.

        Args:
            keypoints: (N1 + N2 + ..., 4) [bs_idx, x, y, z]
            bev_features: (B, C, H, W)
            batch_size:
            bev_stride:
        Returns:
            point_bev_features: (N1 + N2 + ..., C)
        """
        x_idxs = (keypoints[:, 1] - self.point_cloud_range[0]) / self.voxel_size[0]
        y_idxs = (keypoints[:, 2] - self.point_cloud_range[1]) / self.voxel_size[1]
        x_idxs = x_idxs / bev_stride
        y_idxs = y_idxs / bev_stride
        point_bev_features_list = []
        for k in range(batch_size):
            bs_mask = (keypoints[:, 0] == k)
            cur_x_idxs = x_idxs[bs_mask]
            cur_y_idxs = y_idxs[bs_mask]
            cur_bev_features = bev_features[k].permute(1, 2, 0)  # (H, W, C)
            point_bev_features = bilinear_interpolate_torch(cur_bev_features, cur_x_idxs, cur_y_idxs)
            point_bev_features_list.append(point_bev_features)
        point_bev_features = torch.cat(point_bev_features_list, dim=0)  # (N1 + N2 + ..., C)
        return point_bev_features

    def _get_source_points(self, batch_dict_1, batch_dict_2):
        """Shared helper: source points for both views and batch indices.

        Assumes both views contain the same number of source points per sample
        (they are two augmentations of the same clouds).
        """
        if self.loss_cfg.POINT_SOURCE == 'raw_points':
            src_points_1 = batch_dict_1['points'][:, 1:4]
            src_points_2 = batch_dict_2['points'][:, 1:4]
            batch_indices = batch_dict_1['points'][:, 0].long()
        elif self.loss_cfg.POINT_SOURCE == 'voxel_centers':
            src_points_1 = common_utils.get_voxel_centers(
                batch_dict_1['voxel_coords'][:, 1:4],
                downsample_times=1,
                voxel_size=self.voxel_size,
                point_cloud_range=self.point_cloud_range
            )
            # BUG fix: view 2 previously passed the full (N, 4) voxel_coords
            # (including the batch-index column), inconsistent with view 1
            src_points_2 = common_utils.get_voxel_centers(
                batch_dict_2['voxel_coords'][:, 1:4],
                downsample_times=1,
                voxel_size=self.voxel_size,
                point_cloud_range=self.point_cloud_range
            )
            batch_indices = batch_dict_1['voxel_coords'][:, 0].long()
        else:
            raise NotImplementedError
        return src_points_1, src_points_2, batch_indices

    @staticmethod
    def _prepend_batch_idx(keypoints, batch_size):
        """Convert (B, M, 3) keypoints to (B*M, 4) [bs_idx, x, y, z]."""
        batch_idx = torch.arange(batch_size, device=keypoints.device).view(-1, 1).repeat(1, keypoints.shape[1]).view(-1, 1)
        return torch.cat((batch_idx.float(), keypoints.view(-1, 3)), dim=1)

    def get_positive_sampled_points(self, batch_dict_1, batch_dict_2):
        """FPS-sample the SAME point indices from both views (positive pairs).

        Returns:
            keypoints_1/keypoints_2: (N1 + N2 + ..., 4) [bs_idx, x, y, z]
            keypoints_inds: sampled indices into each per-sample point cloud
        """
        batch_size = batch_dict_1['batch_size']
        src_points_1, src_points_2, batch_indices = self._get_source_points(batch_dict_1, batch_dict_2)
        keypoints_list_1 = []
        keypoints_list_2 = []
        keypoints_inds_list = []
        for bs_idx in range(batch_size):
            bs_mask = (batch_indices == bs_idx)
            sampled_points_1 = src_points_1[bs_mask].unsqueeze(dim=0)  # (1, N, 3)
            sampled_points_2 = src_points_2[bs_mask].unsqueeze(dim=0)
            # FPS on view 1; the same indices are applied to view 2 so the
            # two keypoint sets correspond point-for-point
            cur_pt_idxs = pointnet2_stack_utils.farthest_point_sample(
                sampled_points_1[:, :, 0:3].contiguous(), self.loss_cfg.NUM_KEYPOINTS
            ).long()
            if sampled_points_1.shape[1] < self.loss_cfg.NUM_KEYPOINTS:
                # pad by repeating valid indices
                times = int(self.loss_cfg.NUM_KEYPOINTS / sampled_points_1.shape[1]) + 1
                non_empty = cur_pt_idxs[0, :sampled_points_1.shape[1]]
                # BUG fix: was self.model_cfg.NUM_KEYPOINTS (attribute never set)
                cur_pt_idxs[0] = non_empty.repeat(times)[:self.loss_cfg.NUM_KEYPOINTS]
            keypoints_1 = sampled_points_1[0][cur_pt_idxs[0]].unsqueeze(dim=0)
            keypoints_2 = sampled_points_2[0][cur_pt_idxs[0]].unsqueeze(dim=0)
            keypoints_list_1.append(keypoints_1)
            keypoints_list_2.append(keypoints_2)
            keypoints_inds_list.append(cur_pt_idxs[0])
        keypoints_1 = torch.cat(keypoints_list_1, dim=0)  # (B, M, 3)
        keypoints_2 = torch.cat(keypoints_list_2, dim=0)
        keypoints_inds = torch.cat(keypoints_inds_list, dim=0)
        if len(keypoints_1.shape) == 3:
            keypoints_1 = self._prepend_batch_idx(keypoints_1, batch_size)
            keypoints_2 = self._prepend_batch_idx(keypoints_2, batch_size)
        return keypoints_1, keypoints_2, keypoints_inds

    def get_negative_sampled_points(self, batch_dict_1, batch_dict_2, method='random'):
        """Sample INDEPENDENT point indices from each view (negative candidates).

        Returns:
            keypoints_1/keypoints_2: (N1 + N2 + ..., 4) [bs_idx, x, y, z]
            keypoints_inds_1/keypoints_inds_2: per-view sampled indices
        """
        batch_size = batch_dict_1['batch_size']
        src_points_1, src_points_2, batch_indices = self._get_source_points(batch_dict_1, batch_dict_2)
        keypoints_list_1 = []
        keypoints_list_2 = []
        keypoints_inds_list_1 = []
        keypoints_inds_list_2 = []
        for bs_idx in range(batch_size):
            bs_mask = (batch_indices == bs_idx)
            sampled_points_1 = src_points_1[bs_mask].unsqueeze(dim=0)  # (1, N, 3)
            sampled_points_2 = src_points_2[bs_mask].unsqueeze(dim=0)
            if method == 'fps':
                cur_pt_idxs_1 = pointnet2_stack_utils.farthest_point_sample(
                    sampled_points_1[:, :, 0:3].contiguous(), self.loss_cfg.NUM_NEGATIVE_KEYPOINTS
                ).long()
                cur_pt_idxs_2 = pointnet2_stack_utils.farthest_point_sample(
                    sampled_points_2[:, :, 0:3].contiguous(), self.loss_cfg.NUM_NEGATIVE_KEYPOINTS
                ).long()
            elif method == 'random':
                # NOTE(review): np.random.choice(..., replace=False) raises when
                # num_points < NUM_NEGATIVE_KEYPOINTS, so the padding branches
                # below are only reachable via method='fps' -- confirm intent
                num_points = sampled_points_1.shape[1]
                cur_pt_idxs_1 = torch.from_numpy(np.random.choice(num_points, self.loss_cfg.NUM_NEGATIVE_KEYPOINTS, replace=False)).long()
                cur_pt_idxs_2 = torch.from_numpy(np.random.choice(num_points, self.loss_cfg.NUM_NEGATIVE_KEYPOINTS, replace=False)).long()
                cur_pt_idxs_1 = cur_pt_idxs_1.view(1, -1)
                cur_pt_idxs_2 = cur_pt_idxs_2.view(1, -1)
            if sampled_points_1.shape[1] < self.loss_cfg.NUM_NEGATIVE_KEYPOINTS:
                times = int(self.loss_cfg.NUM_NEGATIVE_KEYPOINTS / sampled_points_1.shape[1]) + 1
                non_empty = cur_pt_idxs_1[0, :sampled_points_1.shape[1]]
                # BUG fix: was self.model_cfg.NUM_NEGATIVE_KEYPOINTS (attribute never set)
                cur_pt_idxs_1[0] = non_empty.repeat(times)[:self.loss_cfg.NUM_NEGATIVE_KEYPOINTS]
            if sampled_points_2.shape[1] < self.loss_cfg.NUM_NEGATIVE_KEYPOINTS:
                times = int(self.loss_cfg.NUM_NEGATIVE_KEYPOINTS / sampled_points_2.shape[1]) + 1
                non_empty = cur_pt_idxs_2[0, :sampled_points_2.shape[1]]
                cur_pt_idxs_2[0] = non_empty.repeat(times)[:self.loss_cfg.NUM_NEGATIVE_KEYPOINTS]
            keypoints_1 = sampled_points_1[0][cur_pt_idxs_1[0]].unsqueeze(dim=0)
            keypoints_2 = sampled_points_2[0][cur_pt_idxs_2[0]].unsqueeze(dim=0)
            keypoints_inds_list_1.append(cur_pt_idxs_1[0])
            keypoints_inds_list_2.append(cur_pt_idxs_2[0])
            keypoints_list_1.append(keypoints_1)
            keypoints_list_2.append(keypoints_2)
        keypoints_1 = torch.cat(keypoints_list_1, dim=0)  # (B, M, 3)
        keypoints_2 = torch.cat(keypoints_list_2, dim=0)
        keypoints_inds_1 = torch.cat(keypoints_inds_list_1, dim=0)
        keypoints_inds_2 = torch.cat(keypoints_inds_list_2, dim=0)
        if len(keypoints_1.shape) == 3:
            keypoints_1 = self._prepend_batch_idx(keypoints_1, batch_size)
            keypoints_2 = self._prepend_batch_idx(keypoints_2, batch_size)
        return keypoints_1, keypoints_2, keypoints_inds_1, keypoints_inds_2
def build_feature_aggregation_module(config):
    """Instantiate the local feature-aggregation layer named in ``config``.

    Only 'StackSAModuleMSG' is currently supported; it maps onto the in-file
    StackPointFeature grouping/pooling layer.
    """
    name = config.get('NAME', 'StackSAModuleMSG')
    if name != 'StackSAModuleMSG':
        raise NotImplementedError
    return StackPointFeature(
        radii=config.POOL_RADIUS, nsamples=config.NSAMPLE, pool_method='max_pool',
    )
class StackPointFeature(nn.Module):
    """Multi-radius ball-query grouping with max/avg pooling and no MLP."""

    def __init__(self, *, radii, nsamples, pool_method='max_pool'):
        """
        Args:
            radii: list of float, grouping radius for each scale
            nsamples: list of int, ball-query sample count for each scale
            pool_method: 'max_pool' or 'avg_pool'
        """
        super().__init__()
        assert len(radii) == len(nsamples)
        self.groupers = nn.ModuleList(
            pointnet2_stack_utils.QueryAndGroup(radius, nsample, use_xyz=False)
            for radius, nsample in zip(radii, nsamples)
        )
        self.pool_method = pool_method

    def forward(self, xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt, features=None, empty_voxel_set_zeros=True):
        """
        :param xyz: (N1 + N2 ..., 3) source point coordinates
        :param xyz_batch_cnt: (batch_size), [N1, N2, ...]
        :param new_xyz: (M1 + M2 ..., 3) query point coordinates
        :param new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
        :param features: (N1 + N2 ..., C) source point descriptors
        :return:
            new_xyz: (M1 + M2 ..., 3) query coordinates (unchanged)
            new_features: (M1 + M2 ..., sum of per-scale C) pooled descriptors
        """
        pooled_per_scale = []
        for grouper in self.groupers:
            grouped, _ = grouper(
                xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt, features
            )  # (M1 + M2, C, nsample)
            grouped = grouped.permute(1, 0, 2).unsqueeze(dim=0)  # (1, C, M1 + M2 ..., nsample)
            # pool over the nsample neighbours of each query point
            if self.pool_method == 'max_pool':
                pooled = F.max_pool2d(grouped, kernel_size=[1, grouped.size(3)]).squeeze(dim=-1)
            elif self.pool_method == 'avg_pool':
                pooled = F.avg_pool2d(grouped, kernel_size=[1, grouped.size(3)]).squeeze(dim=-1)
            else:
                raise NotImplementedError
            pooled_per_scale.append(pooled.squeeze(dim=0).permute(1, 0))  # (M1 + M2 ..., C)
        new_features = torch.cat(pooled_per_scale, dim=1)  # concat across scales
        return new_xyz, new_features
| 26,396
| 45.555556
| 152
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/backbones_3d/IASSD_backbone.py
|
import torch
import torch.nn as nn
from ...ops.pointnet2.pointnet2_batch import pointnet2_modules
import os
class IASSD_Backbone(nn.Module):
    """ Backbone for IA-SSD: a stack of SA (set-abstraction) layers plus one
    Vote layer, configured entirely from parallel lists in SA_CONFIG."""
    def __init__(self, model_cfg, num_class, input_channels, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        self.num_class = num_class
        self.SA_modules = nn.ModuleList()
        # first 3 input channels are xyz; the remainder are per-point features
        channel_in = input_channels - 3
        channel_out_list = [channel_in]
        self.num_points_each_layer = []
        sa_config = self.model_cfg.SA_CONFIG
        # per-layer parallel configuration lists
        self.layer_types = sa_config.LAYER_TYPE
        self.ctr_idx_list = sa_config.CTR_INDEX
        self.layer_inputs = sa_config.LAYER_INPUT
        self.aggregation_mlps = sa_config.get('AGGREGATION_MLPS', None)
        self.confidence_mlps = sa_config.get('CONFIDENCE_MLPS', None)
        self.max_translate_range = sa_config.get('MAX_TRANSLATE_RANGE', None)
        for k in range(sa_config.NSAMPLE_LIST.__len__()):
            # a layer input may be a list of earlier layers; use the last one's channels
            if isinstance(self.layer_inputs[k], list):  ###
                channel_in = channel_out_list[self.layer_inputs[k][-1]]
            else:
                channel_in = channel_out_list[self.layer_inputs[k]]
            if self.layer_types[k] == 'SA_Layer':
                mlps = sa_config.MLPS[k].copy()
                channel_out = 0
                # prepend the input width to each per-radius MLP; output width is
                # the sum over radii (multi-scale grouping)
                for idx in range(mlps.__len__()):
                    mlps[idx] = [channel_in] + mlps[idx]
                    channel_out += mlps[idx][-1]
                # optional aggregation MLP overrides the output width
                if self.aggregation_mlps and self.aggregation_mlps[k]:
                    aggregation_mlp = self.aggregation_mlps[k].copy()
                    if aggregation_mlp.__len__() == 0:
                        aggregation_mlp = None
                    else:
                        channel_out = aggregation_mlp[-1]
                else:
                    aggregation_mlp = None
                # optional per-layer classification head (instance-aware sampling)
                if self.confidence_mlps and self.confidence_mlps[k]:
                    confidence_mlp = self.confidence_mlps[k].copy()
                    if confidence_mlp.__len__() == 0:
                        confidence_mlp = None
                else:
                    confidence_mlp = None
                self.SA_modules.append(
                    pointnet2_modules.PointnetSAModuleMSG_WithSampling(
                        npoint_list=sa_config.NPOINT_LIST[k],
                        sample_range_list=sa_config.SAMPLE_RANGE_LIST[k],
                        sample_type_list=sa_config.SAMPLE_METHOD_LIST[k],
                        radii=sa_config.RADIUS_LIST[k],
                        nsamples=sa_config.NSAMPLE_LIST[k],
                        mlps=mlps,
                        use_xyz=True,
                        dilated_group=sa_config.DILATED_GROUP[k],
                        aggregation_mlp=aggregation_mlp,
                        confidence_mlp=confidence_mlp,
                        num_class = self.num_class
                    )
                )
            elif self.layer_types[k] == 'Vote_Layer':
                # Vote layer keeps the previous channel_out; only the centers move
                self.SA_modules.append(pointnet2_modules.Vote_layer(mlp_list=sa_config.MLPS[k],
                                                                    pre_channel=channel_out_list[self.layer_inputs[k]],
                                                                    max_translate_range=self.max_translate_range
                                                                    )
                                       )
            channel_out_list.append(channel_out)
        self.num_point_features = channel_out
    def break_up_pc(self, pc):
        # pc: (N, 1 + 3 + C) -> batch index, xyz, optional extra features
        batch_idx = pc[:, 0]
        xyz = pc[:, 1:4].contiguous()
        features = (pc[:, 4:].contiguous() if pc.size(-1) > 4 else None)
        return batch_idx, xyz, features
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                vfe_features: (num_voxels, C)
                points: (num_points, 4 + C), [batch_idx, x, y, z, ...]
        Returns:
            batch_dict:
                encoded_spconv_tensor: sparse tensor
                point_features: (N, C)
        """
        batch_size = batch_dict['batch_size']
        points = batch_dict['points']
        batch_idx, xyz, features = self.break_up_pc(points)
        xyz_batch_cnt = xyz.new_zeros(batch_size).int()
        for bs_idx in range(batch_size):
            xyz_batch_cnt[bs_idx] = (batch_idx == bs_idx).sum()
        # every sample must contribute the same point count so tensors can be batched
        assert xyz_batch_cnt.min() == xyz_batch_cnt.max()
        xyz = xyz.view(batch_size, -1, 3)
        # features become channel-first: (B, C, N)
        features = features.view(batch_size, -1, features.shape[-1]).permute(0, 2, 1).contiguous() if features is not None else None  ###
        encoder_xyz, encoder_features, sa_ins_preds = [xyz], [features], []
        encoder_coords = [torch.cat([batch_idx.view(batch_size, -1, 1), xyz], dim=-1)]
        li_cls_pred = None
        for i in range(len(self.SA_modules)):
            # each layer consumes the output of the layer named in LAYER_INPUT
            xyz_input = encoder_xyz[self.layer_inputs[i]]
            feature_input = encoder_features[self.layer_inputs[i]]
            if self.layer_types[i] == 'SA_Layer':
                # optional externally supplied query centers (ctr_idx != -1)
                ctr_xyz = encoder_xyz[self.ctr_idx_list[i]] if self.ctr_idx_list[i] != -1 else None
                li_xyz, li_features, li_cls_pred = self.SA_modules[i](xyz_input, feature_input, li_cls_pred, ctr_xyz=ctr_xyz)
            elif self.layer_types[i] == 'Vote_Layer': #i=4
                # vote layer shifts selected points toward object centers
                li_xyz, li_features, xyz_select, ctr_offsets = self.SA_modules[i](xyz_input, feature_input)
                centers = li_xyz
                centers_origin = xyz_select
                center_origin_batch_idx = batch_idx.view(batch_size, -1)[:, :centers_origin.shape[1]]
                encoder_coords.append(torch.cat([center_origin_batch_idx[..., None].float(),centers_origin.view(batch_size, -1, 3)],dim =-1))
            encoder_xyz.append(li_xyz)
            li_batch_idx = batch_idx.view(batch_size, -1)[:, :li_xyz.shape[1]]
            encoder_coords.append(torch.cat([li_batch_idx[..., None].float(),li_xyz.view(batch_size, -1, 3)],dim =-1))
            encoder_features.append(li_features)
            if li_cls_pred is not None:
                li_cls_batch_idx = batch_idx.view(batch_size, -1)[:, :li_cls_pred.shape[1]]
                sa_ins_preds.append(torch.cat([li_cls_batch_idx[..., None].float(),li_cls_pred.view(batch_size, -1, li_cls_pred.shape[-1])],dim =-1))
            else:
                sa_ins_preds.append([])
        # flatten per-center outputs back to (B*M, ...) with batch indices
        ctr_batch_idx = batch_idx.view(batch_size, -1)[:, :li_xyz.shape[1]]
        ctr_batch_idx = ctr_batch_idx.contiguous().view(-1)
        batch_dict['ctr_offsets'] = torch.cat((ctr_batch_idx[:, None].float(), ctr_offsets.contiguous().view(-1, 3)), dim=1)
        batch_dict['centers'] = torch.cat((ctr_batch_idx[:, None].float(), centers.contiguous().view(-1, 3)), dim=1)
        batch_dict['centers_origin'] = torch.cat((ctr_batch_idx[:, None].float(), centers_origin.contiguous().view(-1, 3)), dim=1)
        center_features = encoder_features[-1].permute(0, 2, 1).contiguous().view(-1, encoder_features[-1].shape[1])
        batch_dict['centers_features'] = center_features
        batch_dict['ctr_batch_idx'] = ctr_batch_idx
        batch_dict['encoder_xyz'] = encoder_xyz
        batch_dict['encoder_coords'] = encoder_coords
        batch_dict['sa_ins_preds'] = sa_ins_preds
        batch_dict['encoder_features'] = encoder_features
        ###save per frame (debug utility, eval mode only)
        if self.model_cfg.SA_CONFIG.get('SAVE_SAMPLE_LIST',False) and not self.training:
            import numpy as np
            # NOTE(review): a hard-coded user path loaded via np.load and then
            # used with the '/' path operator -- presumably a pickled Path
            # object; confirm before enabling SAVE_SAMPLE_LIST
            result_dir = np.load('/home/yifan/tmp.npy', allow_pickle=True)
            for i in range(batch_size) :
                # i=0
                # point_saved_path = '/home/yifan/tmp'
                point_saved_path = result_dir / 'sample_list_save'
                os.makedirs(point_saved_path, exist_ok=True)
                idx = batch_dict['frame_id'][i]
                xyz_list = []
                for sa_xyz in encoder_xyz:
                    xyz_list.append(sa_xyz[i].cpu().numpy())
                if '/' in idx: # Kitti_tracking
                    sample_xyz = point_saved_path / idx.split('/')[0] / ('sample_list_' + ('%s' % idx.split('/')[1]))
                    os.makedirs(point_saved_path / idx.split('/')[0], exist_ok=True)
                else:
                    sample_xyz = point_saved_path / ('sample_list_' + ('%s' % idx))
                np.save(str(sample_xyz), xyz_list)
                # np.save(str(new_file), point_new.detach().cpu().numpy())
        return batch_dict
| 8,693
| 45.491979
| 150
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/backbones_3d/spconv_unet.py
|
from functools import partial
import torch
import torch.nn as nn
from ...utils.spconv_utils import replace_feature, spconv
from ...utils import common_utils
from .spconv_backbone import post_act_block
class SparseBasicBlock(spconv.SparseModule):
    """Residual block of two submanifold sparse 3x3x3 convolutions.

    Layout mirrors torchvision's BasicBlock, but on spconv sparse tensors:
    conv1 -> bn1 -> relu -> conv2 -> bn2 -> (+identity) -> relu.
    """
    expansion = 1  # output channels == `planes` (no bottleneck expansion)

    def __init__(self, inplanes, planes, stride=1, downsample=None, indice_key=None, norm_fn=None):
        """
        Args:
            inplanes: input feature channels
            planes: output feature channels
            stride: stride of the first conv (submanifold convs are used with 1 here)
            downsample: optional module producing the identity branch from x
            indice_key: spconv key so both convs reuse the same indice pairs
            norm_fn: factory that builds a 1d normalization layer
        """
        super(SparseBasicBlock, self).__init__()
        self.conv1 = spconv.SubMConv3d(
            inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False, indice_key=indice_key
        )
        self.bn1 = norm_fn(planes)
        self.relu = nn.ReLU()
        self.conv2 = spconv.SubMConv3d(
            planes, planes, kernel_size=3, stride=1, padding=1, bias=False, indice_key=indice_key
        )
        self.bn2 = norm_fn(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # The skip branch is kept as a dense feature tensor so it can be added
        # to `out.features` below.
        identity = x.features

        assert x.features.dim() == 2, 'x.features.dim()=%d' % x.features.dim()

        out = self.conv1(x)
        out = replace_feature(out, self.bn1(out.features))
        out = replace_feature(out, self.relu(out.features))

        out = self.conv2(out)
        out = replace_feature(out, self.bn2(out.features))

        if self.downsample is not None:
            # Bugfix: `downsample` returns a sparse tensor; the original code
            # added the whole SparseConvTensor to `out.features`, which would
            # fail at runtime. Use its dense feature tensor instead.
            identity = self.downsample(x).features

        out = replace_feature(out, out.features + identity)
        out = replace_feature(out, self.relu(out.features))

        return out
class UNetV2(nn.Module):
    """
    Sparse Convolution based UNet for point-wise feature learning.
    Reference Paper: https://arxiv.org/abs/1907.03670 (Shaoshuai Shi, et. al)
    From Points to Parts: 3D Object Detection from Point Cloud with Part-aware and Part-aggregation Network
    """
    def __init__(self, model_cfg, input_channels, grid_size, voxel_size, point_cloud_range, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        # Sparse tensor spatial shape is (z, y, x); z is padded by 1 —
        # presumably to keep the strided encoder conv output depths aligned
        # (TODO confirm against the voxelizer config).
        self.sparse_shape = grid_size[::-1] + [1, 0, 0]
        self.voxel_size = voxel_size
        self.point_cloud_range = point_cloud_range
        norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01)
        # Stem: lift raw voxel features to 16 channels at full resolution.
        self.conv_input = spconv.SparseSequential(
            spconv.SubMConv3d(input_channels, 16, 3, padding=1, bias=False, indice_key='subm1'),
            norm_fn(16),
            nn.ReLU(),
        )
        block = post_act_block
        # ---- Encoder: conv1 keeps resolution; conv2..conv4 each downsample by 2 ----
        self.conv1 = spconv.SparseSequential(
            block(16, 16, 3, norm_fn=norm_fn, padding=1, indice_key='subm1'),
        )
        self.conv2 = spconv.SparseSequential(
            # [1600, 1408, 41] <- [800, 704, 21]
            block(16, 32, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'),
            block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'),
            block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'),
        )
        self.conv3 = spconv.SparseSequential(
            # [800, 704, 21] <- [400, 352, 11]
            block(32, 64, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'),
        )
        self.conv4 = spconv.SparseSequential(
            # [400, 352, 11] <- [200, 176, 5]
            block(64, 64, 3, norm_fn=norm_fn, stride=2, padding=(0, 1, 1), indice_key='spconv4', conv_type='spconv'),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'),
        )
        # Optional extra z-compression producing the BEV-ready encoded tensor
        # consumed by the detection head.
        if self.model_cfg.get('RETURN_ENCODED_TENSOR', True):
            last_pad = self.model_cfg.get('last_pad', 0)
            self.conv_out = spconv.SparseSequential(
                # [200, 150, 5] -> [200, 150, 2]
                spconv.SparseConv3d(64, 128, (3, 1, 1), stride=(2, 1, 1), padding=last_pad,
                                    bias=False, indice_key='spconv_down2'),
                norm_fn(128),
                nn.ReLU(),
            )
        else:
            self.conv_out = None
        # decoder
        # Each level has: conv_up_tX (residual transform of the encoder skip),
        # conv_up_mX (merge of concatenated features), inv_convX (upsampling).
        # [400, 352, 11] <- [200, 176, 5]
        self.conv_up_t4 = SparseBasicBlock(64, 64, indice_key='subm4', norm_fn=norm_fn)
        self.conv_up_m4 = block(128, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4')
        self.inv_conv4 = block(64, 64, 3, norm_fn=norm_fn, indice_key='spconv4', conv_type='inverseconv')
        # [800, 704, 21] <- [400, 352, 11]
        self.conv_up_t3 = SparseBasicBlock(64, 64, indice_key='subm3', norm_fn=norm_fn)
        self.conv_up_m3 = block(128, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3')
        self.inv_conv3 = block(64, 32, 3, norm_fn=norm_fn, indice_key='spconv3', conv_type='inverseconv')
        # [1600, 1408, 41] <- [800, 704, 21]
        self.conv_up_t2 = SparseBasicBlock(32, 32, indice_key='subm2', norm_fn=norm_fn)
        # NOTE(review): unlike conv_up_m3/m4 this merge conv omits padding=1 —
        # confirm the asymmetry is intentional.
        self.conv_up_m2 = block(64, 32, 3, norm_fn=norm_fn, indice_key='subm2')
        self.inv_conv2 = block(32, 16, 3, norm_fn=norm_fn, indice_key='spconv2', conv_type='inverseconv')
        # [1600, 1408, 41] <- [1600, 1408, 41]
        self.conv_up_t1 = SparseBasicBlock(16, 16, indice_key='subm1', norm_fn=norm_fn)
        self.conv_up_m1 = block(32, 16, 3, norm_fn=norm_fn, indice_key='subm1')
        # Final full-resolution refinement (replaces an inverse conv at level 1).
        self.conv5 = spconv.SparseSequential(
            block(16, 16, 3, norm_fn=norm_fn, padding=1, indice_key='subm1')
        )
        # Per-point output width of the decoder (channels of x_up1).
        self.num_point_features = 16
    def UR_block_forward(self, x_lateral, x_bottom, conv_t, conv_m, conv_inv):
        """One decoder ("UR") step: fuse the encoder skip tensor with the decoder
        tensor from the level below, then upsample to the next finer level.

        Args:
            x_lateral: encoder feature at this resolution (skip connection)
            x_bottom: decoder feature of matching resolution from the coarser level
            conv_t: residual block applied to the lateral tensor
            conv_m: conv merging the concatenated [bottom, lateral] channels
            conv_inv: inverse conv performing the upsampling
        """
        x_trans = conv_t(x_lateral)
        x = x_trans
        # Channel-wise concat of decoder and transformed skip features.
        x = replace_feature(x, torch.cat((x_bottom.features, x_trans.features), dim=1))
        x_m = conv_m(x)
        # Sum-pool the concatenated channels down to x_m's width so the two
        # branches can be added residually.
        x = self.channel_reduction(x, x_m.features.shape[1])
        x = replace_feature(x, x_m.features + x.features)
        x = conv_inv(x)
        return x
    @staticmethod
    def channel_reduction(x, out_channels):
        """
        Args:
            x: x.features (N, C1)
            out_channels: C2

        Returns:
            x with its C1 channels summed group-wise down to C2
            (requires C1 to be a multiple of C2).
        """
        features = x.features
        n, in_channels = features.shape
        assert (in_channels % out_channels == 0) and (in_channels >= out_channels)
        x = replace_feature(x, features.view(n, out_channels, -1).sum(dim=2))
        return x
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                vfe_features: (num_voxels, C)
                voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx]
        Returns:
            batch_dict:
                encoded_spconv_tensor: sparse tensor
                point_features: (N, C)
        """
        voxel_features, voxel_coords = batch_dict['voxel_features'], batch_dict['voxel_coords']
        batch_size = batch_dict['batch_size']
        input_sp_tensor = spconv.SparseConvTensor(
            features=voxel_features,
            indices=voxel_coords.int(),
            spatial_shape=self.sparse_shape,
            batch_size=batch_size
        )
        # Encoder pass (x_conv1..x_conv4 are kept as decoder skip inputs).
        x = self.conv_input(input_sp_tensor)
        x_conv1 = self.conv1(x)
        x_conv2 = self.conv2(x_conv1)
        x_conv3 = self.conv3(x_conv2)
        x_conv4 = self.conv4(x_conv3)
        if self.conv_out is not None:
            # for detection head
            # [200, 176, 5] -> [200, 176, 2]
            out = self.conv_out(x_conv4)
            batch_dict['encoded_spconv_tensor'] = out
            batch_dict['encoded_spconv_tensor_stride'] = 8
        # for segmentation head
        # At the deepest level the decoder has no coarser input yet, so
        # x_conv4 serves as both the lateral and the bottom tensor.
        # [400, 352, 11] <- [200, 176, 5]
        x_up4 = self.UR_block_forward(x_conv4, x_conv4, self.conv_up_t4, self.conv_up_m4, self.inv_conv4)
        # [800, 704, 21] <- [400, 352, 11]
        x_up3 = self.UR_block_forward(x_conv3, x_up4, self.conv_up_t3, self.conv_up_m3, self.inv_conv3)
        # [1600, 1408, 41] <- [800, 704, 21]
        x_up2 = self.UR_block_forward(x_conv2, x_up3, self.conv_up_t2, self.conv_up_m2, self.inv_conv2)
        # [1600, 1408, 41] <- [1600, 1408, 41]
        x_up1 = self.UR_block_forward(x_conv1, x_up2, self.conv_up_t1, self.conv_up_m1, self.conv5)
        batch_dict['point_features'] = x_up1.features
        # Convert the surviving voxel indices back to metric centers; prepend
        # the batch index column so downstream heads can split per sample.
        point_coords = common_utils.get_voxel_centers(
            x_up1.indices[:, 1:], downsample_times=1, voxel_size=self.voxel_size,
            point_cloud_range=self.point_cloud_range
        )
        batch_dict['point_coords'] = torch.cat((x_up1.indices[:, 0:1].float(), point_coords), dim=1)
        return batch_dict
| 8,602
| 39.389671
| 117
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/backbones_3d/spconv_backbone_unibn.py
|
from functools import partial
import torch.nn as nn
from ...utils.spconv_utils import replace_feature, spconv
from ...utils import uni3d_norm_2_in
def post_act_block(in_channels, out_channels, kernel_size, indice_key=None, stride=1, padding=0,
                   conv_type='subm', norm_fn=None):
    """Build one sparse convolution block.

    Args:
        in_channels / out_channels / kernel_size: conv geometry
        indice_key: spconv key for indice-pair reuse
        stride, padding: used by the 'spconv' variant only
        conv_type: 'subm' | 'spconv' | 'inverseconv'
        norm_fn: optional normalization-layer factory. When omitted (the
            historical behaviour of this module) the block is a bare conv and
            normalization/activation are applied outside, because UniNorm1d
            needs the voxel indices at call time. When given, a
            conv -> norm -> ReLU sequential is returned.

    Raises:
        NotImplementedError: on an unknown `conv_type`.
    """
    if conv_type == 'subm':
        conv = spconv.SubMConv3d(in_channels, out_channels, kernel_size, bias=False, indice_key=indice_key)
    elif conv_type == 'spconv':
        conv = spconv.SparseConv3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding,
                                   bias=False, indice_key=indice_key)
    elif conv_type == 'inverseconv':
        conv = spconv.SparseInverseConv3d(in_channels, out_channels, kernel_size, indice_key=indice_key, bias=False)
    else:
        raise NotImplementedError

    if norm_fn is None:
        # Original behaviour for this module: convolution only.
        return spconv.SparseSequential(conv)
    # Bugfix: VoxelResBackBone8x_UniBN in this file calls this helper with
    # `norm_fn=...`, which the old signature rejected with a TypeError.
    # NOTE(review): UniNorm1d's forward takes (features, indices) — confirm it
    # is compatible with SparseSequential before relying on this path.
    return spconv.SparseSequential(
        conv,
        norm_fn(out_channels),
        nn.ReLU(),
    )
class SparseBasicBlock(spconv.SparseModule):
    """Two-conv residual unit on sparse tensors: (conv-bn-relu, conv-bn) + skip."""

    expansion = 1  # the block does not widen its output relative to `planes`

    def __init__(self, inplanes, planes, stride=1, norm_fn=None, downsample=None, indice_key=None):
        super(SparseBasicBlock, self).__init__()
        assert norm_fn is not None
        bias = norm_fn is not None

        def make_conv(c_in, c_out):
            # Both convs share geometry and the same indice key.
            return spconv.SubMConv3d(
                c_in, c_out, kernel_size=3, stride=stride, padding=1, bias=bias, indice_key=indice_key
            )

        self.conv1 = make_conv(inplanes, planes)
        self.bn1 = norm_fn(planes)
        self.relu = nn.ReLU()
        self.conv2 = make_conv(planes, planes)
        self.bn2 = norm_fn(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Identity branch, optionally remapped by `downsample`.
        shortcut = x

        y = self.conv1(x)
        y = replace_feature(y, self.relu(self.bn1(y.features)))
        y = self.conv2(y)
        y = replace_feature(y, self.bn2(y.features))

        if self.downsample is not None:
            shortcut = self.downsample(x)

        y = replace_feature(y, y.features + shortcut.features)
        return replace_feature(y, self.relu(y.features))
class VoxelBackBone8x_UniBN(nn.Module):
    """8x-downsampling sparse-conv encoder with dataset-aware UniNorm layers.

    Unlike the standard VoxelBackBone8x, normalization cannot live inside the
    SparseSequential blocks here: UniNorm1d consumes the voxel indices at call
    time, so each norm/ReLU pair is kept as a separate module and applied
    explicitly in forward().
    """

    def __init__(self, model_cfg, input_channels, grid_size, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        # db_source selects which dataset's statistics UniNorm should use.
        norm_fn = partial(uni3d_norm_2_in.UniNorm1d, dataset_from_flag=int(self.model_cfg.db_source), eps=1e-3, momentum=0.01, voxel_coord=True)
        # Sparse tensor spatial shape is (z, y, x); z is padded by 1.
        self.sparse_shape = grid_size[::-1] + [1, 0, 0]
        self.conv_input = spconv.SparseSequential(
            spconv.SubMConv3d(input_channels, 16, 3, padding=1, bias=False, indice_key='subm1'),
        )
        self.bn_input = norm_fn(16)
        self.relu_input = nn.ReLU()
        block = post_act_block
        #----------Block_1---------#
        self.conv1 = spconv.SparseSequential(
            block(16, 16, 3, padding=1, indice_key='subm1'),
        )
        self.conv1_bn_1 = norm_fn(16)
        self.conv1_relu_1 = nn.ReLU()
        #----------Block_2---------#
        self.conv2_1 = spconv.SparseSequential(
            # [1600, 1408, 41] <- [800, 704, 21]
            block(16, 32, 3, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'),
        )
        self.conv2_bn_1 = norm_fn(32)
        self.conv2_relu_1 = nn.ReLU()
        self.conv2_2 = spconv.SparseSequential(
            # [1600, 1408, 41] <- [800, 704, 21]
            block(32, 32, 3, padding=1, indice_key='subm2'),
        )
        self.conv2_bn_2 = norm_fn(32)
        self.conv2_relu_2 = nn.ReLU()
        self.conv2_3 = spconv.SparseSequential(
            # [1600, 1408, 41] <- [800, 704, 21]
            block(32, 32, 3, padding=1, indice_key='subm2'),
        )
        self.conv2_bn_3 = norm_fn(32)
        self.conv2_relu_3 = nn.ReLU()
        #----------Block_3---------#
        self.conv3_1 = spconv.SparseSequential(
            # [800, 704, 21] <- [400, 352, 11]
            block(32, 64, 3, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'),
        )
        self.conv3_bn_1 = norm_fn(64)
        self.conv3_relu_1 = nn.ReLU()
        self.conv3_2 = spconv.SparseSequential(
            # [800, 704, 21] <- [400, 352, 11]
            block(64, 64, 3, padding=1, indice_key='subm3'),
        )
        self.conv3_bn_2 = norm_fn(64)
        self.conv3_relu_2 = nn.ReLU()
        self.conv3_3 = spconv.SparseSequential(
            # [800, 704, 21] <- [400, 352, 11]
            block(64, 64, 3, padding=1, indice_key='subm3'),
        )
        self.conv3_bn_3 = norm_fn(64)
        self.conv3_relu_3 = nn.ReLU()
        #----------Block_4---------#
        self.conv4_1 = spconv.SparseSequential(
            # [400, 352, 11] <- [200, 176, 5]
            block(64, 64, 3, stride=2, padding=(0, 1, 1), indice_key='spconv4', conv_type='spconv'),
        )
        self.conv4_bn_1 = norm_fn(64)
        self.conv4_relu_1 = nn.ReLU()
        self.conv4_2 = spconv.SparseSequential(
            # [400, 352, 11] <- [200, 176, 5]
            block(64, 64, 3, padding=1, indice_key='subm4'),
        )
        self.conv4_bn_2 = norm_fn(64)
        self.conv4_relu_2 = nn.ReLU()
        self.conv4_3 = spconv.SparseSequential(
            # [400, 352, 11] <- [200, 176, 5]
            block(64, 64, 3, padding=1, indice_key='subm4'),
        )
        self.conv4_bn_3 = norm_fn(64)
        self.conv4_relu_3 = nn.ReLU()
        #----------Last Block---------#
        last_pad = 0
        last_pad = self.model_cfg.get('last_pad', last_pad)
        self.conv_out = spconv.SparseSequential(
            # [200, 150, 5] -> [200, 150, 2]
            spconv.SparseConv3d(64, 128, (3, 1, 1), stride=(2, 1, 1), padding=last_pad,
                                bias=False, indice_key='spconv_down2'),
        )
        self.conv_out_bn = norm_fn(128)
        self.conv_out_relu = nn.ReLU()
        # Channel width of the encoded tensor handed to the detection head.
        self.num_point_features = 128
        self.backbone_channels = {
            'x_conv1': 16,
            'x_conv2': 32,
            'x_conv3': 64,
            'x_conv4': 64
        }

    def _conv_norm_act(self, sp_tensor, conv, bn, relu):
        """Apply one conv block, then UniNorm (which needs the voxel indices)
        and ReLU, returning the refreshed sparse tensor."""
        out = conv(sp_tensor)
        out = replace_feature(out, bn(out.features, out.indices))
        return replace_feature(out, relu(out.features))

    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                vfe_features: (num_voxels, C)
                voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx]
        Returns:
            batch_dict:
                encoded_spconv_tensor: sparse tensor
        """
        voxel_features, voxel_coords = batch_dict['voxel_features'], batch_dict['voxel_coords']
        batch_size = batch_dict['batch_size']
        input_sp_tensor = spconv.SparseConvTensor(
            features=voxel_features,
            indices=voxel_coords.int(),
            spatial_shape=self.sparse_shape,
            batch_size=batch_size
        )
        #----------Input Block---------#
        t_input = self._conv_norm_act(input_sp_tensor, self.conv_input, self.bn_input, self.relu_input)
        #----------Block_1---------#
        t_conv1 = self._conv_norm_act(t_input, self.conv1, self.conv1_bn_1, self.conv1_relu_1)
        #----------Block_2---------#
        t_conv2 = self._conv_norm_act(t_conv1, self.conv2_1, self.conv2_bn_1, self.conv2_relu_1)
        t_conv2 = self._conv_norm_act(t_conv2, self.conv2_2, self.conv2_bn_2, self.conv2_relu_2)
        t_conv2 = self._conv_norm_act(t_conv2, self.conv2_3, self.conv2_bn_3, self.conv2_relu_3)
        #----------Block_3---------#
        t_conv3 = self._conv_norm_act(t_conv2, self.conv3_1, self.conv3_bn_1, self.conv3_relu_1)
        t_conv3 = self._conv_norm_act(t_conv3, self.conv3_2, self.conv3_bn_2, self.conv3_relu_2)
        t_conv3 = self._conv_norm_act(t_conv3, self.conv3_3, self.conv3_bn_3, self.conv3_relu_3)
        #----------Block_4---------#
        t_conv4 = self._conv_norm_act(t_conv3, self.conv4_1, self.conv4_bn_1, self.conv4_relu_1)
        t_conv4 = self._conv_norm_act(t_conv4, self.conv4_2, self.conv4_bn_2, self.conv4_relu_2)
        t_conv4 = self._conv_norm_act(t_conv4, self.conv4_3, self.conv4_bn_3, self.conv4_relu_3)
        # for detection head
        # [200, 176, 5] -> [200, 176, 2]
        out = self._conv_norm_act(t_conv4, self.conv_out, self.conv_out_bn, self.conv_out_relu)
        batch_dict.update({
            'encoded_spconv_tensor': out,
            'encoded_spconv_tensor_stride': 8
        })
        batch_dict.update({
            'multi_scale_3d_features': {
                'x_conv1': t_conv1,
                'x_conv2': t_conv2,
                'x_conv3': t_conv3,
                'x_conv4': t_conv4,
            }
        })
        batch_dict.update({
            'multi_scale_3d_strides': {
                'x_conv1': 1,
                'x_conv2': 2,
                'x_conv3': 4,
                'x_conv4': 8,
            }
        })
        return batch_dict
class VoxelResBackBone8x_UniBN(nn.Module):
    """Residual 8x-downsampling sparse-conv encoder with UniNorm normalization.

    NOTE(review): `post_act_block` defined in this module does NOT accept a
    `norm_fn` keyword, yet the `block(..., norm_fn=norm_fn, ...)` calls below
    pass one — constructing this class raises TypeError as the module stands.
    Confirm whether this class is used anywhere / whether the helper was meant
    to be the spconv_backbone version.
    """
    def __init__(self, model_cfg, input_channels, grid_size, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        # UniNorm1d's forward takes (features, indices); embedding it inside a
        # SparseSequential (which forwards only features) is questionable —
        # verify against uni3d_norm_2_in's implementation.
        norm_fn = partial(uni3d_norm_2_in.UniNorm1d, dataset_from_flag=int(self.model_cfg.db_source), eps=1e-3, momentum=0.01, voxel_coord=True)
        # Sparse tensor spatial shape is (z, y, x); z is padded by 1.
        self.sparse_shape = grid_size[::-1] + [1, 0, 0]
        self.conv_input = spconv.SparseSequential(
            spconv.SubMConv3d(input_channels, 16, 3, padding=1, bias=False, indice_key='subm1'),
            norm_fn(16),
            nn.ReLU(),
        )
        block = post_act_block
        # Encoder stages: conv1 keeps resolution, conv2..conv4 downsample by 2 each.
        self.conv1 = spconv.SparseSequential(
            SparseBasicBlock(16, 16, norm_fn=norm_fn, indice_key='res1'),
            SparseBasicBlock(16, 16, norm_fn=norm_fn, indice_key='res1'),
        )
        self.conv2 = spconv.SparseSequential(
            # [1600, 1408, 41] <- [800, 704, 21]
            block(16, 32, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'),
            SparseBasicBlock(32, 32, norm_fn=norm_fn, indice_key='res2'),
            SparseBasicBlock(32, 32, norm_fn=norm_fn, indice_key='res2'),
        )
        self.conv3 = spconv.SparseSequential(
            # [800, 704, 21] <- [400, 352, 11]
            block(32, 64, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'),
            SparseBasicBlock(64, 64, norm_fn=norm_fn, indice_key='res3'),
            SparseBasicBlock(64, 64, norm_fn=norm_fn, indice_key='res3'),
        )
        self.conv4 = spconv.SparseSequential(
            # [400, 352, 11] <- [200, 176, 5]
            block(64, 128, 3, norm_fn=norm_fn, stride=2, padding=(0, 1, 1), indice_key='spconv4', conv_type='spconv'),
            SparseBasicBlock(128, 128, norm_fn=norm_fn, indice_key='res4'),
            SparseBasicBlock(128, 128, norm_fn=norm_fn, indice_key='res4'),
        )
        last_pad = 0
        last_pad = self.model_cfg.get('last_pad', last_pad)
        # Final z-compression producing the BEV-ready encoded tensor.
        self.conv_out = spconv.SparseSequential(
            # [200, 150, 5] -> [200, 150, 2]
            spconv.SparseConv3d(128, 128, (3, 1, 1), stride=(2, 1, 1), padding=last_pad,
                                bias=False, indice_key='spconv_down2'),
            norm_fn(128),
            nn.ReLU(),
        )
        # Channel width of the encoded tensor handed to the detection head.
        self.num_point_features = 128
        self.backbone_channels = {
            'x_conv1': 16,
            'x_conv2': 32,
            'x_conv3': 64,
            'x_conv4': 128
        }
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                vfe_features: (num_voxels, C)
                voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx]
        Returns:
            batch_dict:
                encoded_spconv_tensor: sparse tensor
        """
        voxel_features, voxel_coords = batch_dict['voxel_features'], batch_dict['voxel_coords']
        batch_size = batch_dict['batch_size']
        input_sp_tensor = spconv.SparseConvTensor(
            features=voxel_features,
            indices=voxel_coords.int(),
            spatial_shape=self.sparse_shape,
            batch_size=batch_size
        )
        x = self.conv_input(input_sp_tensor)
        # Encoder pass; each stage's output is also exported for multi-scale heads.
        x_conv1 = self.conv1(x)
        x_conv2 = self.conv2(x_conv1)
        x_conv3 = self.conv3(x_conv2)
        x_conv4 = self.conv4(x_conv3)
        # for detection head
        # [200, 176, 5] -> [200, 176, 2]
        out = self.conv_out(x_conv4)
        batch_dict.update({
            'encoded_spconv_tensor': out,
            'encoded_spconv_tensor_stride': 8
        })
        batch_dict.update({
            'multi_scale_3d_features': {
                'x_conv1': x_conv1,
                'x_conv2': x_conv2,
                'x_conv3': x_conv3,
                'x_conv4': x_conv4,
            }
        })
        batch_dict.update({
            'multi_scale_3d_strides': {
                'x_conv1': 1,
                'x_conv2': 2,
                'x_conv3': 4,
                'x_conv4': 8,
            }
        })
        return batch_dict
| 14,619
| 37.072917
| 144
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/backbones_3d/spconv_backbone.py
|
from functools import partial
import torch.nn as nn
from ...utils.spconv_utils import replace_feature, spconv
def post_act_block(in_channels, out_channels, kernel_size, indice_key=None, stride=1, padding=0,
                   conv_type='subm', norm_fn=None):
    """Return a conv -> norm -> ReLU sparse sequential block.

    `conv_type` selects the convolution flavour: 'subm' (submanifold),
    'spconv' (strided sparse conv), or 'inverseconv' (upsampling). `stride`
    and `padding` only apply to the 'spconv' variant. An unknown `conv_type`
    raises NotImplementedError.
    """
    builders = {
        'subm': lambda: spconv.SubMConv3d(
            in_channels, out_channels, kernel_size, bias=False, indice_key=indice_key),
        'spconv': lambda: spconv.SparseConv3d(
            in_channels, out_channels, kernel_size, stride=stride, padding=padding,
            bias=False, indice_key=indice_key),
        'inverseconv': lambda: spconv.SparseInverseConv3d(
            in_channels, out_channels, kernel_size, indice_key=indice_key, bias=False),
    }
    if conv_type not in builders:
        raise NotImplementedError

    return spconv.SparseSequential(
        builders[conv_type](),
        norm_fn(out_channels),
        nn.ReLU(),
    )
class SparseBasicBlock(spconv.SparseModule):
    """Residual unit of two submanifold 3x3x3 convolutions with norm and ReLU."""

    expansion = 1  # output width equals `planes`

    def __init__(self, inplanes, planes, stride=1, norm_fn=None, downsample=None, indice_key=None):
        super(SparseBasicBlock, self).__init__()
        assert norm_fn is not None
        bias = norm_fn is not None
        self.conv1 = spconv.SubMConv3d(
            inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=bias, indice_key=indice_key
        )
        self.bn1 = norm_fn(planes)
        self.relu = nn.ReLU()
        self.conv2 = spconv.SubMConv3d(
            planes, planes, kernel_size=3, stride=stride, padding=1, bias=bias, indice_key=indice_key
        )
        self.bn2 = norm_fn(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Identity branch; `downsample` remaps it when channel counts differ.
        residual = self.downsample(x) if self.downsample is not None else x

        branch = self.conv1(x)
        branch = replace_feature(branch, self.bn1(branch.features))
        branch = replace_feature(branch, self.relu(branch.features))
        branch = self.conv2(branch)
        branch = replace_feature(branch, self.bn2(branch.features))

        branch = replace_feature(branch, branch.features + residual.features)
        return replace_feature(branch, self.relu(branch.features))
class VoxelBackBone8x(nn.Module):
    """Standard 8x-downsampling sparse-convolution 3D encoder.

    Four encoder stages (1x, 2x, 4x, 8x spatial stride) followed by a final
    z-compressing conv that produces the BEV-ready encoded tensor for the
    detection head. All intermediate stage outputs are exported for
    multi-scale consumers.
    """
    def __init__(self, model_cfg, input_channels, grid_size, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01)
        # Sparse tensor spatial shape is (z, y, x); z is padded by 1.
        self.sparse_shape = grid_size[::-1] + [1, 0, 0]
        # Stem: lift raw voxel features to 16 channels at full resolution.
        self.conv_input = spconv.SparseSequential(
            spconv.SubMConv3d(input_channels, 16, 3, padding=1, bias=False, indice_key='subm1'),
            norm_fn(16),
            nn.ReLU(),
        )
        block = post_act_block
        self.conv1 = spconv.SparseSequential(
            block(16, 16, 3, norm_fn=norm_fn, padding=1, indice_key='subm1'),
        )
        self.conv2 = spconv.SparseSequential(
            # [1600, 1408, 41] <- [800, 704, 21]
            block(16, 32, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'),
            block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'),
            block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'),
        )
        self.conv3 = spconv.SparseSequential(
            # [800, 704, 21] <- [400, 352, 11]
            block(32, 64, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'),
        )
        self.conv4 = spconv.SparseSequential(
            # [400, 352, 11] <- [200, 176, 5]
            block(64, 64, 3, norm_fn=norm_fn, stride=2, padding=(0, 1, 1), indice_key='spconv4', conv_type='spconv'),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'),
        )
        last_pad = 0
        last_pad = self.model_cfg.get('last_pad', last_pad)
        # Final z-compression producing the encoded tensor for the BEV head.
        self.conv_out = spconv.SparseSequential(
            # [200, 150, 5] -> [200, 150, 2]
            spconv.SparseConv3d(64, 128, (3, 1, 1), stride=(2, 1, 1), padding=last_pad,
                                bias=False, indice_key='spconv_down2'),
            norm_fn(128),
            nn.ReLU(),
        )
        # Channel width of the encoded tensor handed to the detection head.
        self.num_point_features = 128
        # Channel widths of the exported multi-scale stage outputs.
        self.backbone_channels = {
            'x_conv1': 16,
            'x_conv2': 32,
            'x_conv3': 64,
            'x_conv4': 64
        }
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                vfe_features: (num_voxels, C)
                voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx]
        Returns:
            batch_dict:
                encoded_spconv_tensor: sparse tensor
        """
        voxel_features, voxel_coords = batch_dict['voxel_features'], batch_dict['voxel_coords']
        batch_size = batch_dict['batch_size']
        input_sp_tensor = spconv.SparseConvTensor(
            features=voxel_features,
            indices=voxel_coords.int(),
            spatial_shape=self.sparse_shape,
            batch_size=batch_size
        )
        x = self.conv_input(input_sp_tensor)
        # Encoder pass; stage outputs are exported below for multi-scale heads.
        x_conv1 = self.conv1(x)
        x_conv2 = self.conv2(x_conv1)
        x_conv3 = self.conv3(x_conv2)
        x_conv4 = self.conv4(x_conv3)
        # for detection head
        # [200, 176, 5] -> [200, 176, 2]
        out = self.conv_out(x_conv4)
        batch_dict.update({
            'encoded_spconv_tensor': out,
            'encoded_spconv_tensor_stride': 8
        })
        batch_dict.update({
            'multi_scale_3d_features': {
                'x_conv1': x_conv1,
                'x_conv2': x_conv2,
                'x_conv3': x_conv3,
                'x_conv4': x_conv4,
            }
        })
        batch_dict.update({
            'multi_scale_3d_strides': {
                'x_conv1': 1,
                'x_conv2': 2,
                'x_conv3': 4,
                'x_conv4': 8,
            }
        })
        return batch_dict
class VoxelResBackBone8x(nn.Module):
    """Residual variant of the 8x-downsampling sparse-conv 3D encoder.

    Each stage stacks SparseBasicBlock residual units; strided post_act_block
    convs perform the downsampling between stages. Stage outputs are exported
    for multi-scale consumers, and a final z-compressing conv produces the
    encoded tensor for the detection head.
    """
    def __init__(self, model_cfg, input_channels, grid_size, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01)
        # Sparse tensor spatial shape is (z, y, x); z is padded by 1.
        self.sparse_shape = grid_size[::-1] + [1, 0, 0]
        self.conv_input = spconv.SparseSequential(
            spconv.SubMConv3d(input_channels, 16, 3, padding=1, bias=False, indice_key='subm1'),
            norm_fn(16),
            nn.ReLU(),
        )
        block = post_act_block
        self.conv1 = spconv.SparseSequential(
            SparseBasicBlock(16, 16, norm_fn=norm_fn, indice_key='res1'),
            SparseBasicBlock(16, 16, norm_fn=norm_fn, indice_key='res1'),
        )
        self.conv2 = spconv.SparseSequential(
            # [1600, 1408, 41] <- [800, 704, 21]
            block(16, 32, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'),
            SparseBasicBlock(32, 32, norm_fn=norm_fn, indice_key='res2'),
            SparseBasicBlock(32, 32, norm_fn=norm_fn, indice_key='res2'),
        )
        self.conv3 = spconv.SparseSequential(
            # [800, 704, 21] <- [400, 352, 11]
            block(32, 64, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'),
            SparseBasicBlock(64, 64, norm_fn=norm_fn, indice_key='res3'),
            SparseBasicBlock(64, 64, norm_fn=norm_fn, indice_key='res3'),
        )
        self.conv4 = spconv.SparseSequential(
            # [400, 352, 11] <- [200, 176, 5]
            block(64, 128, 3, norm_fn=norm_fn, stride=2, padding=(0, 1, 1), indice_key='spconv4', conv_type='spconv'),
            SparseBasicBlock(128, 128, norm_fn=norm_fn, indice_key='res4'),
            SparseBasicBlock(128, 128, norm_fn=norm_fn, indice_key='res4'),
        )
        last_pad = 0
        last_pad = self.model_cfg.get('last_pad', last_pad)
        # Final z-compression producing the encoded tensor for the BEV head.
        self.conv_out = spconv.SparseSequential(
            # [200, 150, 5] -> [200, 150, 2]
            spconv.SparseConv3d(128, 128, (3, 1, 1), stride=(2, 1, 1), padding=last_pad,
                                bias=False, indice_key='spconv_down2'),
            norm_fn(128),
            nn.ReLU(),
        )
        # Channel width of the encoded tensor handed to the detection head.
        self.num_point_features = 128
        # Channel widths of the exported multi-scale stage outputs.
        self.backbone_channels = {
            'x_conv1': 16,
            'x_conv2': 32,
            'x_conv3': 64,
            'x_conv4': 128
        }
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                vfe_features: (num_voxels, C)
                voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx]
        Returns:
            batch_dict:
                encoded_spconv_tensor: sparse tensor
        """
        voxel_features, voxel_coords = batch_dict['voxel_features'], batch_dict['voxel_coords']
        batch_size = batch_dict['batch_size']
        input_sp_tensor = spconv.SparseConvTensor(
            features=voxel_features,
            indices=voxel_coords.int(),
            spatial_shape=self.sparse_shape,
            batch_size=batch_size
        )
        x = self.conv_input(input_sp_tensor)
        # Encoder pass; stage outputs are exported below for multi-scale heads.
        x_conv1 = self.conv1(x)
        x_conv2 = self.conv2(x_conv1)
        x_conv3 = self.conv3(x_conv2)
        x_conv4 = self.conv4(x_conv3)
        # for detection head
        # [200, 176, 5] -> [200, 176, 2]
        out = self.conv_out(x_conv4)
        batch_dict.update({
            'encoded_spconv_tensor': out,
            'encoded_spconv_tensor_stride': 8
        })
        batch_dict.update({
            'multi_scale_3d_features': {
                'x_conv1': x_conv1,
                'x_conv2': x_conv2,
                'x_conv3': x_conv3,
                'x_conv4': x_conv4,
            }
        })
        batch_dict.update({
            'multi_scale_3d_strides': {
                'x_conv1': 1,
                'x_conv2': 2,
                'x_conv3': 4,
                'x_conv4': 8,
            }
        })
        return batch_dict
# ------------------------------------------------------- #
# ------------------New 3D Backbone---------------------- #
# ------------------------------------------------------- #
class VoxelWideResBackBone8x(nn.Module):
    """Width-scaled residual 8x sparse-conv encoder.

    Identical topology to VoxelResBackBone8x but every stage's channel count
    is multiplied by `model_cfg.WIDE_FACTOR`. Input channels come from
    `model_cfg.IN_CHANNELS`; the `input_channels` constructor argument is
    ignored (kept for interface compatibility with the other backbones).
    """
    def __init__(self, model_cfg, input_channels, grid_size, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        # NOTE(review): input width is taken from the config, not from the
        # `input_channels` argument — confirm callers expect this.
        self.input_channels = model_cfg.IN_CHANNELS
        norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01)
        # Sparse tensor spatial shape is (z, y, x); z is padded by 1.
        self.sparse_shape = grid_size[::-1] + [1, 0, 0]
        self.wide_factor = model_cfg.WIDE_FACTOR
        self.conv_input = spconv.SparseSequential(
            spconv.SubMConv3d(self.input_channels, 16*self.wide_factor, 3, padding=1, bias=False, indice_key='subm1'),
            norm_fn(16*self.wide_factor),
            nn.ReLU(),
        )
        block = post_act_block
        self.conv1 = spconv.SparseSequential(
            SparseBasicBlock(16*self.wide_factor, 16*self.wide_factor, norm_fn=norm_fn, indice_key='res1'),
            SparseBasicBlock(16*self.wide_factor, 16*self.wide_factor, norm_fn=norm_fn, indice_key='res1'),
        )
        self.conv2 = spconv.SparseSequential(
            # [1600, 1408, 41] <- [800, 704, 21]
            block(16*self.wide_factor, 32*self.wide_factor, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'),
            SparseBasicBlock(32*self.wide_factor, 32*self.wide_factor, norm_fn=norm_fn, indice_key='res2'),
            SparseBasicBlock(32*self.wide_factor, 32*self.wide_factor, norm_fn=norm_fn, indice_key='res2'),
        )
        self.conv3 = spconv.SparseSequential(
            # [800, 704, 21] <- [400, 352, 11]
            block(32*self.wide_factor, 64*self.wide_factor, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'),
            SparseBasicBlock(64*self.wide_factor, 64*self.wide_factor, norm_fn=norm_fn, indice_key='res3'),
            SparseBasicBlock(64*self.wide_factor, 64*self.wide_factor, norm_fn=norm_fn, indice_key='res3'),
        )
        self.conv4 = spconv.SparseSequential(
            # [400, 352, 11] <- [200, 176, 5]
            block(64*self.wide_factor, 128*self.wide_factor, 3, norm_fn=norm_fn, stride=2, padding=(0, 1, 1), indice_key='spconv4', conv_type='spconv'),
            SparseBasicBlock(128*self.wide_factor, 128*self.wide_factor, norm_fn=norm_fn, indice_key='res4'),
            SparseBasicBlock(128*self.wide_factor, 128*self.wide_factor, norm_fn=norm_fn, indice_key='res4'),
        )
        last_pad = 0
        last_pad = self.model_cfg.get('last_pad', last_pad)
        self.conv_out = spconv.SparseSequential(
            # [200, 150, 5] -> [200, 150, 2]
            spconv.SparseConv3d(128*self.wide_factor, 128*self.wide_factor, (3, 1, 1), stride=(2, 1, 1), padding=last_pad,
                                bias=False, indice_key='spconv_down2'),
            norm_fn(128*self.wide_factor),
            nn.ReLU(),
        )
        # Bugfix: conv_out emits 128*wide_factor channels, so advertise that
        # width; the previous hard-coded 128 was only correct for
        # wide_factor == 1.
        self.num_point_features = 128 * self.wide_factor
        self.backbone_channels = {
            'x_conv1': 16*self.wide_factor,
            'x_conv2': 32*self.wide_factor,
            'x_conv3': 64*self.wide_factor,
            'x_conv4': 128*self.wide_factor
        }
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                vfe_features: (num_voxels, C)
                voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx]
        Returns:
            batch_dict:
                encoded_spconv_tensor: sparse tensor
        """
        # NOTE(review): unlike the other backbones this reads
        # 'voxel_features_after_scn' — presumably output of a preceding SCN
        # stage; confirm against the pipeline that produces it.
        voxel_features, voxel_coords = batch_dict['voxel_features_after_scn'], batch_dict['voxel_coords']
        batch_size = batch_dict['batch_size']
        input_sp_tensor = spconv.SparseConvTensor(
            features=voxel_features,
            indices=voxel_coords.int(),
            spatial_shape=self.sparse_shape,
            batch_size=batch_size
        )
        x = self.conv_input(input_sp_tensor)
        x_conv1 = self.conv1(x)
        x_conv2 = self.conv2(x_conv1)
        x_conv3 = self.conv3(x_conv2)
        x_conv4 = self.conv4(x_conv3)
        # for detection head
        # [200, 176, 5] -> [200, 176, 2]
        out = self.conv_out(x_conv4)
        batch_dict.update({
            'encoded_spconv_tensor': out,
            'encoded_spconv_tensor_stride': 8
        })
        batch_dict.update({
            'multi_scale_3d_features': {
                'x_conv1': x_conv1,
                'x_conv2': x_conv2,
                'x_conv3': x_conv3,
                'x_conv4': x_conv4,
            }
        })
        batch_dict.update({
            'multi_scale_3d_strides': {
                'x_conv1': 1,
                'x_conv2': 2,
                'x_conv3': 4,
                'x_conv4': 8,
            }
        })
        return batch_dict
class VoxelWideResBackBone_L8x(nn.Module):
    """Wider & deeper ("Large") residual sparse-conv backbone, 8x spatial downsampling.

    Same topology as the wide residual backbone but with 4 residual blocks per
    stage; every channel width is multiplied by model_cfg.WIDE_FACTOR.
    """

    def __init__(self, model_cfg, input_channels, grid_size, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01)
        # grid is given as (x, y, z); sparse shape is (z+1, y, x) so conv_out can
        # reduce the z-extent at the end
        self.sparse_shape = grid_size[::-1] + [1, 0, 0]
        self.wide_factor = model_cfg.WIDE_FACTOR
        wf = self.wide_factor

        def res_blocks(channels, indice_key, num_blocks=4):
            # Stack of identity residual blocks sharing one indice_key.
            return [SparseBasicBlock(channels, channels, norm_fn=norm_fn, indice_key=indice_key)
                    for _ in range(num_blocks)]

        self.conv_input = spconv.SparseSequential(
            spconv.SubMConv3d(input_channels, 16 * wf, 3, padding=1, bias=False, indice_key='subm1'),
            norm_fn(16 * wf),
            nn.ReLU(),
        )
        block = post_act_block
        self.conv1 = spconv.SparseSequential(*res_blocks(16 * wf, 'res1'))
        self.conv2 = spconv.SparseSequential(
            # [1600, 1408, 41] <- [800, 704, 21]
            block(16 * wf, 32 * wf, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'),
            *res_blocks(32 * wf, 'res2'),
        )
        self.conv3 = spconv.SparseSequential(
            # [800, 704, 21] <- [400, 352, 11]
            block(32 * wf, 64 * wf, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'),
            *res_blocks(64 * wf, 'res3'),
        )
        self.conv4 = spconv.SparseSequential(
            # [400, 352, 11] <- [200, 176, 5]
            block(64 * wf, 128 * wf, 3, norm_fn=norm_fn, stride=2, padding=(0, 1, 1), indice_key='spconv4', conv_type='spconv'),
            *res_blocks(128 * wf, 'res4'),
        )
        last_pad = self.model_cfg.get('last_pad', 0)
        self.conv_out = spconv.SparseSequential(
            # [200, 150, 5] -> [200, 150, 2]
            spconv.SparseConv3d(128 * wf, 128 * wf, (3, 1, 1), stride=(2, 1, 1), padding=last_pad,
                                bias=False, indice_key='spconv_down2'),
            norm_fn(128 * wf),
            nn.ReLU(),
        )
        # BUGFIX: conv_out emits 128 * wide_factor channels, so advertise that
        # width. Previously hard-coded to 128, which was inconsistent with
        # backbone_channels below and wrong whenever WIDE_FACTOR != 1.
        self.num_point_features = 128 * wf
        self.backbone_channels = {
            'x_conv1': 16 * wf,
            'x_conv2': 32 * wf,
            'x_conv3': 64 * wf,
            'x_conv4': 128 * wf,
        }

    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                voxel_features: (num_voxels, C)
                voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx]
        Returns:
            batch_dict:
                encoded_spconv_tensor: sparse tensor
                multi_scale_3d_features / multi_scale_3d_strides: per-stage outputs
        """
        voxel_features, voxel_coords = batch_dict['voxel_features'], batch_dict['voxel_coords']
        batch_size = batch_dict['batch_size']
        input_sp_tensor = spconv.SparseConvTensor(
            features=voxel_features,
            indices=voxel_coords.int(),
            spatial_shape=self.sparse_shape,
            batch_size=batch_size
        )
        x = self.conv_input(input_sp_tensor)
        x_conv1 = self.conv1(x)
        x_conv2 = self.conv2(x_conv1)
        x_conv3 = self.conv3(x_conv2)
        x_conv4 = self.conv4(x_conv3)
        # for detection head: [200, 176, 5] -> [200, 176, 2]
        out = self.conv_out(x_conv4)
        batch_dict.update({
            'encoded_spconv_tensor': out,
            'encoded_spconv_tensor_stride': 8
        })
        batch_dict.update({
            'multi_scale_3d_features': {
                'x_conv1': x_conv1,
                'x_conv2': x_conv2,
                'x_conv3': x_conv3,
                'x_conv4': x_conv4,
            }
        })
        batch_dict.update({
            'multi_scale_3d_strides': {
                'x_conv1': 1,
                'x_conv2': 2,
                'x_conv3': 4,
                'x_conv4': 8,
            }
        })
        return batch_dict
| 20,640
| 37.581308
| 152
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/backbones_3d/__init__.py
|
from .pointnet2_backbone import PointNet2Backbone, PointNet2MSG
from .spconv_backbone import VoxelBackBone8x, VoxelResBackBone8x, VoxelWideResBackBone8x, VoxelWideResBackBone_L8x
from .spconv_backbone_unibn import VoxelBackBone8x_UniBN, VoxelResBackBone8x_UniBN
from .spconv_unet import UNetV2
from .IASSD_backbone import IASSD_Backbone
# Registry mapping config names to 3D-backbone classes.
__all__ = {
    'VoxelBackBone8x': VoxelBackBone8x,
    'UNetV2': UNetV2,
    'PointNet2Backbone': PointNet2Backbone,
    'PointNet2MSG': PointNet2MSG,
    'VoxelResBackBone8x': VoxelResBackBone8x,
    'VoxelWideResBackBone8x': VoxelWideResBackBone8x,
    'VoxelWideResBackBone_L8x': VoxelWideResBackBone_L8x,
    # dataset-specific normalization variants
    'VoxelBackBone8x_UniBN': VoxelBackBone8x_UniBN,
    'VoxelResBackBone8x_UniBN': VoxelResBackBone8x_UniBN,
    'IASSD_Backbone': IASSD_Backbone,
}
| 830
| 40.55
| 114
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/backbones_3d/pointnet2_backbone.py
|
import torch
import torch.nn as nn
from ...ops.pointnet2.pointnet2_batch import pointnet2_modules
from ...ops.pointnet2.pointnet2_stack import pointnet2_modules as pointnet2_modules_stack
from ...ops.pointnet2.pointnet2_stack import pointnet2_utils as pointnet2_utils_stack
class PointNet2MSG(nn.Module):
    """PointNet++ encoder-decoder with multi-scale grouping (MSG).

    Set-abstraction (SA) layers downsample the point cloud while growing feature
    channels; feature-propagation (FP) layers interpolate features back to the
    full point set. Layer sizes come from model_cfg.SA_CONFIG / model_cfg.FP_MLPS.
    """
    def __init__(self, model_cfg, input_channels, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        self.SA_modules = nn.ModuleList()
        # the first 3 input channels are xyz and are handled separately by SA modules
        channel_in = input_channels - 3
        self.num_points_each_layer = []
        skip_channel_list = [input_channels - 3]
        for k in range(self.model_cfg.SA_CONFIG.NPOINTS.__len__()):
            mlps = self.model_cfg.SA_CONFIG.MLPS[k].copy()
            channel_out = 0
            # prepend the incoming channel count to every per-scale MLP spec;
            # MSG concatenates all scales, hence channel_out is the sum of last dims
            for idx in range(mlps.__len__()):
                mlps[idx] = [channel_in] + mlps[idx]
                channel_out += mlps[idx][-1]
            self.SA_modules.append(
                pointnet2_modules.PointnetSAModuleMSG(
                    npoint=self.model_cfg.SA_CONFIG.NPOINTS[k],
                    radii=self.model_cfg.SA_CONFIG.RADIUS[k],
                    nsamples=self.model_cfg.SA_CONFIG.NSAMPLE[k],
                    mlps=mlps,
                    use_xyz=self.model_cfg.SA_CONFIG.get('USE_XYZ', True),
                )
            )
            skip_channel_list.append(channel_out)
            channel_in = channel_out
        self.FP_modules = nn.ModuleList()
        for k in range(self.model_cfg.FP_MLPS.__len__()):
            # input of FP[k] = output of FP[k+1] (or deepest SA output) + lateral skip
            pre_channel = self.model_cfg.FP_MLPS[k + 1][-1] if k + 1 < len(self.model_cfg.FP_MLPS) else channel_out
            self.FP_modules.append(
                pointnet2_modules.PointnetFPModule(
                    mlp=[pre_channel + skip_channel_list[k]] + self.model_cfg.FP_MLPS[k]
                )
            )
        # channel width of the final per-point features
        self.num_point_features = self.model_cfg.FP_MLPS[0][-1]
    def break_up_pc(self, pc):
        # Split stacked points (N, 1+3+C) into batch index, xyz and optional features.
        batch_idx = pc[:, 0]
        xyz = pc[:, 1:4].contiguous()
        features = (pc[:, 4:].contiguous() if pc.size(-1) > 4 else None)
        return batch_idx, xyz, features
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                vfe_features: (num_voxels, C)
                points: (num_points, 4 + C), [batch_idx, x, y, z, ...]
        Returns:
            batch_dict:
                encoded_spconv_tensor: sparse tensor
                point_features: (N, C)
        """
        batch_size = batch_dict['batch_size']
        points = batch_dict['points']
        batch_idx, xyz, features = self.break_up_pc(points)
        xyz_batch_cnt = xyz.new_zeros(batch_size).int()
        for bs_idx in range(batch_size):
            xyz_batch_cnt[bs_idx] = (batch_idx == bs_idx).sum()
        # batch-style PointNet++ ops require equal point counts per sample
        assert xyz_batch_cnt.min() == xyz_batch_cnt.max()
        xyz = xyz.view(batch_size, -1, 3)
        features = features.view(batch_size, -1, features.shape[-1]).permute(0, 2, 1).contiguous() if features is not None else None
        l_xyz, l_features = [xyz], [features]
        # encoder: each SA module consumes the previous level's points/features
        for i in range(len(self.SA_modules)):
            li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
            l_xyz.append(li_xyz)
            l_features.append(li_features)
        # decoder: walk the pyramid from coarsest to finest, fusing lateral skips
        for i in range(-1, -(len(self.FP_modules) + 1), -1):
            l_features[i - 1] = self.FP_modules[i](
                l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i]
            )  # (B, C, N)
        point_features = l_features[0].permute(0, 2, 1).contiguous()  # (B, N, C)
        batch_dict['point_features'] = point_features.view(-1, point_features.shape[-1])
        batch_dict['point_coords'] = torch.cat((batch_idx[:, None].float(), l_xyz[0].view(-1, 3)), dim=1)
        return batch_dict
class PointNet2Backbone(nn.Module):
    """
    DO NOT USE THIS CURRENTLY SINCE IT MAY HAVE POTENTIAL BUGS, 20200723
    """
    def __init__(self, model_cfg, input_channels, **kwargs):
        # Hard guard: this stack-based PointNet++ variant is known-broken
        # (see NOTE(review) comments in forward) and is intentionally disabled.
        assert False, 'DO NOT USE THIS CURRENTLY SINCE IT MAY HAVE POTENTIAL BUGS, 20200723'
        super().__init__()
        self.model_cfg = model_cfg
        self.SA_modules = nn.ModuleList()
        channel_in = input_channels - 3
        self.num_points_each_layer = []
        # NOTE(review): unlike PointNet2MSG this seeds skips with input_channels
        # (not input_channels - 3) — confirm which is intended if reviving.
        skip_channel_list = [input_channels]
        for k in range(self.model_cfg.SA_CONFIG.NPOINTS.__len__()):
            self.num_points_each_layer.append(self.model_cfg.SA_CONFIG.NPOINTS[k])
            mlps = self.model_cfg.SA_CONFIG.MLPS[k].copy()
            channel_out = 0
            # prepend the incoming channel count to every per-scale MLP spec
            for idx in range(mlps.__len__()):
                mlps[idx] = [channel_in] + mlps[idx]
                channel_out += mlps[idx][-1]
            self.SA_modules.append(
                pointnet2_modules_stack.StackSAModuleMSG(
                    radii=self.model_cfg.SA_CONFIG.RADIUS[k],
                    nsamples=self.model_cfg.SA_CONFIG.NSAMPLE[k],
                    mlps=mlps,
                    use_xyz=self.model_cfg.SA_CONFIG.get('USE_XYZ', True),
                )
            )
            skip_channel_list.append(channel_out)
            channel_in = channel_out
        self.FP_modules = nn.ModuleList()
        for k in range(self.model_cfg.FP_MLPS.__len__()):
            pre_channel = self.model_cfg.FP_MLPS[k + 1][-1] if k + 1 < len(self.model_cfg.FP_MLPS) else channel_out
            self.FP_modules.append(
                pointnet2_modules_stack.StackPointnetFPModule(
                    mlp=[pre_channel + skip_channel_list[k]] + self.model_cfg.FP_MLPS[k]
                )
            )
        self.num_point_features = self.model_cfg.FP_MLPS[0][-1]
    def break_up_pc(self, pc):
        # Split stacked points (N, 1+3+C) into batch index, xyz and optional features.
        batch_idx = pc[:, 0]
        xyz = pc[:, 1:4].contiguous()
        features = (pc[:, 4:].contiguous() if pc.size(-1) > 4 else None)
        return batch_idx, xyz, features
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                vfe_features: (num_voxels, C)
                points: (num_points, 4 + C), [batch_idx, x, y, z, ...]
        Returns:
            batch_dict:
                encoded_spconv_tensor: sparse tensor
                point_features: (N, C)
        """
        batch_size = batch_dict['batch_size']
        points = batch_dict['points']
        batch_idx, xyz, features = self.break_up_pc(points)
        xyz_batch_cnt = xyz.new_zeros(batch_size).int()
        for bs_idx in range(batch_size):
            xyz_batch_cnt[bs_idx] = (batch_idx == bs_idx).sum()
        l_xyz, l_features, l_batch_cnt = [xyz], [features], [xyz_batch_cnt]
        for i in range(len(self.SA_modules)):
            new_xyz_list = []
            # manually FPS-sample a fixed-size point set per sample for this level
            for k in range(batch_size):
                if len(l_xyz) == 1:
                    cur_xyz = l_xyz[0][batch_idx == k]
                else:
                    last_num_points = self.num_points_each_layer[i - 1]
                    cur_xyz = l_xyz[-1][k * last_num_points: (k + 1) * last_num_points]
                cur_pt_idxs = pointnet2_utils_stack.farthest_point_sample(
                    cur_xyz[None, :, :].contiguous(), self.num_points_each_layer[i]
                ).long()[0]
                if cur_xyz.shape[0] < self.num_points_each_layer[i]:
                    # NOTE(review): shape[1] is the coordinate dim (always 3); the
                    # point count is shape[0] — likely one of the referenced bugs.
                    empty_num = self.num_points_each_layer[i] - cur_xyz.shape[1]
                    # NOTE(review): cur_pt_idxs was reduced to 1-D via [0] above, so
                    # this 2-D indexing would raise at runtime — confirm before use.
                    cur_pt_idxs[0, -empty_num:] = cur_pt_idxs[0, :empty_num]
                new_xyz_list.append(cur_xyz[cur_pt_idxs])
            new_xyz = torch.cat(new_xyz_list, dim=0)
            new_xyz_batch_cnt = xyz.new_zeros(batch_size).int().fill_(self.num_points_each_layer[i])
            li_xyz, li_features = self.SA_modules[i](
                xyz=l_xyz[i], features=l_features[i], xyz_batch_cnt=l_batch_cnt[i],
                new_xyz=new_xyz, new_xyz_batch_cnt=new_xyz_batch_cnt
            )
            l_xyz.append(li_xyz)
            l_features.append(li_features)
            l_batch_cnt.append(new_xyz_batch_cnt)
        # use the raw point attributes as the finest-level features for the decoder
        l_features[0] = points[:, 1:]
        for i in range(-1, -(len(self.FP_modules) + 1), -1):
            l_features[i - 1] = self.FP_modules[i](
                unknown=l_xyz[i - 1], unknown_batch_cnt=l_batch_cnt[i - 1],
                known=l_xyz[i], known_batch_cnt=l_batch_cnt[i],
                unknown_feats=l_features[i - 1], known_feats=l_features[i]
            )
        batch_dict['point_features'] = l_features[0]
        batch_dict['point_coords'] = torch.cat((batch_idx[:, None].float(), l_xyz[0]), dim=1)
        return batch_dict
| 8,540
| 40.26087
| 132
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/backbones_3d/pfe/unet_scn.py
|
from functools import partial
import torch
import torch.nn as nn
from pcdet.utils.spconv_utils import replace_feature, spconv
from pcdet.utils import common_utils
from pcdet.models.backbones_3d.spconv_backbone import post_act_block
class SparseBasicBlock(spconv.SparseModule):
    """ResNet-style basic block built from two 3x3 submanifold sparse convolutions.

    Both convs share the same indice_key so they reuse one rulebook; an optional
    downsample module transforms the shortcut branch.
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, indice_key=None, norm_fn=None):
        super(SparseBasicBlock, self).__init__()
        self.conv1 = spconv.SubMConv3d(
            inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False, indice_key=indice_key
        )
        self.bn1 = norm_fn(planes)
        self.relu = nn.ReLU()
        self.conv2 = spconv.SubMConv3d(
            planes, planes, kernel_size=3, stride=1, padding=1, bias=False, indice_key=indice_key
        )
        self.bn2 = norm_fn(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        assert x.features.dim() == 2, 'x.features.dim()=%d' % x.features.dim()
        # main branch: conv -> bn -> relu -> conv -> bn, on the sparse features
        out = self.conv1(x)
        out = replace_feature(out, self.relu(self.bn1(out.features)))
        out = self.conv2(out)
        out = replace_feature(out, self.bn2(out.features))
        # shortcut branch (identity features unless a downsample module is given)
        shortcut = x.features if self.downsample is None else self.downsample(x)
        return replace_feature(out, self.relu(out.features + shortcut))
class UNetSCN(nn.Module):
    """
    Sparse Convolution based UNet for point-wise feature learning.
    Reference Paper: https://arxiv.org/abs/1907.03670 (Shaoshuai Shi, et. al)
    From Points to Parts: 3D Object Detection from Point Cloud with Part-aware and Part-aggregation Network

    Encoder: conv1..conv4 downsample the sparse voxel grid 8x.
    Decoder: UR blocks upsample back with lateral skips; matching indice_key
    strings tie each inverse conv to its forward conv's rulebook, so the
    encoder/decoder pairing is position-sensitive — do not reorder.
    """
    def __init__(self, model_cfg, input_channels, grid_size, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        # grid is (x, y, z); sparse shape is (z+1, y, x)
        self.sparse_shape = grid_size[::-1] + [1, 0, 0]
        norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01)
        self.conv_input = spconv.SparseSequential(
            spconv.SubMConv3d(input_channels, 16, 3, padding=1, bias=False, indice_key='subm1'),
            norm_fn(16),
            nn.ReLU(),
        )
        block = post_act_block
        self.conv1 = spconv.SparseSequential(
            block(16, 16, 3, norm_fn=norm_fn, padding=1, indice_key='subm1'),
        )
        self.conv2 = spconv.SparseSequential(
            # [1600, 1408, 41] <- [800, 704, 21]
            block(16, 32, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'),
            block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'),
            block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'),
        )
        self.conv3 = spconv.SparseSequential(
            # [800, 704, 21] <- [400, 352, 11]
            block(32, 64, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'),
        )
        self.conv4 = spconv.SparseSequential(
            # [400, 352, 11] <- [200, 176, 5]
            block(64, 64, 3, norm_fn=norm_fn, stride=2, padding=(0, 1, 1), indice_key='spconv4', conv_type='spconv'),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'),
        )
        # decoder: conv_up_tX transforms the lateral skip, conv_up_mX fuses the
        # concatenated (skip, bottom) features, inv_convX upsamples one level
        # [400, 352, 11] <- [200, 176, 5]
        self.conv_up_t4 = SparseBasicBlock(64, 64, indice_key='subm4', norm_fn=norm_fn)
        self.conv_up_m4 = block(128, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4')
        self.inv_conv4 = block(64, 64, 3, norm_fn=norm_fn, indice_key='spconv4', conv_type='inverseconv')
        # [800, 704, 21] <- [400, 352, 11]
        self.conv_up_t3 = SparseBasicBlock(64, 64, indice_key='subm3', norm_fn=norm_fn)
        self.conv_up_m3 = block(128, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3')
        self.inv_conv3 = block(64, 32, 3, norm_fn=norm_fn, indice_key='spconv3', conv_type='inverseconv')
        # [1600, 1408, 41] <- [800, 704, 21]
        self.conv_up_t2 = SparseBasicBlock(32, 32, indice_key='subm2', norm_fn=norm_fn)
        self.conv_up_m2 = block(64, 32, 3, norm_fn=norm_fn, indice_key='subm2')
        self.inv_conv2 = block(32, 16, 3, norm_fn=norm_fn, indice_key='spconv2', conv_type='inverseconv')
        # [1600, 1408, 41] <- [1600, 1408, 41]
        self.conv_up_t1 = SparseBasicBlock(16, 16, indice_key='subm1', norm_fn=norm_fn)
        self.conv_up_m1 = block(32, 16, 3, norm_fn=norm_fn, indice_key='subm1')
        self.conv5 = spconv.SparseSequential(
            block(16, 16, 3, norm_fn=norm_fn, padding=1, indice_key='subm1')
        )
        self.num_point_features = 16
    def UR_block_forward(self, x_lateral, x_bottom, conv_t, conv_m, conv_inv):
        # One decoder (Up-Residual) step: transform lateral skip, concat with the
        # bottom features, fuse, add the channel-reduced concat as a residual,
        # then upsample with the inverse conv.
        x_trans = conv_t(x_lateral)
        x = x_trans
        x = replace_feature(x, torch.cat((x_bottom.features, x_trans.features), dim=1))
        x_m = conv_m(x)
        x = self.channel_reduction(x, x_m.features.shape[1])
        x = replace_feature(x, x_m.features + x.features)
        x = conv_inv(x)
        return x
    @staticmethod
    def channel_reduction(x, out_channels):
        """
        Reduce channels by summing groups of in_channels // out_channels features.
        Args:
            x: x.features (N, C1)
            out_channels: C2
        Returns:
        """
        features = x.features
        n, in_channels = features.shape
        assert (in_channels % out_channels == 0) and (in_channels >= out_channels)
        x = replace_feature(x, features.view(n, out_channels, -1).sum(dim=2))
        return x
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                vfe_features: (num_voxels, C)
                voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx]
        Returns:
            batch_dict:
                encoded_spconv_tensor: sparse tensor
                point_features: (N, C)
        """
        voxel_features, voxel_coords = batch_dict['voxel_features'], batch_dict['voxel_coords']
        batch_size = batch_dict['batch_size']
        input_sp_tensor = spconv.SparseConvTensor(
            features=voxel_features,
            indices=voxel_coords.int(),
            spatial_shape=self.sparse_shape,
            batch_size=batch_size
        )
        x = self.conv_input(input_sp_tensor)
        x_conv1 = self.conv1(x)
        x_conv2 = self.conv2(x_conv1)
        x_conv3 = self.conv3(x_conv2)
        x_conv4 = self.conv4(x_conv3)
        # for segmentation head
        # [400, 352, 11] <- [200, 176, 5]
        # (deepest level: the lateral and bottom inputs are both x_conv4)
        x_up4 = self.UR_block_forward(x_conv4, x_conv4, self.conv_up_t4, self.conv_up_m4, self.inv_conv4)
        # [800, 704, 21] <- [400, 352, 11]
        x_up3 = self.UR_block_forward(x_conv3, x_up4, self.conv_up_t3, self.conv_up_m3, self.inv_conv3)
        # [1600, 1408, 41] <- [800, 704, 21]
        x_up2 = self.UR_block_forward(x_conv2, x_up3, self.conv_up_t2, self.conv_up_m2, self.inv_conv2)
        # [1600, 1408, 41] <- [1600, 1408, 41]
        x_up1 = self.UR_block_forward(x_conv1, x_up2, self.conv_up_t1, self.conv_up_m1, self.conv5)
        # per-voxel features at the input resolution, consumed downstream
        batch_dict['voxel_features_after_scn'] = x_up1.features
        return batch_dict
# class UNetSCN(nn.Module):
# def __init__(self,
# model_cfg,
# m=16, # number of unet features (multiplied in each layer)
# block_reps=1, # depth
# residual_blocks=False, # ResNet style basic blocks
# full_scale=4096,
# num_planes=7
# ):
# super(UNetSCN, self).__init__()
# self.model_cfg = model_cfg
# self.in_channels = self.model_cfg.IN_CHANNELS #3
# self.out_channels = m
# n_planes = [(n + 1) * m for n in range(num_planes)]
# self.sparseModel = scn.Sequential().add(
# scn.InputLayer(DIMENSION, full_scale, mode=4)).add(
# scn.SubmanifoldConvolution(DIMENSION, self.in_channels, m, 3, False)).add(
# scn.UNet(DIMENSION, block_reps, n_planes, residual_blocks)).add(
# scn.BatchNormReLU(m)).add(
# scn.OutputLayer(DIMENSION))
# def forward(self, batch_dict):
# voxel_features, voxel_coords = batch_dict['voxel_features'], batch_dict['voxel_coords']
# x = [voxel_coords, voxel_features]
# x = self.sparseModel(x)
# batch_dict['voxel_features_after_scn'] = x
# return batch_dict
| 8,709
| 39.138249
| 117
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/backbones_3d/pfe/point_t_trans.py
|
import copy
import numpy as np
import torch
import torch.nn as nn
from ....utils import uni3d_norm_2_in
class POINT_T(nn.Module):
    """Point-coordinate transform that normalizes only the z coordinate.

    A domain-specific norm layer (UniNorm1d) rescales z per source dataset;
    the batch index and x/y columns pass through untouched.
    """

    def __init__(self, model_cfg, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        # Domain-specific normalization: statistics are tracked separately per
        # source dataset, selected by db_source.
        self.scale_bn = uni3d_norm_2_in.UniNorm1d(
            self.model_cfg.SHARED_CONV_CHANNEL,
            dataset_from_flag=int(self.model_cfg.db_source),
            eps=1e-3, momentum=0.01, voxel_coord=True,
        )
        # (A plain nn.BatchNorm1d, or normalizing all of x/y/z instead of only z,
        # are alternative variants that were tried here.)

    def forward(self, data_dict):
        """Rescale the z column of data_dict['points'] in place.

        points rows are [batch_idx, x, y, z, ...]; only columns 0..3 are kept,
        so any extra per-point features beyond z are dropped.
        NOTE(review): dropping columns 4+ looks intentional but is lossy — confirm
        callers never need the extra features afterwards.
        """
        pts = data_dict['points']
        prefix = pts[:, 0:3]                      # [batch_idx, x, y]
        z = pts[:, 3].unsqueeze(1)
        z_rescaled = self.scale_bn(z, pts[:, 0].unsqueeze(1))
        data_dict['points'] = torch.cat([prefix, z_rescaled], dim=1)
        return data_dict
| 1,448
| 33.5
| 89
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/backbones_3d/pfe/__init__.py
|
from .voxel_set_abstraction import VoxelSetAbstraction
from .point_t_trans import POINT_T
from .unet_scn import UNetSCN
# Registry mapping config names to point-feature-encoder (pfe) classes.
__all__ = {
    'VoxelSetAbstraction': VoxelSetAbstraction,
    'POINT_T': POINT_T,
    'UNetSCN': UNetSCN,
}
| 229
| 22
| 54
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/backbones_3d/pfe/voxel_set_abstraction.py
|
import math
import numpy as np
import torch
import torch.nn as nn
from ....ops.pointnet2.pointnet2_stack import pointnet2_modules as pointnet2_stack_modules
from ....ops.pointnet2.pointnet2_stack import pointnet2_utils as pointnet2_stack_utils
from ....utils import common_utils
from ....utils import uni3d_norm
def bilinear_interpolate_torch(im, x, y):
    """
    Bilinearly sample a 2D feature map at fractional (x, y) locations.

    Args:
        im: (H, W, C) feature map indexed as [y, x]
        x: (N) fractional column coordinates
        y: (N) fractional row coordinates
    Returns:
        (N, C) interpolated features
    """
    h, w = im.shape[0], im.shape[1]
    # integer corner indices; high = low + 1 BEFORE clamping, then both are
    # clamped to the grid (weights below use the clamped values, so samples on
    # the border degrade gracefully instead of indexing out of range)
    x_lo = torch.floor(x).long()
    x_hi = x_lo + 1
    y_lo = torch.floor(y).long()
    y_hi = y_lo + 1
    x_lo = torch.clamp(x_lo, 0, w - 1)
    x_hi = torch.clamp(x_hi, 0, w - 1)
    y_lo = torch.clamp(y_lo, 0, h - 1)
    y_hi = torch.clamp(y_hi, 0, h - 1)
    # features at the four corners, each (N, C)
    corners = (im[y_lo, x_lo], im[y_hi, x_lo], im[y_lo, x_hi], im[y_hi, x_hi])
    dx_hi = x_hi.type_as(x) - x
    dx_lo = x - x_lo.type_as(x)
    dy_hi = y_hi.type_as(y) - y
    dy_lo = y - y_lo.type_as(y)
    weights = (dx_hi * dy_hi, dx_hi * dy_lo, dx_lo * dy_hi, dx_lo * dy_lo)
    ans = 0
    for corner, weight in zip(corners, weights):
        # double transpose broadcasts the (N,) weight over the channel dim
        ans = ans + torch.t(torch.t(corner) * weight)
    return ans
def sample_points_with_roi(rois, points, sample_radius_with_roi, num_max_points_of_part=200000):
    """
    Keep only the points whose distance to the nearest ROI center is within
    half of that ROI's diagonal plus sample_radius_with_roi.

    Args:
        rois: (M, 7 + C)
        points: (N, 3)
        sample_radius_with_roi: extra radius added around each ROI
        num_max_points_of_part: chunk size bounding the (n, M) distance matrix
    Returns:
        sampled_points: (N_out, 3)
        point_mask: (N) boolean mask over the input points
    """
    def _near_roi_mask(pts):
        # (n, M) distances from each point to every ROI center
        dist = (pts[:, None, :] - rois[None, :, 0:3]).norm(dim=-1)
        nearest_dist, nearest_idx = dist.min(dim=-1)
        half_diag = (rois[nearest_idx, 3:6] / 2).norm(dim=-1)
        return nearest_dist < half_diag + sample_radius_with_roi

    num_points = points.shape[0]
    if num_points < num_max_points_of_part:
        point_mask = _near_roi_mask(points)
    else:
        # chunk the points so the pairwise distance matrix stays bounded
        chunks = [_near_roi_mask(points[beg:beg + num_max_points_of_part])
                  for beg in range(0, num_points, num_max_points_of_part)]
        point_mask = torch.cat(chunks, dim=0)
    # fall back to a single point so downstream code never sees an empty tensor
    sampled_points = points[:1] if point_mask.sum() == 0 else points[point_mask, :]
    return sampled_points, point_mask
def sector_fps(points, num_sampled_points, num_sectors):
    """
    Farthest point sampling performed independently inside angular sectors
    around the origin, so the sample budget is spread over the whole scene.

    Args:
        points: (N, 3)
        num_sampled_points: int, total sampling budget
        num_sectors: int, number of angular sectors
    Returns:
        sampled_points: (N_out, 3)
    """
    sector_size = np.pi * 2 / num_sectors
    # atan2 is in (-pi, pi]; shift to (0, 2*pi]
    point_angles = torch.atan2(points[:, 1], points[:, 0]) + np.pi
    # BUGFIX: clamp to num_sectors - 1 (was num_sectors). An angle of exactly
    # 2*pi floors to num_sectors, which matched no sector in the loop below and
    # silently dropped those points; they now fall into the last sector.
    sector_idx = (point_angles / sector_size).floor().clamp(min=0, max=num_sectors - 1)
    xyz_points_list = []
    xyz_batch_cnt = []
    num_sampled_points_list = []
    for k in range(num_sectors):
        mask = (sector_idx == k)
        cur_num_points = mask.sum().item()
        if cur_num_points > 0:
            xyz_points_list.append(points[mask])
            xyz_batch_cnt.append(cur_num_points)
            # each sector keeps a share proportional to its population
            ratio = cur_num_points / points.shape[0]
            num_sampled_points_list.append(
                min(cur_num_points, math.ceil(ratio * num_sampled_points))
            )
    if len(xyz_batch_cnt) == 0:
        # degenerate input: sample from all points as one group
        xyz_points_list.append(points)
        xyz_batch_cnt.append(len(points))
        num_sampled_points_list.append(num_sampled_points)
        print(f'Warning: empty sector points detected in SectorFPS: points.shape={points.shape}')
    xyz = torch.cat(xyz_points_list, dim=0)
    xyz_batch_cnt = torch.tensor(xyz_batch_cnt, device=points.device).int()
    sampled_points_batch_cnt = torch.tensor(num_sampled_points_list, device=points.device).int()
    # stack-FPS runs per-sector FPS in a single batched call
    sampled_pt_idxs = pointnet2_stack_utils.stack_farthest_point_sample(
        xyz.contiguous(), xyz_batch_cnt, sampled_points_batch_cnt
    ).long()
    sampled_points = xyz[sampled_pt_idxs]
    return sampled_points
class VoxelSetAbstraction(nn.Module):
    def __init__(self, model_cfg, voxel_size, point_cloud_range, num_bev_features=None,
                 num_rawpoint_features=None, **kwargs):
        # Voxel Set Abstraction: builds one local-aggregation module per feature
        # source listed in model_cfg.FEATURES_SOURCE ('bev', 'raw_points', or a
        # named sparse-conv level), then a fusion MLP over the concatenation.
        super().__init__()
        self.model_cfg = model_cfg
        self.voxel_size = voxel_size
        self.point_cloud_range = point_cloud_range
        SA_cfg = self.model_cfg.SA_LAYER
        self.SA_layers = nn.ModuleList()
        self.SA_layer_names = []
        self.downsample_times_map = {}
        c_in = 0  # running total of the concatenated feature width
        for src_name in self.model_cfg.FEATURES_SOURCE:
            # 'bev' and 'raw_points' are handled separately below
            if src_name in ['bev', 'raw_points']:
                continue
            self.downsample_times_map[src_name] = SA_cfg[src_name].DOWNSAMPLE_FACTOR
            # input width: explicit INPUT_CHANNELS, else the first MLP entry
            if SA_cfg[src_name].get('INPUT_CHANNELS', None) is None:
                input_channels = SA_cfg[src_name].MLPS[0][0] \
                    if isinstance(SA_cfg[src_name].MLPS[0], list) else SA_cfg[src_name].MLPS[0]
            else:
                input_channels = SA_cfg[src_name]['INPUT_CHANNELS']
            cur_layer, cur_num_c_out = pointnet2_stack_modules.build_local_aggregation_module(
                input_channels=input_channels, config=SA_cfg[src_name]
            )
            self.SA_layers.append(cur_layer)
            self.SA_layer_names.append(src_name)
            c_in += cur_num_c_out
        if 'bev' in self.model_cfg.FEATURES_SOURCE:
            c_bev = num_bev_features
            c_in += c_bev
        if 'raw_points' in self.model_cfg.FEATURES_SOURCE:
            # raw points carry (xyz + C) columns; xyz is consumed separately
            self.SA_rawpoints, cur_num_c_out = pointnet2_stack_modules.build_local_aggregation_module(
                input_channels=num_rawpoint_features - 3, config=SA_cfg['raw_points']
            )
            c_in += cur_num_c_out
        # fusion MLP; DUAL_NORM swaps BatchNorm for a dataset-aware UniNorm layer
        if self.model_cfg.get('DUAL_NORM', None):
            self.vsa_point_feature_fusion = nn.Sequential(
                nn.Linear(c_in, self.model_cfg.NUM_OUTPUT_FEATURES, bias=False),
                uni3d_norm.UniNorm1d(self.model_cfg.NUM_OUTPUT_FEATURES, dataset_from_flag=int(self.model_cfg.db_source)),
                nn.ReLU(),
            )
        else:
            self.vsa_point_feature_fusion = nn.Sequential(
                nn.Linear(c_in, self.model_cfg.NUM_OUTPUT_FEATURES, bias=False),
                nn.BatchNorm1d(self.model_cfg.NUM_OUTPUT_FEATURES),
                nn.ReLU(),
            )
        self.num_point_features = self.model_cfg.NUM_OUTPUT_FEATURES
        self.num_point_features_before_fusion = c_in
def interpolate_from_bev_features(self, keypoints, bev_features, batch_size, bev_stride):
"""
Args:
keypoints: (N1 + N2 + ..., 4)
bev_features: (B, C, H, W)
batch_size:
bev_stride:
Returns:
point_bev_features: (N1 + N2 + ..., C)
"""
x_idxs = (keypoints[:, 1] - self.point_cloud_range[0]) / self.voxel_size[0]
y_idxs = (keypoints[:, 2] - self.point_cloud_range[1]) / self.voxel_size[1]
x_idxs = x_idxs / bev_stride
y_idxs = y_idxs / bev_stride
point_bev_features_list = []
for k in range(batch_size):
bs_mask = (keypoints[:, 0] == k)
cur_x_idxs = x_idxs[bs_mask]
cur_y_idxs = y_idxs[bs_mask]
cur_bev_features = bev_features[k].permute(1, 2, 0) # (H, W, C)
point_bev_features = bilinear_interpolate_torch(cur_bev_features, cur_x_idxs, cur_y_idxs)
point_bev_features_list.append(point_bev_features)
point_bev_features = torch.cat(point_bev_features_list, dim=0) # (N1 + N2 + ..., C)
return point_bev_features
def sectorized_proposal_centric_sampling(self, roi_boxes, points):
"""
Args:
roi_boxes: (M, 7 + C)
points: (N, 3)
Returns:
sampled_points: (N_out, 3)
"""
sampled_points, _ = sample_points_with_roi(
rois=roi_boxes, points=points,
sample_radius_with_roi=self.model_cfg.SPC_SAMPLING.SAMPLE_RADIUS_WITH_ROI,
num_max_points_of_part=self.model_cfg.SPC_SAMPLING.get('NUM_POINTS_OF_EACH_SAMPLE_PART', 200000)
)
sampled_points = sector_fps(
points=sampled_points, num_sampled_points=self.model_cfg.NUM_KEYPOINTS,
num_sectors=self.model_cfg.SPC_SAMPLING.NUM_SECTORS
)
return sampled_points
    def get_sampled_points(self, batch_dict):
        """
        Sample the keypoint set for voxel set abstraction.

        Args:
            batch_dict:
        Returns:
            keypoints: (N1 + N2 + ..., 4), where 4 indicates [bs_idx, x, y, z]
        """
        batch_size = batch_dict['batch_size']
        # candidate pool: raw lidar points or occupied-voxel centers
        if self.model_cfg.POINT_SOURCE == 'raw_points':
            src_points = batch_dict['points'][:, 1:4]
            batch_indices = batch_dict['points'][:, 0].long()
        elif self.model_cfg.POINT_SOURCE == 'voxel_centers':
            src_points = common_utils.get_voxel_centers(
                batch_dict['voxel_coords'][:, 1:4],
                downsample_times=1,
                voxel_size=self.voxel_size,
                point_cloud_range=self.point_cloud_range
            )
            batch_indices = batch_dict['voxel_coords'][:, 0].long()
        else:
            raise NotImplementedError
        keypoints_list = []
        for bs_idx in range(batch_size):
            bs_mask = (batch_indices == bs_idx)
            sampled_points = src_points[bs_mask].unsqueeze(dim=0)  # (1, N, 3)
            if self.model_cfg.SAMPLE_METHOD == 'FPS':
                cur_pt_idxs = pointnet2_stack_utils.farthest_point_sample(
                    sampled_points[:, :, 0:3].contiguous(), self.model_cfg.NUM_KEYPOINTS
                ).long()
                # pad by repeating indices when the scene has fewer points
                # than NUM_KEYPOINTS so every sample yields a fixed-size set
                if sampled_points.shape[1] < self.model_cfg.NUM_KEYPOINTS:
                    times = int(self.model_cfg.NUM_KEYPOINTS / sampled_points.shape[1]) + 1
                    non_empty = cur_pt_idxs[0, :sampled_points.shape[1]]
                    cur_pt_idxs[0] = non_empty.repeat(times)[:self.model_cfg.NUM_KEYPOINTS]
                keypoints = sampled_points[0][cur_pt_idxs[0]].unsqueeze(dim=0)
            elif self.model_cfg.SAMPLE_METHOD == 'SPC':
                # sectorized proposal-centric sampling returns (N_out, 3) directly
                cur_keypoints = self.sectorized_proposal_centric_sampling(
                    roi_boxes=batch_dict['rois'][bs_idx], points=sampled_points[0]
                )
                bs_idxs = cur_keypoints.new_ones(cur_keypoints.shape[0]) * bs_idx
                keypoints = torch.cat((bs_idxs[:, None], cur_keypoints), dim=1)
            else:
                raise NotImplementedError
            keypoints_list.append(keypoints)
        keypoints = torch.cat(keypoints_list, dim=0)  # (B, M, 3) or (N1 + N2 + ..., 4)
        # FPS path yields (B, M, 3): flatten and prepend per-row batch indices
        if len(keypoints.shape) == 3:
            batch_idx = torch.arange(batch_size, device=keypoints.device).view(-1, 1).repeat(1, keypoints.shape[1]).view(-1, 1)
            keypoints = torch.cat((batch_idx.float(), keypoints.view(-1, 3)), dim=1)
        return keypoints
@staticmethod
def aggregate_keypoint_features_from_one_source(
batch_size, aggregate_func, xyz, xyz_features, xyz_bs_idxs, new_xyz, new_xyz_batch_cnt,
filter_neighbors_with_roi=False, radius_of_neighbor=None, num_max_points_of_part=200000, rois=None, cover_feat_4=False
):
"""
Args:
aggregate_func:
xyz: (N, 3)
xyz_features: (N, C)
xyz_bs_idxs: (N)
new_xyz: (M, 3)
new_xyz_batch_cnt: (batch_size), [N1, N2, ...]
filter_neighbors_with_roi: True/False
radius_of_neighbor: float
num_max_points_of_part: int
rois: (batch_size, num_rois, 7 + C)
cover_feat_4: if cover the xyz_features using the values in z-dimension
Returns:
"""
xyz_batch_cnt = xyz.new_zeros(batch_size).int()
if filter_neighbors_with_roi:
point_features = torch.cat((xyz, xyz_features), dim=-1) if xyz_features is not None else xyz
point_features_list = []
for bs_idx in range(batch_size):
bs_mask = (xyz_bs_idxs == bs_idx)
_, valid_mask = sample_points_with_roi(
rois=rois[bs_idx], points=xyz[bs_mask],
sample_radius_with_roi=radius_of_neighbor, num_max_points_of_part=num_max_points_of_part,
)
point_features_list.append(point_features[bs_mask][valid_mask])
xyz_batch_cnt[bs_idx] = valid_mask.sum()
valid_point_features = torch.cat(point_features_list, dim=0)
xyz = valid_point_features[:, 0:3]
xyz_features = valid_point_features[:, 3:] if xyz_features is not None else None
else:
for bs_idx in range(batch_size):
xyz_batch_cnt[bs_idx] = (xyz_bs_idxs == bs_idx).sum()
# for using the z-axes as the fourth dimension feature of point-cloud representations
if xyz_features is None:
if cover_feat_4:
xyz_features=xyz[:, 2].view(-1, 1)
pooled_points, pooled_features = aggregate_func(
xyz=xyz.contiguous(),
xyz_batch_cnt=xyz_batch_cnt,
new_xyz=new_xyz,
new_xyz_batch_cnt=new_xyz_batch_cnt,
features=xyz_features.contiguous(),
)
else:
pooled_points, pooled_features = aggregate_func(
xyz=xyz.contiguous(),
xyz_batch_cnt=xyz_batch_cnt,
new_xyz=new_xyz,
new_xyz_batch_cnt=new_xyz_batch_cnt,
features=xyz_features,
)
else:
pooled_points, pooled_features = aggregate_func(
xyz=xyz.contiguous(),
xyz_batch_cnt=xyz_batch_cnt,
new_xyz=new_xyz,
new_xyz_batch_cnt=new_xyz_batch_cnt,
features=xyz_features.contiguous(),
)
return pooled_features
    def forward(self, batch_dict):
        """
        Aggregate per-keypoint features from the configured sources (BEV map,
        raw points, multi-scale 3D voxel features) and fuse them into a single
        feature vector per keypoint.
        Args:
            batch_dict:
                batch_size:
                keypoints: (B, num_keypoints, 3)
                multi_scale_3d_features: {
                        'x_conv4': ...
                    }
                points: optional (N, 1 + 3 + C) [bs_idx, x, y, z, ...]
                spatial_features: optional
                spatial_features_stride: optional
        Returns:
            point_features: (N, C)
            point_coords: (N, 4)
        """
        # NOTE(review): keypoints are indexed below as a stacked (N, 4) tensor
        # [bs_idx, x, y, z] rather than (B, num_keypoints, 3) — confirm against
        # get_sampled_points().
        keypoints = self.get_sampled_points(batch_dict)
        # Feature sources are appended in a fixed order (bev, raw_points,
        # SA layers) so the channel layout matches vsa_point_feature_fusion.
        point_features_list = []
        if 'bev' in self.model_cfg.FEATURES_SOURCE:
            point_bev_features = self.interpolate_from_bev_features(
                keypoints, batch_dict['spatial_features'], batch_dict['batch_size'],
                bev_stride=batch_dict['spatial_features_stride']
            )
            point_features_list.append(point_bev_features)
        batch_size = batch_dict['batch_size']
        new_xyz = keypoints[:, 1:4].contiguous()
        # Number of keypoints per batch sample, required by the pooling ops.
        new_xyz_batch_cnt = new_xyz.new_zeros(batch_size).int()
        for k in range(batch_size):
            new_xyz_batch_cnt[k] = (keypoints[:, 0] == k).sum()
        if 'raw_points' in self.model_cfg.FEATURES_SOURCE:
            raw_points = batch_dict['points']
            pooled_features = self.aggregate_keypoint_features_from_one_source(
                batch_size=batch_size, aggregate_func=self.SA_rawpoints,
                xyz=raw_points[:, 1:4],
                xyz_features=raw_points[:, 4:].contiguous() if raw_points.shape[1] > 4 else None,
                xyz_bs_idxs=raw_points[:, 0],
                new_xyz=new_xyz, new_xyz_batch_cnt=new_xyz_batch_cnt,
                filter_neighbors_with_roi=self.model_cfg.SA_LAYER['raw_points'].get('FILTER_NEIGHBOR_WITH_ROI', False),
                radius_of_neighbor=self.model_cfg.SA_LAYER['raw_points'].get('RADIUS_OF_NEIGHBOR_WITH_ROI', None),
                rois=batch_dict.get('rois', None),
                cover_feat_4=self.model_cfg.COVER_FEAT if self.model_cfg.get('COVER_FEAT', None) else None
            )
            point_features_list.append(pooled_features)
        for k, src_name in enumerate(self.SA_layer_names):
            cur_coords = batch_dict['multi_scale_3d_features'][src_name].indices
            cur_features = batch_dict['multi_scale_3d_features'][src_name].features.contiguous()
            # Convert sparse voxel indices to metric centers so the pooling
            # operates in world coordinates.
            xyz = common_utils.get_voxel_centers(
                cur_coords[:, 1:4], downsample_times=self.downsample_times_map[src_name],
                voxel_size=self.voxel_size, point_cloud_range=self.point_cloud_range
            )
            pooled_features = self.aggregate_keypoint_features_from_one_source(
                batch_size=batch_size, aggregate_func=self.SA_layers[k],
                xyz=xyz.contiguous(), xyz_features=cur_features, xyz_bs_idxs=cur_coords[:, 0],
                new_xyz=new_xyz, new_xyz_batch_cnt=new_xyz_batch_cnt,
                filter_neighbors_with_roi=self.model_cfg.SA_LAYER[src_name].get('FILTER_NEIGHBOR_WITH_ROI', False),
                radius_of_neighbor=self.model_cfg.SA_LAYER[src_name].get('RADIUS_OF_NEIGHBOR_WITH_ROI', None),
                rois=batch_dict.get('rois', None),
                cover_feat_4=self.model_cfg.COVER_FEAT if self.model_cfg.get('COVER_FEAT', None) else None
            )
            point_features_list.append(pooled_features)
        point_features = torch.cat(point_features_list, dim=-1)
        # Raw concatenated features are kept for heads that need them.
        batch_dict['point_features_before_fusion'] = point_features.view(-1, point_features.shape[-1])
        point_features = self.vsa_point_feature_fusion(point_features.view(-1, point_features.shape[-1]))
        batch_dict['point_features'] = point_features # (BxN, C)
        batch_dict['point_coords'] = keypoints # (BxN, 4)
        return batch_dict
| 18,045
| 39.920635
| 130
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/backbones_3d/vfe/vfe_template.py
|
import torch.nn as nn
class VFETemplate(nn.Module):
    """Abstract base class for voxel feature encoders (VFEs).

    Concrete encoders must report their output channel count and implement
    the forward pass; this base only stores the configuration.
    """

    def __init__(self, model_cfg, **kwargs):
        super().__init__()
        # Keep the raw config around for subclasses to query.
        self.model_cfg = model_cfg

    def get_output_feature_dim(self):
        """Return the number of feature channels produced per voxel."""
        raise NotImplementedError

    def forward(self, **kwargs):
        """
        Args:
            **kwargs:
        Returns:
            batch_dict:
                ...
                vfe_features: (num_voxels, C)
        """
        raise NotImplementedError
| 470
| 19.478261
| 45
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/backbones_3d/vfe/dynamic_mean_vfe.py
|
import torch
from .vfe_template import VFETemplate
try:
import torch_scatter
except Exception as e:
# Incase someone doesn't want to use dynamic pillar vfe and hasn't installed torch_scatter
pass
from .vfe_template import VFETemplate
class DynamicMeanVFE(VFETemplate):
    """Mean VFE with dynamic voxelization: voxelizes the raw point cloud on
    the fly (no pre-padded voxels) and averages the features of all points
    falling into the same voxel."""
    def __init__(self, model_cfg, num_point_features, voxel_size, grid_size, point_cloud_range, **kwargs):
        super().__init__(model_cfg=model_cfg)
        self.num_point_features = num_point_features
        # NOTE(review): tensors are placed on the default CUDA device at
        # construction time, so this module assumes GPU execution.
        self.grid_size = torch.tensor(grid_size).cuda()
        self.voxel_size = torch.tensor(voxel_size).cuda()
        self.point_cloud_range = torch.tensor(point_cloud_range).cuda()
        # Voxel size and the center coordinate of voxel (0, 0, 0);
        # the offsets are not referenced by forward() in this class.
        self.voxel_x = voxel_size[0]
        self.voxel_y = voxel_size[1]
        self.voxel_z = voxel_size[2]
        self.x_offset = self.voxel_x / 2 + point_cloud_range[0]
        self.y_offset = self.voxel_y / 2 + point_cloud_range[1]
        self.z_offset = self.voxel_z / 2 + point_cloud_range[2]
        # Mixed-radix multipliers used to pack (bs_idx, x, y, z) voxel indices
        # into a single integer key for torch.unique.
        self.scale_xyz = grid_size[0] * grid_size[1] * grid_size[2]
        self.scale_yz = grid_size[1] * grid_size[2]
        self.scale_z = grid_size[2]
    def get_output_feature_dim(self):
        # Averaging leaves the channel count unchanged.
        return self.num_point_features
    @torch.no_grad()
    def forward(self, batch_dict, **kwargs):
        """
        Args:
            batch_dict:
                points: (N, 1 + 3 + C) [bs_idx, x, y, z, ...]
            **kwargs:
        Returns:
            batch_dict:
                voxel_features: (num_voxels, 3 + C) mean of point features per voxel
                voxel_coords: (num_voxels, 4) [bs_idx, z, y, x]
        """
        batch_size = batch_dict['batch_size']
        points = batch_dict['points'] # (batch_idx, x, y, z, i, e)
        # Voxel index of every point; drop points outside the grid.
        point_coords = torch.floor((points[:, 1:4] - self.point_cloud_range[0:3]) / self.voxel_size).int()
        mask = ((point_coords >= 0) & (point_coords < self.grid_size)).all(dim=1)
        points = points[mask]
        point_coords = point_coords[mask]
        # Pack (bs_idx, x, y, z) into one scalar key so points of the same
        # voxel collapse to the same entry in torch.unique below.
        merge_coords = points[:, 0].int() * self.scale_xyz + \
                        point_coords[:, 0] * self.scale_yz + \
                        point_coords[:, 1] * self.scale_z + \
                        point_coords[:, 2]
        points_data = points[:, 1:].contiguous()
        unq_coords, unq_inv, unq_cnt = torch.unique(merge_coords, return_inverse=True, return_counts=True)
        # Average all point features that fall into the same voxel.
        points_mean = torch_scatter.scatter_mean(points_data, unq_inv, dim=0)
        unq_coords = unq_coords.int()
        # Unpack the scalar keys back into (bs_idx, x, y, z) ...
        voxel_coords = torch.stack((unq_coords // self.scale_xyz,
                                    (unq_coords % self.scale_xyz) // self.scale_yz,
                                    (unq_coords % self.scale_yz) // self.scale_z,
                                    unq_coords % self.scale_z), dim=1)
        # ... and reorder to the (bs_idx, z, y, x) layout expected downstream.
        voxel_coords = voxel_coords[:, [0, 3, 2, 1]]
        batch_dict['voxel_features'] = points_mean.contiguous()
        batch_dict['voxel_coords'] = voxel_coords.contiguous()
        return batch_dict
| 2,980
| 37.714286
| 106
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/backbones_3d/vfe/mean_vfe.py
|
import torch
from .vfe_template import VFETemplate
class MeanVFE(VFETemplate):
    """Voxel feature encoder that averages the raw point features per voxel."""

    def __init__(self, model_cfg, num_point_features, **kwargs):
        super().__init__(model_cfg=model_cfg)
        # Averaging keeps the channel count unchanged.
        self.num_point_features = num_point_features

    def get_output_feature_dim(self):
        return self.num_point_features

    def forward(self, batch_dict, **kwargs):
        """
        Args:
            batch_dict:
                voxels: (num_voxels, max_points_per_voxel, C)
                voxel_num_points: (num_voxels)
            **kwargs:
        Returns:
            batch_dict:
                voxel_features: (num_voxels, C) mean point feature per voxel
        """
        voxels = batch_dict['voxels']
        num_points = batch_dict['voxel_num_points']
        # Sum over the (zero-padded) point slots of each voxel, then divide by
        # the real point count; clamping avoids division by zero.
        summed = voxels.sum(dim=1, keepdim=False)
        counts = torch.clamp_min(num_points.view(-1, 1), min=1.0).type_as(voxels)
        batch_dict['voxel_features'] = (summed / counts).contiguous()
        return batch_dict
| 1,038
| 31.46875
| 99
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/backbones_3d/vfe/dynamic_pillar_vfe.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
try:
import torch_scatter
except Exception as e:
# Incase someone doesn't want to use dynamic pillar vfe and hasn't installed torch_scatter
pass
from .vfe_template import VFETemplate
class PFNLayerV2(nn.Module):
    """Pillar Feature Net layer for dynamically scattered points.

    Applies linear (+ optional BatchNorm) + ReLU per point, then a
    scatter-max over the points of each pillar. Intermediate layers return
    the per-point features concatenated with their pillar-wise max.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 use_norm=True,
                 last_layer=False):
        super().__init__()
        self.last_vfe = last_layer
        self.use_norm = use_norm
        # Intermediate layers emit half the channels; the other half comes
        # from concatenating the pillar-wise max in forward().
        if not self.last_vfe:
            out_channels = out_channels // 2
        if self.use_norm:
            self.linear = nn.Linear(in_channels, out_channels, bias=False)
            self.norm = nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01)
        else:
            self.linear = nn.Linear(in_channels, out_channels, bias=True)
        self.relu = nn.ReLU()

    def forward(self, inputs, unq_inv):
        projected = self.linear(inputs)
        if self.use_norm:
            projected = self.norm(projected)
        activated = self.relu(projected)
        # Max-pool features over all points mapped to the same pillar.
        pooled = torch_scatter.scatter_max(activated, unq_inv, dim=0)[0]
        if self.last_vfe:
            return pooled
        return torch.cat([activated, pooled[unq_inv, :]], dim=1)
class DynamicPillarVFE(VFETemplate):
    """Pillar feature encoder with dynamic voxelization: points are grouped
    into BEV pillars on the fly (no padding) and encoded by PFNLayerV2s."""
    def __init__(self, model_cfg, num_point_features, voxel_size, grid_size, point_cloud_range, **kwargs):
        super().__init__(model_cfg=model_cfg)
        self.use_norm = self.model_cfg.USE_NORM
        self.with_distance = self.model_cfg.WITH_DISTANCE
        self.use_absolute_xyz = self.model_cfg.USE_ABSLOTE_XYZ
        # +6: cluster offset (3) + pillar-center offset (3) when absolute xyz
        # is kept; +3 because the raw xyz columns are dropped otherwise.
        num_point_features += 6 if self.use_absolute_xyz else 3
        if self.with_distance:
            num_point_features += 1
        self.num_filters = self.model_cfg.NUM_FILTERS
        assert len(self.num_filters) > 0
        num_filters = [num_point_features] + list(self.num_filters)
        pfn_layers = []
        for i in range(len(num_filters) - 1):
            in_filters = num_filters[i]
            out_filters = num_filters[i + 1]
            pfn_layers.append(
                PFNLayerV2(in_filters, out_filters, self.use_norm, last_layer=(i >= len(num_filters) - 2))
            )
        self.pfn_layers = nn.ModuleList(pfn_layers)
        # Pillar size and the center coordinate of pillar (0, 0).
        self.voxel_x = voxel_size[0]
        self.voxel_y = voxel_size[1]
        self.voxel_z = voxel_size[2]
        self.x_offset = self.voxel_x / 2 + point_cloud_range[0]
        self.y_offset = self.voxel_y / 2 + point_cloud_range[1]
        self.z_offset = self.voxel_z / 2 + point_cloud_range[2]
        # Multipliers to pack (bs_idx, x, y) pillar indices into one integer key.
        self.scale_xy = grid_size[0] * grid_size[1]
        self.scale_y = grid_size[1]
        # NOTE(review): placed on the default CUDA device at construction time,
        # so this module assumes GPU execution.
        self.grid_size = torch.tensor(grid_size).cuda()
        self.voxel_size = torch.tensor(voxel_size).cuda()
        self.point_cloud_range = torch.tensor(point_cloud_range).cuda()
    def get_output_feature_dim(self):
        return self.num_filters[-1]
    def forward(self, batch_dict, **kwargs):
        """
        Args:
            batch_dict:
                points: (N, 1 + 3 + C) [bs_idx, x, y, z, ...]
            **kwargs:
        Returns:
            batch_dict:
                pillar_features: (num_pillars, C_out)
                voxel_coords: (num_pillars, 4) [bs_idx, z(=0), y, x]
        """
        points = batch_dict['points'] # (batch_idx, x, y, z, i, e)
        # BEV pillar index of each point; drop points outside the xy grid.
        points_coords = torch.floor((points[:, [1,2]] - self.point_cloud_range[[0,1]]) / self.voxel_size[[0,1]]).int()
        mask = ((points_coords >= 0) & (points_coords < self.grid_size[[0,1]])).all(dim=1)
        points = points[mask]
        points_coords = points_coords[mask]
        points_xyz = points[:, [1, 2, 3]].contiguous()
        # Pack (bs_idx, x, y) into one scalar key so points of the same pillar
        # collapse to the same entry in torch.unique below.
        merge_coords = points[:, 0].int() * self.scale_xy + \
                       points_coords[:, 0] * self.scale_y + \
                       points_coords[:, 1]
        unq_coords, unq_inv, unq_cnt = torch.unique(merge_coords, return_inverse=True, return_counts=True, dim=0)
        # Offset of each point from the centroid of its pillar.
        points_mean = torch_scatter.scatter_mean(points_xyz, unq_inv, dim=0)
        f_cluster = points_xyz - points_mean[unq_inv, :]
        # Offset of each point from the geometric center of its pillar.
        f_center = torch.zeros_like(points_xyz)
        f_center[:, 0] = points_xyz[:, 0] - (points_coords[:, 0].to(points_xyz.dtype) * self.voxel_x + self.x_offset)
        f_center[:, 1] = points_xyz[:, 1] - (points_coords[:, 1].to(points_xyz.dtype) * self.voxel_y + self.y_offset)
        f_center[:, 2] = points_xyz[:, 2] - self.z_offset
        if self.use_absolute_xyz:
            features = [points[:, 1:], f_cluster, f_center]
        else:
            features = [points[:, 4:], f_cluster, f_center]
        if self.with_distance:
            points_dist = torch.norm(points[:, 1:4], 2, dim=1, keepdim=True)
            features.append(points_dist)
        features = torch.cat(features, dim=-1)
        for pfn in self.pfn_layers:
            features = pfn(features, unq_inv)
        # generate voxel coordinates: unpack the scalar keys back into
        # (bs_idx, x, y), append z=0, and reorder to (bs_idx, z, y, x).
        unq_coords = unq_coords.int()
        voxel_coords = torch.stack((unq_coords // self.scale_xy,
                                    (unq_coords % self.scale_xy) // self.scale_y,
                                    unq_coords % self.scale_y,
                                    torch.zeros(unq_coords.shape[0]).to(unq_coords.device).int()
                                    ), dim=1)
        voxel_coords = voxel_coords[:, [0, 3, 2, 1]]
        batch_dict['pillar_features'] = features
        batch_dict['voxel_coords'] = voxel_coords
        return batch_dict
| 5,614
| 38.265734
| 118
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/backbones_3d/vfe/pillar_vfe.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .vfe_template import VFETemplate
class PFNLayer(nn.Module):
    """Pillar Feature Net layer for padded (P, N, C) pillar tensors."""

    def __init__(self,
                 in_channels,
                 out_channels,
                 use_norm=True,
                 last_layer=False):
        super().__init__()
        self.last_vfe = last_layer
        self.use_norm = use_norm
        if not self.last_vfe:
            # Half of the output channels are produced here; the other half
            # is the broadcast pillar-wise max concatenated in forward().
            out_channels = out_channels // 2
        if self.use_norm:
            self.linear = nn.Linear(in_channels, out_channels, bias=False)
            self.norm = nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01)
        else:
            self.linear = nn.Linear(in_channels, out_channels, bias=True)
        # Chunk size for splitting very large batches through the linear layer.
        self.part = 50000

    def forward(self, inputs):
        if inputs.shape[0] > self.part:
            # nn.Linear performs randomly when batch size is too large,
            # so process the input in fixed-size chunks.
            chunks = [
                self.linear(inputs[idx * self.part:(idx + 1) * self.part])
                for idx in range(inputs.shape[0] // self.part + 1)
            ]
            x = torch.cat(chunks, dim=0)
        else:
            x = self.linear(inputs)
        # cudnn is toggled off around the norm as in the original code.
        torch.backends.cudnn.enabled = False
        if self.use_norm:
            # BatchNorm1d expects channels second: (P, C, N).
            x = self.norm(x.permute(0, 2, 1)).permute(0, 2, 1)
        torch.backends.cudnn.enabled = True
        x = F.relu(x)
        # Max-pool over the point dimension of each pillar.
        x_max = torch.max(x, dim=1, keepdim=True)[0]
        if self.last_vfe:
            return x_max
        return torch.cat([x, x_max.repeat(1, inputs.shape[1], 1)], dim=2)
class PillarVFE(VFETemplate):
    """Pillar feature encoder (PointPillars-style) operating on pre-voxelized,
    padded voxels of shape (num_voxels, max_points_per_voxel, C)."""
    def __init__(self, model_cfg, num_point_features, voxel_size, point_cloud_range, **kwargs):
        super().__init__(model_cfg=model_cfg)
        self.use_norm = self.model_cfg.USE_NORM
        self.with_distance = self.model_cfg.WITH_DISTANCE
        self.use_absolute_xyz = self.model_cfg.USE_ABSLOTE_XYZ
        # +6: cluster offset (3) + pillar-center offset (3) when absolute xyz
        # is kept; +3 because the raw xyz columns are dropped otherwise.
        num_point_features += 6 if self.use_absolute_xyz else 3
        if self.with_distance:
            num_point_features += 1
        self.num_filters = self.model_cfg.NUM_FILTERS
        assert len(self.num_filters) > 0
        num_filters = [num_point_features] + list(self.num_filters)
        pfn_layers = []
        for i in range(len(num_filters) - 1):
            in_filters = num_filters[i]
            out_filters = num_filters[i + 1]
            pfn_layers.append(
                PFNLayer(in_filters, out_filters, self.use_norm, last_layer=(i >= len(num_filters) - 2))
            )
        self.pfn_layers = nn.ModuleList(pfn_layers)
        # Voxel size and the center coordinate of voxel (0, 0, 0).
        self.voxel_x = voxel_size[0]
        self.voxel_y = voxel_size[1]
        self.voxel_z = voxel_size[2]
        self.x_offset = self.voxel_x / 2 + point_cloud_range[0]
        self.y_offset = self.voxel_y / 2 + point_cloud_range[1]
        self.z_offset = self.voxel_z / 2 + point_cloud_range[2]
    def get_output_feature_dim(self):
        return self.num_filters[-1]
    def get_paddings_indicator(self, actual_num, max_num, axis=0):
        # Boolean mask marking the real (non-padded) point slots of each voxel.
        actual_num = torch.unsqueeze(actual_num, axis + 1)
        max_num_shape = [1] * len(actual_num.shape)
        max_num_shape[axis + 1] = -1
        max_num = torch.arange(max_num, dtype=torch.int, device=actual_num.device).view(max_num_shape)
        paddings_indicator = actual_num.int() > max_num
        return paddings_indicator
    def forward(self, batch_dict, **kwargs):
        """
        Args:
            batch_dict:
                voxels: (num_voxels, max_points_per_voxel, C)
                voxel_num_points: (num_voxels)
                voxel_coords: (num_voxels, 4) [bs_idx, z, y, x]
            **kwargs:
        Returns:
            batch_dict:
                pillar_features: (num_voxels, C_out)
        """
        voxel_features, voxel_num_points, coords = batch_dict['voxels'], batch_dict['voxel_num_points'], batch_dict['voxel_coords']
        # Offset of each point from the centroid of its pillar.
        points_mean = voxel_features[:, :, :3].sum(dim=1, keepdim=True) / voxel_num_points.type_as(voxel_features).view(-1, 1, 1)
        f_cluster = voxel_features[:, :, :3] - points_mean
        # Offset of each point from the geometric center of its pillar.
        f_center = torch.zeros_like(voxel_features[:, :, :3])
        f_center[:, :, 0] = voxel_features[:, :, 0] - (coords[:, 3].to(voxel_features.dtype).unsqueeze(1) * self.voxel_x + self.x_offset)
        f_center[:, :, 1] = voxel_features[:, :, 1] - (coords[:, 2].to(voxel_features.dtype).unsqueeze(1) * self.voxel_y + self.y_offset)
        f_center[:, :, 2] = voxel_features[:, :, 2] - (coords[:, 1].to(voxel_features.dtype).unsqueeze(1) * self.voxel_z + self.z_offset)
        if self.use_absolute_xyz:
            features = [voxel_features, f_cluster, f_center]
        else:
            features = [voxel_features[..., 3:], f_cluster, f_center]
        if self.with_distance:
            points_dist = torch.norm(voxel_features[:, :, :3], 2, 2, keepdim=True)
            features.append(points_dist)
        features = torch.cat(features, dim=-1)
        # Zero out padded point slots so they do not contribute downstream.
        voxel_count = features.shape[1]
        mask = self.get_paddings_indicator(voxel_num_points, voxel_count, axis=0)
        mask = torch.unsqueeze(mask, -1).type_as(voxel_features)
        features *= mask
        for pfn in self.pfn_layers:
            features = pfn(features)
        features = features.squeeze()
        batch_dict['pillar_features'] = features
        return batch_dict
| 5,099
| 40.129032
| 137
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/backbones_3d/vfe/__init__.py
|
from .mean_vfe import MeanVFE
from .pillar_vfe import PillarVFE
from .dynamic_mean_vfe import DynamicMeanVFE
from .dynamic_pillar_vfe import DynamicPillarVFE
from .image_vfe import ImageVFE
from .vfe_template import VFETemplate
# Registry mapping config NAME strings to VFE classes. Note that the dynamic
# encoders are registered under shortened keys ('DynMeanVFE'/'DynPillarVFE')
# rather than their class names.
__all__ = {
    'VFETemplate': VFETemplate,
    'MeanVFE': MeanVFE,
    'PillarVFE': PillarVFE,
    'ImageVFE': ImageVFE,
    'DynMeanVFE': DynamicMeanVFE,
    'DynPillarVFE': DynamicPillarVFE,
}
| 425
| 25.625
| 48
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/backbones_3d/vfe/image_vfe.py
|
import torch
from .vfe_template import VFETemplate
from .image_vfe_modules import ffn, f2v
class ImageVFE(VFETemplate):
    """Image-based VFE: lifts image features into a camera frustum volume and
    samples them into the LiDAR voxel grid."""

    def __init__(self, model_cfg, grid_size, point_cloud_range, depth_downsample_factor, **kwargs):
        super().__init__(model_cfg=model_cfg)
        self.grid_size = grid_size
        self.pc_range = point_cloud_range
        self.downsample_factor = depth_downsample_factor
        # Order matters: build_ffn() must run first because it publishes
        # self.disc_cfg, which build_f2v() consumes.
        self.module_topology = [
            'ffn', 'f2v'
        ]
        self.build_modules()

    def build_modules(self):
        """Instantiate and register the sub-modules listed in module_topology."""
        for name in self.module_topology:
            builder = getattr(self, f'build_{name}')
            self.add_module(name, builder())

    def build_ffn(self):
        """
        Builds the frustum feature network.
        Returns:
            ffn_module: nn.Module, Frustum feature network
        """
        ffn_module = ffn.__all__[self.model_cfg.FFN.NAME](
            model_cfg=self.model_cfg.FFN,
            downsample_factor=self.downsample_factor
        )
        # Expose the depth discretization so build_f2v() can reuse it.
        self.disc_cfg = ffn_module.disc_cfg
        return ffn_module

    def build_f2v(self):
        """
        Builds the frustum-to-voxel transformation.
        Returns:
            f2v_module: nn.Module, Frustum to voxel transformation
        """
        return f2v.__all__[self.model_cfg.F2V.NAME](
            model_cfg=self.model_cfg.F2V,
            grid_size=self.grid_size,
            pc_range=self.pc_range,
            disc_cfg=self.disc_cfg
        )

    def get_output_feature_dim(self):
        """
        Returns:
            out_feature_dim: int, Number of output channels
        """
        return self.ffn.get_output_feature_dim()

    def forward(self, batch_dict, **kwargs):
        """
        Args:
            batch_dict:
                images: (N, 3, H_in, W_in), Input images
            **kwargs:
        Returns:
            batch_dict:
                voxel_features: (B, C, Z, Y, X), Image voxel features
        """
        batch_dict = self.ffn(batch_dict)
        return self.f2v(batch_dict)

    def get_loss(self):
        """
        Returns:
            loss: (1), Depth distribution network loss
            tb_dict: dict[float], All losses to log in tensorboard
        """
        return self.ffn.get_loss()
| 2,526
| 28.383721
| 99
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/backbones_3d/vfe/image_vfe_modules/__init__.py
| 0
| 0
| 0
|
py
|
|
3DTrans
|
3DTrans-master/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/depth_ffn.py
|
import torch.nn as nn
import torch.nn.functional as F
from . import ddn, ddn_loss
from pcdet.models.model_utils.basic_block_2d import BasicBlock2D
class DepthFFN(nn.Module):
    """Frustum feature network driven by pixel-wise depth classification."""

    def __init__(self, model_cfg, downsample_factor):
        """
        Args:
            model_cfg: EasyDict, Depth classification network config
            downsample_factor: int, Depth map downsample factor
        """
        super().__init__()
        self.model_cfg = model_cfg
        self.disc_cfg = model_cfg.DISCRETIZE
        self.downsample_factor = downsample_factor
        # Sub-modules: depth distribution network, channel reduction, loss.
        self.ddn = ddn.__all__[model_cfg.DDN.NAME](
            num_classes=self.disc_cfg["num_bins"] + 1,
            backbone_name=model_cfg.DDN.BACKBONE_NAME,
            **model_cfg.DDN.ARGS
        )
        self.channel_reduce = BasicBlock2D(**model_cfg.CHANNEL_REDUCE)
        self.ddn_loss = ddn_loss.__all__[model_cfg.LOSS.NAME](
            disc_cfg=self.disc_cfg,
            downsample_factor=downsample_factor,
            **model_cfg.LOSS.ARGS
        )
        self.forward_ret_dict = {}

    def get_output_feature_dim(self):
        return self.channel_reduce.out_channels

    def forward(self, batch_dict):
        """
        Predict per-pixel depth distributions and build the frustum volume.
        Args:
            batch_dict:
                images: (N, 3, H_in, W_in), Input images
        Returns:
            batch_dict:
                frustum_features: (N, C, D, H_out, W_out), Image depth features
        """
        ddn_result = self.ddn(batch_dict["images"])
        image_features = ddn_result["features"]
        depth_logits = ddn_result["logits"]
        if self.channel_reduce is not None:
            image_features = self.channel_reduce(image_features)
        batch_dict["frustum_features"] = self.create_frustum_features(
            image_features=image_features, depth_logits=depth_logits
        )
        if self.training:
            # Stash everything the loss needs; consumed later by get_loss().
            self.forward_ret_dict["depth_maps"] = batch_dict["depth_maps"]
            self.forward_ret_dict["gt_boxes2d"] = batch_dict["gt_boxes2d"]
            self.forward_ret_dict["depth_logits"] = depth_logits
        return batch_dict

    def create_frustum_features(self, image_features, depth_logits):
        """
        Weight image features by their predicted depth distribution.
        Args:
            image_features: (N, C, H, W), Image features
            depth_logits: (N, D+1, H, W), Depth classification logits
        Returns:
            frustum_features: (N, C, D, H, W), Image depth features
        """
        channel_dim = 1
        depth_dim = 2
        features = image_features.unsqueeze(depth_dim)
        logits = depth_logits.unsqueeze(channel_dim)
        # Softmax over depth bins; drop the trailing "beyond max range" bin.
        probs = F.softmax(logits, dim=depth_dim)
        probs = probs[:, :, :-1]
        return probs * features

    def get_loss(self):
        """
        Returns:
            loss: (1), Depth distribution network loss
            tb_dict: dict[float], All losses to log in tensorboard
        """
        return self.ddn_loss(**self.forward_ret_dict)
| 3,778
| 35.336538
| 96
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/__init__.py
|
from .depth_ffn import DepthFFN
__all__ = {
'DepthFFN': DepthFFN
}
| 72
| 11.166667
| 31
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn/ddn_deeplabv3.py
|
from .ddn_template import DDNTemplate
try:
import torchvision
except:
pass
class DDNDeepLabV3(DDNTemplate):
    """DeepLabV3 depth distribution network with a ResNet backbone."""

    def __init__(self, backbone_name, **kwargs):
        """
        Args:
            backbone_name: string, ResNet Backbone Name [ResNet50/ResNet101]
        """
        # Lookup stays inside __init__ so a missing torchvision only fails
        # when the model is actually constructed.
        if backbone_name == "ResNet50":
            builder = torchvision.models.segmentation.deeplabv3_resnet50
        elif backbone_name == "ResNet101":
            builder = torchvision.models.segmentation.deeplabv3_resnet101
        else:
            raise NotImplementedError
        super().__init__(constructor=builder, **kwargs)
| 674
| 26
| 77
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn/__init__.py
|
from .ddn_deeplabv3 import DDNDeepLabV3
__all__ = {
'DDNDeepLabV3': DDNDeepLabV3
}
| 88
| 13.833333
| 39
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn/ddn_template.py
|
from collections import OrderedDict
from pathlib import Path
from torch import hub
import torch
import torch.nn as nn
import torch.nn.functional as F
try:
from kornia.enhance.normalize import normalize
except:
pass
# print('Warning: kornia is not installed. This package is only required by CaDDN')
class DDNTemplate(nn.Module):
    """Base class for depth distribution networks built on torchvision
    segmentation models."""

    def __init__(self, constructor, feat_extract_layer, num_classes, pretrained_path=None, aux_loss=None):
        """
        Initializes depth distribution network.
        Args:
            constructor: function, Model constructor
            feat_extract_layer: string, Layer to extract features from
            num_classes: int, Number of classes
            pretrained_path: string, (Optional) Path of the model to load weights from
            aux_loss: bool, Flag to include auxillary loss
        """
        super().__init__()
        self.num_classes = num_classes
        self.pretrained_path = pretrained_path
        self.pretrained = pretrained_path is not None
        self.aux_loss = aux_loss
        if self.pretrained:
            # Preprocess Module: ImageNet normalization constants used by the
            # pretrained ResNet backbones.
            self.norm_mean = torch.Tensor([0.485, 0.456, 0.406])
            self.norm_std = torch.Tensor([0.229, 0.224, 0.225])
        # Model
        self.model = self.get_model(constructor=constructor)
        self.feat_extract_layer = feat_extract_layer
        # Also return intermediate features from the requested backbone layer.
        self.model.backbone.return_layers = {
            feat_extract_layer: 'features',
            **self.model.backbone.return_layers
        }

    def get_model(self, constructor):
        """
        Get model
        Args:
            constructor: function, Model constructor
        Returns:
            model: nn.Module, Model
        """
        # Get model
        model = constructor(pretrained=False,
                            pretrained_backbone=False,
                            num_classes=self.num_classes,
                            aux_loss=self.aux_loss)
        # Update weights
        if self.pretrained_path is not None:
            model_dict = model.state_dict()
            # Download pretrained model if not available yet
            checkpoint_path = Path(self.pretrained_path)
            if not checkpoint_path.exists():
                checkpoint = checkpoint_path.name
                save_dir = checkpoint_path.parent
                # Fix: exist_ok=True — only the checkpoint file may be missing
                # while the directory already exists; the previous call raised
                # FileExistsError in that case.
                save_dir.mkdir(parents=True, exist_ok=True)
                url = f'https://download.pytorch.org/models/{checkpoint}'
                hub.load_state_dict_from_url(url, save_dir)
            # Get pretrained state dict
            pretrained_dict = torch.load(self.pretrained_path)
            pretrained_dict = self.filter_pretrained_dict(model_dict=model_dict,
                                                          pretrained_dict=pretrained_dict)
            # Update current model state dict
            model_dict.update(pretrained_dict)
            model.load_state_dict(model_dict)
        return model

    def filter_pretrained_dict(self, model_dict, pretrained_dict):
        """
        Removes layers from pretrained state dict that are not used or changed in model
        Args:
            model_dict: dict, Default model state dictionary
            pretrained_dict: dict, Pretrained model state dictionary
        Returns:
            pretrained_dict: dict, Pretrained model state dictionary with removed weights
        """
        # Removes aux classifier weights if not used
        if "aux_classifier.0.weight" in pretrained_dict and "aux_classifier.0.weight" not in model_dict:
            pretrained_dict = {key: value for key, value in pretrained_dict.items()
                               if "aux_classifier" not in key}
        # Removes final conv layer from weights if number of classes are different
        model_num_classes = model_dict["classifier.4.weight"].shape[0]
        pretrained_num_classes = pretrained_dict["classifier.4.weight"].shape[0]
        if model_num_classes != pretrained_num_classes:
            pretrained_dict.pop("classifier.4.weight")
            pretrained_dict.pop("classifier.4.bias")
        return pretrained_dict

    def forward(self, images):
        """
        Forward pass
        Args:
            images: (N, 3, H_in, W_in), Input images
        Returns
            result: dict[torch.Tensor], Depth distribution result
                features: (N, C, H_out, W_out), Image features
                logits: (N, num_classes, H_out, W_out), Classification logits
                aux: (N, num_classes, H_out, W_out), Auxillary classification logits
        """
        # Preprocess images
        x = self.preprocess(images)
        # Extract features
        result = OrderedDict()
        features = self.model.backbone(x)
        result['features'] = features['features']
        feat_shape = features['features'].shape[-2:]
        # Prediction classification logits, upsampled to the feature resolution
        x = features["out"]
        x = self.model.classifier(x)
        x = F.interpolate(x, size=feat_shape, mode='bilinear', align_corners=False)
        result["logits"] = x
        # Prediction auxillary classification logits
        if self.model.aux_classifier is not None:
            x = features["aux"]
            x = self.model.aux_classifier(x)
            x = F.interpolate(x, size=feat_shape, mode='bilinear', align_corners=False)
            result["aux"] = x
        return result

    def preprocess(self, images):
        """
        Preprocess images
        Args:
            images: (N, 3, H, W), Input images
        Return
            x: (N, 3, H, W), Preprocessed images
        """
        x = images
        if self.pretrained:
            # Create a mask for padded pixels (encoded as NaN upstream)
            mask = torch.isnan(x)
            # Match ResNet pretrained preprocessing
            x = normalize(x, mean=self.norm_mean, std=self.norm_std)
            # Make padded pixels = 0
            x[mask] = 0
        return x
| 5,941
| 35.453988
| 106
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn_loss/balancer.py
|
import torch
import torch.nn as nn
from pcdet.utils import loss_utils
class Balancer(nn.Module):
    """Fixed-weight foreground/background loss balancer."""

    def __init__(self, fg_weight, bg_weight, downsample_factor=1):
        """
        Args:
            fg_weight: float, Foreground loss weight
            bg_weight: float, Background loss weight
            downsample_factor: int, Depth map downsample factor
        """
        super().__init__()
        self.fg_weight = fg_weight
        self.bg_weight = bg_weight
        self.downsample_factor = downsample_factor

    def forward(self, loss, gt_boxes2d):
        """
        Balance a pixel-wise loss between foreground and background regions.
        Args:
            loss: (B, H, W), Pixel-wise loss
            gt_boxes2d: (B, N, 4), 2D box labels marking foreground pixels
        Returns:
            loss: (1), Total loss after foreground/background balancing
            tb_dict: dict[float], All losses to log in tensorboard
        """
        fg_mask = loss_utils.compute_fg_mask(gt_boxes2d=gt_boxes2d,
                                             shape=loss.shape,
                                             downsample_factor=self.downsample_factor,
                                             device=loss.device)
        bg_mask = ~fg_mask
        # Scale each pixel by its region weight (in place, as before),
        # then normalize both partial sums by the total pixel count.
        loss *= self.fg_weight * fg_mask + self.bg_weight * bg_mask
        num_pixels = fg_mask.sum() + bg_mask.sum()
        fg_loss = loss[fg_mask].sum() / num_pixels
        bg_loss = loss[bg_mask].sum() / num_pixels
        total = fg_loss + bg_loss
        tb_dict = {"balancer_loss": total.item(), "fg_loss": fg_loss.item(), "bg_loss": bg_loss.item()}
        return total, tb_dict
| 1,806
| 34.431373
| 102
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn_loss/__init__.py
|
from .ddn_loss import DDNLoss
__all__ = {
"DDNLoss": DDNLoss
}
| 68
| 10.5
| 29
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn_loss/ddn_loss.py
|
import torch
import torch.nn as nn
from .balancer import Balancer
from pcdet.utils import transform_utils
try:
from kornia.losses.focal import FocalLoss
except:
pass
# print('Warning: kornia is not installed. This package is only required by CaDDN')
class DDNLoss(nn.Module):
    """Focal-loss objective for the depth distribution network."""

    def __init__(self,
                 weight,
                 alpha,
                 gamma,
                 disc_cfg,
                 fg_weight,
                 bg_weight,
                 downsample_factor):
        """
        Initializes DDNLoss module
        Args:
            weight: float, Loss function weight
            alpha: float, Alpha value for Focal Loss
            gamma: float, Gamma value for Focal Loss
            disc_cfg: dict, Depth discretiziation configuration
            fg_weight: float, Foreground loss weight
            bg_weight: float, Background loss weight
            downsample_factor: int, Depth map downsample factor
        """
        super().__init__()
        self.device = torch.cuda.current_device()
        self.disc_cfg = disc_cfg
        self.balancer = Balancer(downsample_factor=downsample_factor,
                                 fg_weight=fg_weight,
                                 bg_weight=bg_weight)
        # Set loss function
        self.alpha = alpha
        self.gamma = gamma
        self.loss_func = FocalLoss(alpha=self.alpha, gamma=self.gamma, reduction="none")
        self.weight = weight

    def forward(self, depth_logits, depth_maps, gt_boxes2d):
        """
        Gets DDN loss
        Args:
            depth_logits: (B, D+1, H, W), Predicted depth logits
            depth_maps: (B, H, W), Depth map [m]
            gt_boxes2d: torch.Tensor (B, N, 4), 2D box labels for foreground/background balancing
        Returns:
            loss: (1), Depth distribution network loss
            tb_dict: dict[float], All losses to log in tensorboard
        """
        # Discretize continuous depth into bin indices for classification.
        depth_target = transform_utils.bin_depths(depth_maps, **self.disc_cfg, target=True)
        pixel_loss = self.loss_func(depth_logits, depth_target)
        # Foreground/background balancing, then global weighting.
        balanced_loss, tb_dict = self.balancer(loss=pixel_loss, gt_boxes2d=gt_boxes2d)
        balanced_loss *= self.weight
        tb_dict.update({"ddn_loss": balanced_loss.item()})
        return balanced_loss, tb_dict
| 2,428
| 30.960526
| 97
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/backbones_3d/vfe/image_vfe_modules/f2v/frustum_to_voxel.py
|
import torch
import torch.nn as nn
from .frustum_grid_generator import FrustumGridGenerator
from .sampler import Sampler
class FrustumToVoxel(nn.Module):
    """Transforms camera frustum features into LiDAR voxel features via a 3D
    sampling grid."""

    def __init__(self, model_cfg, grid_size, pc_range, disc_cfg):
        """
        Args:
            model_cfg: EasyDict, Module configuration
            grid_size: [X, Y, Z], Voxel grid size
            pc_range: [x_min, y_min, z_min, x_max, y_max, z_max], Voxelization point cloud range (m)
            disc_cfg: EasyDict, Depth discretiziation configuration
        """
        super().__init__()
        self.model_cfg = model_cfg
        self.grid_size = grid_size
        self.pc_range = pc_range
        self.disc_cfg = disc_cfg
        self.grid_generator = FrustumGridGenerator(grid_size=grid_size,
                                                   pc_range=pc_range,
                                                   disc_cfg=disc_cfg)
        self.sampler = Sampler(**model_cfg.SAMPLER)

    def forward(self, batch_dict):
        """
        Generates voxel features via 3D transformation and sampling
        Args:
            batch_dict:
                frustum_features: (B, C, D, H_image, W_image), Image frustum features
                trans_lidar_to_cam: (B, 4, 4), LiDAR to camera frame transformation
                trans_cam_to_img: (B, 3, 4), Camera projection matrix
                image_shape: (B, 2), Image shape [H, W]
        Returns:
            batch_dict:
                voxel_features: (B, C, Z, Y, X), Image voxel features
        """
        # Sampling grid mapping every voxel into the frustum volume.
        sampling_grid = self.grid_generator(lidar_to_cam=batch_dict["trans_lidar_to_cam"],
                                            cam_to_img=batch_dict["trans_cam_to_img"],
                                            image_shape=batch_dict["image_shape"])  # (B, X, Y, Z, 3)
        sampled = self.sampler(input_features=batch_dict["frustum_features"],
                               grid=sampling_grid)  # (B, C, X, Y, Z)
        # Reorder axes (B, C, X, Y, Z) -> (B, C, Z, Y, X) to match voxel layout.
        batch_dict["voxel_features"] = sampled.permute(0, 1, 4, 3, 2)
        return batch_dict
| 2,338
| 41.527273
| 109
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/backbones_3d/vfe/image_vfe_modules/f2v/frustum_grid_generator.py
|
import torch
import torch.nn as nn
try:
from kornia.utils.grid import create_meshgrid3d
from kornia.geometry.linalg import transform_points
except Exception as e:
# Note: Kornia team will fix this import issue to try to allow the usage of lower torch versions.
# print('Warning: kornia is not installed correctly, please ignore this warning if you do not use CaDDN. Otherwise, it is recommended to use torch version greater than 1.2 to use kornia properly.')
pass
from pcdet.utils import transform_utils
class FrustumGridGenerator(nn.Module):
    """Builds the voxel-to-frustum sampling grid used to warp image features into 3D."""

    def __init__(self, grid_size, pc_range, disc_cfg):
        """
        Initializes Grid Generator for frustum features.

        Args:
            grid_size: [X, Y, Z], Voxel grid size
            pc_range: [x_min, y_min, z_min, x_max, y_max, z_max], Voxelization point cloud range (m)
            disc_cfg: EasyDict, Depth discretization configuration
        """
        super().__init__()
        try:
            import kornia
        except Exception as e:
            # Note: Kornia team will fix this import issue to try to allow the usage of lower torch versions.
            print('Error: kornia is not installed correctly, please ignore this warning if you do not use CaDDN. '
                  'Otherwise, it is recommended to use torch version greater than 1.2 to use kornia properly.')
            exit(-1)
        self.dtype = torch.float32
        self.grid_size = torch.as_tensor(grid_size, dtype=self.dtype)
        self.pc_range = pc_range
        # Sentinel written into the grid for NaN/Inf entries; -2 lies outside the
        # normalized [-1, 1] sampling range, so grid sampling treats it as out of bounds
        self.out_of_bounds_val = -2
        self.disc_cfg = disc_cfg

        # Calculate voxel size from the point-cloud extent and grid resolution
        pc_range = torch.as_tensor(pc_range).reshape(2, 3)
        self.pc_min = pc_range[0]
        self.pc_max = pc_range[1]
        self.voxel_size = (self.pc_max - self.pc_min) / self.grid_size

        # Create voxel grid
        # NOTE(review): grid_size is [X, Y, Z]; these names map X->depth,
        # Y->width, Z->height for kornia's meshgrid, and the permute below
        # restores XYZ ordering.
        self.depth, self.width, self.height = self.grid_size.int()
        self.voxel_grid = create_meshgrid3d(depth=self.depth,
                                            height=self.height,
                                            width=self.width,
                                            normalized_coordinates=False)
        self.voxel_grid = self.voxel_grid.permute(0, 1, 3, 2, 4)  # XZY-> XYZ
        # Add offsets to center of voxel
        self.voxel_grid += 0.5
        # Fixed unprojection from integer voxel indices to LiDAR coordinates (m)
        self.grid_to_lidar = self.grid_to_lidar_unproject(pc_min=self.pc_min,
                                                          voxel_size=self.voxel_size)

    def grid_to_lidar_unproject(self, pc_min, voxel_size):
        """
        Calculate grid to LiDAR unprojection for each plane.

        Args:
            pc_min: [x_min, y_min, z_min], Minimum of point cloud range (m)
            voxel_size: [x, y, z], Size of each voxel (m)
        Returns:
            unproject: (4, 4), Voxel grid to LiDAR unprojection matrix
        """
        x_size, y_size, z_size = voxel_size
        x_min, y_min, z_min = pc_min
        # Affine scale + translate: lidar = index * voxel_size + pc_min
        unproject = torch.tensor([[x_size, 0, 0, x_min],
                                  [0, y_size, 0, y_min],
                                  [0, 0, z_size, z_min],
                                  [0, 0, 0, 1]],
                                 dtype=self.dtype)  # (4, 4)
        return unproject

    def transform_grid(self, voxel_grid, grid_to_lidar, lidar_to_cam, cam_to_img):
        """
        Transforms voxel sampling grid into frustum sampling grid.

        Args:
            voxel_grid: (B, X, Y, Z, 3), Voxel sampling grid
            grid_to_lidar: (4, 4), Voxel grid to LiDAR unprojection matrix
            lidar_to_cam: (B, 4, 4), LiDAR to camera frame transformation
            cam_to_img: (B, 3, 4), Camera projection matrix
        Returns:
            frustum_grid: (B, X, Y, Z, 3), Frustum sampling grid (u, v, depth-bin)
        """
        B = lidar_to_cam.shape[0]
        # Create transformation matricies
        V_G = grid_to_lidar  # Voxel Grid -> LiDAR (4, 4)
        C_V = lidar_to_cam  # LiDAR -> Camera (B, 4, 4)
        I_C = cam_to_img  # Camera -> Image (B, 3, 4)
        # Compose voxel-grid -> camera in one matrix per batch element
        trans = C_V @ V_G
        # Reshape to match dimensions for broadcasting over the grid
        trans = trans.reshape(B, 1, 1, 4, 4)
        voxel_grid = voxel_grid.repeat_interleave(repeats=B, dim=0)
        # Transform to camera frame
        camera_grid = transform_points(trans_01=trans, points_1=voxel_grid)
        # Project to image
        I_C = I_C.reshape(B, 1, 1, 3, 4)
        image_grid, image_depths = transform_utils.project_to_image(project=I_C, points=camera_grid)
        # Convert continuous depths to discrete depth-bin indices
        image_depths = transform_utils.bin_depths(depth_map=image_depths, **self.disc_cfg)
        # Stack (u, v) pixel coordinates with the depth bin to form the frustum grid
        image_depths = image_depths.unsqueeze(-1)
        frustum_grid = torch.cat((image_grid, image_depths), dim=-1)
        return frustum_grid

    def forward(self, lidar_to_cam, cam_to_img, image_shape):
        """
        Generates sampling grid for frustum features.

        Args:
            lidar_to_cam: (B, 4, 4), LiDAR to camera frame transformation
            cam_to_img: (B, 3, 4), Camera projection matrix
            image_shape: (B, 2), Image shape [H, W]
        Returns:
            frustum_grid: (B, X, Y, Z, 3), Sampling grids for frustum features,
                normalized to [-1, 1] with invalid entries set to out_of_bounds_val
        """
        frustum_grid = self.transform_grid(voxel_grid=self.voxel_grid.to(lidar_to_cam.device),
                                           grid_to_lidar=self.grid_to_lidar.to(lidar_to_cam.device),
                                           lidar_to_cam=lidar_to_cam,
                                           cam_to_img=cam_to_img)
        # Normalize grid using the largest image shape in the batch
        image_shape, _ = torch.max(image_shape, dim=0)
        image_depth = torch.tensor([self.disc_cfg["num_bins"]],
                                   device=image_shape.device,
                                   dtype=image_shape.dtype)
        frustum_shape = torch.cat((image_depth, image_shape))
        frustum_grid = transform_utils.normalize_coords(coords=frustum_grid, shape=frustum_shape)
        # Replace any NaNs or infinites (e.g. points behind the camera) with out of bounds
        mask = ~torch.isfinite(frustum_grid)
        frustum_grid[mask] = self.out_of_bounds_val
        return frustum_grid
| 6,249
| 41.808219
| 201
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/backbones_3d/vfe/image_vfe_modules/f2v/sampler.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Sampler(nn.Module):
    """Thin wrapper around F.grid_sample for sampling frustum volumes."""

    def __init__(self, mode="bilinear", padding_mode="zeros"):
        """
        Initializes the sampler.

        Args:
            mode: string, Sampling mode [bilinear/nearest]
            padding_mode: string, Padding mode for outside grid values
                [zeros/border/reflection]
        """
        super().__init__()
        self.mode = mode
        self.padding_mode = padding_mode

    def forward(self, input_features, grid):
        """
        Samples input using the sampling grid.

        Args:
            input_features: (B, C, D, H, W), Input frustum features
            grid: (B, X, Y, Z, 3), Sampling grids for input features
        Returns:
            output_features: (B, C, X, Y, Z), Output voxel features
        """
        return F.grid_sample(input=input_features, grid=grid,
                             mode=self.mode, padding_mode=self.padding_mode)
| 980
| 30.645161
| 111
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/backbones_3d/vfe/image_vfe_modules/f2v/__init__.py
|
from .frustum_to_voxel import FrustumToVoxel

# Registry mapping config NAME strings to frustum-to-voxel module classes
__all__ = {
    'FrustumToVoxel': FrustumToVoxel
}
| 97
| 15.333333
| 44
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/active_models/discriminator.py
|
from xml.dom.minidom import DOMImplementation
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
class ActiveDiscriminator(nn.Module):
    """Scene-level domain classifier built on pooled, confidence-weighted point features."""

    def __init__(self, model_cfg):
        super().__init__()
        self.model_cfg = model_cfg
        # Single linear head mapping a pooled scene feature to one domain score
        self.fc = nn.Linear(model_cfg['FEATURE_DIM'], 1)
        self.sigmoid = nn.Sigmoid()
        nn.init.kaiming_uniform_(self.fc.weight)

    def get_discriminator_loss(self, batch_dict, source=True):
        """BCE loss toward label 0 for source-domain batches and 1 for target."""
        preds = batch_dict['domainness']
        target = torch.zeros_like(preds) if source else torch.ones_like(preds)
        return F.binary_cross_entropy(preds, target)

    def get_accuracy(self, batch_dict, source=True):
        """Fraction of scenes whose thresholded score matches the expected domain."""
        batch_size = batch_dict['batch_size']
        preds = batch_dict['domainness']
        zeros = torch.zeros_like(preds)
        ones = torch.ones_like(preds)
        # Binarize scores at 0.5
        preds = torch.where(preds > 0.5, ones, preds)
        preds = torch.where(preds <= 0.5, zeros, preds)
        expected = zeros if source else ones
        return (preds == expected).sum() / batch_size

    def domainness_evaluate(self, batch_dict, source=False):
        """Stores the (flipped for source) domain score used for active selection."""
        if source:
            score = 1 - batch_dict['domainness']
        else:
            score = batch_dict['domainness']
        batch_dict['domainness_evaluate'] = score
        return batch_dict

    def forward(self, batch_dict):
        feats = batch_dict['point_features']
        scores = batch_dict['point_cls_scores']
        coords = batch_dict['point_coords']
        batch_size = batch_dict['batch_size']
        # Confidence-weight each point feature before pooling
        weighted = feats * scores.view(-1, 1)
        pooled = weighted.new_zeros((batch_size, weighted.shape[-1]))
        for idx in range(batch_size):
            batch_mask = coords[:, 0] == idx
            pooled[idx] = torch.mean(weighted[batch_mask], dim=0)
        logits = self.fc(pooled)
        batch_dict['domainness'] = self.sigmoid(logits)
        return batch_dict
| 2,477
| 41.724138
| 163
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/active_models/discriminator_from_bev.py
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class BEVDiscriminator_Conv(nn.Module):
    """
    Domain discriminator over 2D BEV feature maps. Features are re-weighted by
    an objectness/entropy attention map, downsampled by strided convolutions,
    globally pooled, and mapped to a single domain logit.
    """
    def __init__(self, model_cfg):
        super().__init__()
        c_in = model_cfg['FEATURE_DIM']
        c_out = model_cfg['FEATURE_DIM'] // 4
        self.c_out = c_out
        # Five stride-2 conv + LeakyReLU stages; each halves the spatial size
        self.block = nn.Sequential(
            nn.Conv2d(c_in, c_out, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
            nn.Conv2d(c_out, c_out, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
            nn.Conv2d(c_out, c_out, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
            nn.Conv2d(c_out, c_out, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
            nn.Conv2d(c_out, c_out, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(negative_slope=0.2, inplace=True)
        )
        self.gap = nn.AdaptiveAvgPool2d((1,1))
        self.mlp = nn.Linear(c_out, 1)
    def forward(self, batch_dict):
        # bev_score / bev_map are produced by the dense head (see ActiveAnchorHeadSingle1)
        x = batch_dict['spatial_features_2d']
        score = batch_dict['bev_score']
        bev_map = batch_dict['bev_map']
        # Per-channel foreground probability, clamped below so log() stays finite
        bev_map = F.sigmoid(bev_map).clamp_(1e-6)
        # -p*log(p) term, reduced to its channel-wise max -> (B, 1, H, W)
        entropy_map = -bev_map * torch.log(bev_map)
        entropy_map = entropy_map.max(dim=1)[0].view(-1, 1, *entropy_map.shape[2:])
        score = F.sigmoid(score)
        # Fuse objectness and entropy into one attention map
        score = torch.cat([score, entropy_map], dim=1).mean(dim=1).view(-1, 1, *score.shape[2:])
        # Residual re-weighting: emphasize high-attention regions
        x = x * (1 + score)
        x = self.block(x)
        x = self.gap(x).view(-1, self.c_out)
        batch_dict['bev_pooled_feature'] = x
        x = self.mlp(x)
        # Raw (un-sigmoided) domain logit
        batch_dict['domainness'] = x
        return batch_dict
    def get_discriminator_loss(self, batch_dict, source=True, loss='bce'):
        """Adversarial loss toward label 0 for source batches and 1 for target."""
        domainness = batch_dict['domainness']
        if source:
            if loss == 'bce':
                discri_loss = bce_loss(domainness, 0)
            else:
                discri_loss = ls_loss(domainness, 0)
        else:
            if loss == 'bce':
                discri_loss = bce_loss(domainness, 1)
            else:
                discri_loss = ls_loss(domainness, 1)
        return discri_loss
    def domainness_evaluate(self, batch_dict, source=False):
        """Exposes the raw logit and its sigmoid as active-selection scores."""
        domainness = batch_dict['domainness']
        # domainness_value = 1 / (math.sqrt(2*3.14) * self.model_cfg.SIGMA) * torch.exp(-(domainness - self.model_cfg.MU).pow(2) / 2 * (self.model_cfg.SIGMA ** 2))
        # batch_dict['domainness_evaluate'] = domainness_value
        batch_dict['domainness_evaluate'] = domainness
        batch_dict['domainness_evaluate_sigmoid'] = F.sigmoid(domainness)
        return batch_dict
class BEVDiscriminator_Conv_2(nn.Module):
    """
    Variant of BEVDiscriminator_Conv that uses full binary entropy (log2 of both
    p and 1-p) and halves the fused attention map.
    """
    def __init__(self, model_cfg):
        super().__init__()
        c_in = model_cfg['FEATURE_DIM']
        c_out = model_cfg['FEATURE_DIM'] // 4
        self.c_out = c_out
        # Five stride-2 conv + LeakyReLU stages; each halves the spatial size
        self.block = nn.Sequential(
            nn.Conv2d(c_in, c_out, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
            nn.Conv2d(c_out, c_out, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
            nn.Conv2d(c_out, c_out, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
            nn.Conv2d(c_out, c_out, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
            nn.Conv2d(c_out, c_out, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(negative_slope=0.2, inplace=True)
        )
        self.gap = nn.AdaptiveAvgPool2d((1,1))
        self.mlp = nn.Linear(c_out, 1)
    def forward(self, batch_dict):
        x = batch_dict['spatial_features_2d']
        score = batch_dict['bev_score']
        bev_map = batch_dict['bev_map']
        bev_map = F.sigmoid(bev_map).clamp_(1e-6)
        # Binary entropy per pixel/channel, reduced to its channel-wise max.
        # NOTE(review): clamp_(1e-6) bounds only the lower side, so
        # torch.log2(1 - bev_map) is -inf when a probability reaches exactly 1
        # — confirm sigmoid outputs stay strictly below 1 in practice.
        entropy_map = -bev_map * torch.log2(bev_map) - (1 - bev_map) * torch.log2(1 - bev_map)
        entropy_map = entropy_map.max(dim=1)[0].view(-1, 1, *entropy_map.shape[2:])
        score = F.sigmoid(score)
        # Fuse objectness and entropy, then halve the attention map
        score = (torch.cat([score, entropy_map], dim=1).mean(dim=1).view(-1, 1, *score.shape[2:])) / 2
        x = x * (1 + score)
        x = self.block(x)
        x = self.gap(x).view(-1, self.c_out)
        x = self.mlp(x)
        # Raw (un-sigmoided) domain logit
        batch_dict['domainness'] = x
        return batch_dict
    def get_discriminator_loss(self, batch_dict, source=True, loss='bce'):
        """Adversarial loss toward label 0 for source batches and 1 for target."""
        domainness = batch_dict['domainness']
        if source:
            if loss == 'bce':
                discri_loss = bce_loss(domainness, 0)
            else:
                discri_loss = ls_loss(domainness, 0)
        else:
            if loss == 'bce':
                discri_loss = bce_loss(domainness, 1)
            else:
                discri_loss = ls_loss(domainness, 1)
        return discri_loss
    def domainness_evaluate(self, batch_dict, source=False):
        """Exposes the raw logit and its sigmoid as active-selection scores."""
        domainness = batch_dict['domainness']
        # domainness_value = 1 / (math.sqrt(2*3.14) * self.model_cfg.SIGMA) * torch.exp(-(domainness - self.model_cfg.MU).pow(2) / 2 * (self.model_cfg.SIGMA ** 2))
        # batch_dict['domainness_evaluate'] = domainness_value
        batch_dict['domainness_evaluate'] = domainness
        batch_dict['domainness_evaluate_sigmoid'] = F.sigmoid(domainness)
        return batch_dict
class BEVDiscriminator_Center(nn.Module):
    """
    BEV domain discriminator that derives its entropy map directly from the
    single-channel 'bev_score' (center-style heads) instead of 'bev_map'.
    """
    def __init__(self, model_cfg):
        super().__init__()
        c_in = model_cfg['FEATURE_DIM']
        c_out = model_cfg['FEATURE_DIM'] // 4
        self.c_out = c_out
        # Five stride-2 conv + LeakyReLU stages; each halves the spatial size
        self.block = nn.Sequential(
            nn.Conv2d(c_in, c_out, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
            nn.Conv2d(c_out, c_out, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
            nn.Conv2d(c_out, c_out, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
            nn.Conv2d(c_out, c_out, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
            nn.Conv2d(c_out, c_out, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(negative_slope=0.2, inplace=True)
        )
        self.gap = nn.AdaptiveAvgPool2d((1,1))
        self.mlp = nn.Linear(c_out, 1)
    def forward(self, batch_dict):
        x = batch_dict['spatial_features_2d']
        score = batch_dict['bev_score']
        # bev_map = batch_dict['bev_map']
        # Foreground probability from the score map, clamped below for finite logs.
        # NOTE(review): only the lower bound is clamped; log2(1-bev_map) is -inf
        # if a probability reaches exactly 1 — confirm this cannot occur here.
        bev_map = F.sigmoid(score).clamp_(1e-6)
        # Normalized binary entropy (max value 1 after dividing by 2... the /2
        # mirrors BEVDiscriminator_Conv_2's halving of the fused map)
        entropy_map = (-bev_map * torch.log2(bev_map) - (1-bev_map) * torch.log2(1-bev_map)) / 2
        # entropy_map = entropy_map.max(dim=1)[0].view(-1, 1, *entropy_map.shape[2:])
        score = F.sigmoid(score)
        # Fuse objectness and entropy into one attention map
        score = torch.cat([score, entropy_map], dim=1).mean(dim=1).view(-1, 1, *score.shape[2:])
        # Residual re-weighting of BEV features
        x = x * (1 + score)
        x = self.block(x)
        x = self.gap(x).view(-1, self.c_out)
        batch_dict['bev_pooled_feature'] = x
        x = self.mlp(x)
        # Raw (un-sigmoided) domain logit
        batch_dict['domainness'] = x
        return batch_dict
    def get_discriminator_loss(self, batch_dict, source=True, loss='bce'):
        """Adversarial loss toward label 0 for source batches and 1 for target."""
        domainness = batch_dict['domainness']
        if source:
            if loss == 'bce':
                discri_loss = bce_loss(domainness, 0)
            else:
                discri_loss = ls_loss(domainness, 0)
        else:
            if loss == 'bce':
                discri_loss = bce_loss(domainness, 1)
            else:
                discri_loss = ls_loss(domainness, 1)
        return discri_loss
    def domainness_evaluate(self, batch_dict, source=False):
        """Exposes the raw logit and its sigmoid as active-selection scores."""
        domainness = batch_dict['domainness']
        # domainness_value = 1 / (math.sqrt(2*3.14) * self.model_cfg.SIGMA) * torch.exp(-(domainness - self.model_cfg.MU).pow(2) / 2 * (self.model_cfg.SIGMA ** 2))
        # batch_dict['domainness_evaluate'] = domainness_value
        batch_dict['domainness_evaluate'] = domainness
        batch_dict['domainness_evaluate_sigmoid'] = F.sigmoid(domainness)
        return batch_dict
def bce_loss(y_pred, y_label):
    """
    Binary cross-entropy (with logits) against a constant domain label.

    Args:
        y_pred: torch.Tensor, Raw (un-sigmoided) domain logits
        y_label: float, Constant target label (0 for source, 1 for target)
    Returns:
        loss: torch.Tensor, Scalar BCE-with-logits loss
    """
    # full_like builds the target directly on y_pred's device/dtype; the previous
    # CPU FloatTensor + .to(y_pred.get_device()) construction crashed for CPU
    # tensors because Tensor.get_device() is only valid for CUDA tensors.
    y_truth_tensor = torch.full_like(y_pred, y_label)
    return nn.BCEWithLogitsLoss()(y_pred, y_truth_tensor)
def ls_loss(y_pred, y_label):
    """
    Least-squares (MSE) adversarial loss against a constant domain label.

    Args:
        y_pred: torch.Tensor, Raw domain predictions
        y_label: float, Constant target label (0 for source, 1 for target)
    Returns:
        loss: torch.Tensor, Scalar mean-squared-error loss
    """
    # full_like builds the target directly on y_pred's device/dtype; the previous
    # CPU FloatTensor + .to(y_pred.get_device()) construction crashed for CPU
    # tensors because Tensor.get_device() is only valid for CUDA tensors.
    y_truth_tensor = torch.full_like(y_pred, y_label)
    return nn.MSELoss()(y_pred, y_truth_tensor)
class BEVDiscriminator_TQS(nn.Module):
    """Plain BEV-feature domain discriminator used by TQS active selection."""

    def __init__(self, model_cfg):
        super().__init__()
        self.model_cfg = model_cfg
        in_channels = model_cfg['FEATURE_DIM']
        hidden = model_cfg['FEATURE_DIM'] // 4
        self.c_out = hidden
        # Five stride-2 conv + LeakyReLU stages progressively downsample the BEV map
        layers = []
        channels = in_channels
        for _ in range(5):
            layers.append(nn.Conv2d(channels, hidden, kernel_size=4, stride=2, padding=1))
            layers.append(nn.LeakyReLU(negative_slope=0.2, inplace=True))
            channels = hidden
        self.block = nn.Sequential(*layers)
        self.gap = nn.AdaptiveAvgPool2d((1, 1))
        self.mlp = nn.Linear(hidden, 1)

    def forward(self, batch_dict):
        """Predicts one raw domain logit per scene from 2D BEV features."""
        bev = batch_dict['spatial_features_2d']
        bev = self.block(bev)
        pooled = self.gap(bev).view(-1, self.c_out)
        batch_dict['domainness'] = self.mlp(pooled)
        return batch_dict

    def get_discriminator_loss(self, batch_dict, source=True, loss='bce'):
        """BCE or least-squares loss toward label 0 (source) / 1 (target)."""
        logits = batch_dict['domainness']
        label = 0 if source else 1
        loss_fn = bce_loss if loss == 'bce' else ls_loss
        return loss_fn(logits, label)

    def domainness_evaluate(self, batch_dict):
        """Scores sigmoid(domainness) under a Gaussian centered at MU with std SIGMA."""
        prob = F.sigmoid(batch_dict['domainness'])
        sigma = self.model_cfg.SIGMA
        mu = self.model_cfg.MU
        # Gaussian density of the domain probability (3.14 approximates pi,
        # matching the original implementation)
        gauss = 1 / (math.sqrt(2*3.14) * sigma) * torch.exp(-(prob - mu).pow(2) / (2 * (sigma ** 2)))
        batch_dict['domainness_evaluate'] = gauss
        return batch_dict
class BEVDiscriminator_Center_TQS(nn.Module):
    """
    TQS-style BEV domain discriminator that also exposes the pooled BEV feature
    and scores scenes with a Gaussian over the sigmoided domain logit.
    """
    def __init__(self, model_cfg):
        super().__init__()
        c_in = model_cfg['FEATURE_DIM']
        c_out = model_cfg['FEATURE_DIM'] // 4
        self.model_cfg = model_cfg
        self.c_out = c_out
        # Five stride-2 conv + LeakyReLU stages; each halves the spatial size
        self.block = nn.Sequential(
            nn.Conv2d(c_in, c_out, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
            nn.Conv2d(c_out, c_out, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
            nn.Conv2d(c_out, c_out, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
            nn.Conv2d(c_out, c_out, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
            nn.Conv2d(c_out, c_out, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(negative_slope=0.2, inplace=True)
        )
        self.gap = nn.AdaptiveAvgPool2d((1,1))
        self.mlp = nn.Linear(c_out, 1)
    def forward(self, batch_dict):
        x = batch_dict['spatial_features_2d']
        x = self.block(x)
        x = self.gap(x).view(-1, self.c_out)
        # Keep the pooled feature for downstream active-selection logic
        batch_dict['bev_pooled_feature'] = x
        x = self.mlp(x)
        # Raw (un-sigmoided) domain logit
        batch_dict['domainness'] = x
        return batch_dict
    def get_discriminator_loss(self, batch_dict, source=True, loss='bce'):
        """Adversarial loss toward label 0 for source batches and 1 for target."""
        domainness = batch_dict['domainness']
        if source:
            if loss == 'bce':
                discri_loss = bce_loss(domainness, 0)
            else:
                discri_loss = ls_loss(domainness, 0)
        else:
            if loss == 'bce':
                discri_loss = bce_loss(domainness, 1)
            else:
                discri_loss = ls_loss(domainness, 1)
        return discri_loss
    def domainness_evaluate(self, batch_dict, source=False):
        """Scores sigmoid(domainness) under a Gaussian centered at MU with std SIGMA."""
        domainness = batch_dict['domainness']
        domainness = F.sigmoid(domainness)
        # Gaussian density of the domain probability (3.14 approximates pi)
        domainness_value = 1 / (math.sqrt(2*3.14) * self.model_cfg.SIGMA) * torch.exp(-(domainness - self.model_cfg.MU).pow(2) / (2 * (self.model_cfg.SIGMA ** 2)))
        batch_dict['domainness_evaluate'] = domainness_value
        return batch_dict
| 13,106
| 42.115132
| 163
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/active_models/__init__.py
|
from .discriminator import ActiveDiscriminator
from .discriminator_from_bev import BEVDiscriminator_Conv
from .discriminator_from_bev import BEVDiscriminator_Conv_2
from .discriminator_from_bev import BEVDiscriminator_Center
from .discriminator_from_bev import BEVDiscriminator_TQS
from .discriminator_from_bev import BEVDiscriminator_Center_TQS

# Registry mapping config NAME strings to discriminator classes
__all__ = {
    'ActiveDiscriminator': ActiveDiscriminator,
    'ActiveBEVDiscriminator_Conv': BEVDiscriminator_Conv,
    'ActiveBEVDiscriminator_Conv_2': BEVDiscriminator_Conv_2,
    'BEVDiscriminator_Center': BEVDiscriminator_Center,
    'BEVDiscriminator_TQS': BEVDiscriminator_TQS,
    'BEVDiscriminator_Center_TQS': BEVDiscriminator_Center_TQS
}
| 698
| 42.6875
| 63
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/dense_heads/anchor_head_single.py
|
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from .anchor_head_template import AnchorHeadTemplate
class AnchorHeadSingle(AnchorHeadTemplate):
    """Single-stage anchor head with one 1x1 conv per prediction branch."""

    def __init__(self, model_cfg, input_channels, num_class, class_names, grid_size, point_cloud_range,
                 predict_boxes_when_training=True, **kwargs):
        super().__init__(
            model_cfg=model_cfg, num_class=num_class, class_names=class_names, grid_size=grid_size,
            point_cloud_range=point_cloud_range,
            predict_boxes_when_training=predict_boxes_when_training
        )
        self.num_anchors_per_location = sum(self.num_anchors_per_location)
        # 1x1 prediction convs for per-anchor class scores and box residuals
        self.conv_cls = nn.Conv2d(
            input_channels, self.num_anchors_per_location * self.num_class, kernel_size=1)
        self.conv_box = nn.Conv2d(
            input_channels, self.num_anchors_per_location * self.box_coder.code_size, kernel_size=1)
        if self.model_cfg.get('USE_DIRECTION_CLASSIFIER', None) is not None:
            self.conv_dir_cls = nn.Conv2d(
                input_channels, self.num_anchors_per_location * self.model_cfg.NUM_DIR_BINS, kernel_size=1)
        else:
            self.conv_dir_cls = None
        self.init_weights()

    def init_weights(self):
        # Focal-loss style bias init: initial foreground probability ~= pi
        pi = 0.01
        nn.init.constant_(self.conv_cls.bias, -np.log((1 - pi) / pi))
        nn.init.normal_(self.conv_box.weight, mean=0, std=0.001)

    def forward(self, data_dict):
        features = data_dict['spatial_features_2d']
        # Channel-last layout [N, H, W, C] expected by the loss/target code
        cls_preds = self.conv_cls(features).permute(0, 2, 3, 1).contiguous()
        box_preds = self.conv_box(features).permute(0, 2, 3, 1).contiguous()
        self.forward_ret_dict['cls_preds'] = cls_preds
        self.forward_ret_dict['box_preds'] = box_preds
        dir_cls_preds = None
        if self.conv_dir_cls is not None:
            dir_cls_preds = self.conv_dir_cls(features).permute(0, 2, 3, 1).contiguous()
            self.forward_ret_dict['dir_cls_preds'] = dir_cls_preds
        if self.training:
            self.forward_ret_dict.update(self.assign_targets(gt_boxes=data_dict['gt_boxes']))
        if not self.training or self.predict_boxes_when_training:
            batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
                batch_size=data_dict['batch_size'],
                cls_preds=cls_preds, box_preds=box_preds, dir_cls_preds=dir_cls_preds
            )
            data_dict['batch_cls_preds'] = batch_cls_preds
            data_dict['batch_box_preds'] = batch_box_preds
            data_dict['cls_preds_normalized'] = False
        return data_dict
class ActiveAnchorHeadSingle1(AnchorHeadTemplate):
    """
    AnchorHeadSingle variant for active learning: in addition to the usual
    predictions it exposes the raw classification map ('bev_map') and its
    channel-wise max ('bev_score') for downstream BEV discriminators.
    """

    def __init__(self, model_cfg, input_channels, num_class, class_names, grid_size, point_cloud_range,
                 predict_boxes_when_training=True, **kwargs):
        super().__init__(
            model_cfg=model_cfg, num_class=num_class, class_names=class_names, grid_size=grid_size,
            point_cloud_range=point_cloud_range,
            predict_boxes_when_training=predict_boxes_when_training
        )
        self.num_anchors_per_location = sum(self.num_anchors_per_location)
        # 1x1 prediction convs for per-anchor class scores and box residuals
        self.conv_cls = nn.Conv2d(
            input_channels, self.num_anchors_per_location * self.num_class, kernel_size=1)
        self.conv_box = nn.Conv2d(
            input_channels, self.num_anchors_per_location * self.box_coder.code_size, kernel_size=1)
        if self.model_cfg.get('USE_DIRECTION_CLASSIFIER', None) is not None:
            self.conv_dir_cls = nn.Conv2d(
                input_channels, self.num_anchors_per_location * self.model_cfg.NUM_DIR_BINS, kernel_size=1)
        else:
            self.conv_dir_cls = None
        self.init_weights()

    def init_weights(self):
        # Focal-loss style bias init: initial foreground probability ~= pi
        pi = 0.01
        nn.init.constant_(self.conv_cls.bias, -np.log((1 - pi) / pi))
        nn.init.normal_(self.conv_box.weight, mean=0, std=0.001)

    def forward(self, data_dict):
        features = data_dict['spatial_features_2d']
        raw_cls = self.conv_cls(features)
        raw_box = self.conv_box(features)
        # Channel-wise max of the raw class logits serves as a BEV objectness map
        data_dict['bev_score'] = raw_cls.max(dim=1)[0].view(-1, 1, *raw_cls.shape[2:])
        data_dict['bev_map'] = raw_cls
        # Channel-last layout [N, H, W, C] expected by the loss/target code
        cls_preds = raw_cls.permute(0, 2, 3, 1).contiguous()
        box_preds = raw_box.permute(0, 2, 3, 1).contiguous()
        self.forward_ret_dict['cls_preds'] = cls_preds
        self.forward_ret_dict['box_preds'] = box_preds
        dir_cls_preds = None
        if self.conv_dir_cls is not None:
            dir_cls_preds = self.conv_dir_cls(features).permute(0, 2, 3, 1).contiguous()
            self.forward_ret_dict['dir_cls_preds'] = dir_cls_preds
        if self.training:
            self.forward_ret_dict.update(self.assign_targets(gt_boxes=data_dict['gt_boxes']))
        if not self.training or self.predict_boxes_when_training:
            batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
                batch_size=data_dict['batch_size'],
                cls_preds=cls_preds, box_preds=box_preds, dir_cls_preds=dir_cls_preds
            )
            data_dict['batch_cls_preds'] = batch_cls_preds
            data_dict['batch_box_preds'] = batch_box_preds
            data_dict['cls_preds_normalized'] = False
        return data_dict
class AnchorHeadSingle_TQS(AnchorHeadTemplate):
    """
    Anchor head for TQS (Transferable Query Selection) active learning: a
    standard single-stage head plus two auxiliary classifiers whose
    disagreement (committee) and margin (uncertainty) drive sample selection.
    """

    def __init__(self, model_cfg, input_channels, num_class, class_names, grid_size, point_cloud_range,
                 predict_boxes_when_training=True, **kwargs):
        super().__init__(
            model_cfg=model_cfg, num_class=num_class, class_names=class_names, grid_size=grid_size,
            point_cloud_range=point_cloud_range,
            predict_boxes_when_training=predict_boxes_when_training
        )
        self.num_anchors_per_location = sum(self.num_anchors_per_location)
        self.input_channels = input_channels
        self.margin_scale = self.model_cfg.get('MARGIN_SCALE', None)
        self._init_cls_layers()
        self.conv_box = nn.Conv2d(
            input_channels, self.num_anchors_per_location * self.box_coder.code_size,
            kernel_size=1
        )
        if self.model_cfg.get('USE_DIRECTION_CLASSIFIER', None) is not None:
            self.conv_dir_cls = nn.Conv2d(
                input_channels,
                self.num_anchors_per_location * self.model_cfg.NUM_DIR_BINS,
                kernel_size=1
            )
        else:
            self.conv_dir_cls = None
        self.init_weights()

    def _init_cls_layers(self):
        # Main classifier plus two auxiliary classifiers (differently initialized
        # in init_weights) that act as a committee for active sample selection
        self.conv_cls = nn.Conv2d(
            self.input_channels, self.num_anchors_per_location * self.num_class,
            kernel_size=1
        )
        self.conv_cls1 = nn.Conv2d(
            self.input_channels, self.num_anchors_per_location * self.num_class,
            kernel_size=1
        )
        self.conv_cls2 = nn.Conv2d(
            self.input_channels, self.num_anchors_per_location * self.num_class,
            kernel_size=1
        )

    def init_weights(self):
        # Focal-loss style bias init for the main classifier; the auxiliary
        # classifiers get distinct Xavier inits so they disagree initially
        pi = 0.01
        nn.init.constant_(self.conv_cls.bias, -np.log((1 - pi) / pi))
        nn.init.xavier_normal_(self.conv_cls1.weight)
        nn.init.xavier_uniform_(self.conv_cls2.weight)
        nn.init.normal_(self.conv_box.weight, mean=0, std=0.001)

    def get_active_loss(self, mode=None):
        """
        Combined RPN loss for TQS training.

        Args:
            mode: 'train_detector' (all heads + box loss) or
                  'train_mul_cls' (auxiliary classifiers only)
        Returns:
            rpn_loss: torch.Tensor, Combined loss for the requested mode
            tb_dict: dict[float], Per-head losses to log in tensorboard
        """
        cls_loss, tb_dict = self.get_multi_cls_layer_loss()
        cls_loss_1, tb_dict_1 = self.get_multi_cls_layer_loss(head='cls_preds_1')
        tb_dict.update(tb_dict_1)
        cls_loss_2, tb_dict_2 = self.get_multi_cls_layer_loss(head='cls_preds_2')
        tb_dict.update(tb_dict_2)
        box_loss, tb_dict_box = self.get_box_reg_layer_loss()
        tb_dict.update(tb_dict_box)
        # BUGFIX: the original used "mode is 'train_detector'", comparing strings
        # by identity, which relies on CPython literal interning and raises a
        # SyntaxWarning; compare by equality instead.
        if mode == 'train_detector':
            rpn_loss = cls_loss + box_loss + cls_loss_1 + cls_loss_2
        elif mode == 'train_mul_cls':
            rpn_loss = cls_loss_1 + cls_loss_2
        else:
            # Previously an unrecognized mode crashed with UnboundLocalError;
            # fail explicitly instead.
            raise ValueError(f'Unsupported mode for get_active_loss: {mode}')
        return rpn_loss, tb_dict

    def get_multi_cls_layer_loss(self, head=None):
        """
        Classification loss for one of the (multi-)classifier heads.

        Args:
            head: key into forward_ret_dict ('cls_preds', 'cls_preds_1',
                  'cls_preds_2'); defaults to the main head
        Returns:
            cls_loss: torch.Tensor, Weighted classification loss
            tb_dict: dict[float], Loss value keyed for tensorboard
        """
        head = 'cls_preds' if head is None else head
        cls_preds = self.forward_ret_dict[head]
        box_cls_labels = self.forward_ret_dict['box_cls_labels']
        batch_size = int(cls_preds.shape[0])
        cared = box_cls_labels >= 0  # [N, num_anchors]; label -1 marks ignored anchors
        positives = box_cls_labels > 0
        negatives = box_cls_labels == 0
        negative_cls_weights = negatives * 1.0
        cls_weights = (negative_cls_weights + 1.0 * positives).float()
        reg_weights = positives.float()
        if self.num_class == 1:
            # class agnostic
            box_cls_labels[positives] = 1
        # Normalize weights by the number of positive anchors per sample
        pos_normalizer = positives.sum(1, keepdim=True).float()
        reg_weights /= torch.clamp(pos_normalizer, min=1.0)
        cls_weights /= torch.clamp(pos_normalizer, min=1.0)
        cls_targets = box_cls_labels * cared.type_as(box_cls_labels)
        cls_targets = cls_targets.unsqueeze(dim=-1)
        cls_targets = cls_targets.squeeze(dim=-1)
        one_hot_targets = torch.zeros(
            *list(cls_targets.shape), self.num_class + 1, dtype=cls_preds.dtype, device=cls_targets.device
        )
        one_hot_targets.scatter_(-1, cls_targets.unsqueeze(dim=-1).long(), 1.0)
        cls_preds = cls_preds.view(batch_size, -1, self.num_class)
        one_hot_targets = one_hot_targets[..., 1:]  # drop the background column
        cls_loss_src = self.cls_loss_func(cls_preds, one_hot_targets, weights=cls_weights)  # [N, M]
        cls_loss = cls_loss_src.sum() / batch_size
        cls_loss = cls_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['cls_weight']
        # BUGFIX: the original appended head.split('_')[-1] unconditionally, so the
        # default head logged under 'rpn_loss_clspreds'; keep 'rpn_loss_cls1'/
        # 'rpn_loss_cls2' for the auxiliary heads and 'rpn_loss_cls' for the main head.
        suffix = head.split('_')[-1]
        loss_name = 'rpn_loss_cls' if suffix == 'preds' else 'rpn_loss_cls' + suffix
        tb_dict = {
            loss_name: cls_loss.item()
        }
        return cls_loss, tb_dict

    def forward(self, data_dict):
        spatial_features_2d = data_dict['spatial_features_2d']
        cls_preds = self.conv_cls(spatial_features_2d)
        box_preds = self.conv_box(spatial_features_2d)
        # Auxiliary committee classifiers
        cls_preds_1 = self.conv_cls1(spatial_features_2d)
        cls_preds_2 = self.conv_cls2(spatial_features_2d)
        # Channel-last layout [N, H, W, C] expected by the loss/target code
        cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous()
        box_preds = box_preds.permute(0, 2, 3, 1).contiguous()
        cls_preds_1 = cls_preds_1.permute(0, 2, 3, 1).contiguous()
        cls_preds_2 = cls_preds_2.permute(0, 2, 3, 1).contiguous()
        self.forward_ret_dict['cls_preds'] = cls_preds
        self.forward_ret_dict['box_preds'] = box_preds
        self.forward_ret_dict['cls_preds_1'] = cls_preds_1
        self.forward_ret_dict['cls_preds_2'] = cls_preds_2
        if self.conv_dir_cls is not None:
            dir_cls_preds = self.conv_dir_cls(spatial_features_2d)
            dir_cls_preds = dir_cls_preds.permute(0, 2, 3, 1).contiguous()
            self.forward_ret_dict['dir_cls_preds'] = dir_cls_preds
        else:
            dir_cls_preds = None
        if self.training:
            targets_dict = self.assign_targets(
                gt_boxes=data_dict['gt_boxes']
            )
            self.forward_ret_dict.update(targets_dict)
        if not self.training or self.predict_boxes_when_training:
            batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
                batch_size=data_dict['batch_size'],
                cls_preds=cls_preds, box_preds=box_preds, dir_cls_preds=dir_cls_preds
            )
            data_dict['batch_cls_preds'] = batch_cls_preds
            data_dict['batch_box_preds'] = batch_box_preds
            data_dict['cls_preds_normalized'] = False
        return data_dict

    def committee_evaluate(self, data_dict):
        """
        Per-sample disagreement between the two auxiliary classifiers
        (mean squared difference of their sigmoid outputs).
        """
        batch_size = self.forward_ret_dict['cls_preds_1'].shape[0]
        cls_preds_1 = self.forward_ret_dict['cls_preds_1']
        cls_preds_2 = self.forward_ret_dict['cls_preds_2']
        cls_preds_1 = cls_preds_1.view(batch_size, -1, self.num_class)  # (B, num_anchor, num_class)
        cls_preds_2 = cls_preds_2.view(batch_size, -1, self.num_class)  # (B, num_anchor, num_class)
        distances = torch.zeros((batch_size, 1))
        for i in range(batch_size):
            reweight_cls_1 = cls_preds_1[i]
            reweight_cls_2 = cls_preds_2[i]
            dis = (F.sigmoid(reweight_cls_1) - F.sigmoid(reweight_cls_2)).pow(2)  # (num_anchor, num_class)
            dis = dis.mean(dim=-1).mean()
            distances[i] = dis
        self.forward_ret_dict['committee_evaluate'] = distances
        data_dict['committee_evaluate'] = distances
        return data_dict

    def uncertainty_evaluate(self, data_dict):
        """
        Per-sample margin uncertainty: mean distance of each auxiliary
        classifier's sigmoid output to the nearer of {0, 1}, averaged over
        the two classifiers.
        """
        batch_size = self.forward_ret_dict['cls_preds_1'].shape[0]
        cls_preds_1 = self.forward_ret_dict['cls_preds_1'].view(batch_size, -1, self.num_class)
        cls_preds_2 = self.forward_ret_dict['cls_preds_2'].view(batch_size, -1, self.num_class)
        uncertainty = torch.zeros((batch_size, 1))
        for i in range(batch_size):
            reweight_cls_1 = cls_preds_1[i].view(-1, 1)
            reweight_cls_2 = cls_preds_2[i].view(-1, 1)
            reweight_cls_1 = F.sigmoid(reweight_cls_1)
            reweight_cls_2 = F.sigmoid(reweight_cls_2)
            uncertainty_cls_1 = torch.min(torch.cat([torch.ones_like(reweight_cls_1) - reweight_cls_1, reweight_cls_1 - torch.zeros_like(reweight_cls_1)], dim=1)).view(-1).mean()
            uncertainty_cls_2 = torch.min(torch.cat([torch.ones_like(reweight_cls_2) - reweight_cls_2, reweight_cls_2 - torch.zeros_like(reweight_cls_2)], dim=1)).view(-1).mean()
            uncertainty[i] = (uncertainty_cls_1 + uncertainty_cls_2) / 2
        data_dict['uncertainty_evaluate'] = uncertainty
        return data_dict
| 14,330
| 41.907186
| 178
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/dense_heads/point_head_template.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from ...utils import common_utils, loss_utils
class PointHeadTemplate(nn.Module):
    """Base class for point-wise prediction heads.

    Provides loss construction, per-point target assignment against GT boxes,
    and box decoding.  Subclasses implement forward() and populate
    ``self.forward_ret_dict`` for the get_*_loss methods.
    """
    def __init__(self, model_cfg, num_class):
        super().__init__()
        self.model_cfg = model_cfg
        self.num_class = num_class
        self.build_losses(self.model_cfg.LOSS_CONFIG)
        # Filled by the subclass forward(); consumed by get_*_loss below.
        self.forward_ret_dict = None
    def build_losses(self, losses_cfg):
        """Register the focal classification loss and select the regression loss."""
        self.add_module(
            'cls_loss_func',
            loss_utils.SigmoidFocalClassificationLoss(alpha=0.25, gamma=2.0)
        )
        reg_loss_type = losses_cfg.get('LOSS_REG', None)
        if reg_loss_type == 'smooth-l1':
            self.reg_loss_func = F.smooth_l1_loss
        elif reg_loss_type == 'l1':
            self.reg_loss_func = F.l1_loss
        elif reg_loss_type == 'WeightedSmoothL1Loss':
            self.reg_loss_func = loss_utils.WeightedSmoothL1Loss(
                code_weights=losses_cfg.LOSS_WEIGHTS.get('code_weights', None)
            )
        else:
            # Default when LOSS_REG is absent or unrecognized.
            self.reg_loss_func = F.smooth_l1_loss
    @staticmethod
    def make_fc_layers(fc_cfg, input_channels, output_channels):
        """Build a Linear(+BN+ReLU) stack per fc_cfg entry, ending in a plain Linear."""
        fc_layers = []
        c_in = input_channels
        for k in range(0, fc_cfg.__len__()):
            fc_layers.extend([
                nn.Linear(c_in, fc_cfg[k], bias=False),
                nn.BatchNorm1d(fc_cfg[k]),
                nn.ReLU(),
            ])
            c_in = fc_cfg[k]
        fc_layers.append(nn.Linear(c_in, output_channels, bias=True))
        return nn.Sequential(*fc_layers)
    def assign_stack_targets(self, points, gt_boxes, extend_gt_boxes=None,
                             ret_box_labels=False, ret_part_labels=False,
                             set_ignore_flag=True, use_ball_constraint=False, central_radius=2.0):
        """
        Assign per-point classification / box / part labels from GT boxes.

        Args:
            points: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
            gt_boxes: (B, M, 8)
            extend_gt_boxes: [B, M, 8] enlarged boxes used to mark the ignore ring
            ret_box_labels: also compute encoded box regression targets
            ret_part_labels: also compute intra-object part-location targets
            set_ignore_flag: mark points inside extend_gt_boxes but outside gt_boxes as ignored (-1)
            use_ball_constraint: only keep FG points within central_radius of the box center
            central_radius: radius (m) for the ball constraint
        Returns:
            point_cls_labels: (N1 + N2 + N3 + ...), long type, 0:background, -1:ignored
            point_box_labels: (N1 + N2 + N3 + ..., code_size)
        """
        assert len(points.shape) == 2 and points.shape[1] == 4, 'points.shape=%s' % str(points.shape)
        assert len(gt_boxes.shape) == 3 and gt_boxes.shape[2] == 8, 'gt_boxes.shape=%s' % str(gt_boxes.shape)
        assert extend_gt_boxes is None or len(extend_gt_boxes.shape) == 3 and extend_gt_boxes.shape[2] == 8, \
            'extend_gt_boxes.shape=%s' % str(extend_gt_boxes.shape)
        assert set_ignore_flag != use_ball_constraint, 'Choose one only!'
        batch_size = gt_boxes.shape[0]
        bs_idx = points[:, 0]
        point_cls_labels = points.new_zeros(points.shape[0]).long()
        point_box_labels = gt_boxes.new_zeros((points.shape[0], 8)) if ret_box_labels else None
        point_part_labels = gt_boxes.new_zeros((points.shape[0], 3)) if ret_part_labels else None
        for k in range(batch_size):
            bs_mask = (bs_idx == k)
            points_single = points[bs_mask][:, 1:4]
            point_cls_labels_single = point_cls_labels.new_zeros(bs_mask.sum())
            # box index per point; -1 when the point lies in no box
            box_idxs_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu(
                points_single.unsqueeze(dim=0), gt_boxes[k:k + 1, :, 0:7].contiguous()
            ).long().squeeze(dim=0)
            box_fg_flag = (box_idxs_of_pts >= 0)
            if set_ignore_flag:
                extend_box_idxs_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu(
                    points_single.unsqueeze(dim=0), extend_gt_boxes[k:k+1, :, 0:7].contiguous()
                ).long().squeeze(dim=0)
                fg_flag = box_fg_flag
                # XOR: inside the enlarged box but outside the tight box -> ignore (-1)
                ignore_flag = fg_flag ^ (extend_box_idxs_of_pts >= 0)
                point_cls_labels_single[ignore_flag] = -1
            elif use_ball_constraint:
                box_centers = gt_boxes[k][box_idxs_of_pts][:, 0:3].clone()
                # lift to box top-center (z + dz/2) before the radius test
                box_centers[:, 2] += gt_boxes[k][box_idxs_of_pts][:, 5] / 2
                ball_flag = ((box_centers - points_single).norm(dim=1) < central_radius)
                fg_flag = box_fg_flag & ball_flag
            else:
                raise NotImplementedError
            gt_box_of_fg_points = gt_boxes[k][box_idxs_of_pts[fg_flag]]
            # class-agnostic (1) vs per-class label from the GT box's last column
            point_cls_labels_single[fg_flag] = 1 if self.num_class == 1 else gt_box_of_fg_points[:, -1].long()
            point_cls_labels[bs_mask] = point_cls_labels_single
            if ret_box_labels and gt_box_of_fg_points.shape[0] > 0:
                point_box_labels_single = point_box_labels.new_zeros((bs_mask.sum(), 8))
                fg_point_box_labels = self.box_coder.encode_torch(
                    gt_boxes=gt_box_of_fg_points[:, :-1], points=points_single[fg_flag],
                    gt_classes=gt_box_of_fg_points[:, -1].long()
                )
                point_box_labels_single[fg_flag] = fg_point_box_labels
                point_box_labels[bs_mask] = point_box_labels_single
            if ret_part_labels:
                point_part_labels_single = point_part_labels.new_zeros((bs_mask.sum(), 3))
                # express FG points in the canonical (box-centered, yaw-aligned) frame
                transformed_points = points_single[fg_flag] - gt_box_of_fg_points[:, 0:3]
                transformed_points = common_utils.rotate_points_along_z(
                    transformed_points.view(-1, 1, 3), -gt_box_of_fg_points[:, 6]
                ).view(-1, 3)
                # normalize to [0, 1] within the box extents
                offset = torch.tensor([0.5, 0.5, 0.5]).view(1, 3).type_as(transformed_points)
                point_part_labels_single[fg_flag] = (transformed_points / gt_box_of_fg_points[:, 3:6]) + offset
                point_part_labels[bs_mask] = point_part_labels_single
        targets_dict = {
            'point_cls_labels': point_cls_labels,
            'point_box_labels': point_box_labels,
            'point_part_labels': point_part_labels
        }
        return targets_dict
    def get_cls_layer_loss(self, tb_dict=None):
        """Focal classification loss over all points, normalized by FG count."""
        point_cls_labels = self.forward_ret_dict['point_cls_labels'].view(-1)
        point_cls_preds = self.forward_ret_dict['point_cls_preds'].view(-1, self.num_class)
        positives = (point_cls_labels > 0)
        negative_cls_weights = (point_cls_labels == 0) * 1.0
        # ignored points (-1) get weight 0
        cls_weights = (negative_cls_weights + 1.0 * positives).float()
        pos_normalizer = positives.sum(dim=0).float()
        cls_weights /= torch.clamp(pos_normalizer, min=1.0)
        # one-hot over num_class + 1 then drop the background column
        one_hot_targets = point_cls_preds.new_zeros(*list(point_cls_labels.shape), self.num_class + 1)
        one_hot_targets.scatter_(-1, (point_cls_labels * (point_cls_labels >= 0).long()).unsqueeze(dim=-1).long(), 1.0)
        one_hot_targets = one_hot_targets[..., 1:]
        cls_loss_src = self.cls_loss_func(point_cls_preds, one_hot_targets, weights=cls_weights)
        point_loss_cls = cls_loss_src.sum()
        loss_weights_dict = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS
        point_loss_cls = point_loss_cls * loss_weights_dict['point_cls_weight']
        if tb_dict is None:
            tb_dict = {}
        tb_dict.update({
            'point_loss_cls': point_loss_cls.item(),
            'point_pos_num': pos_normalizer.item()
        })
        return point_loss_cls, tb_dict
    def get_part_layer_loss(self, tb_dict=None):
        """BCE loss on intra-object part locations, FG points only."""
        pos_mask = self.forward_ret_dict['point_cls_labels'] > 0
        pos_normalizer = max(1, (pos_mask > 0).sum().item())
        point_part_labels = self.forward_ret_dict['point_part_labels']
        point_part_preds = self.forward_ret_dict['point_part_preds']
        point_loss_part = F.binary_cross_entropy(torch.sigmoid(point_part_preds), point_part_labels, reduction='none')
        # divide by 3 since each point contributes 3 coordinate terms
        point_loss_part = (point_loss_part.sum(dim=-1) * pos_mask.float()).sum() / (3 * pos_normalizer)
        loss_weights_dict = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS
        point_loss_part = point_loss_part * loss_weights_dict['point_part_weight']
        if tb_dict is None:
            tb_dict = {}
        tb_dict.update({'point_loss_part': point_loss_part.item()})
        return point_loss_part, tb_dict
    def get_box_layer_loss(self, tb_dict=None):
        """Box regression loss on FG points, normalized by FG count."""
        pos_mask = self.forward_ret_dict['point_cls_labels'] > 0
        point_box_labels = self.forward_ret_dict['point_box_labels']
        point_box_preds = self.forward_ret_dict['point_box_preds']
        reg_weights = pos_mask.float()
        pos_normalizer = pos_mask.sum().float()
        reg_weights /= torch.clamp(pos_normalizer, min=1.0)
        point_loss_box_src = self.reg_loss_func(
            point_box_preds[None, ...], point_box_labels[None, ...], weights=reg_weights[None, ...]
        )
        point_loss_box = point_loss_box_src.sum()
        loss_weights_dict = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS
        point_loss_box = point_loss_box * loss_weights_dict['point_box_weight']
        if tb_dict is None:
            tb_dict = {}
        tb_dict.update({'point_loss_box': point_loss_box.item()})
        return point_loss_box, tb_dict
    def generate_predicted_boxes(self, points, point_cls_preds, point_box_preds):
        """
        Decode per-point box predictions using the argmax class.

        Args:
            points: (N, 3)
            point_cls_preds: (N, num_class)
            point_box_preds: (N, box_code_size)
        Returns:
            point_cls_preds: (N, num_class)
            point_box_preds: (N, box_code_size)
        """
        _, pred_classes = point_cls_preds.max(dim=-1)
        # +1: decoder expects 1-based class ids
        point_box_preds = self.box_coder.decode_torch(point_box_preds, points, pred_classes + 1)
        return point_cls_preds, point_box_preds
    def forward(self, **kwargs):
        # Must be implemented by concrete point heads.
        raise NotImplementedError
| 9,776
| 45.336493
| 119
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/dense_heads/anchor_head_template.py
|
import numpy as np
import torch
import torch.nn as nn
from ...utils import box_coder_utils, common_utils, loss_utils
from .target_assigner.anchor_generator import AnchorGenerator
from .target_assigner.atss_target_assigner import ATSSTargetAssigner
from .target_assigner.axis_aligned_target_assigner import AxisAlignedTargetAssigner
class AnchorHeadTemplate(nn.Module):
    """Base class for anchor-based dense heads.

    Owns anchor generation, target assignment and the classification /
    regression / direction losses.  Subclasses implement forward() and fill
    ``self.forward_ret_dict``.
    """
    def __init__(self, model_cfg, num_class, class_names, grid_size, point_cloud_range, predict_boxes_when_training):
        super().__init__()
        self.model_cfg = model_cfg
        self.num_class = num_class
        self.class_names = class_names
        self.predict_boxes_when_training = predict_boxes_when_training
        self.use_multihead = self.model_cfg.get('USE_MULTIHEAD', False)
        anchor_target_cfg = self.model_cfg.TARGET_ASSIGNER_CONFIG
        # Box coder class is looked up by name from box_coder_utils.
        self.box_coder = getattr(box_coder_utils, anchor_target_cfg.BOX_CODER)(
            num_dir_bins=anchor_target_cfg.get('NUM_DIR_BINS', 6),
            **anchor_target_cfg.get('BOX_CODER_CONFIG', {})
        )
        anchor_generator_cfg = self.model_cfg.ANCHOR_GENERATOR_CONFIG
        anchors, self.num_anchors_per_location = self.generate_anchors(
            anchor_generator_cfg, grid_size=grid_size, point_cloud_range=point_cloud_range,
            anchor_ndim=self.box_coder.code_size
        )
        self.anchors = [x.cuda() for x in anchors]
        self.target_assigner = self.get_target_assigner(anchor_target_cfg)
        self.forward_ret_dict = {}
        self.build_losses(self.model_cfg.LOSS_CONFIG)
    @staticmethod
    def generate_anchors(anchor_generator_cfg, grid_size, point_cloud_range, anchor_ndim=7):
        """Generate anchors per class config; zero-pad when the box code is longer than 7."""
        anchor_generator = AnchorGenerator(
            anchor_range=point_cloud_range,
            anchor_generator_config=anchor_generator_cfg
        )
        feature_map_size = [grid_size[:2] // config['feature_map_stride'] for config in anchor_generator_cfg]
        anchors_list, num_anchors_per_location_list = anchor_generator.generate_anchors(feature_map_size)
        if anchor_ndim != 7:
            # Pad extra box-code dims (e.g. velocity) with zeros.
            for idx, anchors in enumerate(anchors_list):
                pad_zeros = anchors.new_zeros([*anchors.shape[0:-1], anchor_ndim - 7])
                new_anchors = torch.cat((anchors, pad_zeros), dim=-1)
                anchors_list[idx] = new_anchors
        return anchors_list, num_anchors_per_location_list
    def get_target_assigner(self, anchor_target_cfg):
        """Instantiate the configured target assigner (ATSS or axis-aligned)."""
        if anchor_target_cfg.NAME == 'ATSS':
            target_assigner = ATSSTargetAssigner(
                topk=anchor_target_cfg.TOPK,
                box_coder=self.box_coder,
                use_multihead=self.use_multihead,
                match_height=anchor_target_cfg.MATCH_HEIGHT
            )
        elif anchor_target_cfg.NAME == 'AxisAlignedTargetAssigner':
            target_assigner = AxisAlignedTargetAssigner(
                model_cfg=self.model_cfg,
                class_names=self.class_names,
                box_coder=self.box_coder,
                match_height=anchor_target_cfg.MATCH_HEIGHT
            )
        else:
            raise NotImplementedError
        return target_assigner
    def build_losses(self, losses_cfg):
        """Register classification, regression and direction loss modules."""
        self.add_module(
            'cls_loss_func',
            loss_utils.SigmoidFocalClassificationLoss(alpha=0.25, gamma=2.0)
        )
        reg_loss_name = 'WeightedSmoothL1Loss' if losses_cfg.get('REG_LOSS_TYPE', None) is None \
            else losses_cfg.REG_LOSS_TYPE
        self.add_module(
            'reg_loss_func',
            getattr(loss_utils, reg_loss_name)(code_weights=losses_cfg.LOSS_WEIGHTS['code_weights'])
        )
        self.add_module(
            'dir_loss_func',
            loss_utils.WeightedCrossEntropyLoss()
        )
    def assign_targets(self, gt_boxes):
        """
        Delegate anchor-to-GT matching to the configured target assigner.

        Args:
            gt_boxes: (B, M, 8)
        Returns:
            targets_dict produced by the target assigner.
        """
        targets_dict = self.target_assigner.assign_targets(
            self.anchors, gt_boxes
        )
        return targets_dict
    def get_cls_layer_loss(self):
        """Focal classification loss over anchors, normalized by positives per sample."""
        cls_preds = self.forward_ret_dict['cls_preds']
        box_cls_labels = self.forward_ret_dict['box_cls_labels']
        batch_size = int(cls_preds.shape[0])
        cared = box_cls_labels >= 0  # [N, num_anchors]; -1 == don't care
        positives = box_cls_labels > 0
        negatives = box_cls_labels == 0
        negative_cls_weights = negatives * 1.0
        cls_weights = (negative_cls_weights + 1.0 * positives).float()
        reg_weights = positives.float()
        if self.num_class == 1:
            # class agnostic
            box_cls_labels[positives] = 1
        pos_normalizer = positives.sum(1, keepdim=True).float()
        reg_weights /= torch.clamp(pos_normalizer, min=1.0)
        cls_weights /= torch.clamp(pos_normalizer, min=1.0)
        cls_targets = box_cls_labels * cared.type_as(box_cls_labels)
        # unsqueeze immediately reversed by squeeze: a no-op kept for parity with upstream
        cls_targets = cls_targets.unsqueeze(dim=-1)
        cls_targets = cls_targets.squeeze(dim=-1)
        one_hot_targets = torch.zeros(
            *list(cls_targets.shape), self.num_class + 1, dtype=cls_preds.dtype, device=cls_targets.device
        )
        one_hot_targets.scatter_(-1, cls_targets.unsqueeze(dim=-1).long(), 1.0)
        cls_preds = cls_preds.view(batch_size, -1, self.num_class)
        # drop the background column
        one_hot_targets = one_hot_targets[..., 1:]
        cls_loss_src = self.cls_loss_func(cls_preds, one_hot_targets, weights=cls_weights)  # [N, M]
        cls_loss = cls_loss_src.sum() / batch_size
        cls_loss = cls_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['cls_weight']
        tb_dict = {
            'rpn_loss_cls': cls_loss.item()
        }
        return cls_loss, tb_dict
    @staticmethod
    def add_sin_difference(boxes1, boxes2, dim=6):
        """Replace the angle column with sin/cos-encoded difference terms so the
        regression loss measures sin(a - b) = sin(a)cos(b) - cos(a)sin(b)."""
        assert dim != -1
        rad_pred_encoding = torch.sin(boxes1[..., dim:dim + 1]) * torch.cos(boxes2[..., dim:dim + 1])
        rad_tg_encoding = torch.cos(boxes1[..., dim:dim + 1]) * torch.sin(boxes2[..., dim:dim + 1])
        boxes1 = torch.cat([boxes1[..., :dim], rad_pred_encoding, boxes1[..., dim + 1:]], dim=-1)
        boxes2 = torch.cat([boxes2[..., :dim], rad_tg_encoding, boxes2[..., dim + 1:]], dim=-1)
        return boxes1, boxes2
    @staticmethod
    def get_direction_target(anchors, reg_targets, one_hot=True, dir_offset=0, num_bins=2):
        """Bin the GT heading (anchor angle + residual) into num_bins direction classes."""
        batch_size = reg_targets.shape[0]
        anchors = anchors.view(batch_size, -1, anchors.shape[-1])
        rot_gt = reg_targets[..., 6] + anchors[..., 6]
        offset_rot = common_utils.limit_period(rot_gt - dir_offset, 0, 2 * np.pi)
        dir_cls_targets = torch.floor(offset_rot / (2 * np.pi / num_bins)).long()
        dir_cls_targets = torch.clamp(dir_cls_targets, min=0, max=num_bins - 1)
        if one_hot:
            dir_targets = torch.zeros(*list(dir_cls_targets.shape), num_bins, dtype=anchors.dtype,
                                      device=dir_cls_targets.device)
            dir_targets.scatter_(-1, dir_cls_targets.unsqueeze(dim=-1).long(), 1.0)
            dir_cls_targets = dir_targets
        return dir_cls_targets
    def get_box_reg_layer_loss(self):
        """Localization loss (sin-encoded angle) plus optional direction-bin loss."""
        box_preds = self.forward_ret_dict['box_preds']
        box_dir_cls_preds = self.forward_ret_dict.get('dir_cls_preds', None)
        box_reg_targets = self.forward_ret_dict['box_reg_targets']
        box_cls_labels = self.forward_ret_dict['box_cls_labels']
        batch_size = int(box_preds.shape[0])
        positives = box_cls_labels > 0
        reg_weights = positives.float()
        pos_normalizer = positives.sum(1, keepdim=True).float()
        reg_weights /= torch.clamp(pos_normalizer, min=1.0)
        if isinstance(self.anchors, list):
            if self.use_multihead:
                anchors = torch.cat(
                    [anchor.permute(3, 4, 0, 1, 2, 5).contiguous().view(-1, anchor.shape[-1]) for anchor in
                     self.anchors], dim=0)
            else:
                anchors = torch.cat(self.anchors, dim=-3)
        else:
            anchors = self.anchors
        anchors = anchors.view(1, -1, anchors.shape[-1]).repeat(batch_size, 1, 1)
        box_preds = box_preds.view(batch_size, -1,
                                   box_preds.shape[-1] // self.num_anchors_per_location if not self.use_multihead else
                                   box_preds.shape[-1])
        # sin(a - b) = sinacosb-cosasinb
        box_preds_sin, reg_targets_sin = self.add_sin_difference(box_preds, box_reg_targets)
        loc_loss_src = self.reg_loss_func(box_preds_sin, reg_targets_sin, weights=reg_weights)  # [N, M]
        loc_loss = loc_loss_src.sum() / batch_size
        loc_loss = loc_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['loc_weight']
        box_loss = loc_loss
        tb_dict = {
            'rpn_loss_loc': loc_loss.item()
        }
        if box_dir_cls_preds is not None:
            dir_targets = self.get_direction_target(
                anchors, box_reg_targets,
                dir_offset=self.model_cfg.DIR_OFFSET,
                num_bins=self.model_cfg.NUM_DIR_BINS
            )
            dir_logits = box_dir_cls_preds.view(batch_size, -1, self.model_cfg.NUM_DIR_BINS)
            weights = positives.type_as(dir_logits)
            weights /= torch.clamp(weights.sum(-1, keepdim=True), min=1.0)
            dir_loss = self.dir_loss_func(dir_logits, dir_targets, weights=weights)
            dir_loss = dir_loss.sum() / batch_size
            dir_loss = dir_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['dir_weight']
            box_loss += dir_loss
            tb_dict['rpn_loss_dir'] = dir_loss.item()
        return box_loss, tb_dict
    def get_loss(self):
        """Total RPN loss = classification + (localization [+ direction])."""
        cls_loss, tb_dict = self.get_cls_layer_loss()
        box_loss, tb_dict_box = self.get_box_reg_layer_loss()
        tb_dict.update(tb_dict_box)
        rpn_loss = cls_loss + box_loss
        tb_dict['rpn_loss'] = rpn_loss.item()
        return rpn_loss, tb_dict
    def generate_predicted_boxes(self, batch_size, cls_preds, box_preds, dir_cls_preds=None):
        """
        Decode raw head outputs into per-anchor class scores and boxes.

        Args:
            batch_size:
            cls_preds: (N, H, W, C1)
            box_preds: (N, H, W, C2)
            dir_cls_preds: (N, H, W, C3)
        Returns:
            batch_cls_preds: (B, num_boxes, num_classes)
            batch_box_preds: (B, num_boxes, 7+C)
        """
        if isinstance(self.anchors, list):
            if self.use_multihead:
                anchors = torch.cat([anchor.permute(3, 4, 0, 1, 2, 5).contiguous().view(-1, anchor.shape[-1])
                                     for anchor in self.anchors], dim=0)
            else:
                anchors = torch.cat(self.anchors, dim=-3)
        else:
            anchors = self.anchors
        num_anchors = anchors.view(-1, anchors.shape[-1]).shape[0]
        batch_anchors = anchors.view(1, -1, anchors.shape[-1]).repeat(batch_size, 1, 1)
        batch_cls_preds = cls_preds.view(batch_size, num_anchors, -1).float() \
            if not isinstance(cls_preds, list) else cls_preds
        batch_box_preds = box_preds.view(batch_size, num_anchors, -1) if not isinstance(box_preds, list) \
            else torch.cat(box_preds, dim=1).view(batch_size, num_anchors, -1)
        batch_box_preds = self.box_coder.decode_torch(batch_box_preds, batch_anchors)
        if dir_cls_preds is not None:
            # Snap the decoded yaw to the predicted direction bin.
            dir_offset = self.model_cfg.DIR_OFFSET
            dir_limit_offset = self.model_cfg.DIR_LIMIT_OFFSET
            dir_cls_preds = dir_cls_preds.view(batch_size, num_anchors, -1) if not isinstance(dir_cls_preds, list) \
                else torch.cat(dir_cls_preds, dim=1).view(batch_size, num_anchors, -1)
            dir_labels = torch.max(dir_cls_preds, dim=-1)[1]
            period = (2 * np.pi / self.model_cfg.NUM_DIR_BINS)
            dir_rot = common_utils.limit_period(
                batch_box_preds[..., 6] - dir_offset, dir_limit_offset, period
            )
            batch_box_preds[..., 6] = dir_rot + dir_offset + period * dir_labels.to(batch_box_preds.dtype)
        if isinstance(self.box_coder, box_coder_utils.PreviousResidualDecoder):
            # Legacy coder uses a different yaw convention; convert back.
            batch_box_preds[..., 6] = common_utils.limit_period(
                -(batch_box_preds[..., 6] + np.pi / 2), offset=0.5, period=np.pi * 2
            )
        return batch_cls_preds, batch_box_preds
    def forward(self, **kwargs):
        # Must be implemented by concrete anchor heads.
        raise NotImplementedError
| 12,364
| 43.800725
| 118
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/dense_heads/center_head_semi.py
|
import copy
import numpy as np
import torch
import torch.nn as nn
from torch.nn.init import kaiming_normal_
from ..model_utils import model_nms_utils
from ..model_utils import centernet_utils
from ...utils import loss_utils
class SeparateHead(nn.Module):
    """Collection of small convolutional towers, one per prediction target.

    Each entry of ``sep_head_dict`` maps a head name to
    ``{'out_channels': C, 'num_conv': K}`` and becomes an attribute holding
    (K - 1) Conv-BN-ReLU blocks followed by a final 3x3 conv.  Towers whose
    name contains 'hm' get their output bias filled with ``init_bias``; all
    other towers get Kaiming-normal conv weights and zero biases.
    """
    def __init__(self, input_channels, sep_head_dict, init_bias=-2.19, use_bias=False):
        super().__init__()
        self.sep_head_dict = sep_head_dict
        for head_name in self.sep_head_dict:
            out_channels = self.sep_head_dict[head_name]['out_channels']
            num_conv = self.sep_head_dict[head_name]['num_conv']
            blocks = []
            for _ in range(num_conv - 1):
                blocks.append(nn.Sequential(
                    nn.Conv2d(input_channels, input_channels, kernel_size=3, stride=1, padding=1, bias=use_bias),
                    nn.BatchNorm2d(input_channels),
                    nn.ReLU()
                ))
            blocks.append(nn.Conv2d(input_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=True))
            tower = nn.Sequential(*blocks)
            if 'hm' in head_name:
                # Heatmap output starts strongly biased towards "background".
                tower[-1].bias.data.fill_(init_bias)
            else:
                for module in tower.modules():
                    if isinstance(module, nn.Conv2d):
                        kaiming_normal_(module.weight.data)
                        if hasattr(module, "bias") and module.bias is not None:
                            nn.init.constant_(module.bias, 0)
            self.__setattr__(head_name, tower)
    def forward(self, x):
        """Run every tower on ``x`` and return {head_name: tower_output}."""
        return {head_name: self.__getattr__(head_name)(x) for head_name in self.sep_head_dict}
class CenterHeadSemi(nn.Module):
    def __init__(self, model_cfg, input_channels, num_class, class_names, grid_size, point_cloud_range, voxel_size,
                 predict_boxes_when_training=True):
        """Semi-supervised CenterPoint-style dense head.

        Args:
            model_cfg: head config (CLASS_NAMES_EACH_HEAD, SEPARATE_HEAD_CFG, ...).
            input_channels: channels of the incoming BEV feature map.
            num_class: total number of detection classes.
            class_names: class names used by this model.
            grid_size: voxel grid size [x, y, z].
            point_cloud_range: [x_min, y_min, z_min, x_max, y_max, z_max].
            voxel_size: voxel size [x, y, z].
            predict_boxes_when_training: also decode boxes in training (for RoI refinement).
        """
        super().__init__()
        self.model_cfg = model_cfg
        self.num_class = num_class
        self.grid_size = grid_size
        self.point_cloud_range = point_cloud_range
        self.voxel_size = voxel_size
        self.feature_map_stride = self.model_cfg.TARGET_ASSIGNER_CONFIG.get('FEATURE_MAP_STRIDE', None)
        self.class_names = class_names
        # Per-head class-name subsets and their mapping back to global class ids.
        self.class_names_each_head = []
        self.class_id_mapping_each_head = []
        for cur_class_names in self.model_cfg.CLASS_NAMES_EACH_HEAD:
            self.class_names_each_head.append([x for x in cur_class_names if x in class_names])
            cur_class_id_mapping = torch.from_numpy(np.array(
                [self.class_names.index(x) for x in cur_class_names if x in class_names]
            )).cuda()
            self.class_id_mapping_each_head.append(cur_class_id_mapping)
        total_classes = sum([len(x) for x in self.class_names_each_head])
        assert total_classes == len(self.class_names), f'class_names_each_head={self.class_names_each_head}'
        self.shared_conv = nn.Sequential(
            nn.Conv2d(
                input_channels, self.model_cfg.SHARED_CONV_CHANNEL, 3, stride=1, padding=1,
                bias=self.model_cfg.get('USE_BIAS_BEFORE_NORM', False)
            ),
            nn.BatchNorm2d(self.model_cfg.SHARED_CONV_CHANNEL),
            nn.ReLU(),
        )
        # One SeparateHead per class group; 'hm' channel count equals the group size.
        self.heads_list = nn.ModuleList()
        self.separate_head_cfg = self.model_cfg.SEPARATE_HEAD_CFG
        for idx, cur_class_names in enumerate(self.class_names_each_head):
            cur_head_dict = copy.deepcopy(self.separate_head_cfg.HEAD_DICT)
            cur_head_dict['hm'] = dict(out_channels=len(cur_class_names), num_conv=self.model_cfg.NUM_HM_CONV)
            self.heads_list.append(
                SeparateHead(
                    input_channels=self.model_cfg.SHARED_CONV_CHANNEL,
                    sep_head_dict=cur_head_dict,
                    init_bias=-2.19,
                    use_bias=self.model_cfg.get('USE_BIAS_BEFORE_NORM', False)
                )
            )
        self.predict_boxes_when_training = predict_boxes_when_training
        self.forward_ret_dict = {}
        self.build_losses()
        # Set externally to 'origin' / 'teacher' / 'student' before forward().
        self.model_type = None
def build_losses(self):
self.add_module('hm_loss_func', loss_utils.FocalLossCenterNet())
self.add_module('reg_loss_func', loss_utils.RegLossCenterNet())
    def assign_target_of_single_head(
        self, num_classes, gt_boxes, feature_map_size, feature_map_stride, num_max_objs=500,
        gaussian_overlap=0.1, min_radius=2
    ):
        """
        Build the CenterNet training targets of one head for one sample.

        Args:
            gt_boxes: (N, 8)
            feature_map_size: (2), [x, y]
        Returns:
            heatmap: (num_classes, size_y, size_x) gaussian class heatmaps
            ret_boxes: (num_max_objs, code_size) regression targets
            inds: (num_max_objs,) flattened center-pixel indices
            mask: (num_max_objs,) 1 for valid objects
        """
        heatmap = gt_boxes.new_zeros(num_classes, feature_map_size[1], feature_map_size[0])
        ret_boxes = gt_boxes.new_zeros((num_max_objs, gt_boxes.shape[-1] - 1 + 1))
        inds = gt_boxes.new_zeros(num_max_objs).long()
        mask = gt_boxes.new_zeros(num_max_objs).long()
        x, y, z = gt_boxes[:, 0], gt_boxes[:, 1], gt_boxes[:, 2]
        # world coords -> feature-map pixel coords
        coord_x = (x - self.point_cloud_range[0]) / self.voxel_size[0] / feature_map_stride
        coord_y = (y - self.point_cloud_range[1]) / self.voxel_size[1] / feature_map_stride
        coord_x = torch.clamp(coord_x, min=0, max=feature_map_size[0] - 0.5)  # bugfixed: 1e-6 does not work for center.int()
        coord_y = torch.clamp(coord_y, min=0, max=feature_map_size[1] - 0.5)  #
        center = torch.cat((coord_x[:, None], coord_y[:, None]), dim=-1)
        center_int = center.int()
        center_int_float = center_int.float()
        dx, dy, dz = gt_boxes[:, 3], gt_boxes[:, 4], gt_boxes[:, 5]
        dx = dx / self.voxel_size[0] / feature_map_stride
        dy = dy / self.voxel_size[1] / feature_map_stride
        # gaussian radius derived from the box footprint on the feature map
        radius = centernet_utils.gaussian_radius(dx, dy, min_overlap=gaussian_overlap)
        radius = torch.clamp_min(radius.int(), min=min_radius)
        for k in range(min(num_max_objs, gt_boxes.shape[0])):
            if dx[k] <= 0 or dy[k] <= 0:
                continue
            # NOTE(review): `<=` allows center == feature_map_size, one past the last
            # pixel; the clamp above should prevent that case — confirm intent.
            if not (0 <= center_int[k][0] <= feature_map_size[0] and 0 <= center_int[k][1] <= feature_map_size[1]):
                continue
            cur_class_id = (gt_boxes[k, -1] - 1).long()
            centernet_utils.draw_gaussian_to_heatmap(heatmap[cur_class_id], center[k], radius[k].item())
            # flattened row-major index of the center pixel
            inds[k] = center_int[k, 1] * feature_map_size[0] + center_int[k, 0]
            mask[k] = 1
            # sub-pixel offset; .float() is redundant (center_int_float already float)
            ret_boxes[k, 0:2] = center[k] - center_int_float[k].float()
            ret_boxes[k, 2] = z[k]
            # sizes regressed in log space, heading as (cos, sin)
            ret_boxes[k, 3:6] = gt_boxes[k, 3:6].log()
            ret_boxes[k, 6] = torch.cos(gt_boxes[k, 6])
            ret_boxes[k, 7] = torch.sin(gt_boxes[k, 6])
            if gt_boxes.shape[1] > 8:
                # extra attributes (e.g. velocity) between heading and class id
                ret_boxes[k, 8:] = gt_boxes[k, 7:-1]
        return heatmap, ret_boxes, inds, mask
    def assign_targets(self, gt_boxes, feature_map_size=None, **kwargs):
        """
        Build CenterNet targets for every head over the whole batch.

        Args:
            gt_boxes: (B, M, 8)
            range_image_polar: (B, 3, H, W)
            feature_map_size: (2) [H, W]
            spatial_cartesian: (B, 4, H, W)
        Returns:
            ret_dict with per-head stacked 'heatmaps', 'target_boxes', 'inds', 'masks'.
        """
        feature_map_size = feature_map_size[::-1]  # [H, W] ==> [x, y]
        target_assigner_cfg = self.model_cfg.TARGET_ASSIGNER_CONFIG
        # feature_map_size = self.grid_size[:2] // target_assigner_cfg.FEATURE_MAP_STRIDE
        batch_size = gt_boxes.shape[0]
        ret_dict = {
            'heatmaps': [],
            'target_boxes': [],
            'inds': [],
            'masks': [],
            'heatmap_masks': []
        }
        all_names = np.array(['bg', *self.class_names])
        for idx, cur_class_names in enumerate(self.class_names_each_head):
            heatmap_list, target_boxes_list, inds_list, masks_list = [], [], [], []
            for bs_idx in range(batch_size):
                cur_gt_boxes = gt_boxes[bs_idx]
                gt_class_names = all_names[cur_gt_boxes[:, -1].cpu().long().numpy()]
                gt_boxes_single_head = []
                # NOTE: the inner `idx` shadows the outer head index; harmless here
                # because the outer `idx` is not read again within this iteration.
                for idx, name in enumerate(gt_class_names):
                    if name not in cur_class_names:
                        continue
                    temp_box = cur_gt_boxes[idx]
                    # NOTE(review): temp_box is a view into gt_boxes, so this writes the
                    # head-local class id back into data_dict['gt_boxes'] in place —
                    # confirm callers do not reuse gt_boxes afterwards.
                    temp_box[-1] = cur_class_names.index(name) + 1
                    gt_boxes_single_head.append(temp_box[None, :])
                if len(gt_boxes_single_head) == 0:
                    # empty (0, 8) tensor keeps device/dtype consistent
                    gt_boxes_single_head = cur_gt_boxes[:0, :]
                else:
                    gt_boxes_single_head = torch.cat(gt_boxes_single_head, dim=0)
                # target building runs on CPU, results are moved back afterwards
                heatmap, ret_boxes, inds, mask = self.assign_target_of_single_head(
                    num_classes=len(cur_class_names), gt_boxes=gt_boxes_single_head.cpu(),
                    feature_map_size=feature_map_size, feature_map_stride=target_assigner_cfg.FEATURE_MAP_STRIDE,
                    num_max_objs=target_assigner_cfg.NUM_MAX_OBJS,
                    gaussian_overlap=target_assigner_cfg.GAUSSIAN_OVERLAP,
                    min_radius=target_assigner_cfg.MIN_RADIUS,
                )
                heatmap_list.append(heatmap.to(gt_boxes_single_head.device))
                target_boxes_list.append(ret_boxes.to(gt_boxes_single_head.device))
                inds_list.append(inds.to(gt_boxes_single_head.device))
                masks_list.append(mask.to(gt_boxes_single_head.device))
            ret_dict['heatmaps'].append(torch.stack(heatmap_list, dim=0))
            ret_dict['target_boxes'].append(torch.stack(target_boxes_list, dim=0))
            ret_dict['inds'].append(torch.stack(inds_list, dim=0))
            ret_dict['masks'].append(torch.stack(masks_list, dim=0))
        return ret_dict
def sigmoid(self, x):
y = torch.clamp(x.sigmoid(), min=1e-4, max=1 - 1e-4)
return y
    def get_loss(self):
        """Total head loss: per-head heatmap focal loss + weighted box regression.

        Reads predictions and targets from ``self.forward_ret_dict``.
        Returns:
            loss: scalar tensor.
            tb_dict: per-head tensorboard scalars plus 'rpn_loss'.
        """
        pred_dicts = self.forward_ret_dict['pred_dicts']
        target_dicts = self.forward_ret_dict['target_dicts']
        tb_dict = {}
        loss = 0
        for idx, pred_dict in enumerate(pred_dicts):
            # clamped sigmoid keeps the focal-loss logs finite
            pred_dict['hm'] = self.sigmoid(pred_dict['hm'])
            hm_loss = self.hm_loss_func(pred_dict['hm'], target_dicts['heatmaps'][idx])
            hm_loss *= self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['cls_weight']
            target_boxes = target_dicts['target_boxes'][idx]
            # concatenate the regression channels in the configured HEAD_ORDER
            pred_boxes = torch.cat([pred_dict[head_name] for head_name in self.separate_head_cfg.HEAD_ORDER], dim=1)
            reg_loss = self.reg_loss_func(
                pred_boxes, target_dicts['masks'][idx], target_dicts['inds'][idx], target_boxes
            )
            loc_loss = (reg_loss * reg_loss.new_tensor(self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['code_weights'])).sum()
            loc_loss = loc_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['loc_weight']
            loss += hm_loss + loc_loss
            tb_dict['hm_loss_head_%d' % idx] = hm_loss.item()
            tb_dict['loc_loss_head_%d' % idx] = loc_loss.item()
        tb_dict['rpn_loss'] = loss.item()
        return loss, tb_dict
    def generate_predicted_boxes(self, batch_size, pred_dicts):
        """Decode per-head heatmaps into boxes, apply NMS, and merge heads per sample.

        Args:
            batch_size: number of samples in the batch.
            pred_dicts: list (one per head) of raw SeparateHead outputs.
        Returns:
            list of length batch_size with 'pred_boxes', 'pred_scores',
            'pred_labels' (labels become 1-based at the end).
        """
        post_process_cfg = self.model_cfg.POST_PROCESSING
        post_center_limit_range = torch.tensor(post_process_cfg.POST_CENTER_LIMIT_RANGE).cuda().float()
        ret_dict = [{
            'pred_boxes': [],
            'pred_scores': [],
            'pred_labels': [],
        } for k in range(batch_size)]
        for idx, pred_dict in enumerate(pred_dicts):
            batch_hm = pred_dict['hm'].sigmoid()
            batch_center = pred_dict['center']
            batch_center_z = pred_dict['center_z']
            batch_dim = pred_dict['dim'].exp()  # dims were regressed in log space
            batch_rot_cos = pred_dict['rot'][:, 0].unsqueeze(dim=1)
            batch_rot_sin = pred_dict['rot'][:, 1].unsqueeze(dim=1)
            batch_vel = pred_dict['vel'] if 'vel' in self.separate_head_cfg.HEAD_ORDER else None
            final_pred_dicts = centernet_utils.decode_bbox_from_heatmap(
                heatmap=batch_hm, rot_cos=batch_rot_cos, rot_sin=batch_rot_sin,
                center=batch_center, center_z=batch_center_z, dim=batch_dim, vel=batch_vel,
                point_cloud_range=self.point_cloud_range, voxel_size=self.voxel_size,
                feature_map_stride=self.feature_map_stride,
                K=post_process_cfg.MAX_OBJ_PER_SAMPLE,
                circle_nms=(post_process_cfg.NMS_CONFIG.NMS_TYPE == 'circle_nms'),
                score_thresh=post_process_cfg.SCORE_THRESH,
                post_center_limit_range=post_center_limit_range
            )
            for k, final_dict in enumerate(final_pred_dicts):
                # map head-local class ids back to global (0-based) class ids
                final_dict['pred_labels'] = self.class_id_mapping_each_head[idx][final_dict['pred_labels'].long()]
                if post_process_cfg.NMS_CONFIG.NMS_TYPE != 'circle_nms':
                    # circle_nms already ran inside the decoder; otherwise NMS here
                    selected, selected_scores = model_nms_utils.class_agnostic_nms(
                        box_scores=final_dict['pred_scores'], box_preds=final_dict['pred_boxes'],
                        nms_config=post_process_cfg.NMS_CONFIG,
                        score_thresh=None
                    )
                    final_dict['pred_boxes'] = final_dict['pred_boxes'][selected]
                    final_dict['pred_scores'] = selected_scores
                    final_dict['pred_labels'] = final_dict['pred_labels'][selected]
                ret_dict[k]['pred_boxes'].append(final_dict['pred_boxes'])
                ret_dict[k]['pred_scores'].append(final_dict['pred_scores'])
                ret_dict[k]['pred_labels'].append(final_dict['pred_labels'])
        for k in range(batch_size):
            ret_dict[k]['pred_boxes'] = torch.cat(ret_dict[k]['pred_boxes'], dim=0)
            ret_dict[k]['pred_scores'] = torch.cat(ret_dict[k]['pred_scores'], dim=0)
            # +1: downstream expects 1-based labels (0 is background)
            ret_dict[k]['pred_labels'] = torch.cat(ret_dict[k]['pred_labels'], dim=0) + 1
        return ret_dict
@staticmethod
def reorder_rois_for_refining(batch_size, pred_dicts):
num_max_rois = max([len(cur_dict['pred_boxes']) for cur_dict in pred_dicts])
num_max_rois = max(1, num_max_rois) # at least one faked rois to avoid error
pred_boxes = pred_dicts[0]['pred_boxes']
rois = pred_boxes.new_zeros((batch_size, num_max_rois, pred_boxes.shape[-1]))
roi_scores = pred_boxes.new_zeros((batch_size, num_max_rois))
roi_labels = pred_boxes.new_zeros((batch_size, num_max_rois)).long()
for bs_idx in range(batch_size):
num_boxes = len(pred_dicts[bs_idx]['pred_boxes'])
rois[bs_idx, :num_boxes, :] = pred_dicts[bs_idx]['pred_boxes']
roi_scores[bs_idx, :num_boxes] = pred_dicts[bs_idx]['pred_scores']
roi_labels[bs_idx, :num_boxes] = pred_dicts[bs_idx]['pred_labels']
return rois, roi_scores, roi_labels
def forward(self, data_dict):
    """
    Forward pass supporting three operating modes (``self.model_type``):
      - 'origin':  regular training / inference; targets assigned from gt_boxes
        when training.
      - 'teacher': always decodes boxes (e.g. to produce pseudo labels); never
        assigns training targets.
      - 'student': like 'origin', but the batch may be unlabeled, so targets are
        assigned only when 'gt_boxes' is present.

    Args:
        data_dict: batch dict with 'spatial_features_2d', 'batch_size' and,
            for supervised training, 'gt_boxes'.

    Returns:
        data_dict, updated either with ROIs for a refinement stage
        ('rois', 'roi_scores', 'roi_labels', 'has_class_labels') or with
        'final_box_dicts'.

    Raises:
        Exception: if ``self.model_type`` is not one of the supported modes.
    """
    # fail fast on an invalid mode (fixed typo: message used to read 'Unsupprted')
    if self.model_type not in ('origin', 'teacher', 'student'):
        raise Exception('Unsupported model type')

    spatial_features_2d = data_dict['spatial_features_2d']
    x = self.shared_conv(spatial_features_2d)
    pred_dicts = [head(x) for head in self.heads_list]

    if self.model_type == 'teacher':
        # the teacher only produces boxes (pseudo labels); it has no loss targets
        self.forward_ret_dict['pred_dicts'] = pred_dicts
        decode_boxes = True
    else:
        if self.training:
            # 'origin' batches always carry labels; 'student' batches may not
            if self.model_type == 'origin' or 'gt_boxes' in data_dict:
                target_dict = self.assign_targets(
                    data_dict['gt_boxes'], feature_map_size=spatial_features_2d.size()[2:],
                    feature_map_stride=data_dict.get('spatial_features_2d_strides', None)
                )
                self.forward_ret_dict['target_dicts'] = target_dict
            self.forward_ret_dict['pred_dicts'] = pred_dicts
        decode_boxes = not self.training or self.predict_boxes_when_training

    if decode_boxes:
        pred_dicts = self.generate_predicted_boxes(
            data_dict['batch_size'], pred_dicts
        )
        if self.predict_boxes_when_training:
            # pad/reorder into fixed-size ROI tensors for the refinement stage
            rois, roi_scores, roi_labels = self.reorder_rois_for_refining(data_dict['batch_size'], pred_dicts)
            data_dict['rois'] = rois
            data_dict['roi_scores'] = roi_scores
            data_dict['roi_labels'] = roi_labels
            data_dict['has_class_labels'] = True
        else:
            data_dict['final_box_dicts'] = pred_dicts
    return data_dict
| 18,189
| 44.588972
| 125
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/dense_heads/anchor_head_multi.py
|
import numpy as np
import torch
import torch.nn as nn
from ..backbones_2d import BaseBEVBackbone
from .anchor_head_template import AnchorHeadTemplate
class SingleHead(BaseBEVBackbone):
    """
    One head of a multi-head anchor-based RPN.

    Optionally runs an extra per-head BEV backbone (configured by ``rpn_head_cfg``,
    inherited from BaseBEVBackbone) on the shared feature map, then predicts
    per-anchor class logits, box regressions and (optionally) direction-bin logits
    for the subset of classes this head owns.
    """

    def __init__(self, model_cfg, input_channels, num_class, num_anchors_per_location, code_size, rpn_head_cfg=None,
                 head_label_indices=None, separate_reg_config=None):
        """
        Args:
            model_cfg: head config (USE_DIRECTION_CLASSIFIER, NUM_DIR_BINS, USE_MULTIHEAD, ...)
            input_channels: channels of the incoming BEV feature map
            num_class: number of classes predicted by this head
            num_anchors_per_location: anchors per BEV cell handled by this head
            code_size: length of the box encoding produced by the box coder
            rpn_head_cfg: optional per-head backbone config forwarded to BaseBEVBackbone
            head_label_indices: 1-based global class ids owned by this head; registered
                as a buffer so it moves with the module and is checkpointed
            separate_reg_config: if given, build one conv branch per regression target
                group (REG_LIST entries like 'reg:2') instead of a single box conv
        """
        super().__init__(rpn_head_cfg, input_channels)
        self.num_anchors_per_location = num_anchors_per_location
        self.num_class = num_class
        self.code_size = code_size
        self.model_cfg = model_cfg
        self.separate_reg_config = separate_reg_config
        self.register_buffer('head_label_indices', head_label_indices)
        if self.separate_reg_config is not None:
            # running total of regression channels; must add up to code_size
            code_size_cnt = 0
            self.conv_box = nn.ModuleDict()
            self.conv_box_names = []
            num_middle_conv = self.separate_reg_config.NUM_MIDDLE_CONV
            num_middle_filter = self.separate_reg_config.NUM_MIDDLE_FILTER
            # classification branch: num_middle_conv Conv-BN-ReLU blocks + output conv
            conv_cls_list = []
            c_in = input_channels
            for k in range(num_middle_conv):
                conv_cls_list.extend([
                    nn.Conv2d(
                        c_in, num_middle_filter,
                        kernel_size=3, stride=1, padding=1, bias=False
                    ),
                    nn.BatchNorm2d(num_middle_filter),
                    nn.ReLU()
                ])
                c_in = num_middle_filter
            conv_cls_list.append(nn.Conv2d(
                c_in, self.num_anchors_per_location * self.num_class,
                kernel_size=3, stride=1, padding=1
            ))
            self.conv_cls = nn.Sequential(*conv_cls_list)
            # one regression branch per REG_LIST entry ('name:channels')
            for reg_config in self.separate_reg_config.REG_LIST:
                reg_name, reg_channel = reg_config.split(':')
                reg_channel = int(reg_channel)
                cur_conv_list = []
                c_in = input_channels
                for k in range(num_middle_conv):
                    cur_conv_list.extend([
                        nn.Conv2d(
                            c_in, num_middle_filter,
                            kernel_size=3, stride=1, padding=1, bias=False
                        ),
                        nn.BatchNorm2d(num_middle_filter),
                        nn.ReLU()
                    ])
                    c_in = num_middle_filter
                cur_conv_list.append(nn.Conv2d(
                    c_in, self.num_anchors_per_location * int(reg_channel),
                    kernel_size=3, stride=1, padding=1, bias=True
                ))
                code_size_cnt += reg_channel
                self.conv_box[f'conv_{reg_name}'] = nn.Sequential(*cur_conv_list)
                self.conv_box_names.append(f'conv_{reg_name}')
            # kaiming-init all regression convs; cls conv keeps its default init
            # (its output bias is set by init_weights below)
            for m in self.conv_box.modules():
                if isinstance(m, nn.Conv2d):
                    nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                    if m.bias is not None:
                        nn.init.constant_(m.bias, 0)
            assert code_size_cnt == code_size, f'Code size does not match: {code_size_cnt}:{code_size}'
        else:
            # single 1x1 conv for classification and one for all box targets
            self.conv_cls = nn.Conv2d(
                input_channels, self.num_anchors_per_location * self.num_class,
                kernel_size=1
            )
            self.conv_box = nn.Conv2d(
                input_channels, self.num_anchors_per_location * self.code_size,
                kernel_size=1
            )
        if self.model_cfg.get('USE_DIRECTION_CLASSIFIER', None) is not None:
            self.conv_dir_cls = nn.Conv2d(
                input_channels,
                self.num_anchors_per_location * self.model_cfg.NUM_DIR_BINS,
                kernel_size=1
            )
        else:
            self.conv_dir_cls = None
        self.use_multihead = self.model_cfg.get('USE_MULTIHEAD', False)
        self.init_weights()

    def init_weights(self):
        """Bias the classification logits towards background (focal-loss prior)."""
        pi = 0.01
        if isinstance(self.conv_cls, nn.Conv2d):
            nn.init.constant_(self.conv_cls.bias, -np.log((1 - pi) / pi))
        else:
            # separate-reg path: conv_cls is a Sequential; the output conv is last
            nn.init.constant_(self.conv_cls[-1].bias, -np.log((1 - pi) / pi))

    def forward(self, spatial_features_2d):
        """
        Args:
            spatial_features_2d: (B, C, H, W) BEV features

        Returns:
            dict with:
                cls_preds: (B, H, W, A*num_class) or, if USE_MULTIHEAD,
                    (B, A*H*W, num_class)
                box_preds: (B, H, W, A*code_size) or (B, A*H*W, code_size)
                dir_cls_preds: analogous direction logits, or None
        """
        ret_dict = {}
        # optional per-head backbone inherited from BaseBEVBackbone
        spatial_features_2d = super().forward({'spatial_features': spatial_features_2d})['spatial_features_2d']

        cls_preds = self.conv_cls(spatial_features_2d)

        if self.separate_reg_config is None:
            box_preds = self.conv_box(spatial_features_2d)
        else:
            # concatenate the per-target-group branches back into one box encoding
            box_preds_list = []
            for reg_name in self.conv_box_names:
                box_preds_list.append(self.conv_box[reg_name](spatial_features_2d))
            box_preds = torch.cat(box_preds_list, dim=1)

        if not self.use_multihead:
            # channels-last layout: (B, H, W, C)
            box_preds = box_preds.permute(0, 2, 3, 1).contiguous()
            cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous()
        else:
            # flatten to (B, num_anchors_total, code_size/num_class), anchor-major
            H, W = box_preds.shape[2:]
            batch_size = box_preds.shape[0]
            box_preds = box_preds.view(-1, self.num_anchors_per_location,
                                       self.code_size, H, W).permute(0, 1, 3, 4, 2).contiguous()
            cls_preds = cls_preds.view(-1, self.num_anchors_per_location,
                                       self.num_class, H, W).permute(0, 1, 3, 4, 2).contiguous()
            box_preds = box_preds.view(batch_size, -1, self.code_size)
            cls_preds = cls_preds.view(batch_size, -1, self.num_class)

        if self.conv_dir_cls is not None:
            dir_cls_preds = self.conv_dir_cls(spatial_features_2d)
            if self.use_multihead:
                dir_cls_preds = dir_cls_preds.view(
                    -1, self.num_anchors_per_location, self.model_cfg.NUM_DIR_BINS, H, W).permute(0, 1, 3, 4,
                                                                                                 2).contiguous()
                dir_cls_preds = dir_cls_preds.view(batch_size, -1, self.model_cfg.NUM_DIR_BINS)
            else:
                dir_cls_preds = dir_cls_preds.permute(0, 2, 3, 1).contiguous()
        else:
            dir_cls_preds = None

        ret_dict['cls_preds'] = cls_preds
        ret_dict['box_preds'] = box_preds
        ret_dict['dir_cls_preds'] = dir_cls_preds

        return ret_dict
class AnchorHeadMulti(AnchorHeadTemplate):
    """
    Multi-head anchor RPN: the class set is partitioned across several SingleHead
    modules (one per RPN_HEAD_CFGS entry), optionally behind a shared conv stem.
    Also implements the per-head classification, box-regression and direction
    losses over the concatenated anchor dimension.
    """

    def __init__(self, model_cfg, input_channels, num_class, class_names, grid_size, point_cloud_range,
                 predict_boxes_when_training=True, **kwargs):
        super().__init__(
            model_cfg=model_cfg, num_class=num_class, class_names=class_names, grid_size=grid_size,
            point_cloud_range=point_cloud_range, predict_boxes_when_training=predict_boxes_when_training
        )
        self.model_cfg = model_cfg
        # if True, each head predicts only its own classes and preds stay as lists
        self.separate_multihead = self.model_cfg.get('SEPARATE_MULTIHEAD', False)

        if self.model_cfg.get('SHARED_CONV_NUM_FILTER', None) is not None:
            shared_conv_num_filter = self.model_cfg.SHARED_CONV_NUM_FILTER
            self.shared_conv = nn.Sequential(
                nn.Conv2d(input_channels, shared_conv_num_filter, 3, stride=1, padding=1, bias=False),
                nn.BatchNorm2d(shared_conv_num_filter, eps=1e-3, momentum=0.01),
                nn.ReLU(),
            )
        else:
            self.shared_conv = None
            shared_conv_num_filter = input_channels
        self.rpn_heads = None
        self.make_multihead(shared_conv_num_filter)

    def make_multihead(self, input_channels):
        """Instantiate one SingleHead per RPN_HEAD_CFGS entry."""
        rpn_head_cfgs = self.model_cfg.RPN_HEAD_CFGS
        rpn_heads = []
        class_names = []
        for rpn_head_cfg in rpn_head_cfgs:
            class_names.extend(rpn_head_cfg['HEAD_CLS_NAME'])

        for rpn_head_cfg in rpn_head_cfgs:
            # anchors per location for this head = sum over its classes
            num_anchors_per_location = sum([self.num_anchors_per_location[class_names.index(head_cls)]
                                            for head_cls in rpn_head_cfg['HEAD_CLS_NAME']])
            # 1-based global class ids owned by this head
            head_label_indices = torch.from_numpy(np.array([
                self.class_names.index(cur_name) + 1 for cur_name in rpn_head_cfg['HEAD_CLS_NAME']
            ]))

            rpn_head = SingleHead(
                self.model_cfg, input_channels,
                len(rpn_head_cfg['HEAD_CLS_NAME']) if self.separate_multihead else self.num_class,
                num_anchors_per_location, self.box_coder.code_size, rpn_head_cfg,
                head_label_indices=head_label_indices,
                separate_reg_config=self.model_cfg.get('SEPARATE_REG_CONFIG', None)
            )
            rpn_heads.append(rpn_head)
        self.rpn_heads = nn.ModuleList(rpn_heads)

    def forward(self, data_dict):
        """
        Runs every head; keeps predictions as per-head lists when
        SEPARATE_MULTIHEAD, otherwise concatenates along the anchor dim.
        During training, assigns targets; when decoding, writes
        'batch_cls_preds' / 'batch_box_preds' (and the per-head label mapping
        when the cls preds stay a list) into data_dict.
        """
        spatial_features_2d = data_dict['spatial_features_2d']
        if self.shared_conv is not None:
            spatial_features_2d = self.shared_conv(spatial_features_2d)

        ret_dicts = []
        for rpn_head in self.rpn_heads:
            ret_dicts.append(rpn_head(spatial_features_2d))

        cls_preds = [ret_dict['cls_preds'] for ret_dict in ret_dicts]
        box_preds = [ret_dict['box_preds'] for ret_dict in ret_dicts]
        ret = {
            'cls_preds': cls_preds if self.separate_multihead else torch.cat(cls_preds, dim=1),
            'box_preds': box_preds if self.separate_multihead else torch.cat(box_preds, dim=1),
        }

        if self.model_cfg.get('USE_DIRECTION_CLASSIFIER', False):
            dir_cls_preds = [ret_dict['dir_cls_preds'] for ret_dict in ret_dicts]
            ret['dir_cls_preds'] = dir_cls_preds if self.separate_multihead else torch.cat(dir_cls_preds, dim=1)

        self.forward_ret_dict.update(ret)

        if self.training:
            targets_dict = self.assign_targets(
                gt_boxes=data_dict['gt_boxes']
            )
            self.forward_ret_dict.update(targets_dict)

        if not self.training or self.predict_boxes_when_training:
            batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
                batch_size=data_dict['batch_size'],
                cls_preds=ret['cls_preds'], box_preds=ret['box_preds'], dir_cls_preds=ret.get('dir_cls_preds', None)
            )

            if isinstance(batch_cls_preds, list):
                # separate-multihead path: downstream needs each head's class ids
                multihead_label_mapping = []
                for idx in range(len(batch_cls_preds)):
                    multihead_label_mapping.append(self.rpn_heads[idx].head_label_indices)

                data_dict['multihead_label_mapping'] = multihead_label_mapping

            data_dict['batch_cls_preds'] = batch_cls_preds
            data_dict['batch_box_preds'] = batch_box_preds
            data_dict['cls_preds_normalized'] = False

        return data_dict

    def get_cls_layer_loss(self):
        """
        Classification loss, summed over heads. Walks the concatenated anchor
        dimension with start_idx (and, for separate heads, the class columns
        with c_idx) so each head is matched against its own anchor slice.
        """
        loss_weights = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS
        if 'pos_cls_weight' in loss_weights:
            pos_cls_weight = loss_weights['pos_cls_weight']
            neg_cls_weight = loss_weights['neg_cls_weight']
        else:
            pos_cls_weight = neg_cls_weight = 1.0

        cls_preds = self.forward_ret_dict['cls_preds']
        box_cls_labels = self.forward_ret_dict['box_cls_labels']
        if not isinstance(cls_preds, list):
            cls_preds = [cls_preds]
        batch_size = int(cls_preds[0].shape[0])
        cared = box_cls_labels >= 0  # [N, num_anchors]; label -1 means "don't care"
        positives = box_cls_labels > 0
        negatives = box_cls_labels == 0
        negative_cls_weights = negatives * 1.0 * neg_cls_weight

        cls_weights = (negative_cls_weights + pos_cls_weight * positives).float()
        reg_weights = positives.float()

        if self.num_class == 1:
            # class agnostic
            # NOTE: mutates box_cls_labels (and thus forward_ret_dict) in place
            box_cls_labels[positives] = 1

        pos_normalizer = positives.sum(1, keepdim=True).float()
        reg_weights /= torch.clamp(pos_normalizer, min=1.0)
        cls_weights /= torch.clamp(pos_normalizer, min=1.0)
        cls_targets = box_cls_labels * cared.type_as(box_cls_labels)
        one_hot_targets = torch.zeros(
            *list(cls_targets.shape), self.num_class + 1, dtype=cls_preds[0].dtype, device=cls_targets.device
        )
        one_hot_targets.scatter_(-1, cls_targets.unsqueeze(dim=-1).long(), 1.0)
        # drop the background column (index 0)
        one_hot_targets = one_hot_targets[..., 1:]
        start_idx = c_idx = 0
        cls_losses = 0

        for idx, cls_pred in enumerate(cls_preds):
            cur_num_class = self.rpn_heads[idx].num_class
            cls_pred = cls_pred.view(batch_size, -1, cur_num_class)
            if self.separate_multihead:
                one_hot_target = one_hot_targets[:, start_idx:start_idx + cls_pred.shape[1],
                                 c_idx:c_idx + cur_num_class]
                c_idx += cur_num_class
            else:
                one_hot_target = one_hot_targets[:, start_idx:start_idx + cls_pred.shape[1]]
            cls_weight = cls_weights[:, start_idx:start_idx + cls_pred.shape[1]]
            cls_loss_src = self.cls_loss_func(cls_pred, one_hot_target, weights=cls_weight)  # [N, M]
            cls_loss = cls_loss_src.sum() / batch_size
            cls_loss = cls_loss * loss_weights['cls_weight']
            cls_losses += cls_loss
            start_idx += cls_pred.shape[1]
        assert start_idx == one_hot_targets.shape[1]
        tb_dict = {
            'rpn_loss_cls': cls_losses.item()
        }
        return cls_losses, tb_dict

    def get_box_reg_layer_loss(self):
        """
        Box-regression (+ optional direction) loss, summed over heads; like the
        cls loss, each head is matched to its slice of the anchor dimension.
        """
        box_preds = self.forward_ret_dict['box_preds']
        box_dir_cls_preds = self.forward_ret_dict.get('dir_cls_preds', None)
        box_reg_targets = self.forward_ret_dict['box_reg_targets']
        box_cls_labels = self.forward_ret_dict['box_cls_labels']

        positives = box_cls_labels > 0
        reg_weights = positives.float()
        pos_normalizer = positives.sum(1, keepdim=True).float()
        reg_weights /= torch.clamp(pos_normalizer, min=1.0)

        if not isinstance(box_preds, list):
            box_preds = [box_preds]
        batch_size = int(box_preds[0].shape[0])

        if isinstance(self.anchors, list):
            if self.use_multihead:
                # flatten each anchor grid to (num_anchors, code) in anchor-major order
                anchors = torch.cat(
                    [anchor.permute(3, 4, 0, 1, 2, 5).contiguous().view(-1, anchor.shape[-1])
                     for anchor in self.anchors], dim=0
                )
            else:
                anchors = torch.cat(self.anchors, dim=-3)
        else:
            anchors = self.anchors
        anchors = anchors.view(1, -1, anchors.shape[-1]).repeat(batch_size, 1, 1)

        start_idx = 0
        box_losses = 0
        tb_dict = {}
        for idx, box_pred in enumerate(box_preds):
            box_pred = box_pred.view(
                batch_size, -1,
                box_pred.shape[-1] // self.num_anchors_per_location if not self.use_multihead else box_pred.shape[-1]
            )
            box_reg_target = box_reg_targets[:, start_idx:start_idx + box_pred.shape[1]]
            reg_weight = reg_weights[:, start_idx:start_idx + box_pred.shape[1]]
            # sin(a - b) = sinacosb-cosasinb
            if box_dir_cls_preds is not None:
                box_pred_sin, reg_target_sin = self.add_sin_difference(box_pred, box_reg_target)
                loc_loss_src = self.reg_loss_func(box_pred_sin, reg_target_sin, weights=reg_weight)  # [N, M]
            else:
                loc_loss_src = self.reg_loss_func(box_pred, box_reg_target, weights=reg_weight)  # [N, M]
            loc_loss = loc_loss_src.sum() / batch_size
            loc_loss = loc_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['loc_weight']
            box_losses += loc_loss
            tb_dict['rpn_loss_loc'] = tb_dict.get('rpn_loss_loc', 0) + loc_loss.item()

            if box_dir_cls_preds is not None:
                if not isinstance(box_dir_cls_preds, list):
                    box_dir_cls_preds = [box_dir_cls_preds]
                # NOTE(review): dir_targets is computed over all anchors inside the
                # per-head loop (loop-invariant) and then sliced — redundant but harmless
                dir_targets = self.get_direction_target(
                    anchors, box_reg_targets,
                    dir_offset=self.model_cfg.DIR_OFFSET,
                    num_bins=self.model_cfg.NUM_DIR_BINS
                )
                box_dir_cls_pred = box_dir_cls_preds[idx]
                dir_logit = box_dir_cls_pred.view(batch_size, -1, self.model_cfg.NUM_DIR_BINS)
                weights = positives.type_as(dir_logit)
                weights /= torch.clamp(weights.sum(-1, keepdim=True), min=1.0)

                weight = weights[:, start_idx:start_idx + box_pred.shape[1]]
                dir_target = dir_targets[:, start_idx:start_idx + box_pred.shape[1]]
                dir_loss = self.dir_loss_func(dir_logit, dir_target, weights=weight)
                dir_loss = dir_loss.sum() / batch_size
                dir_loss = dir_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['dir_weight']
                box_losses += dir_loss
                tb_dict['rpn_loss_dir'] = tb_dict.get('rpn_loss_dir', 0) + dir_loss.item()
            start_idx += box_pred.shape[1]
        return box_losses, tb_dict
| 17,041
| 44.566845
| 117
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/dense_heads/center_head.py
|
import copy
import numpy as np
import torch
import torch.nn as nn
from torch.nn.init import kaiming_normal_
from ..model_utils import model_nms_utils
from ..model_utils import centernet_utils
from ...utils import loss_utils
class SeparateHead(nn.Module):
    """
    A bundle of small convolutional branches sharing one input feature map.

    Each entry of ``sep_head_dict`` (name -> {'out_channels', 'num_conv'}) becomes
    its own stack of ``num_conv`` 3x3 convs: (num_conv - 1) Conv-BN-ReLU blocks
    followed by a plain output conv. Branches are registered on the module under
    their dict names; forward returns {name: branch(x)}.
    """

    def __init__(self, input_channels, sep_head_dict, init_bias=-2.19, use_bias=False):
        super().__init__()
        self.sep_head_dict = sep_head_dict

        for head_name in self.sep_head_dict:
            out_channels = self.sep_head_dict[head_name]['out_channels']
            num_conv = self.sep_head_dict[head_name]['num_conv']

            layers = []
            for _ in range(num_conv - 1):
                layers.append(nn.Sequential(
                    nn.Conv2d(input_channels, input_channels, kernel_size=3, stride=1, padding=1, bias=use_bias),
                    nn.BatchNorm2d(input_channels),
                    nn.ReLU()
                ))
            layers.append(nn.Conv2d(input_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=True))
            branch = nn.Sequential(*layers)

            if 'hm' in head_name:
                # heatmap branch: bias logits towards "no object" (focal-loss prior)
                branch[-1].bias.data.fill_(init_bias)
            else:
                for m in branch.modules():
                    if isinstance(m, nn.Conv2d):
                        kaiming_normal_(m.weight.data)
                        if hasattr(m, "bias") and m.bias is not None:
                            nn.init.constant_(m.bias, 0)

            self.__setattr__(head_name, branch)

    def forward(self, x):
        """Run every branch on x; returns {branch_name: output tensor}."""
        return {name: self.__getattr__(name)(x) for name in self.sep_head_dict}
class CenterHead(nn.Module):
    """
    CenterPoint-style detection head: a shared conv stem followed by one
    SeparateHead per class group (CLASS_NAMES_EACH_HEAD). Targets are Gaussian
    heatmaps plus per-object box regression; decoding picks heatmap peaks and
    optionally NMS-filters them.
    """

    def __init__(self, model_cfg, input_channels, num_class, class_names, grid_size, point_cloud_range, voxel_size,
                 predict_boxes_when_training=True):
        super().__init__()
        self.model_cfg = model_cfg
        self.num_class = num_class
        self.grid_size = grid_size
        self.point_cloud_range = point_cloud_range
        self.voxel_size = voxel_size
        self.feature_map_stride = self.model_cfg.TARGET_ASSIGNER_CONFIG.get('FEATURE_MAP_STRIDE', None)

        self.class_names = class_names
        self.class_names_each_head = []
        self.class_id_mapping_each_head = []

        # per head: the class names it owns and their 0-based global class ids
        for cur_class_names in self.model_cfg.CLASS_NAMES_EACH_HEAD:
            self.class_names_each_head.append([x for x in cur_class_names if x in class_names])
            cur_class_id_mapping = torch.from_numpy(np.array(
                [self.class_names.index(x) for x in cur_class_names if x in class_names]
            )).cuda()
            self.class_id_mapping_each_head.append(cur_class_id_mapping)

        total_classes = sum([len(x) for x in self.class_names_each_head])
        assert total_classes == len(self.class_names), f'class_names_each_head={self.class_names_each_head}'

        self.shared_conv = nn.Sequential(
            nn.Conv2d(
                input_channels, self.model_cfg.SHARED_CONV_CHANNEL, 3, stride=1, padding=1,
                bias=self.model_cfg.get('USE_BIAS_BEFORE_NORM', False)
            ),
            nn.BatchNorm2d(self.model_cfg.SHARED_CONV_CHANNEL),
            nn.ReLU(),
        )

        self.heads_list = nn.ModuleList()
        self.separate_head_cfg = self.model_cfg.SEPARATE_HEAD_CFG
        for idx, cur_class_names in enumerate(self.class_names_each_head):
            cur_head_dict = copy.deepcopy(self.separate_head_cfg.HEAD_DICT)
            # heatmap branch: one channel per class owned by this head
            cur_head_dict['hm'] = dict(out_channels=len(cur_class_names), num_conv=self.model_cfg.NUM_HM_CONV)
            self.heads_list.append(
                SeparateHead(
                    input_channels=self.model_cfg.SHARED_CONV_CHANNEL,
                    sep_head_dict=cur_head_dict,
                    init_bias=-2.19,
                    use_bias=self.model_cfg.get('USE_BIAS_BEFORE_NORM', False)
                )
            )
        self.predict_boxes_when_training = predict_boxes_when_training
        self.forward_ret_dict = {}
        self.build_losses()

    def build_losses(self):
        """Register the heatmap focal loss and the box regression loss."""
        self.add_module('hm_loss_func', loss_utils.FocalLossCenterNet())
        self.add_module('reg_loss_func', loss_utils.RegLossCenterNet())

    def assign_target_of_single_head(
            self, num_classes, gt_boxes, feature_map_size, feature_map_stride, num_max_objs=500,
            gaussian_overlap=0.1, min_radius=2
    ):
        """
        Build targets for one head.

        Args:
            num_classes: classes owned by this head
            gt_boxes: (N, 8+) boxes with a 1-based (per-head) class id in the last column
            feature_map_size: (2), [x, y]
            feature_map_stride: downsample factor between voxel grid and feature map
            num_max_objs: fixed target capacity per sample
            gaussian_overlap / min_radius: Gaussian radius parameters
        Returns:
            heatmap: (num_classes, size_y, size_x) Gaussian heatmap
            ret_boxes: (num_max_objs, code) regression targets:
                [dx, dy, z, log(dims), cos(ry), sin(ry), extras...]
            inds: (num_max_objs,) flattened feature-map index of each center
            mask: (num_max_objs,) 1 for valid slots, 0 for padding
        """
        heatmap = gt_boxes.new_zeros(num_classes, feature_map_size[1], feature_map_size[0])
        ret_boxes = gt_boxes.new_zeros((num_max_objs, gt_boxes.shape[-1] - 1 + 1))
        inds = gt_boxes.new_zeros(num_max_objs).long()
        mask = gt_boxes.new_zeros(num_max_objs).long()

        x, y, z = gt_boxes[:, 0], gt_boxes[:, 1], gt_boxes[:, 2]
        coord_x = (x - self.point_cloud_range[0]) / self.voxel_size[0] / feature_map_stride
        coord_y = (y - self.point_cloud_range[1]) / self.voxel_size[1] / feature_map_stride
        coord_x = torch.clamp(coord_x, min=0, max=feature_map_size[0] - 0.5)  # bugfixed: 1e-6 does not work for center.int()
        coord_y = torch.clamp(coord_y, min=0, max=feature_map_size[1] - 0.5)  #
        center = torch.cat((coord_x[:, None], coord_y[:, None]), dim=-1)
        center_int = center.int()
        center_int_float = center_int.float()

        dx, dy, dz = gt_boxes[:, 3], gt_boxes[:, 4], gt_boxes[:, 5]
        dx = dx / self.voxel_size[0] / feature_map_stride
        dy = dy / self.voxel_size[1] / feature_map_stride

        radius = centernet_utils.gaussian_radius(dx, dy, min_overlap=gaussian_overlap)
        radius = torch.clamp_min(radius.int(), min=min_radius)

        for k in range(min(num_max_objs, gt_boxes.shape[0])):
            if dx[k] <= 0 or dy[k] <= 0:
                continue

            if not (0 <= center_int[k][0] <= feature_map_size[0] and 0 <= center_int[k][1] <= feature_map_size[1]):
                continue

            cur_class_id = (gt_boxes[k, -1] - 1).long()
            centernet_utils.draw_gaussian_to_heatmap(heatmap[cur_class_id], center[k], radius[k].item())

            # flat (row-major) feature-map index of the integer center cell
            inds[k] = center_int[k, 1] * feature_map_size[0] + center_int[k, 0]
            mask[k] = 1

            # sub-cell offset of the true center from the integer cell
            ret_boxes[k, 0:2] = center[k] - center_int_float[k].float()
            ret_boxes[k, 2] = z[k]
            ret_boxes[k, 3:6] = gt_boxes[k, 3:6].log()
            ret_boxes[k, 6] = torch.cos(gt_boxes[k, 6])
            ret_boxes[k, 7] = torch.sin(gt_boxes[k, 6])
            if gt_boxes.shape[1] > 8:
                # extra attributes (e.g. velocity), excluding the class column
                ret_boxes[k, 8:] = gt_boxes[k, 7:-1]

        return heatmap, ret_boxes, inds, mask

    def assign_targets(self, gt_boxes, feature_map_size=None, **kwargs):
        """
        Build targets for every head over the whole batch.

        Args:
            gt_boxes: (B, M, 8)
            feature_map_size: (2) [H, W]
        Returns:
            dict of per-head lists: 'heatmaps', 'target_boxes', 'inds', 'masks'
            (each element batched over B).
        """
        feature_map_size = feature_map_size[::-1]  # [H, W] ==> [x, y]
        target_assigner_cfg = self.model_cfg.TARGET_ASSIGNER_CONFIG
        # feature_map_size = self.grid_size[:2] // target_assigner_cfg.FEATURE_MAP_STRIDE

        batch_size = gt_boxes.shape[0]
        ret_dict = {
            'heatmaps': [],
            'target_boxes': [],
            'inds': [],
            'masks': [],
            'heatmap_masks': []
        }

        all_names = np.array(['bg', *self.class_names])
        for idx, cur_class_names in enumerate(self.class_names_each_head):
            heatmap_list, target_boxes_list, inds_list, masks_list = [], [], [], []
            for bs_idx in range(batch_size):
                cur_gt_boxes = gt_boxes[bs_idx]
                gt_class_names = all_names[cur_gt_boxes[:, -1].cpu().long().numpy()]

                # select only the boxes this head owns and relabel to per-head ids
                gt_boxes_single_head = []

                for idx, name in enumerate(gt_class_names):
                    if name not in cur_class_names:
                        continue
                    temp_box = cur_gt_boxes[idx]
                    # NOTE: temp_box is a view, so this writes the per-head label
                    # back into the input gt_boxes tensor in place
                    temp_box[-1] = cur_class_names.index(name) + 1
                    gt_boxes_single_head.append(temp_box[None, :])

                if len(gt_boxes_single_head) == 0:
                    gt_boxes_single_head = cur_gt_boxes[:0, :]
                else:
                    gt_boxes_single_head = torch.cat(gt_boxes_single_head, dim=0)

                heatmap, ret_boxes, inds, mask = self.assign_target_of_single_head(
                    num_classes=len(cur_class_names), gt_boxes=gt_boxes_single_head.cpu(),
                    feature_map_size=feature_map_size, feature_map_stride=target_assigner_cfg.FEATURE_MAP_STRIDE,
                    num_max_objs=target_assigner_cfg.NUM_MAX_OBJS,
                    gaussian_overlap=target_assigner_cfg.GAUSSIAN_OVERLAP,
                    min_radius=target_assigner_cfg.MIN_RADIUS,
                )
                heatmap_list.append(heatmap.to(gt_boxes_single_head.device))
                target_boxes_list.append(ret_boxes.to(gt_boxes_single_head.device))
                inds_list.append(inds.to(gt_boxes_single_head.device))
                masks_list.append(mask.to(gt_boxes_single_head.device))

            ret_dict['heatmaps'].append(torch.stack(heatmap_list, dim=0))
            ret_dict['target_boxes'].append(torch.stack(target_boxes_list, dim=0))
            ret_dict['inds'].append(torch.stack(inds_list, dim=0))
            ret_dict['masks'].append(torch.stack(masks_list, dim=0))
        return ret_dict

    def sigmoid(self, x):
        """Sigmoid clamped away from 0/1 for numerical stability of the focal loss."""
        y = torch.clamp(x.sigmoid(), min=1e-4, max=1 - 1e-4)
        return y

    def get_loss(self):
        """Sum of heatmap focal loss and weighted box-regression loss over all heads."""
        pred_dicts = self.forward_ret_dict['pred_dicts']
        target_dicts = self.forward_ret_dict['target_dicts']

        tb_dict = {}
        loss = 0

        for idx, pred_dict in enumerate(pred_dicts):
            pred_dict['hm'] = self.sigmoid(pred_dict['hm'])
            hm_loss = self.hm_loss_func(pred_dict['hm'], target_dicts['heatmaps'][idx])
            hm_loss *= self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['cls_weight']

            target_boxes = target_dicts['target_boxes'][idx]
            # concatenate the regression branches in the configured order
            pred_boxes = torch.cat([pred_dict[head_name] for head_name in self.separate_head_cfg.HEAD_ORDER], dim=1)

            reg_loss = self.reg_loss_func(
                pred_boxes, target_dicts['masks'][idx], target_dicts['inds'][idx], target_boxes
            )
            loc_loss = (reg_loss * reg_loss.new_tensor(self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['code_weights'])).sum()
            loc_loss = loc_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['loc_weight']

            loss += hm_loss + loc_loss
            tb_dict['hm_loss_head_%d' % idx] = hm_loss.item()
            tb_dict['loc_loss_head_%d' % idx] = loc_loss.item()

        tb_dict['rpn_loss'] = loss.item()
        return loss, tb_dict

    def generate_predicted_boxes(self, batch_size, pred_dicts):
        """
        Decode each head's output into boxes (top-K heatmap peaks), remap
        per-head labels to global class ids, optionally apply NMS, and merge
        all heads per sample. Returned labels are 1-based (+1 at the end).
        """
        post_process_cfg = self.model_cfg.POST_PROCESSING
        post_center_limit_range = torch.tensor(post_process_cfg.POST_CENTER_LIMIT_RANGE).cuda().float()

        ret_dict = [{
            'pred_boxes': [],
            'pred_scores': [],
            'pred_labels': [],
        } for k in range(batch_size)]

        for idx, pred_dict in enumerate(pred_dicts):
            batch_hm = pred_dict['hm'].sigmoid()
            batch_center = pred_dict['center']
            batch_center_z = pred_dict['center_z']
            batch_dim = pred_dict['dim'].exp()  # dims were regressed in log space
            batch_rot_cos = pred_dict['rot'][:, 0].unsqueeze(dim=1)
            batch_rot_sin = pred_dict['rot'][:, 1].unsqueeze(dim=1)
            batch_vel = pred_dict['vel'] if 'vel' in self.separate_head_cfg.HEAD_ORDER else None

            final_pred_dicts = centernet_utils.decode_bbox_from_heatmap(
                heatmap=batch_hm, rot_cos=batch_rot_cos, rot_sin=batch_rot_sin,
                center=batch_center, center_z=batch_center_z, dim=batch_dim, vel=batch_vel,
                point_cloud_range=self.point_cloud_range, voxel_size=self.voxel_size,
                feature_map_stride=self.feature_map_stride,
                K=post_process_cfg.MAX_OBJ_PER_SAMPLE,
                circle_nms=(post_process_cfg.NMS_CONFIG.NMS_TYPE == 'circle_nms'),
                score_thresh=post_process_cfg.SCORE_THRESH,
                post_center_limit_range=post_center_limit_range
            )

            for k, final_dict in enumerate(final_pred_dicts):
                # per-head label -> 0-based global class id
                final_dict['pred_labels'] = self.class_id_mapping_each_head[idx][final_dict['pred_labels'].long()]
                if post_process_cfg.NMS_CONFIG.NMS_TYPE != 'circle_nms':
                    selected, selected_scores = model_nms_utils.class_agnostic_nms(
                        box_scores=final_dict['pred_scores'], box_preds=final_dict['pred_boxes'],
                        nms_config=post_process_cfg.NMS_CONFIG,
                        score_thresh=None
                    )

                    final_dict['pred_boxes'] = final_dict['pred_boxes'][selected]
                    final_dict['pred_scores'] = selected_scores
                    final_dict['pred_labels'] = final_dict['pred_labels'][selected]

                ret_dict[k]['pred_boxes'].append(final_dict['pred_boxes'])
                ret_dict[k]['pred_scores'].append(final_dict['pred_scores'])
                ret_dict[k]['pred_labels'].append(final_dict['pred_labels'])

        for k in range(batch_size):
            ret_dict[k]['pred_boxes'] = torch.cat(ret_dict[k]['pred_boxes'], dim=0)
            ret_dict[k]['pred_scores'] = torch.cat(ret_dict[k]['pred_scores'], dim=0)
            ret_dict[k]['pred_labels'] = torch.cat(ret_dict[k]['pred_labels'], dim=0) + 1

        return ret_dict

    @staticmethod
    def reorder_rois_for_refining(batch_size, pred_dicts):
        """Pack per-sample predictions into fixed-size, zero-padded ROI tensors."""
        num_max_rois = max([len(cur_dict['pred_boxes']) for cur_dict in pred_dicts])
        num_max_rois = max(1, num_max_rois)  # at least one faked rois to avoid error
        pred_boxes = pred_dicts[0]['pred_boxes']

        rois = pred_boxes.new_zeros((batch_size, num_max_rois, pred_boxes.shape[-1]))
        roi_scores = pred_boxes.new_zeros((batch_size, num_max_rois))
        roi_labels = pred_boxes.new_zeros((batch_size, num_max_rois)).long()

        for bs_idx in range(batch_size):
            num_boxes = len(pred_dicts[bs_idx]['pred_boxes'])

            rois[bs_idx, :num_boxes, :] = pred_dicts[bs_idx]['pred_boxes']
            roi_scores[bs_idx, :num_boxes] = pred_dicts[bs_idx]['pred_scores']
            roi_labels[bs_idx, :num_boxes] = pred_dicts[bs_idx]['pred_labels']
        return rois, roi_scores, roi_labels

    def forward(self, data_dict):
        """
        Run all heads; during training also assign targets. When decoding,
        writes either ROI tensors (for a refinement stage) or
        'final_box_dicts' into data_dict.
        """
        spatial_features_2d = data_dict['spatial_features_2d']
        x = self.shared_conv(spatial_features_2d)

        pred_dicts = []
        for head in self.heads_list:
            pred_dicts.append(head(x))

        if self.training:
            target_dict = self.assign_targets(
                data_dict['gt_boxes'], feature_map_size=spatial_features_2d.size()[2:],
                feature_map_stride=data_dict.get('spatial_features_2d_strides', None)
            )
            self.forward_ret_dict['target_dicts'] = target_dict

        self.forward_ret_dict['pred_dicts'] = pred_dicts

        if not self.training or self.predict_boxes_when_training:
            pred_dicts = self.generate_predicted_boxes(
                data_dict['batch_size'], pred_dicts
            )

            if self.predict_boxes_when_training:
                rois, roi_scores, roi_labels = self.reorder_rois_for_refining(data_dict['batch_size'], pred_dicts)
                data_dict['rois'] = rois
                data_dict['roi_scores'] = roi_scores
                data_dict['roi_labels'] = roi_labels
                data_dict['has_class_labels'] = True
            else:
                data_dict['final_box_dicts'] = pred_dicts

        return data_dict
class ActiveCenterHead(nn.Module):
def __init__(self, model_cfg, input_channels, num_class, class_names, grid_size, point_cloud_range, voxel_size,
predict_boxes_when_training=True):
super().__init__()
self.model_cfg = model_cfg
self.num_class = num_class
self.grid_size = grid_size
self.point_cloud_range = point_cloud_range
self.voxel_size = voxel_size
self.feature_map_stride = self.model_cfg.TARGET_ASSIGNER_CONFIG.get('FEATURE_MAP_STRIDE', None)
self.class_names = class_names
self.class_names_each_head = []
self.class_id_mapping_each_head = []
for cur_class_names in self.model_cfg.CLASS_NAMES_EACH_HEAD:
self.class_names_each_head.append([x for x in cur_class_names if x in class_names])
cur_class_id_mapping = torch.from_numpy(np.array(
[self.class_names.index(x) for x in cur_class_names if x in class_names]
)).cuda()
self.class_id_mapping_each_head.append(cur_class_id_mapping)
total_classes = sum([len(x) for x in self.class_names_each_head])
assert total_classes == len(self.class_names), f'class_names_each_head={self.class_names_each_head}'
self.shared_conv = nn.Sequential(
nn.Conv2d(
input_channels, self.model_cfg.SHARED_CONV_CHANNEL, 3, stride=1, padding=1,
bias=self.model_cfg.get('USE_BIAS_BEFORE_NORM', False)
),
nn.BatchNorm2d(self.model_cfg.SHARED_CONV_CHANNEL),
nn.ReLU(),
)
self.heads_list = nn.ModuleList()
self.separate_head_cfg = self.model_cfg.SEPARATE_HEAD_CFG
for idx, cur_class_names in enumerate(self.class_names_each_head):
cur_head_dict = copy.deepcopy(self.separate_head_cfg.HEAD_DICT)
cur_head_dict['hm'] = dict(out_channels=len(cur_class_names), num_conv=self.model_cfg.NUM_HM_CONV)
self.heads_list.append(
SeparateHead(
input_channels=self.model_cfg.SHARED_CONV_CHANNEL,
sep_head_dict=cur_head_dict,
init_bias=-2.19,
use_bias=self.model_cfg.get('USE_BIAS_BEFORE_NORM', False)
)
)
self.predict_boxes_when_training = predict_boxes_when_training
self.forward_ret_dict = {}
self.build_losses()
def build_losses(self):
self.add_module('hm_loss_func', loss_utils.FocalLossCenterNet())
self.add_module('reg_loss_func', loss_utils.RegLossCenterNet())
def assign_target_of_single_head(
self, num_classes, gt_boxes, feature_map_size, feature_map_stride, num_max_objs=500,
gaussian_overlap=0.1, min_radius=2
):
"""
Args:
gt_boxes: (N, 8)
feature_map_size: (2), [x, y]
Returns:
"""
heatmap = gt_boxes.new_zeros(num_classes, feature_map_size[1], feature_map_size[0])
ret_boxes = gt_boxes.new_zeros((num_max_objs, gt_boxes.shape[-1] - 1 + 1))
inds = gt_boxes.new_zeros(num_max_objs).long()
mask = gt_boxes.new_zeros(num_max_objs).long()
x, y, z = gt_boxes[:, 0], gt_boxes[:, 1], gt_boxes[:, 2]
coord_x = (x - self.point_cloud_range[0]) / self.voxel_size[0] / feature_map_stride
coord_y = (y - self.point_cloud_range[1]) / self.voxel_size[1] / feature_map_stride
coord_x = torch.clamp(coord_x, min=0, max=feature_map_size[0] - 0.5) # bugfixed: 1e-6 does not work for center.int()
coord_y = torch.clamp(coord_y, min=0, max=feature_map_size[1] - 0.5) #
center = torch.cat((coord_x[:, None], coord_y[:, None]), dim=-1)
center_int = center.int()
center_int_float = center_int.float()
dx, dy, dz = gt_boxes[:, 3], gt_boxes[:, 4], gt_boxes[:, 5]
dx = dx / self.voxel_size[0] / feature_map_stride
dy = dy / self.voxel_size[1] / feature_map_stride
radius = centernet_utils.gaussian_radius(dx, dy, min_overlap=gaussian_overlap)
radius = torch.clamp_min(radius.int(), min=min_radius)
for k in range(min(num_max_objs, gt_boxes.shape[0])):
if dx[k] <= 0 or dy[k] <= 0:
continue
if not (0 <= center_int[k][0] <= feature_map_size[0] and 0 <= center_int[k][1] <= feature_map_size[1]):
continue
cur_class_id = (gt_boxes[k, -1] - 1).long()
centernet_utils.draw_gaussian_to_heatmap(heatmap[cur_class_id], center[k], radius[k].item())
inds[k] = center_int[k, 1] * feature_map_size[0] + center_int[k, 0]
mask[k] = 1
ret_boxes[k, 0:2] = center[k] - center_int_float[k].float()
ret_boxes[k, 2] = z[k]
ret_boxes[k, 3:6] = gt_boxes[k, 3:6].log()
ret_boxes[k, 6] = torch.cos(gt_boxes[k, 6])
ret_boxes[k, 7] = torch.sin(gt_boxes[k, 6])
if gt_boxes.shape[1] > 8:
ret_boxes[k, 8:] = gt_boxes[k, 7:-1]
return heatmap, ret_boxes, inds, mask
def assign_targets(self, gt_boxes, feature_map_size=None, **kwargs):
"""
Args:
gt_boxes: (B, M, 8)
range_image_polar: (B, 3, H, W)
feature_map_size: (2) [H, W]
spatial_cartesian: (B, 4, H, W)
Returns:
"""
feature_map_size = feature_map_size[::-1] # [H, W] ==> [x, y]
target_assigner_cfg = self.model_cfg.TARGET_ASSIGNER_CONFIG
# feature_map_size = self.grid_size[:2] // target_assigner_cfg.FEATURE_MAP_STRIDE
batch_size = gt_boxes.shape[0]
ret_dict = {
'heatmaps': [],
'target_boxes': [],
'inds': [],
'masks': [],
'heatmap_masks': []
}
all_names = np.array(['bg', *self.class_names])
for idx, cur_class_names in enumerate(self.class_names_each_head):
heatmap_list, target_boxes_list, inds_list, masks_list = [], [], [], []
for bs_idx in range(batch_size):
cur_gt_boxes = gt_boxes[bs_idx]
gt_class_names = all_names[cur_gt_boxes[:, -1].cpu().long().numpy()]
gt_boxes_single_head = []
for idx, name in enumerate(gt_class_names):
if name not in cur_class_names:
continue
temp_box = cur_gt_boxes[idx]
temp_box[-1] = cur_class_names.index(name) + 1
gt_boxes_single_head.append(temp_box[None, :])
if len(gt_boxes_single_head) == 0:
gt_boxes_single_head = cur_gt_boxes[:0, :]
else:
gt_boxes_single_head = torch.cat(gt_boxes_single_head, dim=0)
heatmap, ret_boxes, inds, mask = self.assign_target_of_single_head(
num_classes=len(cur_class_names), gt_boxes=gt_boxes_single_head.cpu(),
feature_map_size=feature_map_size, feature_map_stride=target_assigner_cfg.FEATURE_MAP_STRIDE,
num_max_objs=target_assigner_cfg.NUM_MAX_OBJS,
gaussian_overlap=target_assigner_cfg.GAUSSIAN_OVERLAP,
min_radius=target_assigner_cfg.MIN_RADIUS,
)
heatmap_list.append(heatmap.to(gt_boxes_single_head.device))
target_boxes_list.append(ret_boxes.to(gt_boxes_single_head.device))
inds_list.append(inds.to(gt_boxes_single_head.device))
masks_list.append(mask.to(gt_boxes_single_head.device))
ret_dict['heatmaps'].append(torch.stack(heatmap_list, dim=0))
ret_dict['target_boxes'].append(torch.stack(target_boxes_list, dim=0))
ret_dict['inds'].append(torch.stack(inds_list, dim=0))
ret_dict['masks'].append(torch.stack(masks_list, dim=0))
return ret_dict
def sigmoid(self, x):
y = torch.clamp(x.sigmoid(), min=1e-4, max=1 - 1e-4)
return y
    def get_loss(self):
        """
        Sum the heatmap (classification) and box-regression losses over all heads.

        Reads predictions/targets stashed in self.forward_ret_dict by forward().
        Returns:
            loss: scalar total RPN loss
            tb_dict: per-head scalars for tensorboard logging
        """
        pred_dicts = self.forward_ret_dict['pred_dicts']
        target_dicts = self.forward_ret_dict['target_dicts']
        tb_dict = {}
        loss = 0
        for idx, pred_dict in enumerate(pred_dicts):
            # clamped sigmoid (see self.sigmoid) keeps the heatmap loss numerically safe
            pred_dict['hm'] = self.sigmoid(pred_dict['hm'])
            hm_loss = self.hm_loss_func(pred_dict['hm'], target_dicts['heatmaps'][idx])
            hm_loss *= self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['cls_weight']
            target_boxes = target_dicts['target_boxes'][idx]
            # concatenate the per-attribute regression maps in HEAD_ORDER so their
            # channel layout matches the target box encoding
            pred_boxes = torch.cat([pred_dict[head_name] for head_name in self.separate_head_cfg.HEAD_ORDER], dim=1)
            reg_loss = self.reg_loss_func(
                pred_boxes, target_dicts['masks'][idx], target_dicts['inds'][idx], target_boxes
            )
            # per-dimension code weights first, then the global localization weight
            loc_loss = (reg_loss * reg_loss.new_tensor(self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['code_weights'])).sum()
            loc_loss = loc_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['loc_weight']
            loss += hm_loss + loc_loss
            tb_dict['hm_loss_head_%d' % idx] = hm_loss.item()
            tb_dict['loc_loss_head_%d' % idx] = loc_loss.item()
        tb_dict['rpn_loss'] = loss.item()
        return loss, tb_dict
    def generate_predicted_boxes(self, batch_size, pred_dicts):
        """
        Decode the raw per-head predictions into final boxes for each sample.

        Args:
            batch_size: number of samples in the batch
            pred_dicts: list (one entry per head) of dicts with 'hm', 'center',
                'center_z', 'dim', 'rot' and optionally 'vel' dense maps
        Returns:
            ret_dict: list of length batch_size; each entry holds concatenated
                'pred_boxes', 'pred_scores' and 1-based 'pred_labels' from all heads
        """
        post_process_cfg = self.model_cfg.POST_PROCESSING
        post_center_limit_range = torch.tensor(post_process_cfg.POST_CENTER_LIMIT_RANGE).cuda().float()
        ret_dict = [{
            'pred_boxes': [],
            'pred_scores': [],
            'pred_labels': [],
        } for k in range(batch_size)]
        for idx, pred_dict in enumerate(pred_dicts):
            batch_hm = pred_dict['hm'].sigmoid()
            batch_center = pred_dict['center']
            batch_center_z = pred_dict['center_z']
            # dims are regressed in log space (see target encoding), hence exp()
            batch_dim = pred_dict['dim'].exp()
            batch_rot_cos = pred_dict['rot'][:, 0].unsqueeze(dim=1)
            batch_rot_sin = pred_dict['rot'][:, 1].unsqueeze(dim=1)
            batch_vel = pred_dict['vel'] if 'vel' in self.separate_head_cfg.HEAD_ORDER else None
            final_pred_dicts = centernet_utils.decode_bbox_from_heatmap(
                heatmap=batch_hm, rot_cos=batch_rot_cos, rot_sin=batch_rot_sin,
                center=batch_center, center_z=batch_center_z, dim=batch_dim, vel=batch_vel,
                point_cloud_range=self.point_cloud_range, voxel_size=self.voxel_size,
                feature_map_stride=self.feature_map_stride,
                K=post_process_cfg.MAX_OBJ_PER_SAMPLE,
                circle_nms=(post_process_cfg.NMS_CONFIG.NMS_TYPE == 'circle_nms'),
                score_thresh=post_process_cfg.SCORE_THRESH,
                post_center_limit_range=post_center_limit_range
            )
            for k, final_dict in enumerate(final_pred_dicts):
                # map head-local label indices back to global class ids
                final_dict['pred_labels'] = self.class_id_mapping_each_head[idx][final_dict['pred_labels'].long()]
                # circle_nms was already applied inside the decoder; otherwise run NMS here
                if post_process_cfg.NMS_CONFIG.NMS_TYPE != 'circle_nms':
                    selected, selected_scores = model_nms_utils.class_agnostic_nms(
                        box_scores=final_dict['pred_scores'], box_preds=final_dict['pred_boxes'],
                        nms_config=post_process_cfg.NMS_CONFIG,
                        score_thresh=None
                    )
                    final_dict['pred_boxes'] = final_dict['pred_boxes'][selected]
                    final_dict['pred_scores'] = selected_scores
                    final_dict['pred_labels'] = final_dict['pred_labels'][selected]
                ret_dict[k]['pred_boxes'].append(final_dict['pred_boxes'])
                ret_dict[k]['pred_scores'].append(final_dict['pred_scores'])
                ret_dict[k]['pred_labels'].append(final_dict['pred_labels'])
        for k in range(batch_size):
            ret_dict[k]['pred_boxes'] = torch.cat(ret_dict[k]['pred_boxes'], dim=0)
            ret_dict[k]['pred_scores'] = torch.cat(ret_dict[k]['pred_scores'], dim=0)
            # shift to 1-based labels to match the rest of the pipeline
            ret_dict[k]['pred_labels'] = torch.cat(ret_dict[k]['pred_labels'], dim=0) + 1
        return ret_dict
@staticmethod
def reorder_rois_for_refining(batch_size, pred_dicts):
num_max_rois = max([len(cur_dict['pred_boxes']) for cur_dict in pred_dicts])
num_max_rois = max(1, num_max_rois) # at least one faked rois to avoid error
pred_boxes = pred_dicts[0]['pred_boxes']
rois = pred_boxes.new_zeros((batch_size, num_max_rois, pred_boxes.shape[-1]))
roi_scores = pred_boxes.new_zeros((batch_size, num_max_rois))
roi_labels = pred_boxes.new_zeros((batch_size, num_max_rois)).long()
for bs_idx in range(batch_size):
num_boxes = len(pred_dicts[bs_idx]['pred_boxes'])
rois[bs_idx, :num_boxes, :] = pred_dicts[bs_idx]['pred_boxes']
roi_scores[bs_idx, :num_boxes] = pred_dicts[bs_idx]['pred_scores']
roi_labels[bs_idx, :num_boxes] = pred_dicts[bs_idx]['pred_labels']
return rois, roi_scores, roi_labels
    def forward(self, data_dict):
        """
        Run the shared conv and every separate head; assign targets when training
        and decode boxes when evaluation or a second-stage refinement needs them.

        Args:
            data_dict: must contain 'spatial_features_2d' (B, C, H, W), 'batch_size',
                and 'gt_boxes' when training
        Returns:
            data_dict with either 'rois'/'roi_scores'/'roi_labels' (two-stage) or
            'final_box_dicts' (single-stage inference) attached
        """
        spatial_features_2d = data_dict['spatial_features_2d']
        x = self.shared_conv(spatial_features_2d)
        pred_dicts = []
        for head in self.heads_list:
            pred_dicts.append(head(x))
        # expose the first head's raw heatmap as a BEV objectness score for consumers
        data_dict['bev_score'] = pred_dicts[0]['hm']
        if self.training:
            target_dict = self.assign_targets(
                data_dict['gt_boxes'], feature_map_size=spatial_features_2d.size()[2:],
                feature_map_stride=data_dict.get('spatial_features_2d_strides', None)
            )
            self.forward_ret_dict['target_dicts'] = target_dict
        self.forward_ret_dict['pred_dicts'] = pred_dicts
        if not self.training or self.predict_boxes_when_training:
            pred_dicts = self.generate_predicted_boxes(
                data_dict['batch_size'], pred_dicts
            )
            if self.predict_boxes_when_training:
                # two-stage setup: pad decoded boxes into dense ROI tensors for refinement
                rois, roi_scores, roi_labels = self.reorder_rois_for_refining(data_dict['batch_size'], pred_dicts)
                data_dict['rois'] = rois
                data_dict['roi_scores'] = roi_scores
                data_dict['roi_labels'] = roi_labels
                data_dict['has_class_labels'] = True
            else:
                data_dict['final_box_dicts'] = pred_dicts
        return data_dict
| 30,472
| 45.101362
| 125
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/dense_heads/anchor_head_semi.py
|
import numpy as np
import torch.nn as nn
from .anchor_head_template import AnchorHeadTemplate
class AnchorHeadSemi(AnchorHeadTemplate):
    """Anchor-based dense head with teacher/student modes for semi-supervised training.

    ``self.model_type`` must be set to 'origin', 'teacher' or 'student' before the
    first forward pass; it selects target assignment and box decoding behavior.
    """

    def __init__(self, model_cfg, input_channels, num_class, class_names, grid_size, voxel_size, point_cloud_range,
                 predict_boxes_when_training=True):
        super().__init__(
            model_cfg=model_cfg, num_class=num_class, class_names=class_names, grid_size=grid_size, point_cloud_range=point_cloud_range,
            predict_boxes_when_training=predict_boxes_when_training
        )

        self.num_anchors_per_location = sum(self.num_anchors_per_location)

        # 1x1 convs predict per-anchor class scores and box residuals on the BEV map
        self.conv_cls = nn.Conv2d(
            input_channels, self.num_anchors_per_location * self.num_class,
            kernel_size=1
        )
        self.conv_box = nn.Conv2d(
            input_channels, self.num_anchors_per_location * self.box_coder.code_size,
            kernel_size=1
        )

        if self.model_cfg.get('USE_DIRECTION_CLASSIFIER', None) is not None:
            self.conv_dir_cls = nn.Conv2d(
                input_channels,
                self.num_anchors_per_location * self.model_cfg.NUM_DIR_BINS,
                kernel_size=1
            )
        else:
            self.conv_dir_cls = None
        self.init_weights()
        self.model_type = None

    def init_weights(self):
        # focal-loss style init: bias chosen so the initial foreground prob is ~pi
        pi = 0.01
        nn.init.constant_(self.conv_cls.bias, -np.log((1 - pi) / pi))
        nn.init.normal_(self.conv_box.weight, mean=0, std=0.001)

    def _attach_batch_preds(self, data_dict, cls_preds, box_preds, dir_cls_preds):
        """Decode dense predictions into batched boxes and store them in data_dict."""
        batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
            batch_size=data_dict['batch_size'],
            cls_preds=cls_preds, box_preds=box_preds, dir_cls_preds=dir_cls_preds
        )
        data_dict['batch_cls_preds'] = batch_cls_preds
        data_dict['batch_box_preds'] = batch_box_preds
        data_dict['cls_preds_normalized'] = False

    def forward(self, data_dict):
        spatial_features_2d = data_dict['spatial_features_2d']

        cls_preds = self.conv_cls(spatial_features_2d)
        box_preds = self.conv_box(spatial_features_2d)

        cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous()  # [N, H, W, C]
        box_preds = box_preds.permute(0, 2, 3, 1).contiguous()  # [N, H, W, C]

        self.forward_ret_dict['cls_preds'] = cls_preds
        self.forward_ret_dict['box_preds'] = box_preds

        if self.conv_dir_cls is not None:
            dir_cls_preds = self.conv_dir_cls(spatial_features_2d)
            dir_cls_preds = dir_cls_preds.permute(0, 2, 3, 1).contiguous()
            self.forward_ret_dict['dir_cls_preds'] = dir_cls_preds
        else:
            dir_cls_preds = None

        if self.model_type == 'origin':
            if self.training:
                targets_dict = self.assign_targets(gt_boxes=data_dict['gt_boxes'])
                self.forward_ret_dict.update(targets_dict)
            if not self.training or self.predict_boxes_when_training:
                self._attach_batch_preds(data_dict, cls_preds, box_preds, dir_cls_preds)
        elif self.model_type == 'teacher':
            # the teacher always decodes boxes: its outputs become pseudo labels
            self._attach_batch_preds(data_dict, cls_preds, box_preds, dir_cls_preds)
        elif self.model_type == 'student':
            if self.training:
                # unlabeled batches carry no gt_boxes; skip target assignment for them
                if 'gt_boxes' in data_dict:
                    targets_dict = self.assign_targets(gt_boxes=data_dict['gt_boxes'])
                    self.forward_ret_dict.update(targets_dict)
            self._attach_batch_preds(data_dict, cls_preds, box_preds, dir_cls_preds)
        else:
            # fixed typo in the original message ('Unsupprted')
            raise Exception('Unsupported model type')

        return data_dict
| 4,321
| 39.018519
| 136
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/dense_heads/point_head_box.py
|
import torch
from ...utils import box_coder_utils, box_utils
from .point_head_template import PointHeadTemplate
class PointHeadBox(PointHeadTemplate):
    """
    A simple point-based segmentation head, which are used for PointRCNN.
    Reference Paper: https://arxiv.org/abs/1812.04244
    PointRCNN: 3D Object Proposal Generation and Detection from Point Cloud
    """
    def __init__(self, num_class, input_channels, model_cfg, predict_boxes_when_training=False, **kwargs):
        super().__init__(model_cfg=model_cfg, num_class=num_class)
        self.predict_boxes_when_training = predict_boxes_when_training

        # per-point classification branch
        self.cls_layers = self.make_fc_layers(
            fc_cfg=self.model_cfg.CLS_FC,
            input_channels=input_channels,
            output_channels=num_class
        )

        target_cfg = self.model_cfg.TARGET_CONFIG
        self.box_coder = getattr(box_coder_utils, target_cfg.BOX_CODER)(**target_cfg.BOX_CODER_CONFIG)

        # per-point box regression branch (output size taken from the box coder)
        self.box_layers = self.make_fc_layers(
            fc_cfg=self.model_cfg.REG_FC,
            input_channels=input_channels,
            output_channels=self.box_coder.code_size
        )

    def assign_targets(self, input_dict):
        """
        Args:
            input_dict:
                point_features: (N1 + N2 + N3 + ..., C)
                batch_size:
                point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
                gt_boxes (optional): (B, M, 8)
        Returns:
            point_cls_labels: (N1 + N2 + N3 + ...), long type, 0:background, -1:ignored
            point_part_labels: (N1 + N2 + N3 + ..., 3)
        """
        point_coords = input_dict['point_coords']
        gt_boxes = input_dict['gt_boxes']
        assert len(gt_boxes.shape) == 3, 'gt_boxes.shape=%s' % str(gt_boxes.shape)
        assert len(point_coords.shape) in [2], 'points.shape=%s' % str(point_coords.shape)

        batch_size = gt_boxes.shape[0]
        # slightly enlarged boxes define the "ignore" band around each object
        enlarged_boxes = box_utils.enlarge_box3d(
            gt_boxes.view(-1, gt_boxes.shape[-1]),
            extra_width=self.model_cfg.TARGET_CONFIG.GT_EXTRA_WIDTH
        ).view(batch_size, -1, gt_boxes.shape[-1])

        return self.assign_stack_targets(
            points=point_coords, gt_boxes=gt_boxes, extend_gt_boxes=enlarged_boxes,
            set_ignore_flag=True, use_ball_constraint=False,
            ret_part_labels=False, ret_box_labels=True
        )

    def get_loss(self, tb_dict=None):
        tb_dict = tb_dict if tb_dict is not None else {}
        cls_loss, cls_tb = self.get_cls_layer_loss()
        box_loss, box_tb = self.get_box_layer_loss()
        tb_dict.update(cls_tb)
        tb_dict.update(box_tb)
        return cls_loss + box_loss, tb_dict

    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size:
                point_features: (N1 + N2 + N3 + ..., C) or (B, N, C)
                point_features_before_fusion: (N1 + N2 + N3 + ..., C)
                point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
                point_labels (optional): (N1 + N2 + N3 + ...)
                gt_boxes (optional): (B, M, 8)
        Returns:
            batch_dict:
                point_cls_scores: (N1 + N2 + N3 + ..., 1)
                point_part_offset: (N1 + N2 + N3 + ..., 3)
        """
        if self.model_cfg.get('USE_POINT_FEATURES_BEFORE_FUSION', False):
            point_features = batch_dict['point_features_before_fusion']
        else:
            point_features = batch_dict['point_features']

        point_cls_preds = self.cls_layers(point_features)  # (total_points, num_class)
        point_box_preds = self.box_layers(point_features)  # (total_points, box_code_size)

        # per-point foreground confidence = sigmoid of the best class logit
        best_cls_logit, _ = point_cls_preds.max(dim=-1)
        batch_dict['point_cls_scores'] = torch.sigmoid(best_cls_logit)

        ret_dict = {
            'point_cls_preds': point_cls_preds,
            'point_box_preds': point_box_preds,
        }
        if self.training:
            targets_dict = self.assign_targets(batch_dict)
            ret_dict['point_cls_labels'] = targets_dict['point_cls_labels']
            ret_dict['point_box_labels'] = targets_dict['point_box_labels']

        if not self.training or self.predict_boxes_when_training:
            decoded_cls_preds, decoded_box_preds = self.generate_predicted_boxes(
                points=batch_dict['point_coords'][:, 1:4],
                point_cls_preds=point_cls_preds, point_box_preds=point_box_preds
            )
            batch_dict['batch_cls_preds'] = decoded_cls_preds
            batch_dict['batch_box_preds'] = decoded_box_preds
            batch_dict['batch_index'] = batch_dict['point_coords'][:, 0]
            batch_dict['cls_preds_normalized'] = False

        self.forward_ret_dict = ret_dict
        return batch_dict
| 4,930
| 41.508621
| 106
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/dense_heads/point_head_simple.py
|
import torch
from ...utils import box_utils
from .point_head_template import PointHeadTemplate
class PointHeadSimple(PointHeadTemplate):
    """
    A simple point-based segmentation head, which are used for PV-RCNN keypoint segmentaion.
    Reference Paper: https://arxiv.org/abs/1912.13192
    PV-RCNN: Point-Voxel Feature Set Abstraction for 3D Object Detection
    """
    def __init__(self, num_class, input_channels, model_cfg, **kwargs):
        super().__init__(model_cfg=model_cfg, num_class=num_class)
        self.cls_layers = self.make_fc_layers(
            fc_cfg=self.model_cfg.CLS_FC,
            input_channels=input_channels,
            output_channels=num_class
        )

    def assign_targets(self, input_dict):
        """
        Args:
            input_dict:
                point_features: (N1 + N2 + N3 + ..., C)
                batch_size:
                point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
                gt_boxes (optional): (B, M, 8)
        Returns:
            point_cls_labels: (N1 + N2 + N3 + ...), long type, 0:background, -1:ignored
            point_part_labels: (N1 + N2 + N3 + ..., 3)
        """
        point_coords = input_dict['point_coords']
        gt_boxes = input_dict['gt_boxes']
        assert gt_boxes.shape.__len__() == 3, 'gt_boxes.shape=%s' % str(gt_boxes.shape)
        assert point_coords.shape.__len__() in [2], 'points.shape=%s' % str(point_coords.shape)

        batch_size = gt_boxes.shape[0]
        # enlarged boxes mark the "ignore" band around objects for the foreground labels
        extend_gt_boxes = box_utils.enlarge_box3d(
            gt_boxes.view(-1, gt_boxes.shape[-1]), extra_width=self.model_cfg.TARGET_CONFIG.GT_EXTRA_WIDTH
        ).view(batch_size, -1, gt_boxes.shape[-1])
        targets_dict = self.assign_stack_targets(
            points=point_coords, gt_boxes=gt_boxes, extend_gt_boxes=extend_gt_boxes,
            set_ignore_flag=True, use_ball_constraint=False,
            ret_part_labels=False
        )
        return targets_dict

    def get_loss(self, tb_dict=None):
        tb_dict = {} if tb_dict is None else tb_dict
        point_loss_cls, tb_dict_1 = self.get_cls_layer_loss()

        point_loss = point_loss_cls
        tb_dict.update(tb_dict_1)
        return point_loss, tb_dict

    def _compute_point_cls(self, batch_dict):
        """Run the classification branch and store per-point scores in batch_dict.

        Shared by forward() and get_point_score() (their bodies were duplicated).
        Returns a ret_dict holding the raw logits.
        """
        if self.model_cfg.get('USE_POINT_FEATURES_BEFORE_FUSION', False):
            point_features = batch_dict['point_features_before_fusion']
        else:
            point_features = batch_dict['point_features']
        point_cls_preds = self.cls_layers(point_features)  # (total_points, num_class)
        point_cls_scores = torch.sigmoid(point_cls_preds)
        batch_dict['point_cls_scores'], _ = point_cls_scores.max(dim=-1)
        return {'point_cls_preds': point_cls_preds}

    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size:
                point_features: (N1 + N2 + N3 + ..., C) or (B, N, C)
                point_features_before_fusion: (N1 + N2 + N3 + ..., C)
                point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
                point_labels (optional): (N1 + N2 + N3 + ...)
                gt_boxes (optional): (B, M, 8)
        Returns:
            batch_dict:
                point_cls_scores: (N1 + N2 + N3 + ..., 1)
                point_part_offset: (N1 + N2 + N3 + ..., 3)
        """
        ret_dict = self._compute_point_cls(batch_dict)
        if self.training:
            targets_dict = self.assign_targets(batch_dict)
            ret_dict['point_cls_labels'] = targets_dict['point_cls_labels']
        self.forward_ret_dict = ret_dict
        return batch_dict

    def get_point_score(self, batch_dict):
        """Compute per-point scores without touching forward_ret_dict (inference utility)."""
        self._compute_point_cls(batch_dict)
        return batch_dict
| 4,255
| 39.150943
| 128
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/dense_heads/point_head_semi.py
|
import torch
from ...utils import box_utils
from .point_head_template import PointHeadTemplate
class PointHeadSemi(PointHeadTemplate):
    """
    A simple point-based segmentation head, which are used for PV-RCNN keypoint segmentaion.
    Reference Paper: https://arxiv.org/abs/1912.13192
    PV-RCNN: Point-Voxel Feature Set Abstraction for 3D Object Detection

    ``self.model_type`` must be set to 'origin', 'teacher' or 'student' before the
    first forward pass; it selects whether targets are assigned.
    """
    def __init__(self, num_class, input_channels, model_cfg, **kwargs):
        super().__init__(model_cfg=model_cfg, num_class=num_class)
        self.cls_layers = self.make_fc_layers(
            fc_cfg=self.model_cfg.CLS_FC,
            input_channels=input_channels,
            output_channels=num_class
        )
        self.model_type = None

    def assign_targets(self, input_dict):
        """
        Args:
            input_dict:
                point_features: (N1 + N2 + N3 + ..., C)
                batch_size:
                point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
                gt_boxes (optional): (B, M, 8)
        Returns:
            point_cls_labels: (N1 + N2 + N3 + ...), long type, 0:background, -1:ignored
            point_part_labels: (N1 + N2 + N3 + ..., 3)
        """
        point_coords = input_dict['point_coords']
        gt_boxes = input_dict['gt_boxes']
        assert gt_boxes.shape.__len__() == 3, 'gt_boxes.shape=%s' % str(gt_boxes.shape)
        assert point_coords.shape.__len__() in [2], 'points.shape=%s' % str(point_coords.shape)

        batch_size = gt_boxes.shape[0]
        # enlarged boxes mark the "ignore" band around objects for the foreground labels
        extend_gt_boxes = box_utils.enlarge_box3d(
            gt_boxes.view(-1, gt_boxes.shape[-1]), extra_width=self.model_cfg.TARGET_CONFIG.GT_EXTRA_WIDTH
        ).view(batch_size, -1, gt_boxes.shape[-1])
        targets_dict = self.assign_stack_targets(
            points=point_coords, gt_boxes=gt_boxes, extend_gt_boxes=extend_gt_boxes,
            set_ignore_flag=True, use_ball_constraint=False,
            ret_part_labels=False
        )
        return targets_dict

    def get_loss(self, tb_dict=None):
        tb_dict = {} if tb_dict is None else tb_dict
        point_loss_cls, tb_dict_1 = self.get_cls_layer_loss()

        point_loss = point_loss_cls
        tb_dict.update(tb_dict_1)
        return point_loss, tb_dict

    def _compute_point_cls(self, batch_dict):
        """Run the classification branch and store per-point scores in batch_dict.

        Shared by forward() and get_point_score() (their bodies were duplicated).
        Returns a ret_dict holding the raw logits.
        """
        if self.model_cfg.get('USE_POINT_FEATURES_BEFORE_FUSION', False):
            point_features = batch_dict['point_features_before_fusion']
        else:
            point_features = batch_dict['point_features']
        point_cls_preds = self.cls_layers(point_features)  # (total_points, num_class)
        point_cls_scores = torch.sigmoid(point_cls_preds)
        batch_dict['point_cls_scores'], _ = point_cls_scores.max(dim=-1)
        return {'point_cls_preds': point_cls_preds}

    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size:
                point_features: (N1 + N2 + N3 + ..., C) or (B, N, C)
                point_features_before_fusion: (N1 + N2 + N3 + ..., C)
                point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
                point_labels (optional): (N1 + N2 + N3 + ...)
                gt_boxes (optional): (B, M, 8)
        Returns:
            batch_dict:
                point_cls_scores: (N1 + N2 + N3 + ..., 1)
                point_part_offset: (N1 + N2 + N3 + ..., 3)
        """
        ret_dict = self._compute_point_cls(batch_dict)
        if self.model_type == 'origin':
            if self.training:
                targets_dict = self.assign_targets(batch_dict)
                ret_dict['point_cls_labels'] = targets_dict['point_cls_labels']
            self.forward_ret_dict = ret_dict
        elif self.model_type == 'teacher':
            self.forward_ret_dict = ret_dict
        elif self.model_type == 'student':
            # unlabeled student batches carry no gt_boxes; skip target assignment then
            if self.training and 'gt_boxes' in batch_dict:
                targets_dict = self.assign_targets(batch_dict)
                ret_dict['point_cls_labels'] = targets_dict['point_cls_labels']
            self.forward_ret_dict = ret_dict
        else:
            # fixed typo in the original message ('Unsupprted')
            raise Exception('Unsupported model type')
        return batch_dict

    def get_point_score(self, batch_dict):
        """Compute per-point scores without touching forward_ret_dict (inference utility)."""
        self._compute_point_cls(batch_dict)
        return batch_dict
| 4,820
| 38.842975
| 128
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/dense_heads/__init__.py
|
from .anchor_head_multi import AnchorHeadMulti
from .anchor_head_single import AnchorHeadSingle
from .anchor_head_single import ActiveAnchorHeadSingle1
from .anchor_head_single import AnchorHeadSingle_TQS
from .anchor_head_template import AnchorHeadTemplate
from .point_head_box import PointHeadBox
from .point_head_simple import PointHeadSimple
from .point_intra_part_head import PointIntraPartOffsetHead
from .center_head import CenterHead
from .center_head_semi import CenterHeadSemi
from .center_head import ActiveCenterHead
from .IASSD_head import IASSD_Head
from .anchor_head_semi import AnchorHeadSemi
from .point_head_semi import PointHeadSemi
# Registry mapping config NAME strings to dense-head classes.
# Note: the duplicate 'ActiveAnchorHeadSingle1' entry was removed (a repeated
# dict key is silently overwritten by the last occurrence).
__all__ = {
    'AnchorHeadTemplate': AnchorHeadTemplate,
    'AnchorHeadSingle': AnchorHeadSingle,
    'ActiveAnchorHeadSingle1': ActiveAnchorHeadSingle1,
    'AnchorHeadSingle_TQS': AnchorHeadSingle_TQS,
    'PointIntraPartOffsetHead': PointIntraPartOffsetHead,
    'PointHeadSimple': PointHeadSimple,
    'PointHeadBox': PointHeadBox,
    'AnchorHeadMulti': AnchorHeadMulti,
    'CenterHead': CenterHead,
    'CenterHeadSemi': CenterHeadSemi,
    'ActiveCenterHead': ActiveCenterHead,
    'IASSD_Head': IASSD_Head,
    'AnchorHeadSemi': AnchorHeadSemi,
    'PointHeadSemi': PointHeadSemi,
}
| 1,302
| 39.71875
| 59
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/dense_heads/point_intra_part_head.py
|
import torch
from ...utils import box_coder_utils, box_utils
from .point_head_template import PointHeadTemplate
class PointIntraPartOffsetHead(PointHeadTemplate):
    """
    Point-based head for predicting the intra-object part locations.
    Reference Paper: https://arxiv.org/abs/1907.03670
    From Points to Parts: 3D Object Detection from Point Cloud with Part-aware and Part-aggregation Network
    """
    def __init__(self, num_class, input_channels, model_cfg, predict_boxes_when_training=False, **kwargs):
        super().__init__(model_cfg=model_cfg, num_class=num_class)
        self.predict_boxes_when_training = predict_boxes_when_training
        # per-point classification branch
        self.cls_layers = self.make_fc_layers(
            fc_cfg=self.model_cfg.CLS_FC,
            input_channels=input_channels,
            output_channels=num_class
        )
        # intra-object part location branch (3 = x/y/z part offsets, squashed by sigmoid in forward)
        self.part_reg_layers = self.make_fc_layers(
            fc_cfg=self.model_cfg.PART_FC,
            input_channels=input_channels,
            output_channels=3
        )
        target_cfg = self.model_cfg.TARGET_CONFIG
        # the box regression branch is optional: only built when a box coder is configured
        if target_cfg.get('BOX_CODER', None) is not None:
            self.box_coder = getattr(box_coder_utils, target_cfg.BOX_CODER)(
                **target_cfg.BOX_CODER_CONFIG
            )
            self.box_layers = self.make_fc_layers(
                fc_cfg=self.model_cfg.REG_FC,
                input_channels=input_channels,
                output_channels=self.box_coder.code_size
            )
        else:
            self.box_layers = None
    def assign_targets(self, input_dict):
        """
        Args:
            input_dict:
                point_features: (N1 + N2 + N3 + ..., C)
                batch_size:
                point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
                gt_boxes (optional): (B, M, 8)
        Returns:
            point_cls_labels: (N1 + N2 + N3 + ...), long type, 0:background, -1:ignored
            point_part_labels: (N1 + N2 + N3 + ..., 3)
        """
        point_coords = input_dict['point_coords']
        gt_boxes = input_dict['gt_boxes']
        assert gt_boxes.shape.__len__() == 3, 'gt_boxes.shape=%s' % str(gt_boxes.shape)
        assert point_coords.shape.__len__() in [2], 'points.shape=%s' % str(point_coords.shape)
        batch_size = gt_boxes.shape[0]
        # enlarged boxes define the "ignore" band around each object
        extend_gt_boxes = box_utils.enlarge_box3d(
            gt_boxes.view(-1, gt_boxes.shape[-1]), extra_width=self.model_cfg.TARGET_CONFIG.GT_EXTRA_WIDTH
        ).view(batch_size, -1, gt_boxes.shape[-1])
        targets_dict = self.assign_stack_targets(
            points=point_coords, gt_boxes=gt_boxes, extend_gt_boxes=extend_gt_boxes,
            set_ignore_flag=True, use_ball_constraint=False,
            ret_part_labels=True, ret_box_labels=(self.box_layers is not None)
        )
        return targets_dict
    def get_loss(self, tb_dict=None):
        # classification + part-regression losses; box loss only when the branch exists
        tb_dict = {} if tb_dict is None else tb_dict
        point_loss_cls, tb_dict = self.get_cls_layer_loss(tb_dict)
        point_loss_part, tb_dict = self.get_part_layer_loss(tb_dict)
        point_loss = point_loss_cls + point_loss_part
        if self.box_layers is not None:
            point_loss_box, tb_dict = self.get_box_layer_loss(tb_dict)
            point_loss += point_loss_box
        return point_loss, tb_dict
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size:
                point_features: (N1 + N2 + N3 + ..., C) or (B, N, C)
                point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
                point_labels (optional): (N1 + N2 + N3 + ...)
                gt_boxes (optional): (B, M, 8)
        Returns:
            batch_dict:
                point_cls_scores: (N1 + N2 + N3 + ..., 1)
                point_part_offset: (N1 + N2 + N3 + ..., 3)
        """
        point_features = batch_dict['point_features']
        point_cls_preds = self.cls_layers(point_features)  # (total_points, num_class)
        point_part_preds = self.part_reg_layers(point_features)
        ret_dict = {
            'point_cls_preds': point_cls_preds,
            'point_part_preds': point_part_preds,
        }
        if self.box_layers is not None:
            point_box_preds = self.box_layers(point_features)
            ret_dict['point_box_preds'] = point_box_preds
        # sigmoid maps both scores and part offsets into [0, 1]
        point_cls_scores = torch.sigmoid(point_cls_preds)
        point_part_offset = torch.sigmoid(point_part_preds)
        batch_dict['point_cls_scores'], _ = point_cls_scores.max(dim=-1)
        batch_dict['point_part_offset'] = point_part_offset
        if self.training:
            targets_dict = self.assign_targets(batch_dict)
            ret_dict['point_cls_labels'] = targets_dict['point_cls_labels']
            ret_dict['point_part_labels'] = targets_dict.get('point_part_labels')
            ret_dict['point_box_labels'] = targets_dict.get('point_box_labels')
        # decode boxes for eval, or for a refinement stage during training
        if self.box_layers is not None and (not self.training or self.predict_boxes_when_training):
            point_cls_preds, point_box_preds = self.generate_predicted_boxes(
                points=batch_dict['point_coords'][:, 1:4],
                point_cls_preds=point_cls_preds, point_box_preds=ret_dict['point_box_preds']
            )
            batch_dict['batch_cls_preds'] = point_cls_preds
            batch_dict['batch_box_preds'] = point_box_preds
            batch_dict['batch_index'] = batch_dict['point_coords'][:, 0]
            batch_dict['cls_preds_normalized'] = False
        self.forward_ret_dict = ret_dict
        return batch_dict
| 5,568
| 42.507813
| 107
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/dense_heads/IASSD_head.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from ...utils import box_coder_utils, box_utils, loss_utils, common_utils
from .point_head_template import PointHeadTemplate
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
class IASSD_Head(PointHeadTemplate):
"""
A simple point-based detect head, which are used for IA-SSD.
"""
    def __init__(self, num_class, input_channels, model_cfg, predict_boxes_when_training=False, **kwargs):
        """
        Build the center-based classification / box / (optional) IoU branches of IA-SSD.

        Args:
            num_class: number of foreground classes
            input_channels: feature dim of the incoming point features
            model_cfg: head config (CLS_FC, REG_FC, optional IOU_FC, TARGET_CONFIG, INPUT_DIM)
            predict_boxes_when_training: decode boxes during training (for a second stage)
        """
        super().__init__(model_cfg=model_cfg, num_class=num_class)
        self.predict_boxes_when_training = predict_boxes_when_training
        target_cfg = self.model_cfg.TARGET_CONFIG
        self.box_coder = getattr(box_coder_utils, target_cfg.BOX_CODER)(
            **target_cfg.BOX_CODER_CONFIG
        )
        detector_dim = self.model_cfg.get('INPUT_DIM', input_channels) # for spec input_channel
        self.cls_center_layers = self.make_fc_layers(
            fc_cfg=self.model_cfg.CLS_FC,
            input_channels=detector_dim,
            output_channels=num_class
        )
        self.box_center_layers = self.make_fc_layers(
            fc_cfg=self.model_cfg.REG_FC,
            input_channels=detector_dim,
            output_channels=self.box_coder.code_size
        )
        # IoU prediction branch is optional (single confidence output)
        self.box_iou3d_layers = self.make_fc_layers(
            fc_cfg=self.model_cfg.IOU_FC,
            input_channels=detector_dim,
            output_channels=1
        ) if self.model_cfg.get('IOU_FC', None) is not None else None
        # self.init_weights(weight_init='xavier')
def init_weights(self, weight_init='xavier'):
if weight_init == 'kaiming':
init_func = nn.init.kaiming_normal_
elif weight_init == 'xavier':
init_func = nn.init.xavier_normal_
elif weight_init == 'normal':
init_func = nn.init.normal_
else:
raise NotImplementedError
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
if weight_init == 'normal':
init_func(m.weight, mean=0, std=0.001)
else:
init_func(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def build_losses(self, losses_cfg):
# classification loss
if losses_cfg.LOSS_CLS.startswith('WeightedBinaryCrossEntropy'):
self.add_module(
'cls_loss_func',
loss_utils.WeightedBinaryCrossEntropyLoss()
)
elif losses_cfg.LOSS_CLS.startswith('WeightedCrossEntropy'):
self.add_module(
'cls_loss_func',
loss_utils.WeightedClassificationLoss()
)
elif losses_cfg.LOSS_CLS.startswith('FocalLoss'):
self.add_module(
'cls_loss_func',
loss_utils.SigmoidFocalClassificationLoss(
**losses_cfg.get('LOSS_CLS_CONFIG', {})
)
)
else:
raise NotImplementedError
# regression loss
if losses_cfg.LOSS_REG == 'WeightedSmoothL1Loss':
self.add_module(
'reg_loss_func',
loss_utils.WeightedSmoothL1Loss(
code_weights=losses_cfg.LOSS_WEIGHTS.get('code_weights', None),
**losses_cfg.get('LOSS_REG_CONFIG', {})
)
)
elif losses_cfg.LOSS_REG == 'WeightedL1Loss':
self.add_module(
'reg_loss_func',
loss_utils.WeightedL1Loss(
code_weights=losses_cfg.LOSS_WEIGHTS.get('code_weights', None)
)
)
else:
raise NotImplementedError
# instance-aware loss
if losses_cfg.get('LOSS_INS', None) is not None:
if losses_cfg.LOSS_INS.startswith('WeightedBinaryCrossEntropy'):
self.add_module(
'ins_loss_func',
loss_utils.WeightedBinaryCrossEntropyLoss()
)
elif losses_cfg.LOSS_INS.startswith('WeightedCrossEntropy'):
self.add_module(
'ins_loss_func',
loss_utils.WeightedClassificationLoss()
)
elif losses_cfg.LOSS_INS.startswith('FocalLoss'):
self.add_module(
'ins_loss_func',
loss_utils.SigmoidFocalClassificationLoss(
**losses_cfg.get('LOSS_CLS_CONFIG', {})
)
)
else:
raise NotImplementedError
def assign_stack_targets_IASSD(self, points, gt_boxes, extend_gt_boxes=None, weighted_labels=False,
ret_box_labels=False, ret_offset_labels=True,
set_ignore_flag=True, use_ball_constraint=False, central_radius=2.0,
use_query_assign=False, central_radii=2.0, use_ex_gt_assign=False, fg_pc_ignore=False,
binary_label=False):
"""
Args:
points: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
gt_boxes: (B, M, 8)
extend_gt_boxes: [B, M, 8]
Returns:
point_cls_labels: (N1 + N2 + N3 + ...), long type, 0:background, -1:ignored
point_box_labels: (N1 + N2 + N3 + ..., code_size)
"""
assert len(points.shape) == 2 and points.shape[1] == 4, 'points.shape=%s' % str(points.shape)
assert len(gt_boxes.shape) == 3 and gt_boxes.shape[2] == 8, 'gt_boxes.shape=%s' % str(gt_boxes.shape)
assert extend_gt_boxes is None or len(extend_gt_boxes.shape) == 3 and extend_gt_boxes.shape[2] == 8, \
'extend_gt_boxes.shape=%s' % str(extend_gt_boxes.shape)
batch_size = gt_boxes.shape[0]
bs_idx = points[:, 0]
point_cls_labels = points.new_zeros(points.shape[0]).long()
point_box_labels = gt_boxes.new_zeros((points.shape[0], 8)) if ret_box_labels else None
box_idxs_labels = points.new_zeros(points.shape[0]).long()
gt_boxes_of_fg_points = []
gt_box_of_points = gt_boxes.new_zeros((points.shape[0], 8))
for k in range(batch_size):
bs_mask = (bs_idx == k)
points_single = points[bs_mask][:, 1:4]
point_cls_labels_single = point_cls_labels.new_zeros(bs_mask.sum())
box_idxs_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu(
points_single.unsqueeze(dim=0), gt_boxes[k:k + 1, :, 0:7].contiguous()
).long().squeeze(dim=0)
box_fg_flag = (box_idxs_of_pts >= 0)
if use_query_assign: ##
centers = gt_boxes[k:k + 1, :, 0:3]
query_idxs_of_pts = roiaware_pool3d_utils.points_in_ball_query_gpu(
points_single.unsqueeze(dim=0), centers.contiguous(), central_radii
).long().squeeze(dim=0)
query_fg_flag = (query_idxs_of_pts >= 0)
if fg_pc_ignore:
fg_flag = query_fg_flag ^ box_fg_flag
extend_box_idxs_of_pts[box_idxs_of_pts!=-1] = -1
box_idxs_of_pts = extend_box_idxs_of_pts
else:
fg_flag = query_fg_flag
box_idxs_of_pts = query_idxs_of_pts
elif use_ex_gt_assign: ##
extend_box_idxs_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu(
points_single.unsqueeze(dim=0), extend_gt_boxes[k:k+1, :, 0:7].contiguous()
).long().squeeze(dim=0)
extend_fg_flag = (extend_box_idxs_of_pts >= 0)
extend_box_idxs_of_pts[box_fg_flag] = box_idxs_of_pts[box_fg_flag] #instance points should keep unchanged
if fg_pc_ignore:
fg_flag = extend_fg_flag ^ box_fg_flag
extend_box_idxs_of_pts[box_idxs_of_pts!=-1] = -1
box_idxs_of_pts = extend_box_idxs_of_pts
else:
fg_flag = extend_fg_flag
box_idxs_of_pts = extend_box_idxs_of_pts
elif set_ignore_flag:
extend_box_idxs_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu(
points_single.unsqueeze(dim=0), extend_gt_boxes[k:k+1, :, 0:7].contiguous()
).long().squeeze(dim=0)
fg_flag = box_fg_flag
ignore_flag = fg_flag ^ (extend_box_idxs_of_pts >= 0)
point_cls_labels_single[ignore_flag] = -1
elif use_ball_constraint:
box_centers = gt_boxes[k][box_idxs_of_pts][:, 0:3].clone()
box_centers[:, 2] += gt_boxes[k][box_idxs_of_pts][:, 5] / 2
ball_flag = ((box_centers - points_single).norm(dim=1) < central_radius)
fg_flag = box_fg_flag & ball_flag
else:
raise NotImplementedError
gt_box_of_fg_points = gt_boxes[k][box_idxs_of_pts[fg_flag]]
point_cls_labels_single[fg_flag] = 1 if self.num_class == 1 or binary_label else gt_box_of_fg_points[:, -1].long()
point_cls_labels[bs_mask] = point_cls_labels_single
bg_flag = (point_cls_labels_single == 0) # except ignore_id
# box_bg_flag
fg_flag = fg_flag ^ (fg_flag & bg_flag)
gt_box_of_fg_points = gt_boxes[k][box_idxs_of_pts[fg_flag]]
gt_boxes_of_fg_points.append(gt_box_of_fg_points)
box_idxs_labels[bs_mask] = box_idxs_of_pts
gt_box_of_points[bs_mask] = gt_boxes[k][box_idxs_of_pts]
if ret_box_labels and gt_box_of_fg_points.shape[0] > 0:
point_box_labels_single = point_box_labels.new_zeros((bs_mask.sum(), 8))
fg_point_box_labels = self.box_coder.encode_torch(
gt_boxes=gt_box_of_fg_points[:, :-1], points=points_single[fg_flag],
gt_classes=gt_box_of_fg_points[:, -1].long()
)
point_box_labels_single[fg_flag] = fg_point_box_labels
point_box_labels[bs_mask] = point_box_labels_single
gt_boxes_of_fg_points = torch.cat(gt_boxes_of_fg_points, dim=0)
targets_dict = {
'point_cls_labels': point_cls_labels,
'point_box_labels': point_box_labels,
'gt_box_of_fg_points': gt_boxes_of_fg_points,
'box_idxs_labels': box_idxs_labels,
'gt_box_of_points': gt_box_of_points,
}
return targets_dict
    def assign_targets(self, input_dict):
        """
        Assign training targets to center points, optionally to intermediate
        SA-layer points (INS_AWARE_ASSIGN), and optionally vote targets for
        `centers_origin` as selected by TARGET_CONFIG.ASSIGN_METHOD.

        Args:
            input_dict:
                batch_size: int
                centers: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
                centers_origin: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
                encoder_coords: List of point_coords in SA
                gt_boxes (optional): (B, M, 8)
        Returns:
            target_dict:
                ...
        """
        target_cfg = self.model_cfg.TARGET_CONFIG
        gt_boxes = input_dict['gt_boxes']
        if gt_boxes.shape[-1] == 10: #nscence
            # keep the 7 box params plus the class label, dropping the two
            # extra middle columns of the 10-dim nuScenes box encoding
            gt_boxes = torch.cat((gt_boxes[..., 0:7], gt_boxes[..., -1:]), dim=-1)
        targets_dict_center = {}
        # assert gt_boxes.shape.__len__() == 3, 'gt_boxes.shape=%s' % str(gt_boxes.shape)
        # assert point_coords.shape.__len__() in [2], 'points.shape=%s' % str(point_coords.shape)
        batch_size = input_dict['batch_size']
        if target_cfg.get('EXTRA_WIDTH', False): # multi class extension
            extend_gt = box_utils.enlarge_box3d_for_class(
                gt_boxes.view(-1, gt_boxes.shape[-1]), extra_width=target_cfg.EXTRA_WIDTH
            ).view(batch_size, -1, gt_boxes.shape[-1])
        else:
            extend_gt = gt_boxes
        # enlarged boxes define the ignore ring used by assign_stack_targets_IASSD
        extend_gt_boxes = box_utils.enlarge_box3d(
            extend_gt.view(-1, extend_gt.shape[-1]), extra_width=target_cfg.GT_EXTRA_WIDTH
        ).view(batch_size, -1, gt_boxes.shape[-1])
        assert gt_boxes.shape.__len__() == 3, 'gt_boxes.shape=%s' % str(gt_boxes.shape)
        # targets for the final center points (with box regression labels)
        center_targets_dict = self.assign_stack_targets_IASSD(
            points=input_dict['centers'].detach(),
            gt_boxes=extend_gt, extend_gt_boxes=extend_gt_boxes,
            set_ignore_flag=True, use_ball_constraint=False,
            ret_box_labels=True
        )
        targets_dict_center['center_gt_box_of_fg_points'] = center_targets_dict['gt_box_of_fg_points']
        targets_dict_center['center_cls_labels'] = center_targets_dict['point_cls_labels']
        targets_dict_center['center_box_labels'] = center_targets_dict['point_box_labels'] #only center assign
        targets_dict_center['center_gt_box_of_points'] = center_targets_dict['gt_box_of_points']
        if target_cfg.get('INS_AWARE_ASSIGN', False):
            # semantic labels per SA level for the instance-aware loss
            sa_ins_labels, sa_gt_box_of_fg_points, sa_xyz_coords, sa_gt_box_of_points, sa_box_idxs_labels = [],[],[],[],[]
            sa_ins_preds = input_dict['sa_ins_preds']
            for i in range(1, len(sa_ins_preds)): # valid when i = 1,2 for IA-SSD
                # if sa_ins_preds[i].__len__() == 0:
                #     continue
                sa_xyz = input_dict['encoder_coords'][i]
                if i == 1:
                    # first SA level: tight boxes with an ignore ring
                    extend_gt_boxes = box_utils.enlarge_box3d(
                        gt_boxes.view(-1, gt_boxes.shape[-1]), extra_width=[0.5, 0.5, 0.5] #[0.2, 0.2, 0.2]
                    ).view(batch_size, -1, gt_boxes.shape[-1])
                    sa_targets_dict = self.assign_stack_targets_IASSD(
                        points=sa_xyz.view(-1,sa_xyz.shape[-1]).detach(), gt_boxes=gt_boxes, extend_gt_boxes=extend_gt_boxes,
                        set_ignore_flag=True, use_ex_gt_assign= False
                    )
                if i >= 2:
                # if False:
                    # deeper SA levels: assign against the enlarged boxes directly
                    extend_gt_boxes = box_utils.enlarge_box3d(
                        gt_boxes.view(-1, gt_boxes.shape[-1]), extra_width=[0.5, 0.5, 0.5]
                    ).view(batch_size, -1, gt_boxes.shape[-1])
                    sa_targets_dict = self.assign_stack_targets_IASSD(
                        points=sa_xyz.view(-1,sa_xyz.shape[-1]).detach(), gt_boxes=gt_boxes, extend_gt_boxes=extend_gt_boxes,
                        set_ignore_flag=False, use_ex_gt_assign= True
                    )
                # else:
                #     extend_gt_boxes = box_utils.enlarge_box3d(
                #         gt_boxes.view(-1, gt_boxes.shape[-1]), extra_width=[0.5, 0.5, 0.5]
                #     ).view(batch_size, -1, gt_boxes.shape[-1])
                #     sa_targets_dict = self.assign_stack_targets_IASSD(
                #         points=sa_xyz.view(-1,sa_xyz.shape[-1]).detach(), gt_boxes=gt_boxes, extend_gt_boxes=extend_gt_boxes,
                #         set_ignore_flag=False, use_ex_gt_assign= True
                #     )
                sa_xyz_coords.append(sa_xyz)
                sa_ins_labels.append(sa_targets_dict['point_cls_labels'])
                sa_gt_box_of_fg_points.append(sa_targets_dict['gt_box_of_fg_points'])
                sa_gt_box_of_points.append(sa_targets_dict['gt_box_of_points'])
                sa_box_idxs_labels.append(sa_targets_dict['box_idxs_labels'])
            targets_dict_center['sa_ins_labels'] = sa_ins_labels
            targets_dict_center['sa_gt_box_of_fg_points'] = sa_gt_box_of_fg_points
            targets_dict_center['sa_xyz_coords'] = sa_xyz_coords
            targets_dict_center['sa_gt_box_of_points'] = sa_gt_box_of_points
            targets_dict_center['sa_box_idxs_labels'] = sa_box_idxs_labels
        # optional vote-target assignment for centers_origin
        extra_method = target_cfg.get('ASSIGN_METHOD', None)
        if extra_method is not None and extra_method.NAME == 'extend_gt':
            extend_gt_boxes = box_utils.enlarge_box3d(
                gt_boxes.view(-1, gt_boxes.shape[-1]), extra_width=extra_method.EXTRA_WIDTH
            ).view(batch_size, -1, gt_boxes.shape[-1])
            if extra_method.get('ASSIGN_TYPE', 'centers') == 'centers_origin':
                points = input_dict['centers_origin'].detach()
            else:
                points = input_dict['centers'].detach() #default setting
            targets_dict = self.assign_stack_targets_IASSD(
                points=points, gt_boxes=gt_boxes, extend_gt_boxes=extend_gt_boxes,
                set_ignore_flag=True, use_ball_constraint=False,
                ret_box_labels=True,
                use_ex_gt_assign=True, fg_pc_ignore=extra_method.FG_PC_IGNORE,
            )
            targets_dict_center['center_origin_gt_box_of_fg_points'] = targets_dict['gt_box_of_fg_points']
            targets_dict_center['center_origin_cls_labels'] = targets_dict['point_cls_labels']
            targets_dict_center['center_origin_box_idxs_of_pts'] = targets_dict['box_idxs_labels']
            targets_dict_center['gt_box_of_center_origin'] = targets_dict['gt_box_of_points']
        elif extra_method is not None and extra_method.NAME == 'extend_gt_factor':
            # enlarge boxes by a multiplicative factor instead of a fixed width
            extend_gt_boxes = box_utils.enlarge_box3d_with_factor(
                gt_boxes.view(-1, gt_boxes.shape[-1]), factor=extra_method.EXTRA_FACTOR).view(batch_size, -1, gt_boxes.shape[-1])
            if extra_method.get('ASSIGN_TYPE', 'centers') == 'centers_origin':
                points = input_dict['centers_origin'].detach()
            else:
                points = input_dict['centers'].detach()
            targets_dict = self.assign_stack_targets_IASSD(
                points=points, gt_boxes=gt_boxes, extend_gt_boxes=extend_gt_boxes,
                set_ignore_flag=True, use_ball_constraint=False,
                ret_box_labels=False,
                use_ex_gt_assign=True, fg_pc_ignore=extra_method.FG_PC_IGNORE,
            )
            targets_dict_center['center_origin_gt_box_of_fg_points'] = targets_dict['gt_box_of_fg_points']
            targets_dict_center['center_origin_cls_labels'] = targets_dict['point_cls_labels']
        elif extra_method is not None and extra_method.NAME == 'extend_gt_for_class':
            # per-class enlargement widths
            extend_gt_boxes = box_utils.enlarge_box3d_for_class(
                gt_boxes.view(-1, gt_boxes.shape[-1]), extra_width=extra_method.EXTRA_WIDTH
            ).view(batch_size, -1, gt_boxes.shape[-1])
            if extra_method.get('ASSIGN_TYPE', 'centers') == 'centers_origin':
                points = input_dict['centers_origin'].detach()
            else:
                points = input_dict['centers'].detach()
            targets_dict = self.assign_stack_targets_IASSD(
                points=points, gt_boxes=gt_boxes, extend_gt_boxes=extend_gt_boxes,
                set_ignore_flag=True, use_ball_constraint=False,
                ret_box_labels=False,
                use_ex_gt_assign=True, fg_pc_ignore=extra_method.FG_PC_IGNORE,
            )
            targets_dict_center['center_origin_gt_box_of_fg_points'] = targets_dict['gt_box_of_fg_points']
            targets_dict_center['center_origin_cls_labels'] = targets_dict['point_cls_labels']
        elif extra_method is not None and extra_method.NAME == 'extend_query':
            # ball-query assignment around GT centers with radius RADII
            extend_gt_boxes = None
            if extra_method.get('ASSIGN_TYPE', 'centers') == 'centers_origin':
                points = input_dict['centers_origin'].detach()
            elif extra_method.get('ASSIGN_TYPE', 'centers') == 'centers':
                points = input_dict['centers'].detach()
            targets_dict = self.assign_stack_targets_IASSD(
                points=points, gt_boxes=gt_boxes, extend_gt_boxes=extend_gt_boxes,
                set_ignore_flag=True, use_ball_constraint=False,
                ret_box_labels=False,
                use_query_assign=True, central_radii=extra_method.RADII, fg_pc_ignore=extra_method.FG_PC_IGNORE,
            )
            targets_dict_center['center_origin_gt_box_of_fg_points'] = targets_dict['gt_box_of_fg_points']
            targets_dict_center['center_origin_cls_labels'] = targets_dict['point_cls_labels']
        return targets_dict_center
def get_loss(self, tb_dict=None):
tb_dict = {} if tb_dict is None else tb_dict
# vote loss
if self.model_cfg.TARGET_CONFIG.get('ASSIGN_METHOD') is not None and \
self.model_cfg.TARGET_CONFIG.ASSIGN_METHOD.get('ASSIGN_TYPE')== 'centers_origin':
if self.model_cfg.LOSS_CONFIG.get('LOSS_VOTE_TYPE', 'none') == 'ver1':
center_loss_reg, tb_dict_3 = self.get_contextual_vote_loss_ver1()
elif self.model_cfg.LOSS_CONFIG.get('LOSS_VOTE_TYPE', 'none') == 'ver2':
center_loss_reg, tb_dict_3 = self.get_contextual_vote_loss_ver2()
else: # 'none'
center_loss_reg, tb_dict_3 = self.get_contextual_vote_loss()
else:
center_loss_reg, tb_dict_3 = self.get_vote_loss_loss() # center assign
tb_dict.update(tb_dict_3)
# semantic loss in SA layers
if self.model_cfg.LOSS_CONFIG.get('LOSS_INS', None) is not None:
assert ('sa_ins_preds' in self.forward_ret_dict) and ('sa_ins_labels' in self.forward_ret_dict)
sa_loss_cls, tb_dict_0 = self.get_sa_ins_layer_loss()
tb_dict.update(tb_dict_0)
else:
sa_loss_cls = 0
# cls loss
center_loss_cls, tb_dict_4 = self.get_center_cls_layer_loss()
tb_dict.update(tb_dict_4)
# reg loss
if self.model_cfg.TARGET_CONFIG.BOX_CODER == 'PointResidualCoder':
center_loss_box, tb_dict_5 = self.get_box_layer_loss()
else:
center_loss_box, tb_dict_5 = self.get_center_box_binori_layer_loss()
tb_dict.update(tb_dict_5)
# corner loss
if self.model_cfg.LOSS_CONFIG.get('CORNER_LOSS_REGULARIZATION', False):
corner_loss, tb_dict_6 = self.get_corner_layer_loss()
tb_dict.update(tb_dict_6)
# iou loss
iou3d_loss = 0
if self.model_cfg.LOSS_CONFIG.get('IOU3D_REGULARIZATION', False):
iou3d_loss, tb_dict_7 = self.get_iou3d_layer_loss()
tb_dict.update(tb_dict_7)
point_loss = center_loss_reg + center_loss_cls + center_loss_box + corner_loss + sa_loss_cls + iou3d_loss
return point_loss, tb_dict
    def get_contextual_vote_loss(self, tb_dict=None):
        """Per-class smooth-L1 vote loss between shifted centers and GT box centers.

        Computes the loss separately for every foreground class present in
        'center_origin_cls_labels' and averages the per-class losses.
        """
        pos_mask = self.forward_ret_dict['center_origin_cls_labels'] > 0
        center_origin_loss_box = []
        for i in self.forward_ret_dict['center_origin_cls_labels'].unique():
            if i <= 0: continue
            simple_pos_mask = self.forward_ret_dict['center_origin_cls_labels'] == i
            # gt_box_of_fg_points is compacted to foreground points only, so the
            # class mask is restricted to the foreground subset before indexing
            center_box_labels = self.forward_ret_dict['center_origin_gt_box_of_fg_points'][:, 0:3][(pos_mask & simple_pos_mask)[pos_mask==1]]
            centers_origin = self.forward_ret_dict['centers_origin']
            ctr_offsets = self.forward_ret_dict['ctr_offsets']
            # predicted center = sampled point + learned offset; drop bs_idx column
            centers_pred = centers_origin + ctr_offsets
            centers_pred = centers_pred[simple_pos_mask][:, 1:4]
            simple_center_origin_loss_box = F.smooth_l1_loss(centers_pred, center_box_labels)
            center_origin_loss_box.append(simple_center_origin_loss_box.unsqueeze(-1))
        # NOTE(review): torch.cat raises on an empty list — assumes at least one
        # foreground class exists in the batch; confirm upstream sampling.
        center_origin_loss_box = torch.cat(center_origin_loss_box, dim=-1).mean()
        center_origin_loss_box = center_origin_loss_box * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS.get('vote_weight')
        if tb_dict is None:
            tb_dict = {}
        tb_dict.update({'center_origin_loss_reg': center_origin_loss_box.item()})
        return center_origin_loss_box, tb_dict
    def get_contextual_vote_loss_ver1(self, tb_dict=None):
        """Instance-normalized vote loss: the smooth-L1 error of each GT instance
        is summed over its points and divided by its point count, then averaged
        over instances, so large instances do not dominate."""
        box_idxs_of_pts = self.forward_ret_dict['center_origin_box_idxs_of_pts']
        center_box_labels = self.forward_ret_dict['gt_box_of_center_origin']
        centers_origin = self.forward_ret_dict['centers_origin']
        ctr_offsets = self.forward_ret_dict['ctr_offsets']
        # predicted center = sampled point + offset; column 0 keeps the batch idx
        centers_pred = centers_origin[:, 1:] + ctr_offsets[:, 1:]
        centers_pred = torch.cat([centers_origin[:, :1], centers_pred], dim=-1)
        batch_idx = self.forward_ret_dict['centers'][:,0]
        ins_num, ins_vote_loss = [],[]
        for cur_id in batch_idx.unique():
            batch_mask = (batch_idx == cur_id)
            for ins_idx in box_idxs_of_pts[batch_mask].unique():
                if ins_idx < 0:
                    continue  # -1 marks unassigned / background points
                ins_mask = (box_idxs_of_pts[batch_mask] == ins_idx)
                ins_num.append(ins_mask.sum().long().unsqueeze(-1))
                ins_vote_loss.append(F.smooth_l1_loss(centers_pred[batch_mask][ins_mask, 1:4], center_box_labels[batch_mask][ins_mask, 0:3], reduction='sum').unsqueeze(-1))
        # NOTE(review): torch.cat raises on empty lists — assumes at least one
        # foreground instance exists in the batch.
        ins_num = torch.cat(ins_num, dim=-1).float()
        ins_vote_loss = torch.cat(ins_vote_loss, dim=-1)
        ins_vote_loss = ins_vote_loss / ins_num.float().clamp(min=1.0)
        vote_loss = ins_vote_loss.mean()
        vote_loss_ver1 = vote_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['vote_weight']
        if tb_dict is None:
            tb_dict = {}
        tb_dict.update({'vote_loss_ver1': vote_loss_ver1.item()})
        return vote_loss_ver1, tb_dict
    def get_contextual_vote_loss_ver2(self, tb_dict=None):
        """Instance-normalized vote loss plus an intra-instance compactness term
        (weighted 0.5) that pulls each instance's votes toward their mean."""
        box_idxs_of_pts = self.forward_ret_dict['center_origin_box_idxs_of_pts']
        center_box_labels = self.forward_ret_dict['gt_box_of_center_origin']
        centers_origin = self.forward_ret_dict['centers_origin']
        ctr_offsets = self.forward_ret_dict['ctr_offsets']
        # predicted center = sampled point + offset; column 0 keeps the batch idx
        centers_pred = centers_origin[:, 1:] + ctr_offsets[:, 1:]
        centers_pred = torch.cat([centers_origin[:, :1], centers_pred], dim=-1)
        batch_idx = self.forward_ret_dict['centers'][:,0]
        ins_num, ins_vote_loss, ins_mean_vote_loss = [],[],[]
        for cur_id in batch_idx.unique():
            batch_mask = (batch_idx == cur_id)
            for ins_idx in box_idxs_of_pts[batch_mask].unique():
                if ins_idx < 0:
                    continue  # -1 marks unassigned / background points
                ins_mask = (box_idxs_of_pts[batch_mask] == ins_idx) # box_idxs_of_pts[batch_mask][ins_mask]
                ins_num.append(ins_mask.sum().unsqueeze(-1))
                ins_vote_loss.append(F.smooth_l1_loss(centers_pred[batch_mask][ins_mask, 1:4], center_box_labels[batch_mask][ins_mask, 0:3], reduction='sum').unsqueeze(-1))
                # compactness: distance of each vote to the instance's mean vote
                ins_mean_vote_loss.append(F.smooth_l1_loss(centers_pred[batch_mask][ins_mask, 1:4], centers_pred[batch_mask][ins_mask, 1:4].mean(dim=0).repeat(centers_pred[batch_mask][ins_mask, 1:4].shape[0],1), reduction='sum').unsqueeze(-1))
        # NOTE(review): torch.cat raises on empty lists — assumes at least one
        # foreground instance exists in the batch.
        ins_num = torch.cat(ins_num, dim=-1).float()
        ins_vote_loss = torch.cat(ins_vote_loss, dim=-1)
        ins_mean_vote_loss = torch.cat(ins_mean_vote_loss, dim=-1)
        vote_loss = ins_vote_loss + ins_mean_vote_loss * 0.5
        vote_loss = vote_loss / ins_num.clamp(min=1.0)
        vote_loss = vote_loss.mean()
        vote_loss_ver2 = vote_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['vote_weight']
        if tb_dict is None:
            tb_dict = {}
        tb_dict.update({'vote_loss_ver2': vote_loss_ver2.item()})
        return vote_loss_ver2, tb_dict
def get_vote_loss_loss(self, tb_dict=None):
pos_mask = self.forward_ret_dict['center_cls_labels'] > 0
center_box_labels = self.forward_ret_dict['center_gt_box_of_fg_points'][:, 0:3]
centers_origin = self.forward_ret_dict['centers_origin']
ctr_offsets = self.forward_ret_dict['ctr_offsets']
centers_pred = centers_origin + ctr_offsets
centers_pred = centers_pred[pos_mask][:, 1:4]
vote_loss = F.smooth_l1_loss(centers_pred, center_box_labels, reduction='mean')
if tb_dict is None:
tb_dict = {}
tb_dict.update({'vote_loss': vote_loss.item()})
return vote_loss, tb_dict
    def get_center_cls_layer_loss(self, tb_dict=None):
        """Classification loss over candidate center points, optionally using
        centerness-weighted soft one-hot targets."""
        point_cls_labels = self.forward_ret_dict['center_cls_labels'].view(-1)
        point_cls_preds = self.forward_ret_dict['center_cls_preds'].view(-1, self.num_class)
        positives = (point_cls_labels > 0)
        negative_cls_weights = (point_cls_labels == 0) * 1.0
        # ignored points (label -1) get zero weight; fg/bg get equal unit weight
        cls_weights = (1.0 *negative_cls_weights + 1.0 * positives).float()
        pos_normalizer = positives.sum(dim=0).float()
        cls_weights /= torch.clamp(pos_normalizer, min=1.0)
        # one-hot with an extra leading background column, sliced off below;
        # ignored labels (-1) are mapped to column 0 before slicing
        one_hot_targets = point_cls_preds.new_zeros(*list(point_cls_labels.shape), self.num_class + 1)
        one_hot_targets.scatter_(-1, (point_cls_labels * (point_cls_labels >= 0).long()).unsqueeze(dim=-1).long(), 1.0)
        one_hot_targets = one_hot_targets[..., 1:]
        if self.model_cfg.LOSS_CONFIG.CENTERNESS_REGULARIZATION:
            # soft targets: scale positive one-hot entries by point centerness
            centerness_mask = self.generate_center_ness_mask()
            one_hot_targets = one_hot_targets * centerness_mask.unsqueeze(-1).repeat(1, one_hot_targets.shape[1])
        point_loss_cls = self.cls_loss_func(point_cls_preds, one_hot_targets, weights=cls_weights).mean(dim=-1).sum()
        loss_weights_dict = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS
        point_loss_cls = point_loss_cls * loss_weights_dict['point_cls_weight']
        if tb_dict is None:
            tb_dict = {}
        tb_dict.update({
            'center_loss_cls': point_loss_cls.item(),
            'center_pos_num': pos_normalizer.item()
        })
        return point_loss_cls, tb_dict
def get_sa_ins_layer_loss(self, tb_dict=None):
sa_ins_labels = self.forward_ret_dict['sa_ins_labels']
sa_ins_preds = self.forward_ret_dict['sa_ins_preds']
sa_centerness_mask = self.generate_sa_center_ness_mask()
sa_ins_loss, ignore = 0, 0
for i in range(len(sa_ins_labels)): # valid when i =1, 2
if len(sa_ins_preds[i]) != 0:
try:
point_cls_preds = sa_ins_preds[i][...,1:].view(-1, self.num_class)
except:
point_cls_preds = sa_ins_preds[i][...,1:].view(-1, 1)
else:
ignore += 1
continue
point_cls_labels = sa_ins_labels[i].view(-1)
positives = (point_cls_labels > 0)
negative_cls_weights = (point_cls_labels == 0) * 1.0
cls_weights = (negative_cls_weights + 1.0 * positives).float()
pos_normalizer = positives.sum(dim=0).float()
cls_weights /= torch.clamp(pos_normalizer, min=1.0)
one_hot_targets = point_cls_preds.new_zeros(*list(point_cls_labels.shape), self.num_class + 1)
one_hot_targets.scatter_(-1, (point_cls_labels * (point_cls_labels >= 0).long()).unsqueeze(dim=-1).long(), 1.0)
one_hot_targets = one_hot_targets[..., 1:]
if ('ctr' in self.model_cfg.LOSS_CONFIG.SAMPLE_METHOD_LIST[i+1][0]):
centerness_mask = sa_centerness_mask[i]
one_hot_targets = one_hot_targets * centerness_mask.unsqueeze(-1).repeat(1, one_hot_targets.shape[1])
point_loss_ins = self.ins_loss_func(point_cls_preds, one_hot_targets, weights=cls_weights).mean(dim=-1).sum()
loss_weights_dict = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS
point_loss_ins = point_loss_ins * loss_weights_dict.get('ins_aware_weight',[1]*len(sa_ins_labels))[i]
sa_ins_loss += point_loss_ins
if tb_dict is None:
tb_dict = {}
tb_dict.update({
'sa%s_loss_ins' % str(i): point_loss_ins.item(),
'sa%s_pos_num' % str(i): pos_normalizer.item()
})
sa_ins_loss = sa_ins_loss / (len(sa_ins_labels) - ignore)
tb_dict.update({
'sa_loss_ins': sa_ins_loss.item(),
})
return sa_ins_loss, tb_dict
def generate_center_ness_mask(self):
pos_mask = self.forward_ret_dict['center_cls_labels'] > 0
gt_boxes = self.forward_ret_dict['center_gt_box_of_fg_points']
centers = self.forward_ret_dict['centers'][:,1:]
centers = centers[pos_mask].clone().detach()
offset_xyz = centers[:, 0:3] - gt_boxes[:, 0:3]
offset_xyz_canical = common_utils.rotate_points_along_z(offset_xyz.unsqueeze(dim=1), -gt_boxes[:, 6]).squeeze(dim=1)
template = gt_boxes.new_tensor(([1, 1, 1], [-1, -1, -1])) / 2
margin = gt_boxes[:, None, 3:6].repeat(1, 2, 1) * template[None, :, :]
distance = margin - offset_xyz_canical[:, None, :].repeat(1, 2, 1)
distance[:, 1, :] = -1 * distance[:, 1, :]
distance_min = torch.where(distance[:, 0, :] < distance[:, 1, :], distance[:, 0, :], distance[:, 1, :])
distance_max = torch.where(distance[:, 0, :] > distance[:, 1, :], distance[:, 0, :], distance[:, 1, :])
centerness = distance_min / distance_max
centerness = centerness[:, 0] * centerness[:, 1] * centerness[:, 2]
centerness = torch.clamp(centerness, min=1e-6)
centerness = torch.pow(centerness, 1/3)
centerness_mask = pos_mask.new_zeros(pos_mask.shape).float()
centerness_mask[pos_mask] = centerness
return centerness_mask
    def generate_sa_center_ness_mask(self):
        """Centerness masks for each SA level's points.

        Same computation as `generate_center_ness_mask`, applied per level:
        for each foreground point, centerness is the cube root of the product
        over x/y/z of (distance to nearer box face / distance to farther face).

        Returns:
            list of (N_i,) float tensors; background points get 0.
        """
        sa_pos_mask = self.forward_ret_dict['sa_ins_labels']
        sa_gt_boxes = self.forward_ret_dict['sa_gt_box_of_fg_points']
        sa_xyz_coords = self.forward_ret_dict['sa_xyz_coords']
        sa_centerness_mask = []
        for i in range(len(sa_pos_mask)):
            pos_mask = sa_pos_mask[i] > 0
            gt_boxes = sa_gt_boxes[i]
            xyz_coords = sa_xyz_coords[i].view(-1,sa_xyz_coords[i].shape[-1])[:,1:]
            xyz_coords = xyz_coords[pos_mask].clone().detach()
            # offsets rotated into the box canonical frame (undo the box heading)
            offset_xyz = xyz_coords[:, 0:3] - gt_boxes[:, 0:3]
            offset_xyz_canical = common_utils.rotate_points_along_z(offset_xyz.unsqueeze(dim=1), -gt_boxes[:, 6]).squeeze(dim=1)
            # signed distances to the two opposing faces along each axis
            template = gt_boxes.new_tensor(([1, 1, 1], [-1, -1, -1])) / 2
            margin = gt_boxes[:, None, 3:6].repeat(1, 2, 1) * template[None, :, :]
            distance = margin - offset_xyz_canical[:, None, :].repeat(1, 2, 1)
            distance[:, 1, :] = -1 * distance[:, 1, :]
            distance_min = torch.where(distance[:, 0, :] < distance[:, 1, :], distance[:, 0, :], distance[:, 1, :])
            distance_max = torch.where(distance[:, 0, :] > distance[:, 1, :], distance[:, 0, :], distance[:, 1, :])
            centerness = distance_min / distance_max
            centerness = centerness[:, 0] * centerness[:, 1] * centerness[:, 2]
            centerness = torch.clamp(centerness, min=1e-6)
            centerness = torch.pow(centerness, 1/3)
            centerness_mask = pos_mask.new_zeros(pos_mask.shape).float()
            centerness_mask[pos_mask] = centerness
            sa_centerness_mask.append(centerness_mask)
        return sa_centerness_mask
    def get_center_box_binori_layer_loss(self, tb_dict=None):
        """Box regression loss with bin-based orientation.

        The first 6 box dims use the configured regression loss; the heading is
        supervised as a bin classification plus an intra-bin residual.
        """
        pos_mask = self.forward_ret_dict['center_cls_labels'] > 0
        point_box_labels = self.forward_ret_dict['center_box_labels']
        point_box_preds = self.forward_ret_dict['center_box_preds']
        # per-point weights normalized by the number of foreground points
        reg_weights = pos_mask.float()
        pos_normalizer = pos_mask.sum().float()
        reg_weights /= torch.clamp(pos_normalizer, min=1.0)
        pred_box_xyzwhl = point_box_preds[:, :6]
        label_box_xyzwhl = point_box_labels[:, :6]
        point_loss_box_src = self.reg_loss_func(
            pred_box_xyzwhl[None, ...], label_box_xyzwhl[None, ...], weights=reg_weights[None, ...]
        )
        point_loss_xyzwhl = point_loss_box_src.sum()
        # heading layout: bin logits followed by per-bin residuals
        pred_ori_bin_id = point_box_preds[:, 6:6+self.box_coder.bin_size]
        pred_ori_bin_res = point_box_preds[:, 6+self.box_coder.bin_size:]
        label_ori_bin_id = point_box_labels[:, 6]
        label_ori_bin_res = point_box_labels[:, 7]
        criterion = torch.nn.CrossEntropyLoss(reduction='none')
        loss_ori_cls = criterion(pred_ori_bin_id.contiguous(), label_ori_bin_id.long().contiguous())
        loss_ori_cls = torch.sum(loss_ori_cls * reg_weights)
        # select the residual prediction of the labelled bin
        label_id_one_hot = F.one_hot(label_ori_bin_id.long().contiguous(), self.box_coder.bin_size)
        pred_ori_bin_res = torch.sum(pred_ori_bin_res * label_id_one_hot.float(), dim=-1)
        # NOTE(review): smooth_l1_loss reduces to a scalar here, so multiplying by
        # reg_weights (which sums to ~1) before torch.sum mostly rescales it —
        # presumably reduction='none' was intended; confirm before changing.
        loss_ori_reg = F.smooth_l1_loss(pred_ori_bin_res, label_ori_bin_res)
        loss_ori_reg = torch.sum(loss_ori_reg * reg_weights)
        loss_weights_dict = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS
        loss_ori_cls = loss_ori_cls * loss_weights_dict.get('dir_weight', 1.0)
        point_loss_box = point_loss_xyzwhl + loss_ori_reg + loss_ori_cls
        point_loss_box = point_loss_box * loss_weights_dict['point_box_weight']
        if tb_dict is None:
            tb_dict = {}
        tb_dict.update({'center_loss_box': point_loss_box.item()})
        tb_dict.update({'center_loss_box_xyzwhl': point_loss_xyzwhl.item()})
        tb_dict.update({'center_loss_box_ori_bin': loss_ori_cls.item()})
        tb_dict.update({'center_loss_box_ori_res': loss_ori_reg.item()})
        return point_loss_box, tb_dict
def get_center_box_layer_loss(self, tb_dict=None):
pos_mask = self.forward_ret_dict['center_cls_labels'] > 0
point_box_labels = self.forward_ret_dict['center_box_labels']
point_box_preds = self.forward_ret_dict['center_box_preds']
reg_weights = pos_mask.float()
pos_normalizer = pos_mask.sum().float()
reg_weights /= torch.clamp(pos_normalizer, min=1.0)
point_loss_box_src = self.reg_loss_func(
point_box_preds[None, ...], point_box_labels[None, ...], weights=reg_weights[None, ...]
)
point_loss = point_loss_box_src.sum()
point_loss_box = point_loss
loss_weights_dict = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS
point_loss_box = point_loss_box * loss_weights_dict['point_box_weight']
if tb_dict is None:
tb_dict = {}
tb_dict.update({'center_loss_box': point_loss_box.item()})
return point_loss_box, tb_dict
def get_corner_layer_loss(self, tb_dict=None):
pos_mask = self.forward_ret_dict['center_cls_labels'] > 0
gt_boxes = self.forward_ret_dict['center_gt_box_of_fg_points']
pred_boxes = self.forward_ret_dict['point_box_preds']
pred_boxes = pred_boxes[pos_mask]
loss_corner = loss_utils.get_corner_loss_lidar(
pred_boxes[:, 0:7],
gt_boxes[:, 0:7]
)
loss_corner = loss_corner.mean()
loss_corner = loss_corner * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['corner_weight']
if tb_dict is None:
tb_dict = {}
tb_dict.update({'corner_loss_reg': loss_corner.item()})
return loss_corner, tb_dict
    def get_iou3d_layer_loss(self, tb_dict=None):
        """Smooth-L1 loss supervising the predicted IoU score against the 3D IoU
        between (detached) predicted boxes and their matched GT boxes."""
        pos_mask = self.forward_ret_dict['center_cls_labels'] > 0
        gt_boxes = self.forward_ret_dict['center_gt_box_of_fg_points']
        # detach: IoU targets must not backprop through the box regression branch
        pred_boxes = self.forward_ret_dict['point_box_preds'].clone().detach()
        pred_boxes = pred_boxes[pos_mask]
        iou3d_targets, _ = loss_utils.generate_iou3d(pred_boxes[:, 0:7], gt_boxes[:, 0:7])
        iou3d_preds = self.forward_ret_dict['box_iou3d_preds'].squeeze(-1)
        iou3d_preds = iou3d_preds[pos_mask]
        loss_iou3d = F.smooth_l1_loss(iou3d_preds, iou3d_targets)
        loss_iou3d = loss_iou3d * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['iou3d_weight']
        if tb_dict is None:
            tb_dict = {}
        tb_dict.update({'iou3d_loss_reg': loss_iou3d.item()})
        return loss_iou3d, tb_dict
    def forward(self, batch_dict):
        """
        Predict per-center class scores, box residuals and (optionally) IoU
        scores; during training also assigns targets, and decodes boxes when
        they are consumed downstream or needed by box-level losses.

        Args:
            batch_dict:
                batch_size:
                centers_features: (N1 + N2 + N3 + ..., C) or (B, N, C)
                centers: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
                encoder_xyz: List of points_coords in SA
                gt_boxes (optional): (B, M, 8)
        Returns:
            batch_dict:
                batch_cls_preds: (N1 + N2 + N3 + ..., num_class)
                point_box_preds: (N1 + N2 + N3 + ..., 7)
        """
        center_features = batch_dict['centers_features']
        center_coords = batch_dict['centers']
        center_cls_preds = self.cls_center_layers(center_features)  # (total_centers, num_class)
        center_box_preds = self.box_center_layers(center_features)  # (total_centers, box_code_size)
        box_iou3d_preds = self.box_iou3d_layers(center_features) if self.box_iou3d_layers is not None else None
        ret_dict = {'center_cls_preds': center_cls_preds,
                    'center_box_preds': center_box_preds,
                    'ctr_offsets': batch_dict['ctr_offsets'],
                    'centers': batch_dict['centers'],
                    'centers_origin': batch_dict['centers_origin'],
                    'sa_ins_preds': batch_dict['sa_ins_preds'],
                    'box_iou3d_preds': box_iou3d_preds,
                    }
        if self.training:
            # training: compute assignment targets for the loss heads
            targets_dict = self.assign_targets(batch_dict)
            ret_dict.update(targets_dict)
        # decode boxes for inference, or when any box-level regularization loss
        # (corner / centerness / IoU) needs decoded predictions during training
        if not self.training or self.predict_boxes_when_training or \
                self.model_cfg.LOSS_CONFIG.CORNER_LOSS_REGULARIZATION or \
                self.model_cfg.LOSS_CONFIG.CENTERNESS_REGULARIZATION or \
                self.model_cfg.LOSS_CONFIG.IOU3D_REGULARIZATION:
            point_cls_preds, point_box_preds = self.generate_predicted_boxes(
                points=center_coords[:, 1:4],
                point_cls_preds=center_cls_preds, point_box_preds=center_box_preds
            )
            batch_dict['batch_cls_preds'] = point_cls_preds
            batch_dict['batch_box_preds'] = point_box_preds
            batch_dict['box_iou3d_preds'] = box_iou3d_preds
            batch_dict['batch_index'] = center_coords[:,0]
            batch_dict['cls_preds_normalized'] = False
            ret_dict['point_box_preds'] = point_box_preds
        self.forward_ret_dict = ret_dict
        return batch_dict
| 42,278
| 49.212589
| 259
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/dense_heads/target_assigner/anchor_generator.py
|
import torch
class AnchorGenerator(object):
    """Builds dense grids of 3D anchor boxes for each anchor configuration.

    Each entry of `anchor_generator_config` describes one anchor set (one per
    class): its sizes, yaw rotations and bottom heights; 'align_center'
    optionally centers anchors inside grid cells.
    """
    def __init__(self, anchor_range, anchor_generator_config):
        # anchor_range: [x_min, y_min, z_min, x_max, y_max, z_max]
        super().__init__()
        self.anchor_generator_cfg = anchor_generator_config
        self.anchor_range = anchor_range
        self.anchor_sizes = [config['anchor_sizes'] for config in anchor_generator_config]
        self.anchor_rotations = [config['anchor_rotations'] for config in anchor_generator_config]
        self.anchor_heights = [config['anchor_bottom_heights'] for config in anchor_generator_config]
        self.align_center = [config.get('align_center', False) for config in anchor_generator_config]
        assert len(self.anchor_sizes) == len(self.anchor_rotations) == len(self.anchor_heights)
        self.num_of_anchor_sets = len(self.anchor_sizes)
    def generate_anchors(self, grid_sizes):
        """Generate one dense anchor tensor per anchor set.

        Args:
            grid_sizes: list of [nx, ny] BEV grid sizes, one per anchor set.
        Returns:
            all_anchors: list of (z, y, x, num_sizes, num_rots, 7) tensors
                holding [x, y, z, dx, dy, dz, rot] per anchor.
            num_anchors_per_location: list of ints, anchors per BEV cell.
        """
        assert len(grid_sizes) == self.num_of_anchor_sets
        all_anchors = []
        num_anchors_per_location = []
        for grid_size, anchor_size, anchor_rotation, anchor_height, align_center in zip(
                grid_sizes, self.anchor_sizes, self.anchor_rotations, self.anchor_heights, self.align_center):
            num_anchors_per_location.append(len(anchor_rotation) * len(anchor_size) * len(anchor_height))
            if align_center:
                x_stride = (self.anchor_range[3] - self.anchor_range[0]) / grid_size[0]
                y_stride = (self.anchor_range[4] - self.anchor_range[1]) / grid_size[1]
                x_offset, y_offset = x_stride / 2, y_stride / 2
            else:
                # endpoint-inclusive spacing: anchors sit on grid-cell corners
                x_stride = (self.anchor_range[3] - self.anchor_range[0]) / (grid_size[0] - 1)
                y_stride = (self.anchor_range[4] - self.anchor_range[1]) / (grid_size[1] - 1)
                x_offset, y_offset = 0, 0
            # NOTE(review): anchors are created directly on GPU via .cuda() —
            # assumes CUDA is available.
            x_shifts = torch.arange(
                self.anchor_range[0] + x_offset, self.anchor_range[3] + 1e-5, step=x_stride, dtype=torch.float32,
            ).cuda()
            y_shifts = torch.arange(
                self.anchor_range[1] + y_offset, self.anchor_range[4] + 1e-5, step=y_stride, dtype=torch.float32,
            ).cuda()
            z_shifts = x_shifts.new_tensor(anchor_height)
            num_anchor_size, num_anchor_rotation = anchor_size.__len__(), anchor_rotation.__len__()
            anchor_rotation = x_shifts.new_tensor(anchor_rotation)
            anchor_size = x_shifts.new_tensor(anchor_size)
            x_shifts, y_shifts, z_shifts = torch.meshgrid([
                x_shifts, y_shifts, z_shifts
            ])  # [x_grid, y_grid, z_grid]
            anchors = torch.stack((x_shifts, y_shifts, z_shifts), dim=-1)  # [x, y, z, 3]
            # broadcast every (size, rotation) combination over each grid position
            anchors = anchors[:, :, :, None, :].repeat(1, 1, 1, anchor_size.shape[0], 1)
            anchor_size = anchor_size.view(1, 1, 1, -1, 3).repeat([*anchors.shape[0:3], 1, 1])
            anchors = torch.cat((anchors, anchor_size), dim=-1)
            anchors = anchors[:, :, :, :, None, :].repeat(1, 1, 1, 1, num_anchor_rotation, 1)
            anchor_rotation = anchor_rotation.view(1, 1, 1, 1, -1, 1).repeat([*anchors.shape[0:3], num_anchor_size, 1, 1])
            anchors = torch.cat((anchors, anchor_rotation), dim=-1)  # [x, y, z, num_size, num_rot, 7]
            anchors = anchors.permute(2, 1, 0, 3, 4, 5).contiguous()
            #anchors = anchors.view(-1, anchors.shape[-1])
            anchors[..., 2] += anchors[..., 5] / 2  # shift to box centers
            all_anchors.append(anchors)
        return all_anchors, num_anchors_per_location
if __name__ == '__main__':
    # Smoke test: build a 3-class anchor generator over a 150.4 m x 150.4 m
    # range and generate anchors for a single 188 x 188 BEV feature map.
    # (Removed leftover `pdb.set_trace()` debugging stop.)
    from easydict import EasyDict
    config = [
        EasyDict({
            'anchor_sizes': [[2.1, 4.7, 1.7], [0.86, 0.91, 1.73], [0.84, 1.78, 1.78]],
            'anchor_rotations': [0, 1.57],
            'anchor_heights': [0, 0.5]
        })
    ]

    A = AnchorGenerator(
        anchor_range=[-75.2, -75.2, -2, 75.2, 75.2, 4],
        anchor_generator_config=config
    )
    all_anchors, num_anchors_per_location = A.generate_anchors([[188, 188]])
    print([x.shape for x in all_anchors], num_anchors_per_location)
| 3,990
| 48.8875
| 122
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/dense_heads/target_assigner/axis_aligned_target_assigner.py
|
import numpy as np
import torch
from ....ops.iou3d_nms import iou3d_nms_utils
from ....utils import box_utils
class AxisAlignedTargetAssigner(object):
    """Assigns per-anchor classification labels and box-regression targets.

    Anchors are matched to ground-truth boxes of the same class by IoU:
    nearest-BEV IoU by default, or full 3D IoU when ``match_height`` is True.
    Negative GT class ids (produced by the pseudo-labeling pipeline) are
    supported by taking the absolute value when looking up the class name.
    """

    def __init__(self, model_cfg, class_names, box_coder, match_height=False):
        """
        Args:
            model_cfg: dense-head config providing ANCHOR_GENERATOR_CONFIG and
                TARGET_ASSIGNER_CONFIG.
            class_names: list of class names, indexed by (class_id - 1).
            box_coder: coder providing encode_torch() and code_size.
            match_height: if True match with 3D IoU, otherwise BEV IoU.
        """
        super().__init__()

        anchor_generator_cfg = model_cfg.ANCHOR_GENERATOR_CONFIG
        anchor_target_cfg = model_cfg.TARGET_ASSIGNER_CONFIG
        self.box_coder = box_coder
        self.match_height = match_height
        self.class_names = np.array(class_names)
        self.anchor_class_names = [config['class_name'] for config in anchor_generator_cfg]
        # POS_FRACTION < 0 disables fg/bg subsampling entirely.
        self.pos_fraction = anchor_target_cfg.POS_FRACTION if anchor_target_cfg.POS_FRACTION >= 0 else None
        self.sample_size = anchor_target_cfg.SAMPLE_SIZE
        self.norm_by_num_examples = anchor_target_cfg.NORM_BY_NUM_EXAMPLES
        # Per-class IoU thresholds: >= matched -> positive, < unmatched -> background,
        # in between -> ignored (-1).
        self.matched_thresholds = {}
        self.unmatched_thresholds = {}
        for config in anchor_generator_cfg:
            self.matched_thresholds[config['class_name']] = config['matched_threshold']
            self.unmatched_thresholds[config['class_name']] = config['unmatched_threshold']

        self.use_multihead = model_cfg.get('USE_MULTIHEAD', False)

    def assign_targets(self, all_anchors, gt_boxes_with_classes):
        """Assign targets for a whole batch, one anchor set per anchor class.

        Args:
            all_anchors: [(N, 7), ...] one anchor tensor per anchor class.
            gt_boxes_with_classes: (B, M, 8) zero-padded GT boxes; the last
                channel is the class id (may be negative for pseudo labels).

        Returns:
            dict with:
                box_cls_labels: (B, num_anchors) int labels; 0 = background,
                    -1 = ignore, >0 = class id.
                box_reg_targets: (B, num_anchors, code_size) encoded residuals.
                reg_weights: (B, num_anchors) regression loss weights.
        """
        bbox_targets = []
        cls_labels = []
        reg_weights = []

        batch_size = gt_boxes_with_classes.shape[0]
        gt_classes = gt_boxes_with_classes[:, :, -1]
        gt_boxes = gt_boxes_with_classes[:, :, :-1]
        for k in range(batch_size):
            cur_gt = gt_boxes[k]
            # Strip trailing all-zero padding rows appended by the dataloader.
            cnt = cur_gt.__len__() - 1
            while cnt > 0 and cur_gt[cnt].sum() == 0:
                cnt -= 1
            cur_gt = cur_gt[:cnt + 1]
            cur_gt_classes = gt_classes[k][:cnt + 1].int()

            target_list = []
            for anchor_class_name, anchors in zip(self.anchor_class_names, all_anchors):
                if cur_gt_classes.shape[0] > 1:
                    # abs(): pseudo-labeled boxes may carry negative class ids.
                    mask = torch.from_numpy(self.class_names[cur_gt_classes.cpu().abs() - 1] == anchor_class_name)
                else:
                    mask = torch.tensor([self.class_names[torch.abs(c) - 1] == anchor_class_name
                                         for c in cur_gt_classes], dtype=torch.bool)

                if self.use_multihead:
                    # (nx, ny, nz, num_size, num_rot, 7) -> flat (num_anchors, 7)
                    anchors = anchors.permute(3, 4, 0, 1, 2, 5).contiguous().view(-1, anchors.shape[-1])
                    selected_classes = cur_gt_classes[mask]
                else:
                    feature_map_size = anchors.shape[:3]
                    anchors = anchors.view(-1, anchors.shape[-1])
                    selected_classes = cur_gt_classes[mask]

                single_target = self.assign_targets_single(
                    anchors,
                    cur_gt[mask],
                    gt_classes=selected_classes,
                    matched_threshold=self.matched_thresholds[anchor_class_name],
                    unmatched_threshold=self.unmatched_thresholds[anchor_class_name]
                )
                target_list.append(single_target)

            if self.use_multihead:
                target_dict = {
                    'box_cls_labels': [t['box_cls_labels'].view(-1) for t in target_list],
                    'box_reg_targets': [t['box_reg_targets'].view(-1, self.box_coder.code_size) for t in target_list],
                    'reg_weights': [t['reg_weights'].view(-1) for t in target_list]
                }

                target_dict['box_reg_targets'] = torch.cat(target_dict['box_reg_targets'], dim=0)
                target_dict['box_cls_labels'] = torch.cat(target_dict['box_cls_labels'], dim=0).view(-1)
                target_dict['reg_weights'] = torch.cat(target_dict['reg_weights'], dim=0).view(-1)
            else:
                # Concatenate per-class targets along the per-location axis so
                # the flattened order matches the dense head's prediction layout.
                target_dict = {
                    'box_cls_labels': [t['box_cls_labels'].view(*feature_map_size, -1) for t in target_list],
                    'box_reg_targets': [t['box_reg_targets'].view(*feature_map_size, -1, self.box_coder.code_size)
                                        for t in target_list],
                    'reg_weights': [t['reg_weights'].view(*feature_map_size, -1) for t in target_list]
                }
                target_dict['box_reg_targets'] = torch.cat(
                    target_dict['box_reg_targets'], dim=-2
                ).view(-1, self.box_coder.code_size)
                target_dict['box_cls_labels'] = torch.cat(target_dict['box_cls_labels'], dim=-1).view(-1)
                target_dict['reg_weights'] = torch.cat(target_dict['reg_weights'], dim=-1).view(-1)

            bbox_targets.append(target_dict['box_reg_targets'])
            cls_labels.append(target_dict['box_cls_labels'])
            reg_weights.append(target_dict['reg_weights'])

        bbox_targets = torch.stack(bbox_targets, dim=0)
        cls_labels = torch.stack(cls_labels, dim=0)
        reg_weights = torch.stack(reg_weights, dim=0)
        all_targets_dict = {
            'box_cls_labels': cls_labels,
            'box_reg_targets': bbox_targets,
            'reg_weights': reg_weights
        }
        return all_targets_dict

    def assign_targets_single(self, anchors, gt_boxes, gt_classes, matched_threshold=0.6, unmatched_threshold=0.45):
        """Label one flat anchor set against the GT boxes of a single class.

        Args:
            anchors: (num_anchors, 7) [x, y, z, dx, dy, dz, heading].
            gt_boxes: (num_gt, 7).
            gt_classes: (num_gt,) int class ids.
            matched_threshold: IoU above which an anchor becomes positive.
            unmatched_threshold: IoU below which an anchor becomes background.

        Returns:
            dict with box_cls_labels (num_anchors,), box_reg_targets
            (num_anchors, code_size) and reg_weights (num_anchors,).
        """
        num_anchors = anchors.shape[0]
        num_gt = gt_boxes.shape[0]

        # -1 = ignore (between thresholds), 0 = background, >0 = class id.
        labels = torch.ones((num_anchors,), dtype=torch.int32, device=anchors.device) * -1
        gt_ids = torch.ones((num_anchors,), dtype=torch.int32, device=anchors.device) * -1

        if len(gt_boxes) > 0 and anchors.shape[0] > 0:
            anchor_by_gt_overlap = iou3d_nms_utils.boxes_iou3d_gpu(anchors[:, 0:7], gt_boxes[:, 0:7]) \
                if self.match_height else box_utils.boxes3d_nearest_bev_iou(anchors[:, 0:7], gt_boxes[:, 0:7])

            # For each anchor, its best-overlapping GT.
            anchor_to_gt_argmax = anchor_by_gt_overlap.argmax(dim=1)
            anchor_to_gt_max = anchor_by_gt_overlap[torch.arange(num_anchors, device=anchors.device), anchor_to_gt_argmax]

            # For each GT, its best-overlapping anchor; mark zero-overlap GTs
            # with -1 so they can never force-match any anchor below.
            gt_to_anchor_argmax = anchor_by_gt_overlap.argmax(dim=0)
            gt_to_anchor_max = anchor_by_gt_overlap[gt_to_anchor_argmax, torch.arange(num_gt, device=anchors.device)]
            empty_gt_mask = gt_to_anchor_max == 0
            gt_to_anchor_max[empty_gt_mask] = -1

            # Force-match: every GT claims the anchor(s) tying its best overlap,
            # even if that overlap is below matched_threshold.
            anchors_with_max_overlap = (anchor_by_gt_overlap == gt_to_anchor_max).nonzero()[:, 0]
            gt_inds_force = anchor_to_gt_argmax[anchors_with_max_overlap]
            labels[anchors_with_max_overlap] = gt_classes[gt_inds_force]
            gt_ids[anchors_with_max_overlap] = gt_inds_force.int()

            # Threshold-based positives and background candidates.
            pos_inds = anchor_to_gt_max >= matched_threshold
            gt_inds_over_thresh = anchor_to_gt_argmax[pos_inds]
            labels[pos_inds] = gt_classes[gt_inds_over_thresh]
            gt_ids[pos_inds] = gt_inds_over_thresh.int()
            bg_inds = (anchor_to_gt_max < unmatched_threshold).nonzero()[:, 0]
        else:
            bg_inds = torch.arange(num_anchors, device=anchors.device)

        fg_inds = (labels > 0).nonzero()[:, 0]

        if self.pos_fraction is not None:
            # Optional subsampling to keep a fixed foreground fraction.
            num_fg = int(self.pos_fraction * self.sample_size)
            if len(fg_inds) > num_fg:
                num_disabled = len(fg_inds) - num_fg
                # BUGFIX: disable randomly-chosen FOREGROUND anchors. The old
                # code did `labels[torch.randperm(len(fg_inds))[:num_disabled]]`,
                # which indexed `labels` with positions *into fg_inds* and thus
                # ignored arbitrary low-index anchors instead of the surplus
                # positives.
                disable_inds = fg_inds[torch.randperm(len(fg_inds), device=fg_inds.device)[:num_disabled]]
                labels[disable_inds] = -1
                fg_inds = (labels > 0).nonzero()[:, 0]

            num_bg = self.sample_size - (labels > 0).sum()
            if len(bg_inds) > num_bg:
                enable_inds = bg_inds[torch.randint(0, len(bg_inds), size=(num_bg,))]
                labels[enable_inds] = 0
        else:
            if len(gt_boxes) == 0 or anchors.shape[0] == 0:
                labels[:] = 0
            else:
                labels[bg_inds] = 0
                # Re-assert forced matches in case a forced anchor's overlap
                # was below unmatched_threshold and it was just zeroed out.
                labels[anchors_with_max_overlap] = gt_classes[gt_inds_force]

        bbox_targets = anchors.new_zeros((num_anchors, self.box_coder.code_size))
        if len(gt_boxes) > 0 and anchors.shape[0] > 0:
            fg_gt_boxes = gt_boxes[anchor_to_gt_argmax[fg_inds], :]
            fg_anchors = anchors[fg_inds, :]
            bbox_targets[fg_inds, :] = self.box_coder.encode_torch(fg_gt_boxes, fg_anchors)

        reg_weights = anchors.new_zeros((num_anchors,))

        if self.norm_by_num_examples:
            num_examples = (labels >= 0).sum()
            num_examples = num_examples if num_examples > 1.0 else 1.0
            reg_weights[labels > 0] = 1.0 / num_examples
        else:
            reg_weights[labels > 0] = 1.0

        ret_dict = {
            'box_cls_labels': labels,
            'box_reg_targets': bbox_targets,
            'reg_weights': reg_weights,
        }
        return ret_dict
| 10,465
| 47.453704
| 140
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/dense_heads/target_assigner/atss_target_assigner.py
|
import torch
from ....ops.iou3d_nms import iou3d_nms_utils
from ....utils import common_utils
class ATSSTargetAssigner(object):
    """
    Adaptive Training Sample Selection (ATSS) target assigner: for each GT box
    the top-k closest anchors are candidates, and the positive/negative IoU
    threshold is derived per-GT as mean + std of the candidates' IoUs.

    Reference: https://arxiv.org/abs/1912.02424
    """
    def __init__(self, topk, box_coder, match_height=False):
        # topk: number of closest anchors (per GT) used as ATSS candidates.
        # box_coder: provides encode_torch() and code_size.
        # match_height: use full 3D IoU when True, otherwise BEV IoU.
        self.topk = topk
        self.box_coder = box_coder
        self.match_height = match_height

    def assign_targets(self, anchors_list, gt_boxes_with_classes, use_multihead=False):
        """
        Assign targets for a batch, optionally over several anchor sets.

        Args:
            anchors_list: a single anchor tensor or a list of them, each
                reshapeable to (N, 7).
            gt_boxes_with_classes: (B, M, 8) zero-padded GT boxes whose last
                channel is the class id.
            use_multihead: if True, anchors are permuted to the multi-head
                layout before flattening.
        Returns:
            dict with box_cls_labels, box_reg_targets and reg_weights;
            concatenated along dim=1 when multiple anchor sets are given.
        """
        if not isinstance(anchors_list, list):
            anchors_list = [anchors_list]
            single_set_of_anchor = True
        else:
            single_set_of_anchor = len(anchors_list) == 1
        cls_labels_list, reg_targets_list, reg_weights_list = [], [], []
        for anchors in anchors_list:
            batch_size = gt_boxes_with_classes.shape[0]
            gt_classes = gt_boxes_with_classes[:, :, -1]
            gt_boxes = gt_boxes_with_classes[:, :, :-1]
            if use_multihead:
                anchors = anchors.permute(3, 4, 0, 1, 2, 5).contiguous().view(-1, anchors.shape[-1])
            else:
                anchors = anchors.view(-1, anchors.shape[-1])
            cls_labels, reg_targets, reg_weights = [], [], []
            for k in range(batch_size):
                cur_gt = gt_boxes[k]
                # Strip trailing all-zero padding rows appended by the dataloader.
                cnt = cur_gt.__len__() - 1
                while cnt > 0 and cur_gt[cnt].sum() == 0:
                    cnt -= 1
                cur_gt = cur_gt[:cnt + 1]
                cur_gt_classes = gt_classes[k][:cnt + 1]
                cur_cls_labels, cur_reg_targets, cur_reg_weights = self.assign_targets_single(
                    anchors, cur_gt, cur_gt_classes
                )
                cls_labels.append(cur_cls_labels)
                reg_targets.append(cur_reg_targets)
                reg_weights.append(cur_reg_weights)
            # Stack the per-sample results back into batch tensors.
            cls_labels = torch.stack(cls_labels, dim=0)
            reg_targets = torch.stack(reg_targets, dim=0)
            reg_weights = torch.stack(reg_weights, dim=0)
            cls_labels_list.append(cls_labels)
            reg_targets_list.append(reg_targets)
            reg_weights_list.append(reg_weights)
        if single_set_of_anchor:
            ret_dict = {
                'box_cls_labels': cls_labels_list[0],
                'box_reg_targets': reg_targets_list[0],
                'reg_weights': reg_weights_list[0]
            }
        else:
            ret_dict = {
                'box_cls_labels': torch.cat(cls_labels_list, dim=1),
                'box_reg_targets': torch.cat(reg_targets_list, dim=1),
                'reg_weights': torch.cat(reg_weights_list, dim=1)
            }
        return ret_dict

    def assign_targets_single(self, anchors, gt_boxes, gt_classes):
        """
        Run ATSS assignment for one sample.

        Args:
            anchors: (N, 7) [x, y, z, dx, dy, dz, heading]
            gt_boxes: (M, 7) [x, y, z, dx, dy, dz, heading]
            gt_classes: (M)
        Returns:
            cls_labels: (N,) class labels, 0 = background.
            reg_targets: (N, code_size) encoded regression targets.
            reg_weights: (N,) 1.0 for positive anchors, else 0.
        """
        num_anchor = anchors.shape[0]
        num_gt = gt_boxes.shape[0]

        # select topk anchors for each gt_boxes
        if self.match_height:
            ious = iou3d_nms_utils.boxes_iou3d_gpu(anchors[:, 0:7], gt_boxes[:, 0:7])  # (N, M)
        else:
            ious = iou3d_nms_utils.boxes_iou_bev(anchors[:, 0:7], gt_boxes[:, 0:7])

        # Candidates = the k anchors whose centers are closest to each GT center.
        distance = (anchors[:, None, 0:3] - gt_boxes[None, :, 0:3]).norm(dim=-1)  # (N, M)
        _, topk_idxs = distance.topk(self.topk, dim=0, largest=False)  # (K, M)
        candidate_ious = ious[topk_idxs, torch.arange(num_gt)]  # (K, M)

        # ATSS adaptive threshold: per-GT mean + std of candidate IoUs.
        iou_mean_per_gt = candidate_ious.mean(dim=0)
        iou_std_per_gt = candidate_ious.std(dim=0)
        iou_thresh_per_gt = iou_mean_per_gt + iou_std_per_gt + 1e-6
        is_pos = candidate_ious >= iou_thresh_per_gt[None, :]  # (K, M)

        # check whether anchor_center in gt_boxes, only check BEV x-y axes
        candidate_anchors = anchors[topk_idxs.view(-1)]  # (KxM, 7)
        gt_boxes_of_each_anchor = gt_boxes[:, :].repeat(self.topk, 1)  # (KxM, 7)
        xyz_local = candidate_anchors[:, 0:3] - gt_boxes_of_each_anchor[:, 0:3]
        # Rotate the offsets into each GT box's local frame before the extent test.
        xyz_local = common_utils.rotate_points_along_z(
            xyz_local[:, None, :], -gt_boxes_of_each_anchor[:, 6]
        ).squeeze(dim=1)
        xy_local = xyz_local[:, 0:2]
        lw = gt_boxes_of_each_anchor[:, 3:5][:, [1, 0]]  # bugfixed: w ==> y, l ==> x in local coords
        is_in_gt = ((xy_local <= lw / 2) & (xy_local >= -lw / 2)).all(dim=-1).view(-1, num_gt)  # (K, M)
        is_pos = is_pos & is_in_gt  # (K, M)

        # Offset indices per GT so they address a flattened (M*N,) layout below.
        for ng in range(num_gt):
            topk_idxs[:, ng] += ng * num_anchor

        # select the highest IoU if an anchor box is assigned with multiple gt_boxes
        INF = -0x7FFFFFFF
        ious_inf = torch.full_like(ious, INF).t().contiguous().view(-1)  # (MxN)
        index = topk_idxs.view(-1)[is_pos.view(-1)]
        ious_inf[index] = ious.t().contiguous().view(-1)[index]
        ious_inf = ious_inf.view(num_gt, -1).t()  # (N, M)
        anchors_to_gt_values, anchors_to_gt_indexs = ious_inf.max(dim=1)

        # match the gt_boxes to the anchors which have maximum iou with them
        max_iou_of_each_gt, argmax_iou_of_each_gt = ious.max(dim=0)
        anchors_to_gt_indexs[argmax_iou_of_each_gt] = torch.arange(0, num_gt, device=ious.device)
        anchors_to_gt_values[argmax_iou_of_each_gt] = max_iou_of_each_gt

        # Anchors whose best value is still the INF sentinel matched nothing.
        cls_labels = gt_classes[anchors_to_gt_indexs]
        cls_labels[anchors_to_gt_values == INF] = 0
        matched_gts = gt_boxes[anchors_to_gt_indexs]

        pos_mask = cls_labels > 0
        reg_targets = matched_gts.new_zeros((num_anchor, self.box_coder.code_size))
        reg_weights = matched_gts.new_zeros(num_anchor)
        if pos_mask.sum() > 0:
            reg_targets[pos_mask > 0] = self.box_coder.encode_torch(matched_gts[pos_mask > 0], anchors[pos_mask > 0])
            reg_weights[pos_mask] = 1.0

        return cls_labels, reg_targets, reg_weights
| 6,050
| 41.612676
| 117
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/dense_heads/target_assigner/__init__.py
| 0
| 0
| 0
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.