repo_id
stringlengths 19
138
| file_path
stringlengths 32
200
| content
stringlengths 1
12.9M
| __index_level_0__
int64 0
0
|
|---|---|---|---|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/dd3d/dd3d_v2_99_kitti.yml
|
_base_: '../_base_/kitti_mono.yml'
batch_size: 4 #total bs 16
iters: 100000
train_dataset:
transforms:
- type: LoadImage
reader: pillow
to_chw: False
to_rgb: False
- type: ResizeShortestEdge
short_edge_length: [288, 304, 320, 336, 352, 368, 384, 400, 416, 448, 480, 512, 544, 576]
max_size: 10000
sample_style: choice
- type: ToVisionBasedBox
- type: RandomHorizontalFlip
input_type: floating_point_coordinates
- type: RandomBrightness
intensity_min: 0.8
intensity_max: 1.2
- type: RandomSaturation
intensity_min: 0.8
intensity_max: 1.2
- type: RandomContrast
intensity_min: 0.8
intensity_max: 1.2
class_names: ["Car", "Pedestrian", "Cyclist", "Van", "Person_sitting"]
CLASS_MAP: {'Car': 0, 'Pedestrian': 1, 'Cyclist': 2, 'Van': 3, 'Person_sitting': 4}
class_balanced_sampling: True
val_dataset:
transforms:
- type: LoadImage
reader: pillow
to_chw: False
to_rgb: False
- type: ResizeShortestEdge
short_edge_length: [384]
max_size: 10000
sample_style: choice
- type: ToVisionBasedBox
class_names: ["Car", "Pedestrian", "Cyclist", "Van", "Person_sitting"]
CLASS_MAP: {'Car': 0, 'Pedestrian': 1, 'Cyclist': 2, 'Van': 3, 'Person_sitting': 4}
optimizer:
type: Momentum
momentum: 0.9
use_nesterov: False
weight_decay: 0.0001
lr_scheduler:
type: MultiStepDecay
milestones: [86000, 96000]
learning_rate: 0.0005
gamma: 0.1
model:
type: DD3D
backbone:
type: VoVNet99_eSE
norm_type: 'frozen_bn'
input_ch: 3
out_features: ['stage2', 'stage3', 'stage4', 'stage5']
feature_locations_offset: "none"
fpn:
type: FPN
in_strides: [4, 8, 16, 32]
in_channels: [256, 512, 768, 1024]
out_channel: 256
norm: 'FrozenBN'
top_block:
type: LastLevelP6
in_channels: 256
out_channels: 256
in_feature: 'p5'
fuse_type: "sum"
fcos2d_head:
type: FCOS2DHead
in_strides: [4, 8, 16, 32, 64]
in_channels: [256, 256, 256, 256, 256]
num_classes: 5
use_scale: True
box2d_scale_init_factor: 1.0
version: "v2"
num_cls_convs: 4
num_box_convs: 4
use_deformable: False
norm: "BN"
fcos2d_loss:
type: FCOS2DLoss
alpha: 0.25
gamma: 2.0
loc_loss_type: 'giou'
num_classes: 5
fcos2d_inference:
type: FCOS2DInference
thresh_with_ctr: True
pre_nms_thresh: 0.05
pre_nms_topk: 1000
post_nms_topk: 100
nms_thresh: 0.75
num_classes: 5
fcos3d_head:
type: FCOS3DHead
in_strides: [4, 8, 16, 32, 64]
in_channels: [256, 256, 256, 256, 256]
num_classes: 5
use_scale: True
depth_scale_init_factor: 0.3
proj_ctr_scale_init_factor: 1.0
use_per_level_predictors: False
mean_depth_per_level: [32.594, 15.178, 8.424, 5.004, 4.662]
std_depth_per_level: [14.682, 7.139, 4.345, 2.399, 2.587]
num_convs: 4
use_deformable: False
norm: 'FrozenBN'
class_agnostic_box3d: False
per_level_predictors: False
fcos3d_loss:
type: FCOS3DLoss
canon_box_sizes: [[1.61876949, 3.89154523, 1.52969237], # Car
[0.62806586, 0.82038497, 1.76784787], # Pedestrian
[0.56898187, 1.77149234, 1.7237099], # Cyclist
[1.9134491 , 5.15499603, 2.18998422], # Van
[2.61168401, 9.22692319, 3.36492722], # Truck
[0.5390196 , 1.08098042, 1.28392158], # Person_sitting
[2.36044838, 15.56991038, 3.5289238], # Tram
[1.24489164, 2.51495357, 1.61402478], # Misc
] # (width, length, height)
min_depth: 0.1
max_depth: 80.0
predict_allocentric_rot: True
scale_depth_by_focal_lengths: True
scale_depth_by_focal_lengths_factor: 500.0
predict_distance: False
smooth_l1_loss_beta: 0.05
max_loss_per_group: 20.0
box3d_loss_weight: 2.0
conf3d_loss_weight: 1.0
conf_3d_temperature: 1.0
num_classes: 5
class_agnostic: False
fcos3d_inference:
type: FCOS3DInference
canon_box_sizes: [[1.61876949, 3.89154523, 1.52969237], # Car
[0.62806586, 0.82038497, 1.76784787], # Pedestrian
[0.56898187, 1.77149234, 1.7237099], # Cyclist
[1.9134491 , 5.15499603, 2.18998422], # Van
[2.61168401, 9.22692319, 3.36492722], # Truck
[0.5390196 , 1.08098042, 1.28392158], # Person_sitting
[2.36044838, 15.56991038, 3.5289238], # Tram
[1.24489164, 2.51495357, 1.61402478], # Misc
] # (width, length, height)
min_depth: 0.1
max_depth: 80.0
predict_allocentric_rot: True
scale_depth_by_focal_lengths: True
scale_depth_by_focal_lengths_factor: 500.0
predict_distance: False
num_classes: 5
class_agnostic: False
prepare_targets:
type: DD3DTargetPreparer
input_strides: [4, 8, 16, 32, 64]
num_classes: 5
center_sample: True
radius: 1.5
dd3d_on: True
sizes_of_interest: [64, 128, 256, 512]
do_nms: True
nusc_sample_aggregate: False
num_classes: 5
pixel_mean: [103.53, 116.28, 123.675]
pixel_std: [57.375, 57.12, 58.395]
input_strides: [4, 8, 16, 32, 64]
size_divisibility: 64
| 0
|
apollo_public_repos/apollo-model-centerpoint
|
apollo_public_repos/apollo-model-centerpoint/test_tipc/common_func.sh
|
#!/bin/bash
# Print the key part (text before the first ':') of a "key:value" config line.
# Always invoked via $(...) in the TIPC scripts, so locals/IFS stay contained.
function func_parser_key(){
    local line=$1
    IFS=":"
    local fields=(${line})
    echo "${fields[0]}"
}
# Print the value part (second ':'-separated field) of a "key:value" line.
# Note: only the SECOND field is returned, so a value containing ':' is cut.
function func_parser_value(){
    local line=$1
    IFS=":"
    local fields=(${line})
    echo "${fields[1]}"
}
# Build a "key=value" CLI fragment, or print a single space (i.e. "omit this
# flag") when the key is "null" or the value is null/blank/empty.
# BUGFIX: the old test `[ ${key}x = "null"x ]` left ${key} unquoted, so a key
# containing whitespace word-split into multiple `[` arguments and aborted the
# test; all expansions are now quoted.
function func_set_params(){
    key=$1
    value=$2
    if [ "${key}" = "null" ];then
        echo " "
    elif [[ "${value}" = "null" ]] || [[ "${value}" = " " ]] || [ ${#value} -le 0 ];then
        echo " "
    else
        echo "${key}=${value}"
    fi
}
# Parse a "key:mode1=value1|mode2=value2|..." config line and print the value
# whose mode matches the caller's ${MODE}.
# NOTE(review): ${MODE} is expected to be exported/set by the sourcing script
# before this is called — confirm at each call site.
function func_parser_params(){
strs=$1
# split "key:params" on ':'
IFS=":"
array=(${strs})
key=${array[0]}
tmp=${array[1]}
# split the params field on '|' into mode=value entries
IFS="|"
res=""
for _params in ${tmp[*]}; do
# split one entry on '=' into (mode, value)
IFS="="
array=(${_params})
mode=${array[0]}
value=${array[1]}
if [[ ${mode} = ${MODE} ]]; then
IFS="|"
#echo (funcsetparams"{mode}" "${value}")
echo $value
break
fi
IFS="|"
done
# res is never assigned a non-empty value, so this emits a trailing empty line.
echo ${res}
}
# Log whether a command succeeded, both to stdout and (appended) to a log file.
#   $1 - exit code of the checked command
#   $2 - the command string, echoed verbatim into the message
#   $3 - path of the log file to append to
# BUGFIX: $last_status and ${run_log} were unquoted — an empty exit code made
# `[ -eq 0 ]` a syntax error, and a log path with spaces/globs word-split in
# the tee argument. Both are now quoted; the emitted messages are unchanged.
function status_check(){
    last_status=$1   # the exit code
    run_command=$2
    run_log=$3
    if [ "$last_status" -eq 0 ]; then
        echo -e "\033[33m Run successfully with command - ${run_command}! \033[0m" | tee -a "${run_log}"
    else
        echo -e "\033[33m Run failed with command - ${run_command}! \033[0m" | tee -a "${run_log}"
    fi
}
| 0
|
apollo_public_repos/apollo-model-centerpoint
|
apollo_public_repos/apollo-model-centerpoint/test_tipc/prepare.sh
|
#!/bin/bash
# Prepare the data required by the TIPC test pipeline for the selected model.
source test_tipc/common_func.sh
# $1: path to the model's train_infer_python.txt parameter file
FILENAME=$1
# MODE be one of ['lite_train_lite_infer']
MODE=$2
# read the whole param file and split it into one array entry per line
dataline=$(cat ${FILENAME})
# parser params
IFS=$'\n'
lines=(${dataline})
# The training params
model_name=$(func_parser_value "${lines[1]}")
trainer_list=$(func_parser_value "${lines[14]}")
# MODE be one of ['lite_train_lite_infer']
if [ ${MODE} = "lite_train_lite_infer" ];then
if [ ${model_name} == "PAConv" ]; then
# re-create the dataset dir and unpack the mini ModelNet40 tarball.
# NOTE(review): assumes test_tipc/data/mini_modelnet40.tar.gz is already
# present — this script does not download it; confirm.
rm -rf ./test_tipc/data/mini_modelnet40
mkdir -p ./test_tipc/data/mini_modelnet40
cd ./test_tipc/data/mini_modelnet40 && tar xf ../mini_modelnet40.tar.gz && cd ../../
else
echo "Not added into TIPC yet."
fi
fi
| 0
|
apollo_public_repos/apollo-model-centerpoint
|
apollo_public_repos/apollo-model-centerpoint/test_tipc/README.md
|
# 飞桨训推一体认证(TIPC)
## 1. 简介
飞桨除了基本的模型训练和预测,还提供了支持多端多平台的高性能推理部署工具。本文档提供了Paddle3D中所有模型的飞桨训推一体认证 (Training and Inference Pipeline Certification(TIPC)) 信息和测试工具,方便用户查阅每种模型的训练推理部署打通情况,并可以进行一键测试。
<div align="center">
<img src="docs/guide.png" width="1000">
</div>
## 2. 测试工具简介
### 目录介绍
```shell
test_tipc/
├── configs/ # 配置文件目录
├── paconv # paconv模型的测试配置文件目录
├── train_infer_python.txt # 测试Linux上python训练预测(基础训练预测)的配置文件
├── train_infer_python.md # 测试Linux上python训练预测(基础训练预测)的使用文档
├── results/ # 预测结果
├── prepare.sh # 完成test_*.sh运行所需要的数据和模型下载
├── test_train_inference_python.sh # 测试python训练预测的主程序
└── README.md # 使用文档
```
### 测试流程概述
使用本工具,可以测试不同功能的支持情况,以及预测结果是否对齐,测试流程概括如下:
1. 运行prepare.sh准备测试所需数据和模型;
2. 运行要测试的功能对应的测试脚本`test_train_inference_python.sh`,产出log,由log可以看到不同配置是否运行成功;
测试单项功能仅需两行命令,**如需测试不同模型/功能,替换配置文件即可**,命令格式如下:
```shell
# 功能:准备数据
# 格式:bash + 运行脚本 + 参数1: 配置文件选择 + 参数2: 模式选择
bash test_tipc/prepare.sh test_tipc/configs/[model_name]/[params_file_name] [Mode]
# 功能:运行测试
# 格式:bash + 运行脚本 + 参数1: 配置文件选择 + 参数2: 模式选择
bash test_tipc/test_train_inference_python.sh test_tipc/configs/[model_name]/[params_file_name] [Mode]
```
以下为示例:
```shell
# 功能:准备数据
# 格式:bash + 运行脚本 + 参数1: 配置文件选择 + 参数2: 模式选择
bash test_tipc/prepare.sh ./test_tipc/configs/paconv/train_infer_python.txt 'lite_train_lite_infer'
# 功能:运行测试
# 格式:bash + 运行脚本 + 参数1: 配置文件选择 + 参数2: 模式选择
bash test_tipc/test_train_inference_python.sh ./test_tipc/configs/paconv/train_infer_python.txt 'lite_train_lite_infer'
```
| 0
|
apollo_public_repos/apollo-model-centerpoint
|
apollo_public_repos/apollo-model-centerpoint/test_tipc/test_train_inference_python.sh
|
#!/bin/bash
source test_tipc/common_func.sh
FILENAME=$1
# MODE be one of ['lite_train_lite_infer' 'lite_train_whole_infer' 'whole_train_whole_infer', 'whole_infer', 'klquant_whole_infer']
MODE=$2
dataline=$(awk 'NR==1, NR==51{print}' $FILENAME)
# parser params
IFS=$'\n'
lines=(${dataline})
# The training params
model_name=$(func_parser_value "${lines[1]}")
python=$(func_parser_value "${lines[2]}")
gpu_list=$(func_parser_value "${lines[3]}")
train_use_gpu_key=$(func_parser_key "${lines[4]}")
train_use_gpu_value=$(func_parser_value "${lines[4]}")
autocast_list=$(func_parser_value "${lines[5]}")
autocast_key=$(func_parser_key "${lines[5]}")
epoch_key=$(func_parser_key "${lines[6]}")
epoch_num=$(func_parser_value "${lines[6]}")
save_model_key=$(func_parser_key "${lines[7]}")
train_batch_key=$(func_parser_key "${lines[8]}")
train_batch_value=$(func_parser_value "${lines[8]}")
pretrain_model_key=$(func_parser_key "${lines[9]}")
pretrain_model_value=$(func_parser_value "${lines[9]}")
train_model_name=$(func_parser_value "${lines[10]}")
train_infer_video_dir=$(func_parser_value "${lines[11]}")
train_param_key1=$(func_parser_key "${lines[12]}")
train_param_value1=$(func_parser_value "${lines[12]}")
trainer_list=$(func_parser_value "${lines[14]}")
trainer_norm=$(func_parser_key "${lines[15]}")
norm_trainer=$(func_parser_value "${lines[15]}")
pact_key=$(func_parser_key "${lines[16]}")
pact_trainer=$(func_parser_value "${lines[16]}")
fpgm_key=$(func_parser_key "${lines[17]}")
fpgm_trainer=$(func_parser_value "${lines[17]}")
distill_key=$(func_parser_key "${lines[18]}")
distill_trainer=$(func_parser_value "${lines[18]}")
trainer_key1=$(func_parser_key "${lines[19]}")
trainer_value1=$(func_parser_value "${lines[19]}")
trainer_key2=$(func_parser_key "${lines[20]}")
trainer_value2=$(func_parser_value "${lines[20]}")
eval_py=$(func_parser_value "${lines[23]}")
eval_key1=$(func_parser_key "${lines[24]}")
eval_value1=$(func_parser_value "${lines[24]}")
save_infer_key=$(func_parser_key "${lines[27]}")
save_infer_value=$(func_parser_value "${lines[27]}")
export_weight=$(func_parser_key "${lines[28]}")
norm_export=$(func_parser_value "${lines[29]}")
pact_export=$(func_parser_value "${lines[30]}")
fpgm_export=$(func_parser_value "${lines[31]}")
distill_export=$(func_parser_value "${lines[32]}")
export_key1=$(func_parser_key "${lines[33]}")
export_value1=$(func_parser_value "${lines[33]}")
export_key2=$(func_parser_key "${lines[34]}")
export_value2=$(func_parser_value "${lines[34]}")
inference_dir=$(func_parser_value "${lines[35]}")
# parser inference model
infer_model_dir_list=$(func_parser_value "${lines[36]}")
infer_export_list=$(func_parser_value "${lines[37]}")
infer_is_quant=$(func_parser_value "${lines[38]}")
# parser inference
inference_py=$(func_parser_value "${lines[39]}")
use_gpu_key=$(func_parser_key "${lines[40]}")
use_gpu_list=$(func_parser_value "${lines[40]}")
use_mkldnn_key=$(func_parser_key "${lines[41]}")
use_mkldnn_list=$(func_parser_value "${lines[41]}")
cpu_threads_key=$(func_parser_key "${lines[42]}")
cpu_threads_list=$(func_parser_value "${lines[42]}")
batch_size_key=$(func_parser_key "${lines[43]}")
batch_size_list=$(func_parser_value "${lines[43]}")
use_trt_key=$(func_parser_key "${lines[44]}")
use_trt_list=$(func_parser_value "${lines[44]}")
precision_key=$(func_parser_key "${lines[45]}")
precision_list=$(func_parser_value "${lines[45]}")
infer_model_key=$(func_parser_key "${lines[46]}")
infer_model_value=$(func_parser_value "${lines[46]}")
video_dir_key=$(func_parser_key "${lines[47]}")
infer_video_dir=$(func_parser_value "${lines[47]}")
save_log_key=$(func_parser_key "${lines[48]}")
benchmark_key=$(func_parser_key "${lines[49]}")
benchmark_value=$(func_parser_value "${lines[49]}")
infer_key1=$(func_parser_key "${lines[50]}")
infer_value1=$(func_parser_value "${lines[50]}")
# parser klquant_infer: re-read the param file for KL-quant inference settings.
if [ ${MODE} = "klquant_whole_infer" ]; then
    # BUGFIX: the pattern was `NR==1 NR==17{print}`. In awk that concatenates
    # two boolean expressions into a non-empty string, which is truthy for
    # EVERY line, so the entire file was printed instead of a section. A range
    # pattern was intended; indices lines[1]..lines[17] are consumed below, so
    # the first 18 lines are kept. TODO(review): confirm the intended line
    # range against the klquant config layout.
    dataline=$(awk 'NR==1, NR==18{print}' $FILENAME)
    lines=(${dataline})
    model_name=$(func_parser_value "${lines[1]}")
    python=$(func_parser_value "${lines[2]}")
    # parser inference model
    infer_model_dir_list=$(func_parser_value "${lines[3]}")
    infer_export_list=$(func_parser_value "${lines[4]}")
    infer_is_quant=$(func_parser_value "${lines[5]}")
    # parser inference
    inference_py=$(func_parser_value "${lines[6]}")
    use_gpu_key=$(func_parser_key "${lines[7]}")
    use_gpu_list=$(func_parser_value "${lines[7]}")
    use_mkldnn_key=$(func_parser_key "${lines[8]}")
    use_mkldnn_list=$(func_parser_value "${lines[8]}")
    cpu_threads_key=$(func_parser_key "${lines[9]}")
    cpu_threads_list=$(func_parser_value "${lines[9]}")
    batch_size_key=$(func_parser_key "${lines[10]}")
    batch_size_list=$(func_parser_value "${lines[10]}")
    use_trt_key=$(func_parser_key "${lines[11]}")
    use_trt_list=$(func_parser_value "${lines[11]}")
    precision_key=$(func_parser_key "${lines[12]}")
    precision_list=$(func_parser_value "${lines[12]}")
    infer_model_key=$(func_parser_key "${lines[13]}")
    video_dir_key=$(func_parser_key "${lines[14]}")
    infer_video_dir=$(func_parser_value "${lines[14]}")
    save_log_key=$(func_parser_key "${lines[15]}")
    benchmark_key=$(func_parser_key "${lines[16]}")
    benchmark_value=$(func_parser_value "${lines[16]}")
    infer_key1=$(func_parser_key "${lines[17]}")
    infer_value1=$(func_parser_value "${lines[17]}")
fi
LOG_PATH="./test_tipc/output/${model_name}"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results_python.log"
# Run the inference script across the CPU/GPU configuration matrix parsed
# earlier (use_gpu_list, use_mkldnn_list, cpu_threads_list, batch_size_list,
# use_trt_list, precision_list — all globals), logging each run and checking
# its exit status.
#   $1 python binary   $2 inference script   $3 model dir   $4 log dir
#   $5 input data dir  $6 "True" when the model is quantized
# NOTE(review): _model_dir ($3) and _video_dir ($5) are captured but never
# used — the commands below read the globals infer_model_value and
# infer_video_dir instead; confirm this is intentional.
function func_inference(){
IFS='|'
_python=$1
_script=$2
_model_dir=$3
_log_path=$4
_video_dir=$5
_flag_quant=$6
# inference
for use_gpu in ${use_gpu_list[*]}; do
# ---- CPU branch: sweep mkldnn x threads x batch_size x precision ----
if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then
for use_mkldnn in ${use_mkldnn_list[*]}; do
if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
continue
fi
for threads in ${cpu_threads_list[*]}; do
for batch_size in ${batch_size_list[*]}; do
for precision in ${precision_list[*]}; do
if [ ${use_mkldnn} = "False" ] && [ ${precision} = "fp16" ]; then
continue
fi # skip when enable fp16 but disable mkldnn
if [ ${_flag_quant} = "True" ] && [ ${precision} != "int8" ]; then
continue
fi # skip when quant model inference but precision is not int8
set_precision=$(func_set_params "${precision_key}" "${precision}")
_save_log_path="${_log_path}/python_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
set_infer_data=$(func_set_params "${video_dir_key}" "${infer_video_dir}")
set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
set_cpu_threads=$(func_set_params "${cpu_threads_key}" "${threads}")
set_model_dir=$(func_set_params "${infer_model_key}" "${infer_model_value}")
set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}")
command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_precision} ${set_infer_params1} > ${_save_log_path} 2>&1 "
eval $command
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
status_check $last_status "${command}" "${status_log}"
done
done
done
done
# ---- GPU branch: sweep tensorrt x precision x batch_size ----
elif [ ${use_gpu} = "True" ] || [ ${use_gpu} = "gpu" ]; then
for use_trt in ${use_trt_list[*]}; do
for precision in ${precision_list[*]}; do
if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then
continue
fi
if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then
continue
fi
if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [ ${_flag_quant} = "True" ]; then
continue
fi
for batch_size in ${batch_size_list[*]}; do
_save_log_path="${_log_path}/python_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
set_infer_data=$(func_set_params "${video_dir_key}" "${infer_video_dir}")
set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
set_tensorrt=$(func_set_params "${use_trt_key}" "${use_trt}")
set_precision=$(func_set_params "${precision_key}" "${precision}")
set_model_dir=$(func_set_params "${infer_model_key}" "${infer_model_value}")
set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}")
# NOTE(review): ${set_benchmark} is only assigned in the CPU branch above;
# here it is empty (or stale from a previous CPU iteration) — confirm whether
# the benchmark flag should also be set for GPU runs.
command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 "
eval $command
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
status_check $last_status "${command}" "${status_log}"
done
done
done
else
echo "Does not support hardware other than CPU and GPU Currently!"
fi
done
}
# whole_infer / klquant_whole_infer: export each listed model (unless the
# export command is "null") and then run inference on the exported model.
if [ ${MODE} = "whole_infer" ] || [ ${MODE} = "klquant_whole_infer" ]; then
    GPUID=$3
    if [ ${#GPUID} -le 0 ];then
        env=" "
    else
        env="export CUDA_VISIBLE_DEVICES=${GPUID}"
    fi
    set CUDA_VISIBLE_DEVICES
    eval $env
    export Count=0
    IFS="|"
    infer_run_exports=(${infer_export_list})
    infer_quant_flag=(${infer_is_quant})
    for infer_model in ${infer_model_dir_list[*]}; do
        # run export
        if [ ${infer_run_exports[Count]} != "null" ];then
            save_infer_dir=$(dirname $infer_model)
            set_export_weight=$(func_set_params "${export_weight}" "${infer_model}")
            set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_dir}")
            export_cmd="${python} ${infer_run_exports[Count]} ${set_export_weight} ${set_save_infer_key}"
            echo ${infer_run_exports[Count]}
            eval $export_cmd
            # BUGFIX: $? must be captured immediately after `eval $export_cmd`;
            # it was previously read after an intervening `echo`, so it always
            # reported the echo's exit status (0) and export failures were
            # logged as successes.
            status_export=$?
            echo $export_cmd
            status_check $status_export "${export_cmd}" "${status_log}"
        else
            save_infer_dir=${infer_model}
        fi
        #run inference
        is_quant=${infer_quant_flag[Count]}
        # BUGFIX: was compared against "klquant_infer", a mode name that never
        # occurs (the accepted mode is "klquant_whole_infer", as checked at the
        # top of this branch), so quantized inference was never forced on.
        if [ ${MODE} = "klquant_whole_infer" ]; then
            is_quant="True"
        fi
        func_inference "${python}" "${inference_py}" "${save_infer_dir}" "${LOG_PATH}" "${infer_video_dir}" ${is_quant}
        Count=$(($Count + 1))
    done
else
IFS="|"
export Count=0
USE_GPU_KEY=(${train_use_gpu_value})
for gpu in ${gpu_list[*]}; do
train_use_gpu=${USE_GPU_KEY[Count]}
Count=$(($Count + 1))
ips=""
if [ ${gpu} = "-1" ];then
env=""
elif [ ${#gpu} -le 1 ];then
env="export CUDA_VISIBLE_DEVICES=${gpu}"
eval ${env}
elif [ ${#gpu} -le 15 ];then
IFS=","
array=(${gpu})
env="export CUDA_VISIBLE_DEVICES=${array[0]}"
IFS="|"
else
IFS=";"
array=(${gpu})
ips=${array[0]}
gpu=${array[1]}
IFS="|"
env=" "
fi
for autocast in ${autocast_list[*]}; do
if [ ${autocast} = "amp" ]; then
set_amp_config="Global.use_amp=True Global.scale_loss=1024.0 Global.use_dynamic_loss_scaling=True"
else
set_amp_config=" "
fi
for trainer in ${trainer_list[*]}; do
flag_quant=False
if [ ${trainer} = ${pact_key} ]; then
run_train=${pact_trainer}
run_export=${pact_export}
flag_quant=True
elif [ ${trainer} = "${fpgm_key}" ]; then
run_train=${fpgm_trainer}
run_export=${fpgm_export}
elif [ ${trainer} = "${distill_key}" ]; then
run_train=${distill_trainer}
run_export=${distill_export}
elif [ ${trainer} = ${trainer_key1} ]; then
run_train=${trainer_value1}
run_export=${export_value1}
elif [[ ${trainer} = ${trainer_key2} ]]; then
run_train=${trainer_value2}
run_export=${export_value2}
else
run_train=${norm_trainer}
run_export=${norm_export}
fi
if [ ${run_train} = "null" ]; then
continue
fi
set_autocast=$(func_set_params "${autocast_key}" "${autocast}")
set_epoch=$(func_set_params "${epoch_key}" "${epoch_num}")
if [[ $MODE =~ "whole_train" ]]; then
set_epoch=""
fi
set_pretrain=$(func_set_params "${pretrain_model_key}" "${pretrain_model_value}")
set_batchsize=$(func_set_params "${train_batch_key}" "${train_batch_value}")
if [[ $MODE =~ "whole_train" ]]; then
train_param_key1=""
train_param_value1=""
fi
set_train_params1=$(func_set_params "${train_param_key1}" "${train_param_value1}")
set_use_gpu=$(func_set_params "${train_use_gpu_key}" "${train_use_gpu}")
if [ ${#ips} -le 26 ];then
save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}"
nodes=1
else
IFS=","
ips_array=(${ips})
IFS="|"
nodes=${#ips_array[@]}
save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_${nodes}"
fi
# load pretrain from norm training if current trainer is pact or fpgm trainer
if ([ ${trainer} = ${pact_key} ] || [ ${trainer} = ${fpgm_key} ]) && [ ${nodes} -le 1 ]; then
set_pretrain="${load_norm_train_model}"
fi
set_save_model=$(func_set_params "${save_model_key}" "${save_log}")
if [ ${#gpu} -le 2 ];then # train with cpu or single gpu
cmd="${python} ${run_train}"
elif [ ${#ips} -le 26 ];then # train with multi-gpu
cmd="${python} -B -m paddle.distributed.launch --gpus=\"${gpu}\" ${run_train} ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} ${set_train_params1} ${set_amp_config}"
else # train with multi-machine
cmd="${python} -B -m paddle.distributed.launch --ips=${ips} --gpus=\"${gpu}\" ${run_train} ${set_use_gpu} ${set_save_model} ${set_pretrain} ${set_epoch} ${set_autocast} ${set_batchsize} ${set_train_params1} ${set_amp_config}"
fi
# run train
eval "unset CUDA_VISIBLE_DEVICES"
eval $cmd
status_check $? "${cmd}" "${status_log}"
# set_eval_pretrain=$(func_set_params "${pretrain_model_key}" "${save_log}/${train_model_name}")
# save norm trained models to set pretrain for pact training and fpgm training
# BUGFIX: the condition was written as `[ [${trainer} = ${trainer_norm}] ]`,
# which makes `[` compare the literal strings "[${trainer}" and
# "${trainer_norm}]" — never equal — so the pretrain path was never recorded.
# The stray inner brackets are removed.
if [ ${trainer} = ${trainer_norm} ] && [ ${nodes} -le 1 ]; then
load_norm_train_model=${set_eval_pretrain}
fi
# run test
if [ ${eval_py} != "null" ]; then
set_eval_params1=$(func_set_params "${eval_key1}" "${eval_value1}")
eval_cmd="${python} ${eval_py} ${set_use_gpu} ${set_eval_params1}"
eval $eval_cmd
status_check $? "${eval_cmd}" "${status_log}"
fi
# run export model
if [ ${run_export} != "null" ]; then
# run export model
save_infer_path="${save_log}"
set_export_weight=$(func_set_params "${export_weight}" "${eval_value1}")
set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_value}")
export_cmd="${python} ${run_export}"
eval $export_cmd
status_check $? "${export_cmd}" "${status_log}"
#run inference
eval $env
save_infer_path="${save_log}"
if [ ${inference_dir} != "null" ] && [ ${inference_dir} != '##' ]; then
infer_model_dir="${save_infer_path}/${inference_dir}"
else
infer_model_dir=${save_infer_path}
fi
func_inference "${python}" "${inference_py}" "${infer_model_dir}" "${LOG_PATH}" "${train_infer_video_dir}" "${flag_quant}"
eval "unset CUDA_VISIBLE_DEVICES"
fi
done # done with: for trainer in ${trainer_list[*]}; do
done # done with: for autocast in ${autocast_list[*]}; do
done # done with: for gpu in ${gpu_list[*]}; do
fi # end if [ ${MODE} = "infer" ]; then
| 0
|
apollo_public_repos/apollo-model-centerpoint/test_tipc
|
apollo_public_repos/apollo-model-centerpoint/test_tipc/docs/test_train_infer_python.md
|
# Linux端基础训练预测功能测试
Linux端基础训练预测功能测试的主程序为`test_train_inference_python.sh`,可以测试基于Python的模型训练、评估、推理等基本功能,包括裁剪、量化、蒸馏。
## 1. 测试结论汇总
- 训练相关:
| 算法名称 | 模型名称 | 单机单卡 |
| :---- |:-------| :---- |
| CNN | PAConv | 正常训练 |
- 预测相关:基于训练是否使用量化,可以将训练产出的模型可以分为`正常模型`和`量化模型`,这两类模型对应的预测功能汇总如下,
| 模型类型 |device | batchsize | tensorrt | mkldnn | cpu多线程 |
| ---- | ---- | ---- |:---------:| :----: | :----: |
| 正常模型 | GPU | 1 | fp32/fp16 | - | - |
| 正常模型 | CPU | 1 | - | - | 支持 |
## 2. 测试流程
### 2.1 安装依赖
- 安装PaddlePaddle == 2.3.1
- 安装Paddle3D依赖
```
pip install -r requirements.txt
```
- 安装Paddle3D
```
python setup.py develop
```
- 安装autolog(规范化日志输出工具)
```
git clone https://gitee.com/Double_V/AutoLog
cd AutoLog/
pip3 install -r requirements.txt
python3 setup.py bdist_wheel
pip3 install ./dist/auto_log-1.2.0-py3-none-any.whl
cd ../
```
### 2.2 功能测试
先运行`prepare.sh`准备数据和模型,然后运行`test_train_inference_python.sh`进行测试,最终在```test_tipc/output```目录下生成`python_infer_*.log`格式的日志文件。
`test_train_inference_python.sh`包含5种运行模式,每种模式的运行数据不同,分别用于测试速度和精度,该项目目前只支持模式1:
- 模式1:lite_train_lite_infer,使用少量数据训练,用于快速验证训练到预测的走通流程,不验证精度和速度;
```shell
bash test_tipc/prepare.sh ./test_tipc/configs/paconv/train_infer_python.txt 'lite_train_lite_infer'
bash test_tipc/test_train_inference_python.sh ./test_tipc/configs/paconv/train_infer_python.txt 'lite_train_lite_infer'
```
运行相应指令后,在`test_tipc/output`文件夹下自动会保存运行日志。如'lite_train_lite_infer'模式下,会运行训练+inference的链条,因此,在`test_tipc/output`文件夹有以下文件:
```
test_tipc/output/
|- results_python.log # 运行指令状态的日志
|- python_infer_cpu_usemkldnn_False_threads_1_precision_fp32_batchsize_1.log # CPU上关闭Mkldnn线程数设置为1,测试batch_size=1条件下的精度fp32预测运行日志
|- python_infer_gpu_usetrt_False_precision_fp32_batchsize_1.log # GPU上关闭TensorRT,测试batch_size=1的精度fp32预测日志
......
```
其中`results_python.log`中包含了每条指令的运行状态,如果运行成功会输出:
```
Run successfully with command - python3.7 infer.py --use_gpu=False --enable_mkldnn=False --cpu_threads=1 --model_file=output/model.pdmodel --batch_size=1 --input_file=test_tipc/data/predict_example.pkl --enable_benchmark=True --precision=fp32 --params_file=output/model.pdiparams > ./test_tipc/output/PAConv/python_infer_cpu_usemkldnn_False_threads_1_precision_fp32_batchsize_1.log 2>&1 !
Run successfully with command - python3.7 infer.py --use_gpu=False --enable_mkldnn=False --cpu_threads=1 --model_file=output/model.pdmodel --batch_size=2 --input_file=test_tipc/data/predict_example.pkl --enable_benchmark=True --precision=fp32 --params_file=output/model.pdiparams > ./test_tipc/output/PAConv/python_infer_cpu_usemkldnn_False_threads_1_precision_fp32_batchsize_2.log 2>&1 !
......
```
如果运行失败,会输出:
```
Run failed with command - python3.7 infer.py --use_gpu=False --enable_mkldnn=False --cpu_threads=1 --model_file=output/model.pdmodel --batch_size=1 --input_file=test_tipc/data/predict_example.pkl --enable_benchmark=True --precision=fp32 --params_file=output/model.pdiparams > ./test_tipc/output/PAConv/python_infer_cpu_usemkldnn_False_threads_1_precision_fp32_batchsize_1.log 2>&1 !
Run failed with command - python3.7 infer.py --use_gpu=False --enable_mkldnn=False --cpu_threads=1 --model_file=output/model.pdmodel --batch_size=2 --input_file=test_tipc/data/predict_example.pkl --enable_benchmark=True --precision=fp32 --params_file=output/model.pdiparams > ./test_tipc/output/PAConv/python_infer_cpu_usemkldnn_False_threads_1_precision_fp32_batchsize_2.log 2>&1 !
......
```
可以很方便的根据`results_python.log`中的内容判定哪一个指令运行错误。
| 0
|
apollo_public_repos/apollo-model-centerpoint/test_tipc/configs
|
apollo_public_repos/apollo-model-centerpoint/test_tipc/configs/paconv/train_infer_python.txt
|
===========================train_params===========================
model_name:PAConv
python:python3.7
gpu_list:0
Global.use_gpu:True|True
Global.auto_cast:fp32
Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=500
Global.save_model_dir:./output/
Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=4
Global.pretrained_model:null
train_model_name:latest
train_infer_img_dir:./train_data/icdar2015/text_localization/ch4_test_images/
null:null
##
trainer:norm_train
norm_train:./tools/train.py --config ./test_tipc/configs/paconv/paconv_modelnet40.yml --num_workers 0 --save_interval 1 --do_eval --save_dir ./test_tipc/output
pact_train:null
fpgm_train:null
distill_train:null
null:null
null:null
##
===========================eval_params===========================
eval:null
null:null
##
===========================infer_params===========================
Global.save_inference_dir:./output/
Global.pretrained_model:
norm_export:./tools/export.py --config ./test_tipc/configs/paconv/paconv_modelnet40.yml --model ./test_tipc/output/epoch_1/model.pdparams --save_dir ./test_tipc/output/exported_model --input_shape 1 1024 3
quant_export:null
fpgm_export:null
distill_export:null
export1:null
export2:null
inference_dir:null
infer_model:test_tipc/output/exported_model/paconv.pdmodel
infer_export:null
infer_quant:False
inference:deploy/paconv/python/infer.py
--use_gpu:True|False
--enable_mkldnn:False
--cpu_threads:1|2
--batch_size:1
null:null
--precision:fp32|fp16
--model_file:test_tipc/output/exported_model/paconv.pdmodel
--input_file:test_tipc/data/mini_modelnet40/ply_data_test0.h5
null:null
--enable_benchmark:True
--params_file:test_tipc/output/exported_model/paconv.pdiparams
| 0
|
apollo_public_repos/apollo-model-centerpoint/test_tipc/configs
|
apollo_public_repos/apollo-model-centerpoint/test_tipc/configs/paconv/paconv_modelnet40.yml
|
batch_size: 1
epochs: 1
train_dataset:
type: ModelNet40
dataset_root: ./test_tipc/data/mini_modelnet40
num_points: 1024
transforms:
- type: GlobalScale
min_scale: 0.667
max_scale: 1.5
size: 3
- type: GlobalTranslate
translation_std: 0.2
distribution: uniform
- type: ShufflePoint
mode: train
val_dataset:
type: ModelNet40
dataset_root: ./test_tipc/data/mini_modelnet40
num_points: 1024
mode: test
optimizer:
type: Momentum
momentum: 0.9
weight_decay: 0.0001
lr_scheduler:
type: CosineAnnealingDecay
learning_rate: 0.1
T_max: 107450
eta_min: 0.001
model:
type: PAConv
k_neighbors: 20
calc_scores: softmax
num_matrices: [8, 8, 8, 8]
dropout: 0.5
| 0
|
apollo_public_repos/apollo-model-centerpoint
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/env.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import importlib
import os
import platform
import subprocess
import sys
from typing import List, Optional
import cv2
import paddle
import paddleseg
import paddle3d
def init_distributed():
    """Initialize paddle's collective (multi-device) training via fleet.

    No-op when the parallel context has already been initialized.
    """
    if not is_distributed_inited():
        paddle.distributed.fleet.init(is_collective=True)
def is_distributed_inited():
    """Return True if paddle's parallel (distributed) context is initialized.

    NOTE(review): relies on the private helper
    ``paddle.distributed.parallel.parallel_helper`` — confirm it still exists
    when upgrading paddle.
    """
    return paddle.distributed.parallel.parallel_helper._is_parallel_ctx_initialized(
    )
def get_package_version(package: str) -> str:
    """Return the ``__version__`` string of an importable package.

    Args:
        package: Importable module name, e.g. ``'numpy'`` or ``'PIL'``.

    Returns:
        The package's ``__version__``, or ``'Not Installed'`` when the module
        cannot be imported or does not expose ``__version__``.
    """
    try:
        module = importlib.import_module(package)
        version = module.__version__
    except (ImportError, AttributeError):
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        version = 'Not Installed'
    return version
def get_envrionment_flags(FLAG: str) -> str:
    """Return the value of environment variable ``FLAG``, or ``'Not set.'``.

    (Name kept as-is: ``envrionment`` is a typo, but the function is called
    throughout this module and must stay backward compatible.)
    """
    if FLAG in os.environ:
        return os.environ[FLAG]
    return 'Not set.'
def get_gcc_info() -> str:
    """Return the first line of ``gcc --version`` output.

    Returns:
        The gcc banner line, or ``'Not Found.'`` when gcc is missing or
        cannot be executed.
    """
    try:
        gcc = subprocess.check_output(['gcc', '--version']).decode()
        gcc = gcc.strip().split('\n')[0]
    except (OSError, subprocess.SubprocessError):
        # Narrowed from a bare `except:` (which also caught
        # KeyboardInterrupt); OSError covers a missing gcc binary,
        # SubprocessError a non-zero exit.
        gcc = 'Not Found.'
    return gcc
def get_nvcc_info(cuda_home):
    """Return the release line of ``nvcc -V`` under a CUDA install root.

    Args:
        cuda_home: Path of the CUDA installation, or None.

    Returns:
        The last line of ``nvcc -V`` output, or ``'Not Available'`` when
        cuda_home is missing/invalid or nvcc cannot be executed.
    """
    if cuda_home is not None and os.path.isdir(cuda_home):
        try:
            nvcc = os.path.join(cuda_home, 'bin/nvcc')
            # Argument list instead of `shell=True`: avoids spawning a shell
            # and shell interpretation of characters in cuda_home.
            nvcc = subprocess.check_output([nvcc, '-V']).decode()
            nvcc = nvcc.strip().split('\n')[-1]
        except (OSError, subprocess.SubprocessError):
            # OSError added: without shell=True a missing nvcc binary raises
            # FileNotFoundError rather than a non-zero shell exit status.
            nvcc = "Not Available"
    else:
        nvcc = "Not Available"
    return nvcc
def get_cuda_device_info(devices: Optional[List[int]] = None) -> List[str]:
    """Return ``nvidia-smi -L`` description lines for the given device ids.

    Args:
        devices: GPU indices to report. Defaults to the ids parsed from the
            ``CUDA_VISIBLE_DEVICES`` environment variable (empty list when
            unset or unparsable).

    Returns:
        One descriptive string per requested device, or ``['Not Found.']``
        when ``nvidia-smi`` is unavailable or its output cannot be indexed.
    """
    if devices is None:
        try:
            devices = get_envrionment_flags('CUDA_VISIBLE_DEVICES')
            devices = [int(device) for device in devices.split(',')]
        except ValueError:
            # CUDA_VISIBLE_DEVICES unset ('Not set.') or not a comma-separated
            # list of ints. Narrowed from a bare `except:`.
            devices = []
    try:
        cmds = ['nvidia-smi', '-L']
        gpu_info = subprocess.check_output(cmds)
        gpu_info = gpu_info.decode().strip().split('\n')
        gpu_info = [' '.join(gpu_info[i].split(' ')[:4]) for i in devices]
    except Exception:
        # Narrowed from a bare `except:`; kept broad on purpose — failures
        # here include a missing binary (OSError), a non-zero exit
        # (CalledProcessError) and out-of-range device ids (IndexError).
        gpu_info = ['Not Found.']
    return gpu_info
def get_env_info():
    """Assemble a multi-line report describing the runtime environment:
    platform/compiler/interpreter, science toolkit versions, the Paddle
    stack with relevant flags, CUDA details and visible GPUs."""
    report = ['------------Environment Information-------------']
    # Platform, compiler and interpreter.
    report.append('platform:')
    report.append(' {}'.format(platform.platform()))
    report.append(' {}'.format(get_gcc_info()))
    report.append(' Python - {}'.format(sys.version.replace('\n', ' ')))
    # Science toolkit versions.
    toolkits = {
        'cv2': get_package_version('cv2'),
        'numpy': get_package_version('numpy'),
        'numba': get_package_version('numba'),
        'pandas': get_package_version('pandas'),
        'pillow': get_package_version('PIL'),
        'skimage': get_package_version('skimage')
    }
    report.append('\nScience Toolkits:')
    report.extend(
        ' {} - {}'.format(name, ver) for name, ver in toolkits.items())
    # Paddle stack; GPU builds are labelled explicitly.
    paddle_name = 'paddle(gpu)' if paddle.is_compiled_with_cuda() else 'paddle'
    paddle_versions = {
        paddle_name: paddle.__version__,
        'paddle3d': paddle3d.__version__,
        'paddleseg': paddleseg.__version__
    }
    report.append('\nPaddlePaddle:')
    report.extend(
        ' {} - {}'.format(name, ver) for name, ver in paddle_versions.items())
    for flag in ['FLAGS_cudnn_deterministic', 'FLAGS_cudnn_exhaustive_search']:
        report.append(' {} - {}'.format(flag, get_envrionment_flags(flag)))
    # CUDA toolchain.
    report.append('\nCUDA:')
    report.append(' cudnn - {}'.format(paddle.get_cudnn_version()))
    report.append(' nvcc - {}'.format(get_nvcc_info(get_cuda_home())))
    # TODO: Add nccl version
    # Visible GPU devices.
    report.append('\nGPUs:')
    for gpu in get_cuda_device_info():
        report.append(' {}'.format(gpu))
    report.append('------------------------------------------------')
    return '\n'.join(report)
def get_cuda_home():
    '''Finds the CUDA install path. It refers to the implementation of
    pytorch <https://github.com/pytorch/pytorch/blob/master/torch/utils/cpp_extension.py>.
    Returns None when no installation can be located.
    '''
    # Guess #1: explicit environment variables.
    home = os.environ.get('CUDA_HOME') or os.environ.get('CUDA_PATH')
    if home is None:
        try:
            # Guess #2: derive the root from the nvcc binary on PATH
            # (two directory levels above <root>/bin/nvcc).
            locator = 'where' if IS_WINDOWS else 'which'
            nvcc_out = subprocess.check_output([locator, 'nvcc'],
                                               stderr=subprocess.STDOUT)
            home = os.path.dirname(
                os.path.dirname(nvcc_out.decode().rstrip('\r\n')))
        except Exception:
            # Guess #3: conventional install locations.
            if IS_WINDOWS:
                candidates = glob.glob(
                    'C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v*.*')
                home = candidates[0] if candidates else ''
            else:
                home = '/usr/local/cuda'
            if not os.path.exists(home):
                home = None
    return home
def get_user_home() -> str:
    """Absolute path of the current user's home directory."""
    home = os.path.expanduser('~')
    return home
def get_paddle3d_home() -> str:
    """Root directory paddle3d uses for cached assets: ``~/.paddle3d``."""
    return os.path.join(os.path.expanduser('~'), '.paddle3d')
def get_sub_home(directory: str) -> str:
    """Ensure a subdirectory of the paddle3d home exists and return its
    path. The directory (and any parents) is created on first use."""
    subdir = os.path.join(get_paddle3d_home(), directory)
    os.makedirs(subdir, exist_ok=True)
    return subdir
# Frequently used filesystem locations; the sub-home directories are
# created on import when missing.
USER_HOME = get_user_home()
PADDLE3D_HOME = get_paddle3d_home()
PRETRAINED_HOME = get_sub_home('pretrained')
TMP_HOME = get_sub_home('tmp')
# Platform flag consumed by get_cuda_home() above.
IS_WINDOWS = sys.platform == 'win32'
# Distributed-training topology of the current process.
nranks = paddle.distributed.ParallelEnv().nranks
local_rank = paddle.distributed.ParallelEnv().local_rank
# suppress Numba warnings
os.environ["NUMBA_DISABLE_PERFORMANCE_WARNINGS"] = "1"
os.environ["NUMBA_CUDA_LOW_OCCUPANCY_WARNINGS"] = "0"
| 0
|
apollo_public_repos/apollo-model-centerpoint
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Package version of paddle3d itself.
__version__ = "1.0.0"
import paddle
from packaging.version import Version
# Enforce the minimum supported PaddlePaddle release. The develop branch
# of Paddle reports "0.0.0" and is exempted from the check.
paddle_version = Version(paddle.__version__)
minimum_paddle_version = Version("2.4.0")
develop_version = Version("0.0.0")
if paddle_version < minimum_paddle_version and paddle_version != develop_version:
    raise RuntimeError("Please upgrade PaddlePaddle version to {}".format(
        minimum_paddle_version))
# Importing these sub-packages registers datasets/models/transforms with
# the component manager as an import side effect.
from . import datasets, models, transforms
| 0
|
apollo_public_repos/apollo-model-centerpoint
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/sample.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Generic, List, Optional
class _EasyDict(dict):
def __getattr__(self, key: str):
if key in self:
return self[key]
return super().__getattr__(self, key)
def __setattr__(self, key: str, value: Generic):
self[key] = value
class SampleMeta(_EasyDict):
    """
    Metadata attached to a Sample: camera intrinsics plus bookkeeping about
    how the image data was loaded (format, reader, channel layout, id).
    """
    # yapf: disable
    __slots__ = [
        "camera_intrinsic",
        # bgr or rgb
        "image_format",
        # pillow or cv2
        "image_reader",
        # chw or hwc
        "channel_order",
        # Unique ID of the sample
        "id",
        "time_lag",
        "ref_from_curr"
    ]
    # yapf: enable
    def __init__(self, **kwargs):
        # Route every keyword through __setattr__, which (via _EasyDict)
        # stores values as dict items rather than instance attributes.
        # NOTE(review): the base class is a dict subclass without __slots__,
        # so this __slots__ does not actually restrict attribute names —
        # it appears to serve as documentation of the expected keys.
        for key, value in kwargs.items():
            setattr(self, key, value)
class Sample(_EasyDict):
    """
    A single dataset sample: the raw data path and its modality, plus slots
    for data, 2D/3D boxes, labels, sweeps and attributes that are filled in
    later by dataset loaders and transforms.
    """
    # Accepted values for the `modality` constructor argument.
    _VALID_MODALITIES = ["image", "lidar", "radar", "multimodal", "multiview"]
    def __init__(self, path: str, modality: str):
        # NOTE(review): validation compares the raw string against the
        # lowercase list, but only the stored value is lower-cased — a
        # mixed-case modality such as "Image" is rejected here.
        if modality not in self._VALID_MODALITIES:
            raise ValueError('Only modality {} is supported, but got {}'.format(
                self._VALID_MODALITIES, modality))
        self.meta = SampleMeta()
        self.path = path
        self.data = None
        self.modality = modality.lower()
        self.bboxes_2d = None
        self.bboxes_3d = None
        self.labels = None
        self.sweeps = []
        self.attrs = None
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/apis/config.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import codecs
import os
from collections.abc import Iterable, Mapping
from typing import Any, Dict, Generic, Optional
import paddle
import yaml
from paddle3d.utils.logger import logger
class Config(object):
    '''Training configuration parsing. Only yaml/yml files are supported.
    The following hyper-parameters are available in the config file:
    batch_size: The number of samples per gpu.
    iters: The total training steps.
    epochs: The total training epochs.
    train_dataset: A training data config including type/data_root/transforms/mode.
        For data type, please refer to paddle3d.datasets.
        For specific transforms, please refer to paddle3d.transforms.transforms.
    val_dataset: A validation data config including type/data_root/transforms/mode.
    optimizer: A optimizer config, but currently paddle3d only supports sgd with momentum in config file.
        In addition, weight_decay could be set as a regularization.
    learning_rate: A learning rate config. If decay is configured, learning _rate value is the starting learning rate,
        where only poly decay is supported using the config file. In addition, decay power and end_lr are tuned experimentally.
    model: A model config including type/backbone and model-dependent arguments.
        For model type, please refer to paddle3d.models.
        For backbone, please refer to paddle3d.models.backbones.
    Args:
        path (str) : The path of config file, supports yaml format only.
    Examples:
        from paddle3d.apis.config import Config
        # Create a cfg object with yaml file path.
        cfg = Config(yaml_cfg_path)
        # Parsing the argument when its property is used.
        train_dataset = cfg.train_dataset
        # the argument of model should be parsed after dataset,
        # since the model builder uses some properties in dataset.
        model = cfg.model
        ...
    '''
    def __init__(self,
                 *,
                 path: str,
                 learning_rate: Optional[float] = None,
                 batch_size: Optional[int] = None,
                 iters: Optional[int] = None,
                 epochs: Optional[int] = None):
        if not path:
            raise ValueError('Please specify the configuration file path.')
        if not os.path.exists(path):
            raise FileNotFoundError('File {} does not exist'.format(path))
        # Lazily-built singletons, constructed on first property access.
        self._model = None
        self._train_dataset = None
        self._val_dataset = None
        if path.endswith('yml') or path.endswith('yaml'):
            self.dic = self._parse_from_yaml(path)
        else:
            raise RuntimeError('Config file should in yaml format!')
        # Command-line overrides take precedence over file values.
        self.update(learning_rate=learning_rate,
                    batch_size=batch_size,
                    iters=iters,
                    epochs=epochs)
    def _update_dic(self, dic: Dict, base_dic: Dict):
        '''Recursively merge ``dic`` on top of ``base_dic`` and return the
        result. A node containing `_inherited_: False` discards the base
        entirely and is used verbatim (minus the marker key).
        '''
        base_dic = base_dic.copy()
        dic = dic.copy()
        if dic.get('_inherited_', True) == False:
            dic.pop('_inherited_')
            return dic
        for key, val in dic.items():
            if isinstance(val, dict) and key in base_dic:
                # Nested mappings merge recursively; scalars/lists replace.
                base_dic[key] = self._update_dic(val, base_dic[key])
            else:
                base_dic[key] = val
        dic = base_dic
        return dic
    def _parse_from_yaml(self, path: str):
        '''Parse a yaml file and build config, resolving any `_base_`
        inheritance chain relative to the file's directory.'''
        with codecs.open(path, 'r', 'utf-8') as file:
            dic = yaml.load(file, Loader=yaml.FullLoader)
        if '_base_' in dic:
            cfg_dir = os.path.dirname(path)
            base_path = dic.pop('_base_')
            base_path = os.path.join(cfg_dir, base_path)
            base_dic = self._parse_from_yaml(base_path)
            dic = self._update_dic(dic, base_dic)
        return dic
    def update(self,
               learning_rate: Optional[float] = None,
               batch_size: Optional[int] = None,
               iters: Optional[int] = None,
               epochs: Optional[int] = None):
        '''Override selected config values; None arguments are ignored.'''
        if learning_rate is not None:
            self.dic['lr_scheduler']['learning_rate'] = learning_rate
        if batch_size is not None:
            self.dic['batch_size'] = batch_size
        if iters is not None:
            self.dic['iters'] = iters
        if epochs is not None:
            self.dic['epochs'] = epochs
    @property
    def batch_size(self) -> int:
        """Per-GPU batch size; defaults to 1 when unspecified."""
        return self.dic.get('batch_size', 1)
    @property
    def iters(self) -> int:
        """Total training iterations, or None when training by epochs."""
        iters = self.dic.get('iters')
        return iters
    @property
    def epochs(self) -> int:
        """Total training epochs, or None when training by iterations."""
        epochs = self.dic.get('epochs')
        return epochs
    @property
    def lr_scheduler(self) -> paddle.optimizer.lr.LRScheduler:
        """The learning-rate scheduler built from the `lr_scheduler` node."""
        if 'lr_scheduler' not in self.dic:
            raise RuntimeError(
                'No `lr_scheduler` specified in the configuration file.')
        params = self.dic.get('lr_scheduler')
        return self._load_object(params)
    @property
    def optimizer(self) -> paddle.optimizer.Optimizer:
        """The optimizer, wired to the lr_scheduler and model parameters.
        Note: accessing this property builds the model if not built yet."""
        params = self.dic.get('optimizer', {}).copy()
        params['learning_rate'] = self.lr_scheduler
        params['parameters'] = self.model.parameters()
        return self._load_object(params)
    @property
    def model(self) -> paddle.nn.Layer:
        """The model built from the `model` config node (cached)."""
        # Default to {} so a missing `model` key raises the intended
        # RuntimeError below instead of AttributeError on `None.copy()`.
        model_cfg = self.dic.get('model', {}).copy()
        if not model_cfg:
            raise RuntimeError('No model specified in the configuration file.')
        if not self._model:
            self._model = self._load_object(model_cfg)
        return self._model
    @property
    def amp_config(self) -> Optional[Dict]:
        """The `amp_cfg` node (mixed-precision settings) or None.
        Annotation corrected from `int` — the value is a mapping."""
        return self.dic.get('amp_cfg', None)
    @property
    def train_dataset_config(self) -> Dict:
        """A shallow copy of the `train_dataset` config node."""
        return self.dic.get('train_dataset', {}).copy()
    @property
    def val_dataset_config(self) -> Dict:
        """A shallow copy of the `val_dataset` config node."""
        return self.dic.get('val_dataset', {}).copy()
    @property
    def train_dataset_class(self) -> Generic:
        """The dataset class referenced by the training config."""
        dataset_type = self.train_dataset_config['type']
        return self._load_component(dataset_type)
    @property
    def val_dataset_class(self) -> Generic:
        """The dataset class referenced by the validation config."""
        dataset_type = self.val_dataset_config['type']
        return self._load_component(dataset_type)
    @property
    def train_dataset(self) -> paddle.io.Dataset:
        """The instantiated training dataset (cached), or None if absent."""
        _train_dataset = self.train_dataset_config
        if not _train_dataset:
            return None
        if not self._train_dataset:
            self._train_dataset = self._load_object(_train_dataset)
        return self._train_dataset
    @property
    def val_dataset(self) -> paddle.io.Dataset:
        """The instantiated validation dataset (cached), or None if absent."""
        _val_dataset = self.val_dataset_config
        if not _val_dataset:
            return None
        if not self._val_dataset:
            self._val_dataset = self._load_object(_val_dataset)
        return self._val_dataset
    @property
    def export_config(self) -> Dict:
        """The `export` config node ({} when unspecified)."""
        return self.dic.get('export', {})
    def _load_component(self, com_name: str) -> Any:
        """Resolve a component name to a class/callable: paddle3d's manager
        registries first, then paddle's lr/optimizer/nn namespaces. Names
        prefixed `$paddleseg.`/`$paddledet.` delegate to those libraries."""
        # lazy import
        import paddle3d.apis.manager as manager
        # The prefix including the trailing dot is 11 characters long.
        if com_name.lower().startswith('$paddleseg'):
            return self._load_component_from_paddleseg(com_name[11:])
        if com_name.lower().startswith('$paddledet'):
            return self._load_component_from_paddledet(com_name[11:])
        for com in manager.__all__:
            com = getattr(manager, com)
            if com_name in com.components_dict:
                return com[com_name]
        else:
            # Fall back to paddle's own namespaces when no registry matched.
            if com_name in paddle.optimizer.lr.__all__:
                return getattr(paddle.optimizer.lr, com_name)
            elif com_name in paddle.optimizer.__all__:
                return getattr(paddle.optimizer, com_name)
            elif com_name in paddle.nn.__all__:
                return getattr(paddle.nn, com_name)
        raise RuntimeError(
            'The specified component was not found {}.'.format(com_name))
    def _load_component_from_paddleseg(self, com_name: str) -> Any:
        """Resolve a component from paddleseg's registries."""
        from paddleseg.cvlibs import manager
        com_list = [
            manager.BACKBONES, manager.DATASETS, manager.MODELS,
            manager.TRANSFORMS, manager.LOSSES
        ]
        for com in com_list:
            if com_name in com.components_dict:
                return com[com_name]
        raise RuntimeError(
            'The specified component was not found {} in paddleseg.'.format(
                com_name))
    def _load_component_from_paddledet(self, com_name: str) -> Any:
        """Resolve a component from paddledet's global workspace."""
        from ppdet.core.workspace import global_config as ppdet_com_dict
        if com_name in ppdet_com_dict:
            component = ppdet_com_dict[com_name]
            cls = getattr(component.pymodule, component.name)
            return cls
        raise RuntimeError(
            'The specified component was not found {} in paddledet.'.format(
                com_name))
    def _load_object(self, obj: Generic, recursive: bool = True) -> Any:
        """Recursively instantiate config nodes: a mapping with a `type` key
        becomes an object of that component (other keys are constructor
        arguments), sequences are loaded element-wise, scalars pass through."""
        if isinstance(obj, Mapping):
            dic = obj.copy()
            component = self._load_component(
                dic.pop('type')) if 'type' in dic else dict
            if recursive:
                params = {}
                for key, val in dic.items():
                    params[key] = self._load_object(obj=val,
                                                    recursive=recursive)
            else:
                params = dic
            try:
                return component(**params)
            except Exception as e:
                # Re-raise with the component name prefixed for context.
                raise type(e)('{} {}'.format(component.__name__, e))
        elif isinstance(obj, Iterable) and not isinstance(obj, str):
            return [self._load_object(item) for item in obj]
        return obj
    def _is_meta_type(self, item: Any) -> bool:
        """Whether ``item`` is an instantiable config node (dict with `type`)."""
        return isinstance(item, dict) and 'type' in item
    def __str__(self) -> str:
        """Human-readable dump of the raw (uninstantiated) configuration."""
        msg = '---------------Config Information---------------'
        msg += '\n{}'.format(yaml.dump(self.dic))
        msg += '------------------------------------------------'
        return msg
    def to_dict(self) -> Dict:
        """Instantiate and return the major components as a plain dict
        (keyed by iters or epochs depending on the training mode)."""
        if self.iters is not None:
            dic = {'iters': self.iters}
        else:
            dic = {'epochs': self.epochs}
        dic.update({
            'optimizer': self.optimizer,
            'model': self.model,
            'train_dataset': self.train_dataset,
            'val_dataset': self.val_dataset,
            'batch_size': self.batch_size,
            'amp_cfg': self.amp_config
        })
        return dic
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/apis/checkpoint.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import contextlib
import copy
import os
import shutil
from typing import Generic, Hashable, Optional, Tuple
import filelock
import paddle
import yaml
from easydict import EasyDict
from paddle3d.utils.logger import logger
class CheckpointABC(abc.ABC):
    """
    Abstract interface for a checkpoint store: a queue of saved
    model/optimizer snapshots plus a metadata record persisted with them.
    """
    @abc.abstractmethod
    def have(self, tag: str):
        """
        Return whether a snapshot identified by ``tag`` exists in the store.
        """
    @abc.abstractmethod
    def get(self, tag: Optional[str] = None) -> Tuple[dict, dict]:
        """
        Return ``(params_dict, opt_dict)`` for ``tag`` (latest when None).
        """
    @abc.abstractmethod
    def push(self, params_dict: dict, opt_dict: dict = None, **kwargs) -> str:
        """
        Save a snapshot and return the tag it was stored under.
        """
    def pop(self, **kwargs) -> str:
        """
        Remove the oldest snapshot and return its tag.
        NOTE(review): unlike the sibling methods this one is not decorated
        with @abc.abstractmethod — possibly an oversight; confirm before
        relying on subclasses to implement it.
        """
    @property
    @abc.abstractmethod
    def empty(self) -> bool:
        """
        True when the snapshot queue holds no entries.
        """
    @abc.abstractmethod
    def record(self, key: Hashable, value: Generic) -> bool:
        """
        Persist an arbitrary key/value pair in the checkpoint metadata;
        return False when the key exists and overwriting is disallowed.
        """
    @property
    @abc.abstractmethod
    def meta(self) -> dict:
        """
        A copy of the checkpoint metadata.
        """
    @property
    @abc.abstractmethod
    def metafile(self) -> str:
        """
        Path of the file the metadata is persisted to.
        """
    @property
    @abc.abstractmethod
    def rootdir(self) -> str:
        """
        Directory under which snapshots are stored.
        """
class Checkpoint(CheckpointABC):
    """
    Filesystem-backed checkpoint store.
    Layout: ``save_dir/<tag>/model.pdparams`` (+ optional ``model.pdopt``),
    with the tag queue, counter and flags persisted to ``save_dir/meta.yaml``
    under an inter-process file lock.
    """
    def __init__(self,
                 save_dir: str,
                 keep_checkpoint_max: int = 5,
                 overwrite: bool = True):
        self.save_dir = save_dir
        self._meta = EasyDict()
        self._meta.overwrite = overwrite
        self._meta.keep_checkpoint_max = keep_checkpoint_max
        # `counter` names the next auto-generated tag; `queue` is the FIFO
        # of tags currently kept on disk.
        self._meta.counter = 0
        self._meta.queue = []
        os.makedirs(self.save_dir, exist_ok=True)
        # Reload metadata left by a previous run, then persist merged state.
        if os.path.exists(self.metafile):
            with open(self.metafile) as file, self.rwlock():
                dic = yaml.load(file, Loader=yaml.FullLoader)
                self._meta.update(dic)
        self._sync_to_file()
    def have(self, tag: str):
        """
        Whether ``tag`` is currently tracked in the checkpoint queue.
        """
        return tag in self.meta.queue
    def get(self, tag: Optional[str] = None) -> Tuple[dict, dict]:
        """
        Load and return ``(params_dict, opt_dict)`` for ``tag``; when ``tag``
        is None the newest queued snapshot is used. ``opt_dict`` is ``{}``
        when no optimizer state was saved for that snapshot.
        """
        if tag is None:
            if len(self.meta.queue) == 0:
                raise RuntimeError('The checkpoint queue is empty!')
            tag = self.meta.queue[-1]
        if not self.have(tag):
            raise ValueError(
                'There is no model parameter corresponding to the specified tag {{{}}} in checkpoint.'
                .format(tag))
        params_path = os.path.join(self.rootdir, tag, 'model.pdparams')
        opt_path = os.path.join(self.rootdir, tag, 'model.pdopt')
        params = paddle.load(params_path)
        if os.path.exists(opt_path):
            opt = paddle.load(opt_path)
        else:
            opt = {}
        return params, opt
    def push(self,
             params_dict: dict,
             opt_dict: dict = None,
             tag: Optional[str] = None,
             enqueue: bool = True,
             verbose: bool = False) -> str:
        """
        Save a snapshot under ``tag`` (auto-numbered when None) and return
        the tag. With ``enqueue`` the tag joins the FIFO queue, evicting the
        oldest entry once ``keep_checkpoint_max`` is reached; out-of-queue
        saves refuse to overwrite unless ``overwrite`` was enabled.
        """
        tag = str(self._meta.counter) if tag is None else tag
        dirname = os.path.join(self.rootdir, tag)
        params_path = os.path.join(dirname, 'model.pdparams')
        if enqueue:
            if self._meta.keep_checkpoint_max > 0 and len(
                    self._meta.queue) >= self._meta.keep_checkpoint_max:
                self.pop(verbose=verbose)
            self._meta.queue.append(tag)
            self._meta.counter += 1
        else:
            if os.path.exists(params_path) and not self._meta.overwrite:
                raise RuntimeError(
                    'Unable to save parameters to non-empty path {}'.format(
                        params_path))
        os.makedirs(dirname, exist_ok=True)
        paddle.save(params_dict, params_path)
        if opt_dict is not None:
            opt_path = os.path.join(dirname, 'model.pdopt')
            paddle.save(opt_dict, opt_path)
        if verbose:
            logger.info('Push model to checkpoint {}'.format(dirname))
        self._sync_to_file()
        return tag
    def pop(self, verbose: bool = False) -> str:
        """
        Delete the oldest queued snapshot from disk and return its tag.
        """
        if len(self._meta.queue) == 0:
            raise RuntimeError('Checkpoint queue is empty!')
        pop_idx = self._meta.queue[0]
        pop_dir = os.path.join(self.rootdir, pop_idx)
        shutil.rmtree(pop_dir)
        if verbose:
            logger.info('Pop model from {}'.format(pop_dir))
        self._meta.queue = self._meta.queue[1:]
        self._sync_to_file()
        return pop_idx
    @property
    def empty(self):
        """
        True when no snapshot is queued.
        """
        return len(self._meta.queue) == 0
    def record(self, key: Hashable, value: Generic) -> bool:
        """
        Store ``key: value`` in the metadata and persist it. Returns False
        (without writing) when the key exists and overwrite is disabled.
        """
        if key in self._meta and not self._meta.overwrite:
            return False
        self._meta[key] = value
        self._sync_to_file()
        return True
    @property
    def meta(self) -> dict:
        """
        A deep copy of the metadata; mutations do not affect the store.
        """
        return copy.deepcopy(self._meta)
    @property
    def metafile(self) -> str:
        """
        Path of the persisted metadata file (``meta.yaml``).
        """
        return os.path.join(self.rootdir, 'meta.yaml')
    @property
    def rootdir(self) -> str:
        """
        Directory holding all snapshots; same as ``save_dir``.
        """
        return self.save_dir
    def _sync_to_file(self):
        # Persist metadata under the inter-process lock.
        with open(self.metafile, 'w') as file, self.rwlock():
            yaml.dump(dict(self.meta), file)
    @contextlib.contextmanager
    def rwlock(self):
        # File lock guarding concurrent reads/writes of meta.yaml.
        lockfile = os.path.join(self.rootdir, '.lock')
        with filelock.FileLock(lockfile):
            yield
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/apis/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .checkpoint import Checkpoint, CheckpointABC
from .config import Config
from .manager import ComponentManager
from .pipeline import training_step, validation_step
from .scheduler import Scheduler, SchedulerABC, SchedulerStatus
from .trainer import Trainer
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/apis/pipeline.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from paddle.distributed.fleet.utils.hybrid_parallel_util import \
fused_allreduce_gradients
from paddle3d.sample import Sample
def parse_losses(losses):
    """Collapse a loss container into a single scalar: a bare tensor is
    summed onto 0, a dict has its values summed. Any other input yields 0."""
    total = 0
    if isinstance(losses, paddle.Tensor):
        total = total + losses
    elif isinstance(losses, dict):
        for value in losses.values():
            total = total + value
    return total
def training_step(model: paddle.nn.Layer,
                  optimizer: paddle.optimizer.Optimizer,
                  sample: Sample,
                  cur_iter: int,
                  scaler=None,
                  amp_cfg=dict()) -> dict:
    """
    Execute one training iteration: forward pass, loss reduction, backward
    pass, optimizer and LR-scheduler update. Supports AMP via ``scaler``,
    recompute under DataParallel, and the OneCycleAdam
    before_iter/after_iter protocol. Returns the model outputs augmented
    with a 'total_loss' entry (rank-averaged when distributed training is
    initialized).
    """
    if optimizer.__class__.__name__ == 'OneCycleAdam':
        # OneCycleAdam drives its own schedule via before_iter/after_iter.
        optimizer.before_iter(cur_iter - 1)
    model.train()
    # With recompute (gradient checkpointing) under DataParallel, automatic
    # gradient all-reduce must be disabled (no_sync) and performed manually
    # after backward via fused_allreduce_gradients.
    if isinstance(model, paddle.DataParallel) and hasattr(model._layers, 'use_recompute') \
            and model._layers.use_recompute:
        with model.no_sync():
            if scaler is not None:
                # AMP path: scale the loss before backward.
                with paddle.amp.auto_cast(**amp_cfg):
                    outputs = model(sample)
                    loss = parse_losses(outputs['loss'])
                    scaled_loss = scaler.scale(loss)
                    scaled_loss.backward()
            else:
                outputs = model(sample)
                loss = parse_losses(outputs['loss'])
                loss.backward()
        fused_allreduce_gradients(list(model.parameters()), None)
    else:
        if scaler is not None:
            # AMP path: scale the loss before backward.
            with paddle.amp.auto_cast(**amp_cfg):
                outputs = model(sample)
                loss = parse_losses(outputs['loss'])
                scaled_loss = scaler.scale(loss)
                scaled_loss.backward()
        else:
            outputs = model(sample)
            loss = parse_losses(outputs['loss'])
            loss.backward()
    if optimizer.__class__.__name__ == 'OneCycleAdam':
        optimizer.after_iter()
    else:
        if scaler is not None:
            # scaler.step unscales gradients and applies the update.
            scaler.step(optimizer)
            scaler.update()
            optimizer.clear_grad()
        else:
            optimizer.step()
            model.clear_gradients()
        if isinstance(optimizer._learning_rate,
                      paddle.optimizer.lr.LRScheduler):
            optimizer._learning_rate.step()
    with paddle.no_grad():
        if paddle.distributed.is_initialized():
            # Average the loss across ranks for consistent logging.
            loss_clone = loss.clone()
            paddle.distributed.all_reduce(
                loss_clone.scale_(1. / paddle.distributed.get_world_size()))
            outputs['total_loss'] = loss_clone
        else:
            outputs['total_loss'] = loss
    return outputs
def validation_step(model: paddle.nn.Layer, sample: Sample) -> dict:
    """Run one eval-mode forward pass without gradients and return the
    model's 'preds' output."""
    model.eval()
    with paddle.no_grad():
        return model(sample)['preds']
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/apis/scheduler.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from collections import namedtuple
from typing import Optional
# Per-iteration action flags returned by Scheduler.step().
SchedulerStatus = namedtuple('SchedulerStatus',
                             ['do_eval', 'do_log', 'save_checkpoint'])
class SchedulerABC(abc.ABC):
    """
    Abstract training scheduler: decides, per iteration, whether to log,
    evaluate and save a checkpoint.
    """
    @abc.abstractmethod
    def step(self, cur_iter: Optional[int] = None) -> SchedulerStatus:
        """
        Advance the scheduler (optionally jumping to ``cur_iter``) and
        report which actions are due as a SchedulerStatus.
        """
class Scheduler(SchedulerABC):
    """
    Interval-based scheduler: after each training iteration it reports
    whether to write a log line, save a checkpoint and run evaluation.
    An interval of 0 disables the corresponding action.
    """
    def __init__(self,
                 save_interval: int,
                 log_interval: int,
                 iters_per_epoch: int,
                 train_by_epoch: bool = False,
                 do_eval: bool = False):
        self.save_interval = save_interval
        self.log_interval = log_interval
        self.do_eval = do_eval
        self.cur_iter = 0
        self.iters_per_epoch = iters_per_epoch
        self.train_by_epoch = train_by_epoch
    def step(self, cur_iter: Optional[int] = None) -> SchedulerStatus:
        """
        Advance to the next iteration (or jump to ``cur_iter``) and report
        which actions are due.
        """
        self.cur_iter = self.cur_iter + 1 if cur_iter is None else cur_iter
        if self.train_by_epoch:
            # Save on the last iteration of every save_interval-th epoch.
            hit_save = (self.save_interval != 0
                        and self.cur_epoch % self.save_interval == 0
                        and self.is_last_iter_in_epoch)
        else:
            hit_save = (self.save_interval != 0
                        and self.cur_iter % self.save_interval == 0)
        hit_log = self.log_interval != 0 and self.cur_iter % self.log_interval == 0
        # Evaluation only ever happens together with a checkpoint save.
        return SchedulerStatus(hit_save and self.do_eval, hit_log, hit_save)
    @property
    def is_first_iter_in_epoch(self) -> bool:
        """True on the first iteration of the current epoch (1-based)."""
        return self.cur_iter % self.iters_per_epoch == 1
    @property
    def is_last_iter_in_epoch(self) -> bool:
        """True on the last iteration of the current epoch."""
        return self.cur_iter % self.iters_per_epoch == 0
    @property
    def cur_epoch(self) -> int:
        """1-based epoch index derived from the 1-based iteration counter."""
        return (self.cur_iter - 1) // self.iters_per_epoch + 1
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/apis/trainer.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
import sys
from collections import defaultdict
from typing import Callable, Optional, Union
import numpy as np
import paddle
from visualdl import LogWriter
import paddle3d.env as env
from paddle3d.apis.checkpoint import Checkpoint, CheckpointABC
from paddle3d.apis.pipeline import training_step, validation_step
from paddle3d.apis.scheduler import Scheduler, SchedulerABC
from paddle3d.utils.logger import logger
from paddle3d.utils.shm_utils import _get_shared_memory_size_in_M
from paddle3d.utils.timer import Timer
def default_dataloader_build_fn(**kwargs) -> paddle.io.DataLoader:
    """
    Return a factory that builds a DataLoader for a (dataset, model) pair.
    ``kwargs`` are forwarded to paddle.io.DataLoader, except ``batch_size``
    and ``drop_last`` which configure the batch sampler.
    """
    def _generate_loader(dataset: paddle.io.Dataset, model: paddle.nn.Layer):
        args = kwargs.copy()
        batch_size = args.pop('batch_size', 1)
        # Shuffle and drop the last incomplete batch only while training.
        shuffle = False if not dataset.is_train_mode else True
        drop_last = args.pop('drop_last',
                             False if not dataset.is_train_mode else True)
        if dataset.is_train_mode:
            BatchSampler = paddle.io.DistributedBatchSampler
        else:
            # Do eval in single device
            BatchSampler = paddle.io.BatchSampler
        batch_sampler = BatchSampler(
            dataset,
            batch_size=batch_size,
            shuffle=shuffle,
            drop_last=drop_last)
        # Prefer the model's collate_fn over the dataset's, when defined.
        if hasattr(model, 'collate_fn'):
            collate_fn = model.collate_fn
        else:
            collate_fn = getattr(dataset, 'collate_fn', None)
        # DataLoader do not start sub-process in Windows and Mac
        # system, do not need to use shared memory
        use_shared_memory = sys.platform not in ['win32', 'darwin']
        # check whether shared memory size is bigger than 1G(1024M)
        if use_shared_memory:
            shm_size = _get_shared_memory_size_in_M()
            if shm_size is not None and shm_size < 1024.:
                logger.warning("Shared memory size is less than 1G, "
                               "disable shared_memory in DataLoader")
                use_shared_memory = False
        return paddle.io.DataLoader(
            dataset=dataset,
            batch_sampler=batch_sampler,
            collate_fn=collate_fn,
            use_shared_memory=use_shared_memory,
            **args)
    return _generate_loader
def default_checkpoint_build_fn(**kwargs) -> Checkpoint:
    """Build a Checkpoint, filling in defaults for any unspecified option
    (save_dir='output', keep_checkpoint_max=5, overwrite=True)."""
    cfg = dict(kwargs)
    for key, value in (('save_dir', 'output'),
                       ('keep_checkpoint_max', 5),
                       ('overwrite', True)):
        cfg.setdefault(key, value)
    return Checkpoint(**cfg)
def default_scheduler_build_fn(**kwargs) -> Scheduler:
    """Build a Scheduler, filling in defaults: log every 10 iters, no eval,
    and a save interval of 5 epochs (epoch mode) or 1000 iterations."""
    cfg = dict(kwargs)
    cfg.setdefault('log_interval', 10)
    cfg.setdefault('do_eval', False)
    cfg.setdefault('save_interval', 5 if cfg.get('train_by_epoch') else 1000)
    return Scheduler(**cfg)
class Trainer:
    """Drives the full training / evaluation loop for a paddle3d model.

    Responsibilities visible in this class: building dataloaders, wiring
    the checkpoint and scheduler helpers, resuming from a non-empty
    checkpoint, optional AMP training, distributed (SyncBN/DataParallel)
    setup, per-interval logging to VisualDL, and periodic evaluation and
    checkpointing.
    """
    def __init__(
            self,
            model: paddle.nn.Layer,
            optimizer: paddle.optimizer.Optimizer,
            iters: Optional[int] = None,
            epochs: Optional[int] = None,
            train_dataset: Optional[paddle.io.Dataset] = None,
            val_dataset: Optional[paddle.io.Dataset] = None,
            resume: bool = False,
            # TODO: Default parameters should not use mutable objects, there is a risk
            checkpoint: Union[dict, CheckpointABC] = dict(),
            scheduler: Union[dict, SchedulerABC] = dict(),
            dataloader_fn: Union[dict, Callable] = dict(),
            amp_cfg: Optional[dict] = None):
        # Detect CenterPoint models by the leading pieces of the layer's
        # full name; CenterPoint losses are logged with a dedicated path.
        self.train_centerpoint = (''.join(model.full_name().split('_')[:2]) == 'centerpoint')
        self.model = model
        self.optimizer = optimizer
        # `dataloader_fn` may be a config dict (build the default factory)
        # or a ready-made callable taking (dataset, model).
        _dataloader_build_fn = default_dataloader_build_fn(
            **dataloader_fn) if isinstance(dataloader_fn,
                                           dict) else dataloader_fn
        self.train_dataloader = _dataloader_build_fn(train_dataset, self.model)
        self.eval_dataloader = _dataloader_build_fn(
            val_dataset, self.model) if val_dataset else None
        self.val_dataset = val_dataset
        if self.val_dataset is not None:
            assert train_dataset.class_names == val_dataset.class_names, \
                "the class_names of train_dataset must be same as the val_dataset"
        self.resume = resume
        vdl_file_name = None
        self.iters_per_epoch = len(self.train_dataloader)
        # Exactly one of `iters` / `epochs` determines the training length;
        # the other is derived from the dataloader size.
        if iters is None:
            self.epochs = epochs
            self.iters = epochs * self.iters_per_epoch
            self.train_by_epoch = True
        else:
            self.iters = iters
            self.epochs = (iters - 1) // self.iters_per_epoch + 1
            self.train_by_epoch = False
        # Recursively walk wrapped LR schedulers (e.g. LinearWarmup) and
        # inform any scheduler that exposes `iters_per_epoch` of the actual
        # per-epoch iteration count and warmup length.
        def set_lr_scheduler_iters_per_epoch(lr_scheduler,
                                             iters_per_epoch,
                                             warmup_iters=0):
            if isinstance(lr_scheduler, paddle.optimizer.lr.LinearWarmup):
                return set_lr_scheduler_iters_per_epoch(
                    lr_scheduler.learning_rate, iters_per_epoch,
                    lr_scheduler.warmup_steps)
            elif hasattr(lr_scheduler, 'learning_rate') and isinstance(
                    lr_scheduler.learning_rate,
                    paddle.optimizer.lr.LRScheduler):
                return set_lr_scheduler_iters_per_epoch(
                    lr_scheduler.learning_rate, iters_per_epoch)
            if hasattr(lr_scheduler, 'iters_per_epoch'):
                print('set lr scheduler {} iters_per_epoch={}, warmup_iters={}'.format(lr_scheduler.__class__.__name__, \
                    iters_per_epoch, warmup_iters))
                lr_scheduler.iters_per_epoch = iters_per_epoch
                lr_scheduler.warmup_iters = warmup_iters
        if hasattr(optimizer, '_learning_rate'):
            set_lr_scheduler_iters_per_epoch(optimizer._learning_rate,
                                             self.iters_per_epoch)
        self.cur_iter = 0
        self.cur_epoch = 0
        # OneCycleAdam needs the total iteration budget before the run starts.
        if self.optimizer.__class__.__name__ == 'OneCycleAdam':
            self.optimizer.before_run(max_iters=self.iters)
        self.checkpoint = default_checkpoint_build_fn(
            **checkpoint) if isinstance(checkpoint, dict) else checkpoint
        if isinstance(scheduler, dict):
            scheduler.setdefault('train_by_epoch', self.train_by_epoch)
            scheduler.setdefault('iters_per_epoch', self.iters_per_epoch)
            self.scheduler = default_scheduler_build_fn(**scheduler)
        else:
            self.scheduler = scheduler
        if self.checkpoint is None:
            return
        # A non-empty checkpoint dir either resumes training or is an error.
        if not self.checkpoint.empty:
            if not resume:
                raise RuntimeError(
                    'The checkpoint {} is not emtpy! Set `resume=True` to continue training or use another dir as checkpoint'
                    .format(self.checkpoint.rootdir))
            if self.checkpoint.meta.get(
                    'train_by_epoch') != self.train_by_epoch:
                raise RuntimeError(
                    'Unable to resume training since the train_by_epoch is inconsistent with that saved in the checkpoint'
                )
            # Restore model/optimizer state and fast-forward the scheduler.
            params_dict, opt_dict = self.checkpoint.get()
            self.model.set_dict(params_dict)
            self.optimizer.set_state_dict(opt_dict)
            self.cur_iter = self.checkpoint.meta.get('iters')
            self.cur_epoch = self.checkpoint.meta.get('epochs')
            self.scheduler.step(self.cur_iter)
            logger.info(
                'Resume model from checkpoint {}, current iter set to {}'.
                format(self.checkpoint.rootdir, self.cur_iter))
            vdl_file_name = self.checkpoint.meta['vdl_file_name']
        elif resume:
            logger.warning(
                "Attempt to restore parameters from an empty checkpoint")
        # Only rank 0 writes VisualDL logs and checkpoint metadata.
        if env.local_rank == 0:
            self.log_writer = LogWriter(
                logdir=self.checkpoint.rootdir, file_name=vdl_file_name)
            self.checkpoint.record('vdl_file_name',
                                   os.path.basename(self.log_writer.file_name))
            self.checkpoint.record('train_by_epoch', self.train_by_epoch)
        # Optional automatic mixed precision setup. NOTE: `amp_cfg` is
        # mutated in place here (keys popped) — callers should not reuse it.
        self.scaler = None
        self.amp_cfg = None
        if amp_cfg is not None and amp_cfg['use_amp']:
            scaler_cfg_ = dict(init_loss_scaling=2.**15)
            scaler_cfg_.update(**amp_cfg.pop('scaler', dict()))
            self.scaler = paddle.amp.GradScaler(**scaler_cfg_)
            amp_cfg.pop('use_amp', False)
            self.amp_cfg = amp_cfg
            amp_cfg_ = copy.deepcopy(amp_cfg)
            amp_cfg_.pop('enable', False)
            self.model.amp_cfg_ = amp_cfg_
            logger.info(
                'Use AMP train, AMP config: {}, Scaler config: {}'.format(
                    amp_cfg_, scaler_cfg_))
    def train(self):
        """Run the full training loop.

        Handles SyncBatchNorm conversion and DataParallel wrapping for
        multi-GPU runs, per-iteration optimization via ``training_step``,
        interval-driven logging/evaluation/checkpointing (decided by the
        scheduler), and a final checkpoint push at the end of training.
        """
        sync_bn = (getattr(self.model, 'sync_bn', False) and env.nranks > 1)
        if sync_bn:
            # Sparse-conv models need the sparse SyncBatchNorm converter.
            sparse_conv = False
            for layer in self.model.sublayers():
                if 'sparse' in str(type(layer)):
                    sparse_conv = True
                    break
            if sparse_conv:
                self.model = paddle.sparse.nn.SyncBatchNorm.convert_sync_batchnorm(
                    self.model)
            else:
                self.model = paddle.nn.SyncBatchNorm.convert_sync_batchnorm(
                    self.model)
        model = self.model
        if env.nranks > 1:
            if not paddle.distributed.parallel.parallel_helper._is_parallel_ctx_initialized(
            ):
                paddle.distributed.init_parallel_env()
            model = paddle.DataParallel(self.model)
        losses_sum = defaultdict(float)
        timer = Timer(iters=self.iters - self.cur_iter)
        while self.cur_iter < self.iters:
            for sample in self.train_dataloader:
                self.cur_iter += 1
                # First iteration of a new epoch bumps the epoch counter.
                # NOTE(review): when iters_per_epoch == 1 the modulo is
                # always 0, so cur_epoch would never advance — confirm.
                if self.cur_iter % self.iters_per_epoch == 1:
                    self.cur_epoch += 1
                if self.cur_iter > self.iters:
                    break
                lr = self.optimizer.get_lr()
                output = training_step(
                    model,
                    self.optimizer,
                    sample,
                    self.cur_iter,
                    scaler=self.scaler,
                    amp_cfg=self.amp_cfg)
                # Accumulate losses between log intervals.
                if isinstance(output['loss'], dict):
                    for k, v in output['loss'].items():
                        losses_sum[k] += float(v)
                if self.train_centerpoint:
                    losses_sum = self.update_centerpoint_loss(output, losses_sum)
                else:
                    losses_sum['total_loss'] += float(output['total_loss'])
                timer.step()
                status = self.scheduler.step()
                if status.do_log and env.local_rank == 0:
                    loss_log = ''
                    self.log_writer.add_scalar(
                        tag='Training/learning_rate',
                        value=lr,
                        step=self.cur_iter)
                    if self.train_centerpoint:
                        losses_sum, loss_log = \
                            self.log_centerpoint_loss(output, losses_sum, loss_log)
                    else:
                        # Report interval-averaged losses.
                        for k, v in losses_sum.items():
                            loss_val = v / self.scheduler.log_interval
                            loss_log += ', {}={:.6f}'.format(k, loss_val)
                            self.log_writer.add_scalar(
                                tag='Training/' + k,
                                value=loss_val,
                                step=self.cur_iter)
                    logger.info(
                        '[TRAIN] epoch={}/{}, iter={}/{} {}, lr={:.6f} | ELA {}, ETA {}'
                        .format(self.cur_epoch, self.epochs, self.cur_iter,
                                self.iters, loss_log, lr, timer.ela, timer.eta))
                    losses_sum.clear()
                if status.do_eval and env.local_rank == 0:
                    # TODO: whether to save a checkpoint based on the metric
                    metrics = self.evaluate()
                    metrics = metrics[0] if isinstance(metrics, tuple) else metrics
                    if self.train_centerpoint and \
                            type(self.train_dataloader.dataset).__name__ == 'ApolloPCDataset':
                        self.log_apollo_eval_metric(metrics)
                    else:
                        # Only scalar (single-element) tensors are logged.
                        for k, v in metrics.items():
                            if not isinstance(v, paddle.Tensor) or v.numel() != 1:
                                continue
                            self.log_writer.add_scalar(
                                tag='Evaluation/{}'.format(k),
                                value=float(v),
                                step=self.cur_iter)
                if status.save_checkpoint and env.local_rank == 0:
                    if self.train_by_epoch:
                        tag = 'epoch_{}'.format(self.cur_epoch)
                    else:
                        tag = 'iter_{}'.format(self.cur_iter)
                    self.checkpoint.push(
                        tag=tag,
                        params_dict=self.model.state_dict(),
                        opt_dict=self.optimizer.state_dict(),
                        verbose=True)
                    self.checkpoint.record('iters', self.cur_iter)
                    self.checkpoint.record('epochs', self.cur_epoch)
        logger.info('Training is complete.')
        # Make sure the final state is checkpointed exactly once.
        if env.local_rank == 0:
            if self.train_by_epoch:
                tag = 'epoch_{}'.format(self.epochs)
            else:
                tag = 'iter_{}'.format(self.iters)
            if not self.checkpoint.have(tag):
                self.checkpoint.push(
                    tag=tag,
                    params_dict=self.model.state_dict(),
                    opt_dict=self.optimizer.state_dict(),
                    verbose=True)
            self.checkpoint.record('iters', self.iters)
            self.checkpoint.record('epochs', self.epochs)
    def evaluate(self) -> float:
        """Evaluate the model on the validation dataset.

        Returns the value of ``self.val_dataset.metric.compute(...)``;
        despite the annotation this may be a dict/tuple depending on the
        dataset's metric object.

        Raises:
            RuntimeError: If no validation dataset was provided.
        """
        sync_bn = (getattr(self.model, 'sync_bn', False) and env.nranks > 1)
        if sync_bn:
            sparse_conv = False
            for layer in self.model.sublayers():
                if 'sparse' in str(type(layer)):
                    sparse_conv = True
                    break
            if sparse_conv:
                self.model = paddle.sparse.nn.SyncBatchNorm.convert_sync_batchnorm(
                    self.model)
            else:
                self.model = paddle.nn.SyncBatchNorm.convert_sync_batchnorm(
                    self.model)
        if self.val_dataset is None:
            raise RuntimeError('No evaluation dataset specified!')
        msg = 'evaluate on validate dataset'
        metric_obj = self.val_dataset.metric
        for idx, sample in logger.enumerate(self.eval_dataloader, msg=msg):
            result = validation_step(self.model, sample)
            metric_obj.update(predictions=result, ground_truths=sample)
        metrics = metric_obj.compute(verbose=True)
        return metrics
    def update_centerpoint_loss(self, output, losses_sum):
        """Accumulate CenterPoint per-head losses into ``losses_sum``.

        List-valued entries (per-task losses) are summed element-wise;
        scalar tensors are accumulated as floats. ``'loc_loss_elem'`` is
        skipped. Returns the updated accumulator.
        """
        for key, value in output.items():
            if key not in ['loc_loss_elem']:
                if key not in losses_sum:
                    losses_sum[key] = value
                else:
                    if isinstance(value, list):
                        for idx in range(len(value)):
                            losses_sum[key][idx] += value[idx]
                    elif isinstance(value, paddle.Tensor):
                        losses_sum[key] += float(value)
        return losses_sum
    def log_centerpoint_loss(self, output, losses_sum, loss_log):
        """Emit interval-averaged CenterPoint losses to VisualDL.

        Scalar losses are logged directly; list-valued losses are logged
        both as a sum over tasks and per task id. Returns the (unchanged)
        accumulator and the extended ``loss_log`` string.
        """
        for key, value in losses_sum.items():
            if isinstance(value, paddle.Tensor):
                value = value / self.scheduler.log_interval
                loss_log += ', {}={:.6f}'.format(key, float(value))
                self.log_writer.add_scalar(
                    tag='Training/' + key,
                    value=value,
                    step=self.cur_iter)
            elif isinstance(value, list):
                sum_value = float(sum(value) / self.scheduler.log_interval)
                loss_log += ', {}={:.6f}'.format(key, float(sum_value))
                self.log_writer.add_scalar(
                    tag='Training/' + key,
                    value=sum_value,
                    step=self.cur_iter)
                for task_id in range(len(value)):
                    task_value = float(value[task_id] / self.scheduler.log_interval)
                    loss_log += ', {}-{}={:.6f}'.format(key, task_id, task_value)
                    self.log_writer.add_scalar(
                        tag='Training/' + key + '/' + str(task_id),
                        value=task_value,
                        step=self.cur_iter)
        return losses_sum, loss_log
    def log_apollo_eval_metric(self, metrics):
        """Log Apollo per-class evaluation scores to VisualDL.

        For each class, only the metric group keyed by that class's IoU
        threshold triple (see ``dicts``) is logged; the '3d' scores at the
        moderate difficulty are averaged into 'Evaluation/3D_Mod_Map'.
        """
        mod_map = 0.0
        # Per-class IoU thresholds used to select the relevant metric group.
        dicts = {
            'smallMot': (0.7, 0.7, 0.7),
            'bigMot': (0.7, 0.7, 0.7),
            'nonMot': (0.5, 0.5, 0.5),
            'pedestrian': (0.5, 0.5, 0.5),
            'TrafficCone': (0.4, 0.4, 0.4)
        }
        difficulty = ['easy', 'mod', 'hard']
        for key, value in metrics.items():
            for k, v in value.items():
                if isinstance(v, dict) and k == dicts[key]:
                    for metric, score in v.items():
                        if metric == 'aos':
                            continue
                        if metric == '3d':
                            # index 1 == 'mod' difficulty.
                            mod_map += score[1]
                        if isinstance(score, np.ndarray):
                            for i in range(score.shape[0]):
                                self.log_writer.add_scalar(
                                    tag='Evaluation/' + str(key) + '/' \
                                        + str(k) + '/' + str(metric) + '/' \
                                        + str(difficulty[i]),
                                    value=float(score[i]),
                                    step=self.cur_iter)
        self.log_writer.add_scalar(
            tag='Evaluation/3D_Mod_Map',
            value=float(mod_map / len(dicts)),
            step=self.cur_iter)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/apis/manager.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from collections.abc import Sequence
from typing import Callable, Iterable, Union
from paddle3d.utils.logger import logger
# Public names of this module: one ComponentManager registry per component
# category (the instances are created at the bottom of this file).
__all__ = [
    'BACKBONES', 'MIDDLE_ENCODERS', 'MODELS', 'NECKS', 'VOXEL_ENCODERS',
    'LOSSES', 'DATASETS', 'TRANSFORMS', 'LR_SCHEDULERS', 'OPTIMIZERS',
    'VOXELIZERS', 'HEADS', 'POINT_ENCODERS', 'POSITIONAL_ENCODING',
    'TRANSFORMERS', 'TRANSFORMER_ENCODERS', 'TRANSFORMER_ENCODER_LAYERS',
    'ATTENTIONS', 'BBOX_CODERS', 'BBOX_ASSIGNERS', 'MATCH_COSTS',
    'BBOX_SAMPLERS', 'TRANSFORMER_DECODER_LAYERS', 'TRANSFORMER_DECODERS'
]
class ComponentManager:
    """Registry mapping component names to classes or functions.

    Components are registered via :meth:`add_component` (directly, with a
    sequence, or as a decorator) and looked up by their ``__name__``.
    Re-registering an existing name logs a warning and overwrites the old
    entry.

    Args:
        name (str): The name of component.
        description (str): Description of Component Manager

    Returns:
        A callable object of ComponentManager.

    Examples 1:

        from paddle3d.apis.manager import ComponentManager

        model_manager = ComponentManager()

        class AlexNet: ...
        class ResNet: ...

        model_manager.add_component(AlexNet)
        model_manager.add_component(ResNet)

        # Or pass a sequence alliteratively:
        model_manager.add_component([AlexNet, ResNet])
        print(model_manager.components_dict)
        # {'AlexNet': <class '__main__.AlexNet'>, 'ResNet': <class '__main__.ResNet'>}

    Examples 2:

        # Or an easier way, using it as a Python decorator, while just add it above the class declaration.
        from paddle3d.apis.manager import ComponentManager

        model_manager = ComponentManager()

        @model_manager.add_component
        class AlexNet: ...

        @model_manager.add_component
        class ResNet: ...

        print(model_manager.components_dict)
        # {'AlexNet': <class '__main__.AlexNet'>, 'ResNet': <class '__main__.ResNet'>}
    """

    def __init__(self, *, name: str, description: str = ''):
        self._components_dict = dict()
        self._name = name
        self._description = description

    def __len__(self):
        return len(self._components_dict)

    def __repr__(self):
        name_str = self._name if self._name else self.__class__.__name__
        return "{}:{}".format(name_str, list(self._components_dict.keys()))

    def __getitem__(self, item: str):
        # Fixed typo in the error message ("availabel" -> "available").
        if item not in self._components_dict.keys():
            raise KeyError("{} does not exist in available {}".format(
                item, self))
        return self._components_dict[item]

    @property
    def components_dict(self) -> dict:
        return self._components_dict

    @property
    def name(self) -> str:
        return self._name

    @property
    def description(self) -> str:
        return self._description

    def _add_single_component(self, component: Callable):
        """
        Add a single component into the corresponding manager.

        Args:
            component (function|class): A new component.

        Raises:
            TypeError: When `component` is neither class nor function.
        """
        # Currently only support class or function type
        if not (inspect.isclass(component) or inspect.isfunction(component)):
            raise TypeError(
                "Expect class/function type, but received {}".format(
                    type(component)))

        # Obtain the internal name of the component
        component_name = component.__name__

        # Duplicates are warned about but still overwritten (last wins).
        if component_name in self._components_dict:
            logger.warning(
                "{} exists already! It is now updated to {} !!!".format(
                    component_name, component))

        # Take the internal name of the component as its key
        self._components_dict[component_name] = component

    def add_component(self, components: Union[Callable, Iterable[Callable]]
                      ) -> Union[Callable, Iterable[Callable]]:
        """
        Add component(s) into the corresponding manager.

        Args:
            components (function|class|list|tuple): Support four types of components.

        Returns:
            components (function|class|list|tuple): Same with input components.
        """
        # Check whether the type is a sequence
        if isinstance(components, Sequence):
            for component in components:
                self._add_single_component(component)
        else:
            self._add_single_component(components)

        return components
# Registry instances, one per component category. Components elsewhere in
# the codebase register themselves via `@<REGISTRY>.add_component`.
VOXEL_ENCODERS = ComponentManager(name="voxel_encoders")
MIDDLE_ENCODERS = ComponentManager(name="middle_encoders")
BACKBONES = ComponentManager(name="backbones")
MODELS = ComponentManager(name="models")
NECKS = ComponentManager(name="necks")
HEADS = ComponentManager(name="heads")
LOSSES = ComponentManager(name="losses")
DATASETS = ComponentManager(name="datasets")
TRANSFORMS = ComponentManager(name="transforms")
LR_SCHEDULERS = ComponentManager(name="lr_schedulers")
OPTIMIZERS = ComponentManager(name="optimizers")
VOXELIZERS = ComponentManager(name="voxelizers")
POINT_ENCODERS = ComponentManager(name="point_encoders")
# Transformer-related registries (note: these use upper-case manager names).
POSITIONAL_ENCODING = ComponentManager(name="POSITIONAL_ENCODING")
TRANSFORMERS = ComponentManager(name="TRANSFORMERS")
TRANSFORMER_ENCODERS = ComponentManager(name="TRANSFORMER_ENCODERS")
TRANSFORMER_ENCODER_LAYERS = ComponentManager(name="TRANSFORMER_ENCODER_LAYERS")
ATTENTIONS = ComponentManager(name="ATTENTIONS")
BBOX_CODERS = ComponentManager(name="BBOX_CODERS")
BBOX_ASSIGNERS = ComponentManager(name="BBOX_ASSIGNERS")
MATCH_COSTS = ComponentManager(name="MATCH_COSTS")
BBOX_SAMPLERS = ComponentManager(name="BBOX_SAMPLERS")
TRANSFORMER_DECODER_LAYERS = ComponentManager(name="TRANSFORMER_DECODER_LAYERS")
TRANSFORMER_DECODERS = ComponentManager(name="TRANSFORMER_DECODERS")
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets/metrics.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from typing import List
from paddle3d.sample import Sample
class MetricABC(abc.ABC):
    """Abstract base class for dataset evaluation metrics.

    Concrete metrics accumulate batches via :meth:`update` and produce the
    final result via :meth:`compute`.
    """

    @abc.abstractmethod
    def update(self,
               predictions: List[Sample],
               ground_truths: List[Sample] = None):
        """Accumulate one batch of predictions and optional ground truths."""

    @abc.abstractmethod
    def compute(self):
        """Compute and return the metric value(s) from accumulated batches."""
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .base import BaseDataset
from .apollo import ApolloDetDataset, ApolloPCDataset
from .kitti import KittiDepthDataset, KittiMonoDataset, KittiPCDataset
from .modelnet40 import ModelNet40
from .nuscenes import NuscenesMVDataset, NuscenesPCDataset
from .waymo import WaymoPCDataset
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets/generate_gt_database.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import os.path as osp
import pickle
from collections import defaultdict
from typing import List
import warnings
from paddle3d.apis.config import Config
from paddle3d.datasets.kitti.kitti_pointcloud_det import KittiPCDataset
from paddle3d.datasets.apollo.apollo_pointcloud_det import \
ApolloPCDataset
from paddle3d.datasets.apollo.apollo_utils import class_information
from paddle3d.datasets.nuscenes.nuscenes_pointcloud_det import \
NuscenesPCDataset
from paddle3d.geometries.bbox import get_mask_of_points_in_bboxes3d
from paddle3d.transforms.reader import (LoadPointCloud,
RemoveCameraInvisiblePointsKITTI)
from paddle3d.transforms.transform import FilterSmallBBox
from paddle3d.utils.logger import logger
def generate_kitti_gt_database(dataset_root: str,
                               save_dir: str = None,
                               load_point_dim: int = 4,
                               use_point_dim: int = 4):
    """Generate a ground-truth sample database from the KITTI train split.

    For every annotated 3D box, the lidar points falling inside the box are
    cropped, re-centered on the box center and written to an individual
    ``.bin`` file; per-box metadata is collected into
    ``anno_info_train.pkl`` for GT-sampling data augmentation.

    Args:
        dataset_root: Root directory of the KITTI dataset.
        save_dir: Output directory; defaults to ``dataset_root``. The
            database is written under ``<save_dir>/kitti_train_gt_database``.
        load_point_dim: Number of values stored per point in the raw file.
        use_point_dim: Number of leading values to keep per point.
    """
    if save_dir is None:
        save_dir = dataset_root
    save_dir = osp.join(save_dir, "kitti_train_gt_database")
    transforms = [
        LoadPointCloud(dim=load_point_dim, use_dim=use_point_dim),
        RemoveCameraInvisiblePointsKITTI()
    ]
    dataset = KittiPCDataset(
        dataset_root=dataset_root, mode="train", transforms=transforms)
    database = defaultdict(list)
    msg = "Begin to generate a database for the KITTI dataset."
    cls_names = dataset.class_names
    for data_idx in logger.range(len(dataset), msg=msg):
        sample = dataset[data_idx]
        image_idx = int(sample.meta.id)
        points = sample.data
        bboxes_3d = sample.bboxes_3d
        labels = sample.labels
        difficulties = sample.difficulties
        num_bboxes = len(bboxes_3d)
        if num_bboxes == 0:
            continue
        masks = get_mask_of_points_in_bboxes3d(
            points, bboxes_3d)  # mask shape: [num_points, num_bboxes]
        for box_idx in range(num_bboxes):
            cls_name = cls_names[labels[box_idx]]
            if cls_name.lower() == "dontcare":
                continue
            mask = masks[:, box_idx]
            selected_points = points[mask]
            # Re-center cropped points on the box center so they can be
            # pasted into other scenes during GT-sampling augmentation.
            selected_points[:, 0:3] -= bboxes_3d[box_idx, 0:3]
            if not osp.exists(osp.join(save_dir, cls_name)):
                os.makedirs(osp.join(save_dir, cls_name))
            lidar_file = osp.join(save_dir, cls_name, "{}_{}_{}.bin".format(
                image_idx, cls_name, box_idx))
            # BUGFIX: open in binary mode — ndarray.tofile writes raw bytes,
            # and text mode corrupts them on platforms that translate newlines.
            with open(lidar_file, "wb") as f:
                selected_points.tofile(f)
            anno_info = {
                "lidar_file":
                osp.join("kitti_train_gt_database", cls_name,
                         "{}_{}_{}.bin".format(image_idx, cls_name, box_idx)),
                "cls_name":
                cls_name,
                "bbox_3d":
                bboxes_3d[box_idx, :],
                "box_idx":
                box_idx,
                "data_idx":
                image_idx,
                "num_points_in_box":
                selected_points.shape[0],
                "lidar_dim":
                use_point_dim,
                "difficulty":
                difficulties[box_idx]
            }
            database[cls_name].append(anno_info)
    db_anno_file = osp.join(save_dir, 'anno_info_train.pkl')
    with open(db_anno_file, 'wb') as f:
        pickle.dump(database, f)
    logger.info("The database generation has been done.")
def generate_apollo_gt_database(config: str):
    """Generate GT-sample databases for the Apollo datasets listed in a config.

    Reads point-cloud loading and database settings from the first two
    transforms of ``train_dataset`` in the config, crops the points inside
    every annotated box, writes them to per-box ``.bin`` files and stores
    the metadata into ``anno_info_train.pkl`` under each dataset directory.

    Args:
        config: Path to a training config file.
    """
    cfg = Config(path=config)
    cfg = cfg.dic['train_dataset']['transforms']
    load_point_dim = cfg[0]['dim']
    use_point_dim = cfg[0]['use_dim']
    sep = cfg[0]['sep']
    dataset_root = cfg[1]['database_root']
    dataset_list = cfg[1]['database_anno_list']
    cls_names = cfg[1]['class_names']
    for dataset_name in dataset_list:
        save_dir = osp.join(dataset_root, dataset_name, "apollo_train_gt_database")
        transforms = [
            LoadPointCloud(dim=load_point_dim, use_dim=use_point_dim, sep=sep),
            FilterSmallBBox(size_thr=[0.01, 0.01, 0.01])
        ]
        dataset = ApolloPCDataset(
            dataset_root=dataset_root, dataset_list=[dataset_name], mode="train",
            transforms=transforms, class_names=cls_names, create_gt_database=True)
        database = defaultdict(list)
        msg = "Begin to generate a database for the apollo dataset."
        for data_idx in logger.range(len(dataset), msg=msg):
            sample = dataset[data_idx]
            image_idx = int(sample.meta.id.split('/')[-1])
            points = sample.data
            bboxes_3d = sample.bboxes_3d
            labels = sample.labels
            num_bboxes = len(bboxes_3d)
            if num_bboxes == 0:
                continue
            masks = get_mask_of_points_in_bboxes3d(
                points, bboxes_3d)  # mask shape: [num_points, num_bboxes]
            for box_idx in range(num_bboxes):
                cls_name = labels[box_idx]
                mask = masks[:, box_idx]
                selected_points = points[mask]
                # Re-center cropped points on the box center.
                selected_points[:, 0:3] -= bboxes_3d[box_idx, 0:3]
                num_points = selected_points.shape[0]
                if num_points == 0:
                    warnings.warn("{} frame {}^th box is empty! size is {}, {}, {}".\
                        format(image_idx, box_idx, bboxes_3d[box_idx][3],
                               bboxes_3d[box_idx][4], bboxes_3d[box_idx][5]))
                    continue
                # Difficulty is derived from the number of points in the box:
                # fewer points => harder (2 hardest, 0 easiest).
                if cls_name.lower() not in class_information:
                    difficulty = 0
                else:
                    thresholds = class_information[cls_name.lower()]['difficulty_threshold']
                    if num_points <= thresholds[0]:
                        difficulty = 2
                    elif num_points <= thresholds[1]:
                        difficulty = 1
                    else:
                        difficulty = 0
                if not osp.exists(osp.join(save_dir, cls_name)):
                    os.makedirs(osp.join(save_dir, cls_name))
                lidar_file = osp.join(save_dir, cls_name, "{}_{}_{}.bin".format(
                    image_idx, cls_name, box_idx))
                # BUGFIX: binary mode — ndarray.tofile writes raw bytes and
                # text mode corrupts them on newline-translating platforms.
                with open(lidar_file, "wb") as f:
                    selected_points.tofile(f)
                anno_info = {
                    "dataset":
                    dataset_name,
                    "lidar_file":
                    osp.join("apollo_train_gt_database", cls_name,
                             "{}_{}_{}.bin".format(image_idx, cls_name, box_idx)),
                    "cls_name":
                    cls_name,
                    "bbox_3d":
                    bboxes_3d[box_idx, :],
                    "box_idx":
                    box_idx,
                    "data_idx":
                    image_idx,
                    "num_points_in_box":
                    num_points,
                    "lidar_dim":
                    use_point_dim,
                    "difficulty":
                    difficulty
                }
                database[cls_name].append(anno_info)
        db_anno_file = osp.join(save_dir, 'anno_info_train.pkl')
        with open(db_anno_file, 'wb') as f:
            pickle.dump(database, f)
        logger.info("The database generation has been done.")
def generate_nuscenes_gt_database(dataset_root: str,
                                  class_names: List[str] = None,
                                  save_dir: str = None,
                                  max_sweeps: int = 10,
                                  load_point_dim: int = 5,
                                  use_point_dim: int = 4,
                                  use_time_lag: bool = True,
                                  sweep_remove_radius: int = 1):
    """Generate a GT-sample database (with velocities) for nuScenes.

    Crops the points inside every annotated box, re-centers them on the box
    center, writes them to per-box ``.bin`` files and stores metadata
    (including box velocity) into a pickle for GT-sampling augmentation.

    Args:
        dataset_root: Root directory of the nuScenes dataset.
        class_names: Optional class subset passed to the dataset.
        save_dir: Output directory; defaults to ``dataset_root``.
        max_sweeps: Number of lidar sweeps aggregated per sample.
        load_point_dim: Number of values stored per point in the raw file.
        use_point_dim: Number of leading values to keep per point.
        use_time_lag: Whether the loader appends a time-lag channel.
        sweep_remove_radius: Radius used to drop ego-near sweep points.
    """
    if save_dir is None:
        save_dir = dataset_root
    save_dir = osp.join(
        save_dir, "gt_database_train_nsweeps{}_withvelo".format(max_sweeps))
    transforms = [
        LoadPointCloud(
            dim=load_point_dim,
            use_dim=use_point_dim,
            use_time_lag=use_time_lag,
            sweep_remove_radius=sweep_remove_radius)
    ]
    dataset = NuscenesPCDataset(
        dataset_root=dataset_root,
        mode='train',
        transforms=transforms,
        max_sweeps=max_sweeps,
        class_names=class_names)
    for cls_name in dataset.class_names:
        if not osp.exists(osp.join(save_dir, cls_name)):
            os.makedirs(osp.join(save_dir, cls_name))
    database = defaultdict(list)
    msg = "Begin to generate a database for the nuscenes dataset."
    for data_idx in logger.range(len(dataset), msg=msg):
        sample = dataset[data_idx]
        points = sample.data
        bboxes_3d = sample.bboxes_3d
        velocities = sample.bboxes_3d.velocities
        labels = sample.labels
        num_bboxes = len(bboxes_3d)
        if num_bboxes == 0:
            continue
        masks = get_mask_of_points_in_bboxes3d(
            points, bboxes_3d)  # mask shape: [num_points, num_bboxes]
        for box_idx in range(num_bboxes):
            mask = masks[:, box_idx]
            selected_points = points[mask]
            # Empty boxes are skipped (no points to store).
            if len(selected_points) == 0:
                continue
            # Re-center cropped points on the box center.
            selected_points[:, 0:3] -= bboxes_3d[box_idx, 0:3]
            cls_name = dataset.class_names[labels[box_idx]]
            if not osp.exists(osp.join(save_dir, cls_name)):
                os.makedirs(osp.join(save_dir, cls_name))
            lidar_file = osp.join(save_dir, cls_name, "{}_{}_{}.bin".format(
                data_idx, cls_name, box_idx))
            # BUGFIX: binary mode — ndarray.tofile writes raw bytes and text
            # mode corrupts them on newline-translating platforms.
            with open(lidar_file, "wb") as f:
                selected_points.tofile(f)
            anno_info = {
                "lidar_file":
                osp.join(
                    "gt_database_train_nsweeps{}_withvelo".format(max_sweeps),
                    cls_name, "{}_{}_{}.bin".format(data_idx, cls_name,
                                                    box_idx)),
                "cls_name":
                cls_name,
                "bbox_3d":
                bboxes_3d[box_idx, :],
                "velocity":
                velocities[box_idx, :],
                "box_idx":
                box_idx,
                "data_idx":
                data_idx,
                "num_points_in_box":
                selected_points.shape[0],
                "lidar_dim":
                load_point_dim
            }
            database[cls_name].append(anno_info)
    db_anno_file = osp.join(
        save_dir,
        'anno_info_train_nsweeps{}_withvelo.pkl'.format(max_sweeps))
    with open(db_anno_file, 'wb') as f:
        pickle.dump(database, f)
    logger.info("The database generation has been done.")
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets/base.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import numbers
from collections.abc import Mapping, Sequence
from typing import List
import numpy as np
import paddle
from paddle3d.geometries import BBoxes2D, BBoxes3D
from paddle3d.sample import Sample
class BaseDataset(abc.ABC, paddle.io.Dataset):
    """Base class for paddle3d datasets.

    Provides mode helpers, per-batch padding of variable-length
    annotations/images, and a recursive ``collate_fn`` used by dataloaders.
    """

    @property
    def is_train_mode(self) -> bool:
        # Substring match so modes like 'trainval' also count as training.
        return 'train' in self.mode

    @property
    def is_test_mode(self) -> bool:
        """Whether the dataset is in test mode."""
        return self.mode == 'test'

    @property
    def is_val_mode(self) -> bool:
        """Whether the dataset is in validation mode."""
        return self.mode == 'val'

    def padding_sample(self, samples: List[Sample]):
        """Pad labels / 2D / 3D boxes of each sample to the batch max count.

        Padded label entries are -1; padded box rows are all zeros.
        """
        maxlen = max([len(sample.labels) for sample in samples])
        padding_lens = [maxlen - len(sample.labels) for sample in samples]

        for padlen, sample in zip(padding_lens, samples):
            if padlen == 0:
                continue

            sample.labels = np.append(sample.labels,
                                      np.ones([padlen], dtype=np.int32) * -1)

            if sample.bboxes_2d is not None:
                empty_bbox = np.zeros([padlen, sample.bboxes_2d.shape[1]],
                                      np.float32)
                sample.bboxes_2d = BBoxes2D(
                    np.append(sample.bboxes_2d, empty_bbox, axis=0))

            if sample.bboxes_3d is not None:
                empty_bbox = np.zeros([padlen, sample.bboxes_3d.shape[1]],
                                      np.float32)
                sample.bboxes_3d = BBoxes3D(
                    np.append(sample.bboxes_3d, empty_bbox, axis=0))

    def padding_data(self, samples: List[Sample]):
        """Zero-pad each sample's (C, H, W) data to the batch max H and W."""
        image_sizes = [(sample.data.shape[-2], sample.data.shape[-1])
                       for sample in samples]
        max_size = np.stack(image_sizes).max(0)

        for image_size, sample in zip(image_sizes, samples):
            sample.data = np.pad(
                sample.data, ((0, 0), (0, max_size[0] - image_size[0]),
                              (0, max_size[1] - image_size[1])),
                'constant',
                constant_values=0.0)

    def collate_fn(self, batch: List):
        """Recursively collate a list of elements into batched containers.

        Supports ndarrays, paddle Tensors, numbers, strings/bytes, Sample
        objects (padded first), mappings, and sequences.

        Raises:
            RuntimeError: If sequence elements differ in length.
            TypeError: If an element type is not collatable.
        """
        sample = batch[0]
        if isinstance(sample, np.ndarray):
            batch = np.stack(batch, axis=0)
            return batch
        elif isinstance(sample, paddle.Tensor):
            return paddle.stack(batch, axis=0)
        elif isinstance(sample, numbers.Number):
            batch = np.array(batch)
            return batch
        elif isinstance(sample, (str, bytes)):
            return batch
        elif isinstance(sample, Sample):
            valid_keys = [
                key for key, value in sample.items() if value is not None
            ]
            self.padding_sample(batch)

            # Pad image data only when samples disagree on spatial size.
            if sample.data is not None:
                shapes = {batch_.data.shape for batch_ in batch}
                if len(shapes) != 1:
                    self.padding_data(batch)

            return {
                key: self.collate_fn([d[key] for d in batch])
                for key in valid_keys
            }
        elif isinstance(sample, Mapping):
            return {
                key: self.collate_fn([d[key] for d in batch])
                for key in sample
            }
        elif isinstance(sample, Sequence):
            sample_fields_num = len(sample)
            if not all(
                    len(sample) == sample_fields_num for sample in iter(batch)):
                # BUGFIX: corrected typo in the error message ("fileds").
                raise RuntimeError(
                    "fields number not same among samples in a batch")
            return [self.collate_fn(fields) for fields in zip(*batch)]

        # BUGFIX: corrected grammar of the error message ("con only contains").
        raise TypeError(
            "batch data can only contain: tensor, numpy.ndarray, "
            "dict, list, number, paddle3d.Sample, but got {}".format(
                type(sample)))

    # `abc.abstractproperty` is deprecated since Python 3.3; use the
    # equivalent property + abstractmethod stacking instead.
    @property
    @abc.abstractmethod
    def name(self) -> str:
        """Name of dataset."""

    @property
    @abc.abstractmethod
    def labels(self) -> List[str]:
        """The category labels for the dataset."""
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets/modelnet40/modelnet40_metric.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
import paddle
from paddle3d.datasets.metrics import MetricABC
from paddle3d.sample import Sample
from paddle3d.utils.logger import logger
__all__ = ["AccuracyMetric"]
class AccuracyMetric(MetricABC):
    """Top-1 accuracy metric for classification datasets.

    Batches are accumulated via :meth:`update` and reduced once in
    :meth:`compute`. Note that :meth:`compute` replaces the internal
    lists with concatenated tensors, so it is intended to be called once.
    """

    def __init__(self, num_classes: int):
        # Number of target classes (kept for API compatibility).
        self.num_classes = num_classes
        self.predictions = []
        self.ground_truths = []

    def update(self,
               predictions: List[Sample],
               ground_truths: List[Sample] = None):
        """Accumulate one batch of predictions and labels."""
        self.predictions.append(predictions)
        self.ground_truths.append(ground_truths)

    def compute(self, verbose=False):
        """Concatenate accumulated batches and compute top-1 accuracy."""
        self.predictions = paddle.concat(self.predictions, axis=0)
        self.ground_truths = paddle.concat(self.ground_truths, axis=0)
        score = paddle.metric.accuracy(self.predictions, self.ground_truths)
        if verbose:
            logger.info("Acc {:.3f}".format(float(score)))
        return {"accuracy": score}
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets/modelnet40/__init__.py
|
from .modelnet40_cls import ModelNet40
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets/modelnet40/modelnet40_cls.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
from typing import List
import h5py
import numpy as np
import paddle3d.transforms as T
from paddle3d.apis import manager
from paddle3d.datasets import BaseDataset
from paddle3d.datasets.modelnet40.modelnet40_metric import AccuracyMetric
from paddle3d.geometries import PointCloud
from paddle3d.sample import Sample
@manager.DATASETS.add_component
class ModelNet40(BaseDataset):
    """ModelNet40 point-cloud classification dataset.

    Loads the HDF5 shards named ``ply_data_{mode}*.h5`` under
    ``dataset_root`` and serves point clouds truncated to ``num_points``
    together with their class labels.
    """

    def __init__(self, dataset_root, num_points, transforms=None, mode='train'):
        super().__init__()
        self.data, self.label = self.load_data(dataset_root, mode)
        self.num_points = num_points
        self.mode = mode
        if isinstance(transforms, list):
            transforms = T.Compose(transforms)
        self.transforms = transforms

    def __getitem__(self, item):
        """Return one sample; transforms are applied only in train mode."""
        sample = Sample(path="", modality='lidar')
        points = self.data[item][:self.num_points]
        sample.data = PointCloud(points)
        sample.labels = self.label[item]
        if self.mode == 'train' and self.transforms:
            sample = self.transforms(sample)
        return sample

    def __len__(self):
        return self.data.shape[0]

    def load_data(self, dataset_root, mode):
        """Read and concatenate every HDF5 shard matching the split."""
        data_chunks = []
        label_chunks = []
        pattern = os.path.join(dataset_root, f"ply_data_{mode}*.h5")
        for h5_name in glob.glob(pattern):
            with h5py.File(h5_name, mode='r') as f:
                data_chunks.append(f['data'][:].astype('float32'))
                label_chunks.append(f['label'][:].astype('int64'))
        all_data = np.concatenate(data_chunks, axis=0)
        all_label = np.concatenate(label_chunks, axis=0)
        return all_data, all_label

    @property
    def metric(self):
        # ModelNet40 has a fixed 40-way label space.
        return AccuracyMetric(num_classes=40)

    @property
    def name(self) -> str:
        return "ModelNet40"

    @property
    def labels(self) -> List[str]:
        return self.label
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets/kitti/kitti_mono_det.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
from paddle3d.apis import manager
from paddle3d.datasets.kitti.kitti_det import KittiDetDataset
from paddle3d.datasets.kitti.kitti_utils import camera_record_to_object
from paddle3d.sample import Sample
@manager.DATASETS.add_component
class KittiMonoDataset(KittiDetDataset):
    """KITTI monocular 3D detection dataset (camera-2 images)."""

    def __getitem__(self, index: int) -> Sample:
        """Build the Sample for frame ``index``.

        Loads the calibration tuple and — outside test mode — the 2D/3D
        boxes plus integer class labels, then applies the configured
        transforms.
        """
        frame_id = self.data[index]
        path = os.path.join(self.image_dir, '{}.png'.format(frame_id))
        calibs = self.load_calibration_info(index)

        sample = Sample(path=path, modality="image")
        # calibs[2] is P2 (left color camera); keep only the 3x3 intrinsics.
        sample.meta.camera_intrinsic = calibs[2][:3, :3]
        sample.meta.id = frame_id
        sample.calibs = calibs

        if not self.is_test_mode:
            kitti_records, ignored_kitti_records = self.load_annotation(index)
            bboxes_2d, bboxes_3d, labels = camera_record_to_object(
                kitti_records)
            sample.bboxes_2d = bboxes_2d
            sample.bboxes_3d = bboxes_3d
            sample.labels = np.array(
                [self.CLASS_MAP[name] for name in labels], dtype=np.int32)

        if self.transforms:
            sample = self.transforms(sample)
        return sample

    @property
    def image_dir(self) -> str:
        """Directory that holds the camera-2 images."""
        return os.path.join(self.base_dir, 'image_2')
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets/kitti/kitti_det.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import os
from typing import List, Tuple, Union, Dict
import numpy as np
import pandas
from paddle3d import transforms as T
from paddle3d.datasets import BaseDataset
from paddle3d.datasets.kitti.kitti_metric import KittiMetric
from paddle3d.transforms import TransformABC
class KittiDetDataset(BaseDataset):
    """Base dataset for KITTI 3D object detection.

    Parses the ImageSets split file, optionally resamples the training split
    so every class is roughly equally represented, and provides loaders for
    calibration and annotation files. Subclasses implement ``__getitem__``
    for a concrete modality (image / point cloud).

    Args:
        dataset_root: Root directory containing ``training``/``testing`` and
            ``ImageSets``.
        mode: One of ``train``/``val``/``trainval``/``test``.
        transforms: Transform or list of transforms applied per sample.
        class_names: Classes kept from the label files; defaults to the keys
            of ``CLASS_MAP``.
        CLASS_MAP: Mapping from class name to integer label.
        class_balanced_sampling: When True (train mode, >1 class), frames of
            under-represented classes are duplicated.
        use_road_plane: Whether subclasses should load the road plane files.
    """

    def __init__(self,
                 dataset_root: str,
                 mode: str = "train",
                 transforms: Union[TransformABC, List[TransformABC]] = None,
                 class_names: Union[list, tuple] = None,
                 CLASS_MAP: Dict[str, int] = None,
                 class_balanced_sampling: bool = False,
                 use_road_plane: bool = False):
        super().__init__()
        self.dataset_root = dataset_root
        self.mode = mode.lower()
        if isinstance(transforms, list):
            transforms = T.Compose(transforms)
        self.transforms = transforms
        self.class_names = class_names
        self.use_road_plane = use_road_plane
        if CLASS_MAP is None:
            self.CLASS_MAP = {'Car': 0, 'Cyclist': 1, 'Pedestrian': 2}
        else:
            self.CLASS_MAP = CLASS_MAP
        self.CLASS_MAP_REVERSE = {
            value: key
            for key, value in self.CLASS_MAP.items()
        }
        if self.class_names is None:
            self.class_names = list(self.CLASS_MAP.keys())
        if self.mode not in ['train', 'val', 'trainval', 'test']:
            raise ValueError(
                "mode should be 'train', 'val', 'trainval' or 'test', but got {}."
                .format(self.mode))
        # get file list
        with open(self.imagesets_path) as file:
            self.data = file.read().strip('\n').split('\n')

        if class_balanced_sampling and self.mode.lower() == 'train' and len(
                self.class_names) > 1:
            # Collect, per class, the frame ids that contain at least one
            # instance of that class.
            cls_dist = {class_name: [] for class_name in self.class_names}
            for index in range(len(self.data)):
                file_idx = self.data[index]
                kitti_records, ignored_kitti_records = self.load_annotation(
                    index)
                gt_names = []
                for anno in kitti_records:
                    class_name = anno[0]
                    if class_name in self.class_names:
                        gt_names.append(class_name)
                for class_name in set(gt_names):
                    cls_dist[class_name].append(file_idx)
            num_balanced_samples = sum([len(v) for k, v in cls_dist.items()])
            num_balanced_samples = max(num_balanced_samples, 1)
            balanced_frac = 1.0 / len(self.class_names)
            fracs = [len(v) / num_balanced_samples for k, v in cls_dist.items()]
            # Guard against classes that never appear in the split, which
            # would otherwise raise ZeroDivisionError.
            sampling_ratios = [
                balanced_frac / frac if frac > 0. else 0. for frac in fracs
            ]
            resampling_data = []
            for samples, sampling_ratio in zip(
                    list(cls_dist.values()), sampling_ratios):
                resampling_data.extend(samples)
                if sampling_ratio > 1.:
                    # Duplicate random frames of rare classes until the class
                    # reaches the balanced fraction.
                    resampling_data.extend(
                        np.random.choice(
                            samples,
                            int(len(samples) * (sampling_ratio - 1.))).tolist())
            self.data = resampling_data

    def __len__(self):
        return len(self.data)

    @property
    def base_dir(self) -> str:
        """Split directory: ``testing`` in test mode, else ``training``."""
        dirname = 'testing' if self.is_test_mode else 'training'
        return os.path.join(self.dataset_root, dirname)

    @property
    def label_dir(self) -> str:
        """Directory containing the KITTI label files."""
        return os.path.join(self.base_dir, 'label_2')

    @property
    def calib_dir(self) -> str:
        """Directory containing the calibration files."""
        return os.path.join(self.base_dir, 'calib')

    @property
    def imagesets_path(self) -> str:
        """Path of the split file listing the frame ids of this mode."""
        return os.path.join(self.dataset_root, 'ImageSets',
                            '{}.txt'.format(self.mode))

    def load_calibration_info(self, index: int, use_data: bool = True) -> Tuple:
        """Parse one KITTI calibration file.

        Args:
            index: Position in ``self.data`` (``use_data=True``) or the raw
                frame id (``use_data=False``).

        Returns:
            Tuple of ``(P0, P1, P2, P3, R0_rect, V2C, I2V)`` float32 arrays.
            ``I2V`` is a zero (3, 4) matrix when the file has no IMU row.
        """
        if use_data:
            filename = '{}.txt'.format(self.data[index])
        else:
            filename = '{}.txt'.format(index)

        with open(os.path.join(self.calib_dir, filename), 'r') as csv_file:
            reader = list(csv.reader(csv_file, delimiter=' '))

            # parse camera intrinsics from calibration table
            P0 = [float(i) for i in reader[0][1:]]
            P0 = np.array(P0, dtype=np.float32).reshape(3, 4)

            P1 = [float(i) for i in reader[1][1:]]
            P1 = np.array(P1, dtype=np.float32).reshape(3, 4)

            P2 = [float(i) for i in reader[2][1:]]
            P2 = np.array(P2, dtype=np.float32).reshape(3, 4)

            P3 = [float(i) for i in reader[3][1:]]
            P3 = np.array(P3, dtype=np.float32).reshape(3, 4)

            # parse correction matrix for camera 0.
            R0_rect = [float(i) for i in reader[4][1:]]
            R0_rect = np.array(R0_rect, dtype=np.float32).reshape(3, 3)

            # parse matrix from velodyne to camera
            V2C = [float(i) for i in reader[5][1:]]
            V2C = np.array(V2C, dtype=np.float32).reshape(3, 4)

            # parse matrix from imu to velodyne (7th row, only present in
            # full calibration files). The previous check `len(reader) == 6`
            # indexed reader[6] on a 6-row file (IndexError), and the
            # fallback `np.array([0, 4])` produced a 2-element vector instead
            # of a (3, 4) placeholder matrix.
            if len(reader) >= 7:
                I2V = [float(i) for i in reader[6][1:]]
                I2V = np.array(I2V, dtype=np.float32).reshape(3, 4)
            else:
                I2V = np.zeros([3, 4], dtype=np.float32)

        return P0, P1, P2, P3, R0_rect, V2C, I2V

    def load_annotation(self, index: int) -> Tuple[np.ndarray, np.ndarray]:
        """Load one label file.

        Returns:
            ``(kitti_records, ignored_kitti_records)``: rows whose class is in
            ``self.class_names``, and rows of other classes except 'DontCare'.
        """
        filename = '{}.txt'.format(self.data[index])

        with open(os.path.join(self.label_dir, filename), 'r') as csv_file:
            df = pandas.read_csv(csv_file, sep=' ', header=None)
            array = np.array(df)
            rows = []
            ignored_rows = []
            for row in array:
                if row[0] in self.class_names:
                    rows.append(row)
                elif row[0] != 'DontCare':
                    ignored_rows.append(row)
            kitti_records = np.array(rows)
            ignored_kitti_records = np.array(ignored_rows)
        return kitti_records, ignored_kitti_records

    @property
    def metric(self):
        """Build a KittiMetric seeded with all ground-truth annotations."""
        gt = []
        for idx in range(len(self)):
            annos = self.load_annotation(idx)
            if len(annos[0]) > 0 and len(annos[1]) > 0:
                gt.append(np.concatenate((annos[0], annos[1]), axis=0))
            elif len(annos[0]) > 0:
                gt.append(annos[0])
            else:
                gt.append(annos[1])
        return KittiMetric(
            groundtruths=gt,
            classmap={i: name
                      for i, name in enumerate(self.class_names)},
            indexes=self.data)

    @property
    def name(self) -> str:
        return "KITTI"

    @property
    def labels(self) -> List[str]:
        return self.class_names
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets/kitti/kitti_depth_det.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import concurrent.futures as futures
import copy
import os
import pickle
from collections import defaultdict
import numpy as np
import paddle
import skimage.transform
from skimage import io
import paddle3d.transforms as T
from paddle3d.apis import manager
from paddle3d.datasets.kitti.kitti_det import KittiDetDataset
from paddle3d.datasets.kitti.kitti_metric import KittiDepthMetric
from paddle3d.datasets.kitti.kitti_utils import (Calibration,
get_objects_from_label)
from paddle3d.geometries.bbox import (boxes3d_kitti_camera_to_lidar,
boxes_to_corners_3d, in_hull,
mask_boxes_outside_range_numpy)
from paddle3d.thirdparty import kitti_eval
def get_pad_params(desired_size, cur_size):
    """
    Compute the ``(before, after)`` padding amounts for ``np.pad`` so that an
    axis of length ``cur_size`` grows to ``desired_size`` (all padding is
    appended at the end).

    This code is based on https://github.com/TRAILab/CaDDN/blob/5a96b37f16b3c29dd2509507b1cdfdff5d53c558/pcdet/utils/common_utils.py#L112

    Args:
        desired_size [int]: Desired padded output size
        cur_size [int]: Current size; must not exceed ``desired_size``
    Returns:
        pad_params [tuple(int)]: Number of values padded to the edges (before, after)
    """
    assert desired_size >= cur_size
    return (0, desired_size - cur_size)
@manager.DATASETS.add_component
class KittiDepthDataset(KittiDetDataset):
    """
    KITTI dataset variant serving RGB images plus depth maps (CaDDN-style),
    with ground-truth boxes converted to LiDAR coordinates.

    This code is based on https://github.com/TRAILab/CaDDN/blob/5a96b37f16b3c29dd2509507b1cdfdff5d53c558/pcdet/datasets/kitti/kitti_dataset.py#L17
    """
    def __init__(self,
                 dataset_root,
                 mode,
                 point_cloud_range,
                 depth_downsample_factor,
                 voxel_size,
                 class_names,
                 remove_outside_boxes=True):
        super(KittiDepthDataset, self).__init__(dataset_root, mode)
        self.class_names = class_names
        # [x_min, y_min, z_min, x_max, y_max, z_max] crop range.
        self.point_cloud_range = np.array(point_cloud_range, dtype=np.float32)
        self.depth_downsample_factor = depth_downsample_factor
        self._merge_all_iters_to_one_epoch = False
        self.remove_outside_boxes = remove_outside_boxes
        self.voxel_size = voxel_size
        self.grid_size = None
        self.training = mode == 'train'
        self.kitti_infos = []
        # Loads the pre-generated info pickle and derives the voxel grid size.
        self.include_kitti_data(self.mode)
    def include_kitti_data(self, mode):
        # Load `kitti_infos_<mode>.pkl` if present; silently no-op otherwise.
        kitti_infos = []
        info_path = os.path.join(self.dataset_root,
                                 "kitti_infos_" + mode + '.pkl')
        if not os.path.exists(info_path):
            return
        with open(info_path, 'rb') as f:
            infos = pickle.load(f)
            kitti_infos.extend(infos)
        self.kitti_infos.extend(kitti_infos)
        self.calculate_grid_size()
    def set_split(self, split):
        # Read frame ids from ImageSets/<split>.txt; None if file is missing.
        split_dir = os.path.join(self.dataset_root, 'ImageSets', split + '.txt')
        self.sample_id_list = [x.strip() for x in open(split_dir).readlines()
                               ] if os.path.exists(split_dir) else None
    def get_lidar(self, idx):
        # Load one velodyne scan as an (N, 4) float32 array (x, y, z, intensity).
        lidar_file = os.path.join(self.base_dir, 'velodyne', '%s.bin' % idx)
        assert os.path.exists(lidar_file)
        return np.fromfile(str(lidar_file), dtype=np.float32).reshape(-1, 4)
    def get_image(self, idx):
        """
        Loads image for a sample
        Args:
            idx [int]: Index of the image sample
        Returns:
            image [np.ndarray(H, W, 3)]: RGB Image, float32 scaled to [0, 1]
        """
        img_file = os.path.join(self.base_dir, 'image_2', '%s.png' % idx)
        assert os.path.exists(img_file)
        image = io.imread(img_file)
        image = image[:, :, :3]  # Remove alpha channel
        image = image.astype(np.float32)
        image /= 255.0
        return image
    def get_image_shape(self, idx):
        # Return (H, W) of the camera-2 image without keeping it in memory.
        img_file = os.path.join(self.base_dir, 'image_2', '%s.png' % idx)
        assert os.path.exists(img_file)
        return np.array(io.imread(img_file).shape[:2], dtype=np.int32)
    def get_label(self, idx):
        # Parse the KITTI label file into object records.
        label_file = os.path.join(self.base_dir, 'label_2', '%s.txt' % idx)
        assert os.path.exists(label_file)
        return get_objects_from_label(label_file)
    def get_depth_map(self, idx):
        """
        Loads depth map for a sample
        Args:
            idx [str]: Index of the sample
        Returns:
            depth [np.ndarray(H, W)]: Depth map, downsampled by
                ``depth_downsample_factor`` via local-mean pooling
        """
        depth_file = os.path.join(self.base_dir, 'depth_2', '%s.png' % idx)
        assert os.path.exists(depth_file)
        depth = io.imread(depth_file)
        depth = depth.astype(np.float32)
        depth /= 256.0  # KITTI depth PNGs store depth * 256 as uint16
        depth = skimage.transform.downscale_local_mean(
            image=depth,
            factors=(self.depth_downsample_factor,
                     self.depth_downsample_factor))
        return depth
    def get_calib(self, idx):
        # Build a Calibration object from the raw calibration file (by id).
        _, _, P2, _, R0_rect, V2C, _ = self.load_calibration_info(
            idx, use_data=False)
        calib_dict = {"P2": P2, "R0": R0_rect, "Tr_velo2cam": V2C}
        return Calibration(calib_dict)
    def get_road_plane(self, idx):
        # Return the normalized road-plane equation, or None if unavailable.
        plane_file = os.path.join(self.base_dir, 'planes', '%s.txt' % idx)
        if not os.path.exists(plane_file):
            return None
        with open(plane_file, 'r') as f:
            lines = f.readlines()
        lines = [float(i) for i in lines[3].split()]
        plane = np.asarray(lines)
        # Ensure normal is always facing up, this is in the rectified camera coordinate
        if plane[1] > 0:
            plane = -plane
        norm = np.linalg.norm(plane[0:3])
        plane = plane / norm
        return plane
    @staticmethod
    def get_fov_flag(pts_rect, img_shape, calib):
        """
        Compute a boolean mask of the rectified points that project inside the
        image and lie in front of the camera.
        Args:
            pts_rect: points in rectified camera coordinates
            img_shape: (H, W) of the image
            calib: Calibration object providing ``rect_to_img``
        Returns:
            pts_valid_flag: boolean mask over the input points
        """
        pts_img, pts_rect_depth = calib.rect_to_img(pts_rect)
        val_flag_1 = np.logical_and(pts_img[:, 0] >= 0,
                                    pts_img[:, 0] < img_shape[1])
        val_flag_2 = np.logical_and(pts_img[:, 1] >= 0,
                                    pts_img[:, 1] < img_shape[0])
        val_flag_merge = np.logical_and(val_flag_1, val_flag_2)
        pts_valid_flag = np.logical_and(val_flag_merge, pts_rect_depth >= 0)
        return pts_valid_flag
    def get_infos(self,
                  num_workers=4,
                  has_label=True,
                  count_inside_pts=True,
                  sample_id_list=None,
                  mode='train'):
        # Build per-frame info dicts (calibration, image shape, annotations)
        # in parallel. NOTE(review): this mutates ``self.mode`` as a side
        # effect before dispatching workers.
        def process_single_scene(sample_idx):
            # Assemble the info dict for a single frame id.
            info = {}
            pc_info = {'num_features': 4, 'lidar_idx': sample_idx}
            info['point_cloud'] = pc_info
            image_info = {
                'image_idx': sample_idx,
                'image_shape': self.get_image_shape(sample_idx)
            }
            info['image'] = image_info
            calib = self.get_calib(sample_idx)
            # Promote the 3x4 / 3x3 calibration matrices to homogeneous 4x4.
            P2 = np.concatenate(
                [calib.P2, np.array([[0., 0., 0., 1.]])], axis=0)
            R0_4x4 = np.zeros([4, 4], dtype=calib.R0.dtype)
            R0_4x4[3, 3] = 1.
            R0_4x4[:3, :3] = calib.R0
            V2C_4x4 = np.concatenate(
                [calib.V2C, np.array([[0., 0., 0., 1.]])], axis=0)
            calib_info = {
                'P2': P2,
                'R0_rect': R0_4x4,
                'Tr_velo_to_cam': V2C_4x4
            }
            info['calib'] = calib_info
            if has_label:
                obj_list = self.get_label(sample_idx)
                annotations = {}
                annotations['name'] = np.array(
                    [obj.cls_type for obj in obj_list])
                annotations['truncated'] = np.array(
                    [obj.truncation for obj in obj_list])
                annotations['occluded'] = np.array(
                    [obj.occlusion for obj in obj_list])
                annotations['alpha'] = np.array([obj.alpha for obj in obj_list])
                annotations['bbox'] = np.concatenate(
                    [obj.box2d.reshape(1, 4) for obj in obj_list], axis=0)
                annotations['dimensions'] = np.array(
                    [[obj.l, obj.h, obj.w]
                     for obj in obj_list])  # lhw(camera) format
                annotations['location'] = np.concatenate(
                    [obj.loc.reshape(1, 3) for obj in obj_list], axis=0)
                annotations['rotation_y'] = np.array(
                    [obj.ry for obj in obj_list])
                annotations['score'] = np.array([obj.score for obj in obj_list])
                annotations['difficulty'] = np.array(
                    [obj.level for obj in obj_list], np.int32)
                # Real objects come first; 'DontCare' entries get index -1.
                num_objects = len([
                    obj.cls_type for obj in obj_list
                    if obj.cls_type != 'DontCare'
                ])
                num_gt = len(annotations['name'])
                index = list(range(num_objects)) + [-1] * (num_gt - num_objects)
                annotations['index'] = np.array(index, dtype=np.int32)
                # Convert camera-frame boxes to LiDAR frame; shift z by h/2 so
                # the box center sits at mid-height.
                loc = annotations['location'][:num_objects]
                dims = annotations['dimensions'][:num_objects]
                rots = annotations['rotation_y'][:num_objects]
                loc_lidar = calib.rect_to_lidar(loc)
                l, h, w = dims[:, 0:1], dims[:, 1:2], dims[:, 2:3]
                loc_lidar[:, 2] += h[:, 0] / 2
                gt_boxes_lidar = np.concatenate(
                    [loc_lidar, l, w, h, -(np.pi / 2 + rots[..., np.newaxis])],
                    axis=1)
                annotations['gt_boxes_lidar'] = gt_boxes_lidar
                info['annos'] = annotations
            return info
        self.mode = mode
        sample_id_list = sample_id_list if sample_id_list is not None else self.sample_id_list
        with futures.ThreadPoolExecutor(num_workers) as executor:
            infos = executor.map(process_single_scene, sample_id_list)
        return list(infos)
    def update_data(self, data_dict):
        """
        Updates data dictionary with additional items
        Args:
            data_dict [dict]: Data dictionary returned by __getitem__
        Returns:
            data_dict [dict]: Updated data dictionary returned by __getitem__
        """
        # Image
        data_dict['images'] = self.get_image(data_dict["frame_id"])
        # Depth Map
        data_dict['depth_maps'] = self.get_depth_map(data_dict["frame_id"])
        # Calibration matricies
        # Convert calibration matrices to homogeneous format and combine
        calib = data_dict["calib"]
        V2C = np.vstack((calib.V2C, np.array([0, 0, 0, 1],
                                             dtype=np.float32)))  # (4, 4)
        R0 = np.hstack((calib.R0, np.zeros((3, 1), dtype=np.float32)))  # (3, 4)
        R0 = np.vstack((R0, np.array([0, 0, 0, 1], dtype=np.float32)))  # (4, 4)
        V2R = R0 @ V2C
        data_dict.update({
            "trans_lidar_to_cam": V2R,
            "trans_cam_to_img": calib.P2,
            "R0": calib.R0,
            "Tr_velo2cam": calib.V2C
        })
        return data_dict
    @property
    def metric(self, **kwargs):
        # Build the depth-metric evaluator from the loaded infos.
        # NOTE(review): ``**kwargs`` is unused — a property cannot receive
        # arguments. Also returns a (None, {}) tuple in the no-annotation
        # case but a single object otherwise.
        if 'annos' not in self.kitti_infos[0].keys():
            return None, {}
        eval_gt_annos = [
            copy.deepcopy(info['annos']) for info in self.kitti_infos
        ]
        return KittiDepthMetric(
            eval_gt_annos=eval_gt_annos, class_names=self.class_names)
    def mask_points_and_boxes_outside_range(self, data_dict):
        # During training, drop GT boxes with no corner inside the
        # point-cloud range (only when remove_outside_boxes is enabled).
        if data_dict.get(
                'gt_boxes', None
        ) is not None and self.remove_outside_boxes and self.training:
            mask = mask_boxes_outside_range_numpy(
                data_dict['gt_boxes'],
                self.point_cloud_range,
                min_num_corners=1)
            data_dict['gt_boxes'] = data_dict['gt_boxes'][mask]
        return data_dict
    def calculate_grid_size(self):
        # Voxel grid dimensions = range extent / voxel size, rounded.
        grid_size = (self.point_cloud_range[3:6] -
                     self.point_cloud_range[0:3]) / np.array(self.voxel_size)
        self.grid_size = np.round(grid_size).astype(np.int64)
    def drop_info_with_name(self, info, name):
        # Filter every per-object array in `info`, removing entries whose
        # 'name' equals `name` (e.g. 'DontCare').
        ret_info = {}
        keep_indices = [i for i, x in enumerate(info['name']) if x != name]
        for key in info.keys():
            ret_info[key] = info[key][keep_indices]
        return ret_info
    def data_augmentor(self, data_dict):
        # Random horizontal flip, then wrap box headings into [-pi, pi);
        # drops keys that must not reach the model input.
        data_dict = T.functional.random_depth_image_horizontal(data_dict)
        data_dict['gt_boxes'][:, 6] = data_dict['gt_boxes'][:, 6] - np.floor(
            data_dict['gt_boxes'][:, 6] / (2 * np.pi) + 0.5) * (2 * np.pi)
        if 'calib' in data_dict:
            data_dict.pop('calib')
        if 'road_plane' in data_dict:
            data_dict.pop('road_plane')
        if 'gt_boxes_mask' in data_dict:
            gt_boxes_mask = data_dict['gt_boxes_mask']
            data_dict['gt_boxes'] = data_dict['gt_boxes'][gt_boxes_mask]
            data_dict['gt_names'] = data_dict['gt_names'][gt_boxes_mask]
            data_dict.pop('gt_boxes_mask')
        return data_dict
    def prepare_data(self, data_dict):
        """
        Args:
            data_dict:
                gt_boxes: optional, (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
                gt_names: optional, (N), string
                ...
        Returns:
            data_dict:
                frame_id: string
                gt_boxes: optional, (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
                gt_names: optional, (N), string
                ...
        """
        if data_dict.get('gt_boxes', None) is not None:
            # Keep only boxes of configured classes and append the 1-based
            # class id as an extra box column.
            selected = [
                i for i, x in enumerate(data_dict['gt_names'])
                if x in self.class_names
            ]
            selected = np.array(selected, dtype=np.int64)
            data_dict['gt_names'] = data_dict['gt_names'][selected]
            gt_classes = np.array(
                [self.class_names.index(n) + 1 for n in data_dict['gt_names']],
                dtype=np.int32)
            gt_classes = gt_classes.reshape(-1, 1).astype(np.float32)
            data_dict['gt_boxes'] = data_dict['gt_boxes'][selected]
            gt_boxes = np.concatenate((data_dict['gt_boxes'], gt_classes),
                                      axis=1)
            data_dict['gt_boxes'] = gt_boxes
            if data_dict.get('gt_box2d', None) is not None:
                data_dict['gt_box2d'] = data_dict['gt_box2d'][selected]
                gt_boxes_2d = np.concatenate(
                    (data_dict['gt_box2d'], gt_classes), axis=1)
                data_dict['gt_box2d'] = gt_boxes_2d
        if data_dict is not None:
            data_dict = self.mask_points_and_boxes_outside_range(
                data_dict=data_dict)
        # If every box was filtered out in training, resample another frame
        # (recursive call into __getitem__).
        if self.training and len(data_dict['gt_boxes']) == 0:
            new_index = np.random.randint(self.__len__())
            return self.__getitem__(new_index)
        data_dict.pop('gt_names', None)
        data_dict.pop('calib', None)
        data_dict.pop('frame_id', None)
        return data_dict
    def __len__(self):
        if self._merge_all_iters_to_one_epoch:
            return len(self.kitti_infos) * self.total_epochs
        return len(self.kitti_infos)
    def __getitem__(self, index):
        # Assemble one training/eval dict: calibration, image, depth map and
        # (when available) GT boxes converted to the LiDAR frame.
        if self._merge_all_iters_to_one_epoch:
            index = index % len(self.kitti_infos)
        info = copy.deepcopy(self.kitti_infos[index])
        sample_idx = info['point_cloud']['lidar_idx']
        calib = self.get_calib(sample_idx)
        img_shape = info['image']['image_shape']
        input_dict = {
            'frame_id': sample_idx,
            'calib': calib,
            'calib_info': info['calib'],
            'image_shape': img_shape
        }
        if 'annos' in info:
            annos = info['annos']
            annos = self.drop_info_with_name(annos, name='DontCare')
            loc, dims, rots = annos['location'], annos['dimensions'], annos[
                'rotation_y']
            gt_names = annos['name']
            bbox = annos['bbox']
            gt_boxes_camera = np.concatenate([loc, dims, rots[..., np.newaxis]],
                                             axis=1).astype(np.float32)
            gt_boxes_lidar = boxes3d_kitti_camera_to_lidar(
                gt_boxes_camera, calib)
            input_dict.update({
                'gt_names': gt_names,
                'gt_boxes': gt_boxes_lidar,
                'gt_boxes2d': bbox
            })
            road_plane = self.get_road_plane(sample_idx)
            if road_plane is not None:
                input_dict['road_plane'] = road_plane
        data_dict = self.update_data(data_dict=input_dict)
        data_dict = self.prepare_data(data_dict=data_dict)
        data_dict['image_shape'] = img_shape
        return data_dict
    @staticmethod
    def collate_fn(batch_list, _unused=False):
        # Batch samples: pad variable-length boxes with zeros, pad images and
        # depth maps to the largest (H, W) in the batch, stack everything else.
        data_dict = defaultdict(list)
        for cur_sample in batch_list:
            for key, val in cur_sample.items():
                data_dict[key].append(val)
        batch_size = len(batch_list)
        ret = {}
        for key, val in data_dict.items():
            try:
                if key in ['gt_boxes']:
                    # Zero-pad each sample's boxes to the batch maximum count.
                    max_gt = max([len(x) for x in val])
                    batch_gt_boxes3d = np.zeros(
                        (batch_size, max_gt, val[0].shape[-1]),
                        dtype=np.float32)
                    for k in range(batch_size):
                        batch_gt_boxes3d[k, :val[k].__len__(), :] = val[k]
                    ret[key] = batch_gt_boxes3d
                elif key in ['gt_boxes2d']:
                    max_boxes = 0
                    max_boxes = max([len(x) for x in val])
                    batch_boxes2d = np.zeros(
                        (batch_size, max_boxes, val[0].shape[-1]),
                        dtype=np.float32)
                    for k in range(batch_size):
                        if val[k].size > 0:
                            batch_boxes2d[k, :val[k].__len__(), :] = val[k]
                    ret[key] = batch_boxes2d
                elif key in ["images", "depth_maps"]:
                    # Get largest image size (H, W)
                    max_h = 0
                    max_w = 0
                    for image in val:
                        max_h = max(max_h, image.shape[0])
                        max_w = max(max_w, image.shape[1])
                    # Change size of images
                    images = []
                    for image in val:
                        pad_h = get_pad_params(
                            desired_size=max_h, cur_size=image.shape[0])
                        pad_w = get_pad_params(
                            desired_size=max_w, cur_size=image.shape[1])
                        pad_width = (pad_h, pad_w)
                        # Pad with nan, to be replaced later in the pipeline.
                        pad_value = 0
                        if key == "images":
                            pad_width = (pad_h, pad_w, (0, 0))
                        elif key == "depth_maps":
                            pad_width = (pad_h, pad_w)
                        image_pad = np.pad(
                            image,
                            pad_width=pad_width,
                            mode='constant',
                            constant_values=pad_value)
                        images.append(image_pad)
                    ret[key] = np.stack(images, axis=0)
                    if key == "images":
                        # HWC -> CHW for the network.
                        ret[key] = ret[key].transpose([0, 3, 1, 2])
                elif key in "calib_info":
                    # NOTE(review): `key in "calib_info"` is a substring test,
                    # not equality — it also matches e.g. "calib" or "info".
                    continue
                else:
                    ret[key] = np.stack(val, axis=0)
            except:
                # NOTE(review): bare except hides the original error type.
                raise TypeError("Error in collate_batch: key={}.".format(key))
        ret['batch_size'] = batch_size
        return ret
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets/kitti/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .kitti_depth_det import KittiDepthDataset
from .kitti_mono_det import KittiMonoDataset
from .kitti_pointcloud_det import KittiPCDataset
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets/kitti/kitti_metric.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List
import numpy as np
from paddle3d.datasets.kitti.kitti_utils import (
Calibration, box_lidar_to_camera, filter_fake_result)
from paddle3d.datasets.metrics import MetricABC
from paddle3d.geometries.bbox import (
BBoxes2D, BBoxes3D, CoordMode, boxes3d_kitti_camera_to_imageboxes,
boxes3d_lidar_to_kitti_camera, project_to_image)
from paddle3d.sample import Sample
from paddle3d.thirdparty import kitti_eval
from paddle3d.utils.logger import logger
class KittiMetric(MetricABC):
    """KITTI detection metric (AP for bbox / BEV / 3D, R40 and R11 recall).

    Ground truths are provided as raw KITTI label rows; predictions are
    accumulated as Sample objects and both are converted to the dict format
    expected by ``kitti_eval``.
    """
    def __init__(self, groundtruths: List[np.ndarray], classmap: Dict[int, str],
                 indexes: List):
        # Raw GT label rows per frame, label-id -> class-name map, and the
        # frame ids (used to order predictions to match ground truths).
        self.gt_annos = groundtruths
        self.predictions = []
        self.classmap = classmap
        self.indexes = indexes
    def _parse_gt_to_eval_format(self,
                                 groundtruths: List[np.ndarray]) -> List[dict]:
        # Convert raw KITTI label rows to kitti_eval's per-frame dicts;
        # empty frames get zero-length placeholder arrays.
        res = []
        for rows in groundtruths:
            if rows.size == 0:
                res.append({
                    'name': np.zeros([0]),
                    'truncated': np.zeros([0]),
                    'occluded': np.zeros([0]),
                    'alpha': np.zeros([0]),
                    'bbox': np.zeros([0, 4]),
                    'dimensions': np.zeros([0, 3]),
                    'location': np.zeros([0, 3]),
                    'rotation_y': np.zeros([0]),
                    'score': np.zeros([0])
                })
            else:
                res.append({
                    'name': rows[:, 0],
                    'truncated': rows[:, 1].astype(np.float64),
                    'occluded': rows[:, 2].astype(np.int64),
                    'alpha': rows[:, 3].astype(np.float64),
                    'bbox': rows[:, 4:8].astype(np.float64),
                    # KITTI labels store h, w, l; reorder to l, h, w.
                    'dimensions': rows[:, [10, 8, 9]].astype(np.float64),
                    'location': rows[:, 11:14].astype(np.float64),
                    'rotation_y': rows[:, 14].astype(np.float64)
                })
        return res
    def get_camera_box2d(self, bboxes_3d: BBoxes3D, proj_mat: np.ndarray):
        # Derive 2D boxes by projecting the 3D box corners into the image
        # and taking the axis-aligned min/max.
        box_corners = bboxes_3d.corners_3d
        box_corners_in_image = project_to_image(box_corners, proj_mat)
        minxy = np.min(box_corners_in_image, axis=1)
        maxxy = np.max(box_corners_in_image, axis=1)
        box_2d_preds = BBoxes2D(np.concatenate([minxy, maxxy], axis=1))
        return box_2d_preds
    def _parse_predictions_to_eval_format(
            self, predictions: List[Sample]) -> List[dict]:
        # Convert accumulated Samples to kitti_eval dicts in KITTI camera
        # coordinates, keyed and finally ordered by frame id.
        res = {}
        for pred in predictions:
            filter_fake_result(pred)
            id = pred.meta.id
            if pred.bboxes_3d is None:
                det = {
                    'truncated': np.zeros([0]),
                    'occluded': np.zeros([0]),
                    'alpha': np.zeros([0]),
                    'name': np.zeros([0]),
                    'bbox': np.zeros([0, 4]),
                    'dimensions': np.zeros([0, 3]),
                    'location': np.zeros([0, 3]),
                    'rotation_y': np.zeros([0]),
                    'score': np.zeros([0]),
                }
            else:
                num_boxes = pred.bboxes_3d.shape[0]
                names = np.array(
                    [self.classmap[label] for label in pred.labels])
                calibs = pred.calibs
                alpha = pred.get('alpha', np.zeros([num_boxes]))
                # Convert LiDAR-frame boxes to the KITTI camera frame.
                if pred.bboxes_3d.coordmode != CoordMode.KittiCamera:
                    bboxes_3d = box_lidar_to_camera(pred.bboxes_3d, calibs)
                else:
                    bboxes_3d = pred.bboxes_3d
                # Normalize the box origin to KITTI's bottom-center convention.
                if bboxes_3d.origin != [.5, 1., .5]:
                    bboxes_3d[:, :3] += bboxes_3d[:, 3:6] * (
                        np.array([.5, 1., .5]) - np.array(bboxes_3d.origin))
                    bboxes_3d.origin = [.5, 1., .5]
                if pred.bboxes_2d is None:
                    # calibs[2] is the P2 projection matrix.
                    bboxes_2d = self.get_camera_box2d(bboxes_3d, calibs[2])
                else:
                    bboxes_2d = pred.bboxes_2d
                loc = bboxes_3d[:, :3]
                dim = bboxes_3d[:, 3:6]
                det = {
                    # fake value
                    'truncated': np.zeros([num_boxes]),
                    'occluded': np.zeros([num_boxes]),
                    # predict value
                    'alpha': alpha,
                    'name': names,
                    'bbox': bboxes_2d,
                    'dimensions': dim,
                    # TODO: coord trans
                    'location': loc,
                    'rotation_y': bboxes_3d[:, 6],
                    'score': pred.confidences,
                }
            res[id] = det
        return [res[idx] for idx in self.indexes]
    def update(self, predictions: List[Sample], **kwargs):
        """Accumulate one batch of prediction Samples."""
        self.predictions += predictions
    def compute(self, verbose=False, **kwargs) -> dict:
        """Run kitti_eval on all accumulated predictions.

        NOTE(review): despite the ``-> dict`` annotation this returns a
        tuple ``(metric_r40_dict, metric_r11_dict)``.
        """
        gt_annos = self._parse_gt_to_eval_format(self.gt_annos)
        dt_annos = self._parse_predictions_to_eval_format(self.predictions)
        if len(dt_annos) != len(gt_annos):
            raise RuntimeError(
                'The number of predictions({}) is not equal to the number of GroundTruths({})'
                .format(len(dt_annos), len(gt_annos)))
        metric_r40_dict = kitti_eval(
            gt_annos,
            dt_annos,
            current_classes=list(self.classmap.values()),
            metric_types=["bbox", "bev", "3d"],
            recall_type='R40')
        metric_r11_dict = kitti_eval(
            gt_annos,
            dt_annos,
            current_classes=list(self.classmap.values()),
            metric_types=["bbox", "bev", "3d"],
            recall_type='R11')
        if verbose:
            # Log per-class AP for every overlap threshold and metric type.
            for cls, cls_metrics in metric_r40_dict.items():
                logger.info("{}:".format(cls))
                for overlap_thresh, metrics in cls_metrics.items():
                    for metric_type, thresh in zip(["bbox", "bev", "3d"],
                                                   overlap_thresh):
                        if metric_type in metrics:
                            logger.info(
                                "{} AP_R40@{:.0%}: {:.2f} {:.2f} {:.2f}".format(
                                    metric_type.upper().ljust(4), thresh,
                                    *metrics[metric_type]))
            for cls, cls_metrics in metric_r11_dict.items():
                logger.info("{}:".format(cls))
                for overlap_thresh, metrics in cls_metrics.items():
                    for metric_type, thresh in zip(["bbox", "bev", "3d"],
                                                   overlap_thresh):
                        if metric_type in metrics:
                            logger.info(
                                "{} AP_R11@{:.0%}: {:.2f} {:.2f} {:.2f}".format(
                                    metric_type.upper().ljust(4), thresh,
                                    *metrics[metric_type]))
        return metric_r40_dict, metric_r11_dict
class KittiDepthMetric(MetricABC):
    """KITTI metric for camera-based 3D detection.

    Accumulates network predictions converted to KITTI-format annotation
    dicts and evaluates them against pre-loaded ground-truth annotations
    via ``kitti_eval``.
    """

    def __init__(self, eval_gt_annos, class_names):
        # KITTI-format ground-truth annotations, one entry per frame.
        self.eval_gt_annos = eval_gt_annos
        # Accumulated KITTI-format prediction dicts; extended by update().
        self.predictions = []
        # Ordered class names; predicted labels index this 1-based
        # (see generate_prediction_dicts()).
        self.class_names = class_names

    def generate_prediction_dicts(self,
                                  batch_dict,
                                  pred_dicts,
                                  output_path=None):
        """Convert raw network outputs into KITTI-format annotation dicts.

        Args:
            batch_dict: batch metadata; must carry per-sample calibration
                tensors ('trans_cam_to_img', 'R0', 'Tr_velo2cam') and
                'image_shape'.
            pred_dicts: list of per-frame prediction dicts with keys:
                pred_boxes: (N, 7), Tensor — lidar-frame boxes
                pred_scores: (N), Tensor
                pred_labels: (N), Tensor
            output_path: unused here; kept for interface compatibility.

        Returns:
            list of KITTI-format annotation dicts, one per frame.
        """

        def get_template_prediction(num_samples):
            # Zero-filled KITTI annotation template for num_samples boxes.
            ret_dict = {
                'name': np.zeros(num_samples),
                'truncated': np.zeros(num_samples),
                'occluded': np.zeros(num_samples),
                'alpha': np.zeros(num_samples),
                'bbox': np.zeros([num_samples, 4]),
                'dimensions': np.zeros([num_samples, 3]),
                'location': np.zeros([num_samples, 3]),
                'rotation_y': np.zeros(num_samples),
                'score': np.zeros(num_samples),
                'boxes_lidar': np.zeros([num_samples, 7])
            }
            return ret_dict

        def generate_single_sample_dict(batch_index, box_dict):
            # Convert one frame's lidar-frame predictions to KITTI format.
            pred_scores = box_dict['pred_scores'].cpu().numpy()
            pred_boxes = box_dict['pred_boxes'].cpu().numpy()
            pred_labels = box_dict['pred_labels'].cast("int64").cpu().numpy()
            # A negative leading label marks a frame with no detections.
            if pred_labels[0] < 0:
                pred_dict = get_template_prediction(0)
                return pred_dict
            pred_dict = get_template_prediction(pred_scores.shape[0])
            # calib = batch_dict['calib'][batch_index]
            # Rebuild the calibration object from the per-sample tensors.
            calib = Calibration({
                "P2":
                batch_dict["trans_cam_to_img"][batch_index].cpu().numpy(),
                "R0":
                batch_dict["R0"][batch_index].cpu().numpy(),
                "Tr_velo2cam":
                batch_dict["Tr_velo2cam"][batch_index].cpu().numpy()
            })
            image_shape = batch_dict['image_shape'][batch_index].cpu().numpy()
            # Lidar-frame boxes -> camera-frame boxes -> 2D image boxes.
            pred_boxes_camera = boxes3d_lidar_to_kitti_camera(pred_boxes, calib)
            pred_boxes_img = boxes3d_kitti_camera_to_imageboxes(
                pred_boxes_camera, calib, image_shape=image_shape)
            # Labels are 1-based, hence the -1 when indexing class names.
            pred_dict['name'] = np.array(self.class_names)[pred_labels - 1]
            # Observation angle alpha from box position and camera-frame yaw.
            pred_dict['alpha'] = -np.arctan2(
                -pred_boxes[:, 1], pred_boxes[:, 0]) + pred_boxes_camera[:, 6]
            pred_dict['bbox'] = pred_boxes_img
            pred_dict['dimensions'] = pred_boxes_camera[:, 3:6]
            pred_dict['location'] = pred_boxes_camera[:, 0:3]
            pred_dict['rotation_y'] = pred_boxes_camera[:, 6]
            pred_dict['score'] = pred_scores
            pred_dict['boxes_lidar'] = pred_boxes
            return pred_dict

        annos = []
        for index, box_dict in enumerate(pred_dicts):
            single_pred_dict = generate_single_sample_dict(index, box_dict)
            annos.append(single_pred_dict)
        return annos

    def update(self, predictions, ground_truths, **kwargs):
        """Convert and accumulate one batch of predictions.

        Note: ``ground_truths`` here is the batch metadata dict used for
        calibration, not the evaluation ground truth (which is fixed at
        construction time).
        """
        self.predictions += self.generate_prediction_dicts(
            ground_truths, predictions)

    def compute(self, verbose=False, **kwargs) -> dict:
        """Run KITTI evaluation over all accumulated predictions.

        Args:
            verbose: when True, log per-class AP for each metric type.

        Returns:
            dict of per-class KITTI metrics as produced by ``kitti_eval``.

        Raises:
            RuntimeError: when the number of accumulated predictions does
                not match the number of ground-truth frames.
        """
        eval_gt_annos = self.eval_gt_annos
        eval_det_annos = self.predictions
        if len(eval_det_annos) != len(eval_gt_annos):
            raise RuntimeError(
                'The number of predictions({}) is not equal to the number of GroundTruths({})'
                .format(len(eval_det_annos), len(eval_gt_annos)))
        metric_dict = kitti_eval(eval_gt_annos, eval_det_annos,
                                 self.class_names)
        if verbose:
            for cls, cls_metrics in metric_dict.items():
                logger.info("{}:".format(cls))
                for overlap_thresh, metrics in cls_metrics.items():
                    # Repeat the threshold tuple so the zip below does not
                    # run out of thresholds for the 4th metric type ("aos").
                    # NOTE(review): assumes overlap_thresh is a sequence
                    # shorter than 4 — confirm against kitti_eval's output.
                    overlap_thresh = overlap_thresh + overlap_thresh
                    for metric_type, thresh in zip(["bbox", "bev", "3d", "aos"],
                                                   overlap_thresh):
                        if metric_type in metrics:
                            logger.info(
                                "{} AP@{:.0%}: {:.2f} {:.2f} {:.2f}".format(
                                    metric_type.upper().ljust(4), thresh,
                                    *metrics[metric_type]))
        return metric_dict
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets/kitti/kitti_utils.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from typing import List, Tuple
import numpy as np
import paddle
from paddle3d.geometries import BBoxes2D, BBoxes3D, CoordMode
from paddle3d.sample import Sample
# kitti record fields
# type, truncated, occluded, alpha, xmin, ymin, xmax, ymax, dh, dw, dl, lx, ly, lz, ry
def camera_record_to_object(
        kitti_records: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Split camera-frame KITTI label records into box containers.

    Args:
        kitti_records: (N, 15+) array in KITTI label order:
            type, truncated, occluded, alpha, xmin, ymin, xmax, ymax,
            dh, dw, dl, lx, ly, lz, ry.

    Returns:
        Tuple of (BBoxes2D, BBoxes3D, labels): 3D boxes are in the KITTI
        camera coordinate system; labels is the class-name column, or an
        empty list when there are no records.
    """
    if kitti_records.shape[0] == 0:
        empty_2d = BBoxes2D(np.zeros([0, 4]))
        empty_3d = BBoxes3D(
            np.zeros([0, 7]),
            origin=[.5, 1, .5],
            coordmode=CoordMode.KittiCamera,
            rot_axis=1)
        return empty_2d, empty_3d, []

    locations = kitti_records[:, 11:14]
    dimensions = kitti_records[:, 8:11]
    rotations = kitti_records[:, 14:15]
    boxes_3d = BBoxes3D(
        np.concatenate([locations, dimensions, rotations], axis=1),
        origin=[.5, 1, .5],
        coordmode=CoordMode.KittiCamera,
        rot_axis=1)
    boxes_2d = BBoxes2D(kitti_records[:, 4:8])
    return boxes_2d, boxes_3d, kitti_records[:, 0]
# lidar record fields
# type, truncated, occluded, alpha, xmin, ymin, xmax, ymax, dw, dl, dh, lx, ly, lz, rz
def lidar_record_to_object(
        kitti_records: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Split lidar-frame KITTI label records into box containers.

    Args:
        kitti_records: (N, 15+) array in KITTI label order with lidar-frame
            locations/dimensions/yaw in columns 8:15.

    Returns:
        Tuple of (BBoxes2D, BBoxes3D, cls_names): 3D boxes are in the KITTI
        lidar coordinate system; cls_names is the class-name column, or an
        empty list when there are no records.
    """
    if kitti_records.shape[0] == 0:
        empty_2d = BBoxes2D(np.zeros([0, 4]))
        empty_3d = BBoxes3D(
            np.zeros([0, 7]),
            origin=[0.5, 0.5, 0.],
            coordmode=CoordMode.KittiLidar,
            rot_axis=2)
        return empty_2d, empty_3d, []

    locations = kitti_records[:, 11:14]
    dimensions = kitti_records[:, 8:11]
    rotations = kitti_records[:, 14:15]
    boxes_3d = BBoxes3D(
        np.concatenate([locations, dimensions, rotations], axis=1),
        origin=[0.5, 0.5, 0.],
        coordmode=CoordMode.KittiLidar,
        rot_axis=2)
    boxes_2d = BBoxes2D(kitti_records[:, 4:8])
    return boxes_2d, boxes_3d, kitti_records[:, 0]
def project_camera_to_velodyne(kitti_records: np.ndarray,
                               calibration_info: Tuple[np.ndarray]):
    """Convert camera-frame KITTI records to the velodyne frame, in place.

    Args:
        kitti_records: (N, 15+) KITTI label records; returned unchanged
            when empty.
        calibration_info: calibration matrices; index 4 holds R0_rect (3x3)
            and index 5 holds Tr_velo_to_cam (3x4).

    Returns:
        The same array with locations and dimension order rewritten.
    """
    if kitti_records.shape[0] == 0:
        return kitti_records
    # Locations: rectified camera frame -> velodyne frame.
    kitti_records[:, 11:14] = coord_camera_to_velodyne(kitti_records[:, 11:14],
                                                       calibration_info)
    # Dimensions (rotation convention follows from this reordering):
    # In kitti records, dimensions order is hwl format, but standard camera order is lhw format.
    # We exchange lhw format to wlh format, which equal to yaw = yaw + np.pi / 2
    kitti_records[:, 8:11] = kitti_records[:, [9, 10, 8]]
    return kitti_records
def box_lidar_to_camera(bboxes_3d: BBoxes3D,
                        calibration_info: Tuple[np.ndarray]):
    """Convert lidar-frame 3D boxes to camera-frame 3D boxes.

    Centers are mapped with the calibration matrices, the size channels are
    reordered from (w, l, h) to (l, h, w), and the box origin is remapped to
    the camera convention.
    """
    origin_x, origin_y, origin_z = bboxes_3d.origin
    centers_cam = coord_velodyne_to_camera(bboxes_3d[..., 0:3],
                                           calibration_info)
    widths = bboxes_3d[..., 3:4]
    lengths = bboxes_3d[..., 4:5]
    heights = bboxes_3d[..., 5:6]
    yaws = bboxes_3d[..., 6:7]
    return BBoxes3D(
        data=np.concatenate([centers_cam, lengths, heights, widths, yaws],
                            axis=-1),
        coordmode=CoordMode.KittiCamera,
        velocities=None,
        origin=[1 - origin_y, 1 - origin_z, origin_x],
        rot_axis=1)
def coord_camera_to_velodyne(points: np.ndarray,
                             calibration_info: Tuple[np.ndarray]):
    """Map 3D points from the rectified camera frame to the velodyne frame.

    Args:
        points: (N, 3) points in the rectified camera frame.
        calibration_info: calibration matrices; index 4 holds R0_rect (3x3)
            and index 5 holds Tr_velo_to_cam (3x4).

    Returns:
        (N, 3) points in the velodyne frame.
    """
    rect = np.eye(4)
    rect[:3, :3] = calibration_info[4]
    velo_to_cam = np.eye(4)
    velo_to_cam[:3, :4] = calibration_info[5]
    # Homogeneous coordinates, then apply the inverse camera transform.
    homogeneous = np.concatenate(
        [points, np.ones([points.shape[0], 1])], axis=1)
    transformed = homogeneous @ np.linalg.inv(rect @ velo_to_cam).T
    return transformed[:, :3]
def coord_velodyne_to_camera(points: np.ndarray, calibration_info: np.ndarray):
    """Map 3D points from the velodyne frame to the rectified camera frame.

    Args:
        points: (N, 3) points in the velodyne frame.
        calibration_info: calibration matrices; index 4 holds R0_rect (3x3)
            and index 5 holds Tr_velo_to_cam (3x4).

    Returns:
        (N, 3) points in the rectified camera frame.
    """
    rect = np.eye(4)
    rect[:3, :3] = calibration_info[4]
    velo_to_cam = np.eye(4)
    velo_to_cam[:3, :4] = calibration_info[5]
    homogeneous = np.concatenate(
        [points, np.ones([points.shape[0], 1])], axis=1)
    transformed = homogeneous @ (rect @ velo_to_cam).T
    return transformed[:, :3]
def project_velodyne_to_camera(pointcloud: np.ndarray,
                               calibration_info: np.ndarray, image_shape):
    """Keep only lidar points whose camera projection falls inside the image.

    Args:
        pointcloud: (N, 4) array of x, y, z, intensity in the velodyne frame.
        calibration_info: calibration matrices; index 2 holds P2 (3x4),
            index 4 holds R0_rect (3x3), index 5 holds Tr_velo_to_cam (3x4).
        image_shape: (height, width) of the camera image.

    Returns:
        (M, 4) filtered points (x, y, z, intensity), M <= N.
    """
    rect = np.eye(4)
    rect[:3, :3] = calibration_info[4]
    velo_to_cam = np.eye(4)
    velo_to_cam[:3, :4] = calibration_info[5]
    proj = np.eye(4)
    proj[:3, :4] = calibration_info[2]

    intensity = pointcloud[:, 3:4]
    xyz = pointcloud[:, :3]
    homogeneous = np.concatenate(
        [xyz, np.ones([xyz.shape[0], 1])], axis=1).T
    img_pts = proj @ rect @ velo_to_cam @ homogeneous

    height, width = image_shape
    # keep only the points in front of the camera and inside the image
    valid = img_pts[2, :] >= 0
    uv = img_pts[:2, :] / img_pts[2, :]
    valid &= uv[0, :] > 0
    valid &= uv[0, :] < width
    valid &= uv[1, :] > 0
    valid &= uv[1, :] < height

    kept_xyz = homogeneous.T[valid, :3]
    return np.concatenate([kept_xyz, intensity[valid, :]], axis=1)
def assess_object_difficulties(kitti_records: np.ndarray,
                               min_height_thresh: List = [40, 25, 25],
                               max_occlusion_thresh: List = [0, 1, 2],
                               max_truncation_thresh: List = [0.15, 0.3, 0.5]):
    """Assign a KITTI difficulty (0=easy, 1=moderate, 2=hard, -1=unknown).

    A record passes a level when its 2D box height exceeds the level's
    minimum and its occlusion/truncation do not exceed the level's maxima.
    Each object gets the easiest level it passes; objects passing none
    get -1.

    Args:
        kitti_records: (N, 15+) KITTI label records (columns 1, 2, 5, 7 are
            truncation, occlusion, ymin, ymax).
        min_height_thresh: per-level minimum 2D box height in pixels.
        max_occlusion_thresh: per-level maximum occlusion state.
        max_truncation_thresh: per-level maximum truncation fraction.

    Returns:
        (N,) int32 array of difficulty levels.
    """
    num_objects = kitti_records.shape[0]
    if num_objects == 0:
        return np.full((num_objects, ), -1, dtype=np.int32)

    heights = kitti_records[:, 7] - kitti_records[:, 5]  # bboxes_2d heights
    occlusions = kitti_records[:, 2]
    truncations = kitti_records[:, 1]

    level_masks = []
    for level in range(3):
        passes = ((heights > min_height_thresh[level])
                  & (occlusions <= max_occlusion_thresh[level])
                  & (truncations <= max_truncation_thresh[level]))
        level_masks.append(passes)
    easy_mask, moderate_mask, hard_mask = level_masks

    difficulties = np.full((num_objects, ), -1, dtype=np.int32)
    # Write hardest first so easier levels overwrite where they also pass.
    difficulties[np.logical_xor(hard_mask, moderate_mask)] = 2
    difficulties[np.logical_xor(easy_mask, moderate_mask)] = 1
    difficulties[easy_mask] = 0
    return difficulties
def projection_matrix_decomposition(proj):
    """
    Calculate the camera calibration matrix, the invert of 3x3 rotation matrix,
    and the 3x1 translation vector that projects 3D points into the camera.
    Where:
        proj = C @ [R|T]

    Please refer to:
        <https://github.com/traveller59/second.pytorch/blob/master/second/core/box_np_ops.py#L507>
    """
    cam_rot = proj[0:3, 0:3]  # C @ R
    cam_trans = proj[0:3, 3]  # C @ T
    # QR-decompose (C @ R)^-1 = R^-1 @ C^-1 into an orthogonal factor
    # (R^-1) and an upper-triangular factor (C^-1).
    rot_inv, cam_inv = np.linalg.qr(np.linalg.inv(cam_rot))
    calibration = np.linalg.inv(cam_inv)
    translation = cam_inv @ cam_trans
    return calibration, rot_inv, translation
def filter_fake_result(detection: Sample):
    """Drop placeholder predictions (confidence < 0) from a sample, in place.

    When no boxes survive, the sample's bboxes_3d / labels / confidences
    are set to None; otherwise they are rebuilt with only the kept entries,
    preserving the original box container's origin/rot_axis/coordmode.
    """
    if detection.get('bboxes_3d', None) is None:
        return

    boxes = detection.bboxes_3d
    confidences = detection.confidences
    labels = detection.labels
    # Indices of real detections; negative scores mark padded entries.
    keep = [i for i in range(boxes.shape[0]) if confidences[i] >= 0]

    if not keep:
        detection.bboxes_3d = None
        detection.labels = None
        detection.confidences = None
        return

    detection.bboxes_3d = BBoxes3D(
        np.asarray([boxes[i] for i in keep]),
        origin=boxes.origin,
        rot_axis=boxes.rot_axis,
        coordmode=boxes.coordmode)
    detection.labels = np.asarray([labels[i] for i in keep])
    detection.confidences = np.asarray([confidences[i] for i in keep])
def get_objects_from_label(label_file):
    """Parse a KITTI label file into a list of Object3d, one per line."""
    with open(label_file, 'r') as f:
        return [Object3d(line) for line in f.readlines()]
def cls_type_to_id(cls_type):
    """Map a KITTI class name to its 1-based id; unknown names yield -1."""
    known_ids = {'Car': 1, 'Pedestrian': 2, 'Cyclist': 3, 'Van': 4}
    return known_ids.get(cls_type, -1)
class Object3d(object):
    """
    One parsed KITTI label line (class, 2D box, 3D box, score).

    This code is based on https://github.com/TRAILab/CaDDN/blob/5a96b37f16b3c29dd2509507b1cdfdff5d53c558/pcdet/utils/object3d_kitti.py#L18
    """

    def __init__(self, line):
        fields = line.strip().split(' ')
        self.src = line
        self.cls_type = fields[0]
        self.cls_id = cls_type_to_id(self.cls_type)
        self.truncation = float(fields[1])
        # 0: fully visible, 1: partly occluded, 2: largely occluded, 3: unknown
        self.occlusion = float(fields[2])
        self.alpha = float(fields[3])
        # 2D image box as (x1, y1, x2, y2).
        self.box2d = np.array([float(v) for v in fields[4:8]],
                              dtype=np.float32)
        # 3D box size: height, width, length (camera frame).
        self.h = float(fields[8])
        self.w = float(fields[9])
        self.l = float(fields[10])
        # 3D box bottom-center location in the camera frame.
        self.loc = np.array([float(v) for v in fields[11:14]],
                            dtype=np.float32)
        self.dis_to_cam = np.linalg.norm(self.loc)
        self.ry = float(fields[14])
        # Optional 16th field holds a detection score.
        self.score = float(fields[15]) if len(fields) == 16 else -1.0
        self.level_str = None
        self.level = self.get_kitti_obj_level()

    def get_kitti_obj_level(self):
        """Return the KITTI difficulty level (0/1/2, -1 for unknown).

        Also sets self.level_str as a side effect.
        """
        height = float(self.box2d[3]) - float(self.box2d[1]) + 1
        # (name, level, min 2D height, max truncation, max occlusion)
        thresholds = [
            ('Easy', 0, 40, 0.15, 0),
            ('Moderate', 1, 25, 0.3, 1),
            ('Hard', 2, 25, 0.5, 2),
        ]
        for name, level, min_height, max_trunc, max_occ in thresholds:
            if (height >= min_height and self.truncation <= max_trunc
                    and self.occlusion <= max_occ):
                self.level_str = name
                return level
        self.level_str = 'UnKnown'
        return -1

    def generate_corners3d(self):
        """
        generate corners3d representation for this object
        :return corners_3d: (8, 3) corners of box3d in camera coord
        """
        half_l, half_w = self.l / 2, self.w / 2
        xs = [half_l, half_l, -half_l, -half_l,
              half_l, half_l, -half_l, -half_l]
        ys = [0, 0, 0, 0, -self.h, -self.h, -self.h, -self.h]
        zs = [half_w, -half_w, -half_w, half_w,
              half_w, -half_w, -half_w, half_w]
        cos_ry, sin_ry = np.cos(self.ry), np.sin(self.ry)
        # Rotation about the camera-frame y axis by ry.
        rotation = np.array([[cos_ry, 0, sin_ry],
                             [0, 1, 0],
                             [-sin_ry, 0, cos_ry]])
        corners = np.vstack([xs, ys, zs])  # (3, 8)
        return np.dot(rotation, corners).T + self.loc

    def to_str(self):
        """Human-readable summary of the parsed label."""
        return '%s %.3f %.3f %.3f box2d: %s hwl: [%.3f %.3f %.3f] pos: %s ry: %.3f' \
            % (self.cls_type, self.truncation, self.occlusion, self.alpha, self.box2d, self.h, self.w, self.l,
               self.loc, self.ry)

    def to_kitti_format(self):
        """Serialize back to a 15-field KITTI label line (no score)."""
        return '%s %.2f %d %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f' \
            % (self.cls_type, self.truncation, int(self.occlusion), self.alpha, self.box2d[0], self.box2d[1],
               self.box2d[2], self.box2d[3], self.h, self.w, self.l, self.loc[0], self.loc[1], self.loc[2],
               self.ry)
class Calibration(object):
    """
    KITTI camera/lidar calibration helper (projection and frame changes).

    This code is based on https://github.com/TRAILab/CaDDN/blob/5a96b37f16b3c29dd2509507b1cdfdff5d53c558/pcdet/utils/calibration_kitti.py#L23
    """

    def __init__(self, calib_dict):
        """Store P2 / R0 / Tr_velo2cam and derive intrinsic scalars."""
        self.P2 = calib_dict['P2']  # 3 x 4
        self.R0 = calib_dict['R0']  # 3 x 3
        self.V2C = calib_dict['Tr_velo2cam']  # 3 x 4

        # Camera intrinsics and extrinsics
        self.cu = self.P2[0, 2]
        self.cv = self.P2[1, 2]
        self.fu = self.P2[0, 0]
        self.fv = self.P2[1, 1]
        self.tx = self.P2[0, 3] / (-self.fu)
        self.ty = self.P2[1, 3] / (-self.fv)

    def cart_to_hom(self, pts):
        """
        :param pts: (N, 3 or 2)
        :return pts_hom: (N, 4 or 3)
        """
        ones = np.ones((pts.shape[0], 1), dtype=np.float32)
        return np.hstack((pts, ones))

    def rect_to_lidar(self, pts_rect):
        """
        :param pts_rect: (N, 3) points in the rectified camera frame
        :return pts_lidar: (N, 3) points in the lidar frame
        """
        pts_hom = self.cart_to_hom(pts_rect)  # (N, 4)
        # Extend R0 and V2C to 4x4 homogeneous matrices.
        rect_4x4 = np.hstack((self.R0, np.zeros((3, 1), dtype=np.float32)))
        rect_4x4 = np.vstack((rect_4x4, np.zeros((1, 4), dtype=np.float32)))
        rect_4x4[3, 3] = 1
        v2c_4x4 = np.vstack((self.V2C, np.zeros((1, 4), dtype=np.float32)))
        v2c_4x4[3, 3] = 1
        pts_lidar = np.dot(pts_hom,
                           np.linalg.inv(np.dot(rect_4x4, v2c_4x4).T))
        return pts_lidar[:, 0:3]

    def lidar_to_rect(self, pts_lidar):
        """
        :param pts_lidar: (N, 3)
        :return pts_rect: (N, 3)
        """
        pts_hom = self.cart_to_hom(pts_lidar)
        return np.dot(pts_hom, np.dot(self.V2C.T, self.R0.T))

    def rect_to_img(self, pts_rect):
        """
        :param pts_rect: (N, 3)
        :return pts_img: (N, 2), pts_rect_depth: (N,)
        """
        pts_hom = self.cart_to_hom(pts_rect)
        projected = np.dot(pts_hom, self.P2.T)
        # Perspective division by the rectified-frame depth.
        pts_img = (projected[:, 0:2].T / pts_hom[:, 2]).T  # (N, 2)
        # depth in rect camera coord
        pts_rect_depth = projected[:, 2] - self.P2.T[3, 2]
        return pts_img, pts_rect_depth

    def lidar_to_img(self, pts_lidar):
        """
        :param pts_lidar: (N, 3)
        :return pts_img: (N, 2), pts_depth: (N,)
        """
        pts_rect = self.lidar_to_rect(pts_lidar)
        return self.rect_to_img(pts_rect)

    def img_to_rect(self, u, v, depth_rect):
        """
        :param u: (N)
        :param v: (N)
        :param depth_rect: (N)
        :return: (N, 3) points in the rectified camera frame
        """
        x = ((u - self.cu) * depth_rect) / self.fu + self.tx
        y = ((v - self.cv) * depth_rect) / self.fv + self.ty
        return np.concatenate(
            (x.reshape(-1, 1), y.reshape(-1, 1), depth_rect.reshape(-1, 1)),
            axis=1)

    def corners3d_to_img_boxes(self, corners3d):
        """
        :param corners3d: (N, 8, 3) corners in rect coordinate
        :return: boxes: (None, 4) [x1, y1, x2, y2] in rgb coordinate
        :return: boxes_corner: (None, 8) [xi, yi] in rgb coordinate
        """
        num_boxes = corners3d.shape[0]
        corners_hom = np.concatenate(
            (corners3d, np.ones((num_boxes, 8, 1))), axis=2)  # (N, 8, 4)
        img_pts = np.matmul(corners_hom, self.P2.T)  # (N, 8, 3)
        xs = img_pts[:, :, 0] / img_pts[:, :, 2]
        ys = img_pts[:, :, 1] / img_pts[:, :, 2]
        boxes = np.concatenate((np.min(xs, axis=1).reshape(-1, 1),
                                np.min(ys, axis=1).reshape(-1, 1),
                                np.max(xs, axis=1).reshape(-1, 1),
                                np.max(ys, axis=1).reshape(-1, 1)),
                               axis=1)
        boxes_corner = np.concatenate(
            (xs.reshape(-1, 8, 1), ys.reshape(-1, 8, 1)), axis=2)
        return boxes, boxes_corner
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets/kitti/kitti_pointcloud_det.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
from paddle3d.apis import manager
from paddle3d.datasets.kitti import kitti_utils
from paddle3d.datasets.kitti.kitti_det import KittiDetDataset
from paddle3d.datasets.kitti.kitti_utils import assess_object_difficulties
from paddle3d.sample import Sample
@manager.DATASETS.add_component
class KittiPCDataset(KittiDetDataset):
    """KITTI point-cloud (velodyne) dataset for lidar 3D detection.

    Each sample carries the path to a .bin point-cloud file plus the
    calibration matrices; in training mode the camera-frame annotations
    are converted to the lidar frame and attached as 3D boxes, labels and
    per-object difficulties.
    """

    def __getitem__(self, index: int) -> Sample:
        """Build the Sample for ``index``, applying transforms when set."""
        filename = '{}.bin'.format(self.data[index])
        path = os.path.join(self.pointcloud_dir, filename)
        sample = Sample(path=path, modality="lidar")
        sample.meta.id = self.data[index]
        calibs = self.load_calibration_info(index)
        sample["calibs"] = calibs
        if self.is_train_mode:
            kitti_records, ignored_kitti_records = self.load_annotation(index)
            # Difficulty is judged on the camera-frame records (2D height,
            # occlusion, truncation).
            difficulties = assess_object_difficulties(kitti_records)
            # Bring kept and ignored annotations into the lidar frame.
            lidar_records = kitti_utils.project_camera_to_velodyne(
                kitti_records, calibs)
            ignored_lidar_records = kitti_utils.project_camera_to_velodyne(
                ignored_kitti_records, calibs)
            _, bboxes_3d, cls_names = kitti_utils.lidar_record_to_object(
                lidar_records)
            _, ignored_bboxes_3d, _ = kitti_utils.lidar_record_to_object(
                ignored_lidar_records)
            sample.bboxes_3d = bboxes_3d
            sample.labels = np.array(
                [self.class_names.index(name) for name in cls_names])
            sample.difficulties = difficulties
            sample.ignored_bboxes_3d = ignored_bboxes_3d
            if self.use_road_plane:
                sample.road_plane = self.load_road_plane(index)
        if self.transforms:
            sample = self.transforms(sample)
        return sample

    def load_road_plane(self, index):
        """Load ground-plane coefficients for a frame, or None if absent.

        The plane file stores the coefficients on its 4th line; the normal
        is flipped to face up (rectified camera frame) and normalized.
        """
        file_name = '{}.txt'.format(self.data[index])
        plane_file = os.path.join(self.base_dir, 'planes', file_name)
        if not os.path.exists(plane_file):
            return None
        with open(plane_file, 'r') as f:
            lines = f.readlines()
        lines = [float(i) for i in lines[3].split()]
        plane = np.asarray(lines)
        # Ensure normal is always facing up, this is in the rectified camera coordinate
        if plane[1] > 0:
            plane = -plane
        norm = np.linalg.norm(plane[0:3])
        plane = plane / norm
        return plane

    @property
    def pointcloud_dir(self) -> str:
        """Directory holding the velodyne .bin point-cloud files."""
        return os.path.join(self.base_dir, 'velodyne')
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets/nuscenes/nuscenes_manager.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nuscenes import NuScenes
from paddle3d.utils.logger import logger
class NuScenesManager:
    """Process-wide cache of NuScenes handles, keyed by version + dataroot."""

    _instance_map = {}

    @classmethod
    def get(cls, version: str, dataroot: str) -> NuScenes:
        """Return a cached NuScenes instance, loading it on first use."""
        cache_key = "{}+{}".format(version, dataroot)
        if cache_key not in cls._instance_map:
            # Metadata loading is slow; show a progress message once.
            with logger.processing(
                    'Loading nuscenes metadata, this may take a few minutes'):
                cls._instance_map[cache_key] = NuScenes(
                    version=version, dataroot=dataroot, verbose=False)
        return cls._instance_map[cache_key]
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets/nuscenes/nuscenes_utils.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import operator
from typing import Tuple
import numpy as np
from nuscenes.utils.data_classes import Box
from pyquaternion import Quaternion
from paddle3d.geometries.bbox import BBoxes2D, BBoxes3D
from paddle3d.sample import Sample
# cls_attr_dist refers to https://github.com/tianweiy/CenterPoint/blob/master/det3d/datasets/nuscenes/nusc_common.py#L47
# Attribute frequency counts per detection class — presumably tallied from
# the nuScenes training split (values copied from CenterPoint's
# nusc_common.py). Used as a fallback: when no attribute can be inferred
# from the predicted velocity, the most frequent attribute for the class
# is chosen (see get_nuscenes_box_attribute).
cls_attr_dist = {
    "barrier": {
        "cycle.with_rider": 0,
        "cycle.without_rider": 0,
        "pedestrian.moving": 0,
        "pedestrian.sitting_lying_down": 0,
        "pedestrian.standing": 0,
        "vehicle.moving": 0,
        "vehicle.parked": 0,
        "vehicle.stopped": 0,
    },
    "bicycle": {
        "cycle.with_rider": 2791,
        "cycle.without_rider": 8946,
        "pedestrian.moving": 0,
        "pedestrian.sitting_lying_down": 0,
        "pedestrian.standing": 0,
        "vehicle.moving": 0,
        "vehicle.parked": 0,
        "vehicle.stopped": 0,
    },
    "bus": {
        "cycle.with_rider": 0,
        "cycle.without_rider": 0,
        "pedestrian.moving": 0,
        "pedestrian.sitting_lying_down": 0,
        "pedestrian.standing": 0,
        "vehicle.moving": 9092,
        "vehicle.parked": 3294,
        "vehicle.stopped": 3881,
    },
    "car": {
        "cycle.with_rider": 0,
        "cycle.without_rider": 0,
        "pedestrian.moving": 0,
        "pedestrian.sitting_lying_down": 0,
        "pedestrian.standing": 0,
        "vehicle.moving": 114304,
        "vehicle.parked": 330133,
        "vehicle.stopped": 46898,
    },
    "construction_vehicle": {
        "cycle.with_rider": 0,
        "cycle.without_rider": 0,
        "pedestrian.moving": 0,
        "pedestrian.sitting_lying_down": 0,
        "pedestrian.standing": 0,
        "vehicle.moving": 882,
        "vehicle.parked": 11549,
        "vehicle.stopped": 2102,
    },
    "ignore": {
        "cycle.with_rider": 307,
        "cycle.without_rider": 73,
        "pedestrian.moving": 0,
        "pedestrian.sitting_lying_down": 0,
        "pedestrian.standing": 0,
        "vehicle.moving": 165,
        "vehicle.parked": 400,
        "vehicle.stopped": 102,
    },
    "motorcycle": {
        "cycle.with_rider": 4233,
        "cycle.without_rider": 8326,
        "pedestrian.moving": 0,
        "pedestrian.sitting_lying_down": 0,
        "pedestrian.standing": 0,
        "vehicle.moving": 0,
        "vehicle.parked": 0,
        "vehicle.stopped": 0,
    },
    "pedestrian": {
        "cycle.with_rider": 0,
        "cycle.without_rider": 0,
        "pedestrian.moving": 157444,
        "pedestrian.sitting_lying_down": 13939,
        "pedestrian.standing": 46530,
        "vehicle.moving": 0,
        "vehicle.parked": 0,
        "vehicle.stopped": 0,
    },
    "traffic_cone": {
        "cycle.with_rider": 0,
        "cycle.without_rider": 0,
        "pedestrian.moving": 0,
        "pedestrian.sitting_lying_down": 0,
        "pedestrian.standing": 0,
        "vehicle.moving": 0,
        "vehicle.parked": 0,
        "vehicle.stopped": 0,
    },
    "trailer": {
        "cycle.with_rider": 0,
        "cycle.without_rider": 0,
        "pedestrian.moving": 0,
        "pedestrian.sitting_lying_down": 0,
        "pedestrian.standing": 0,
        "vehicle.moving": 3421,
        "vehicle.parked": 19224,
        "vehicle.stopped": 1895,
    },
    "truck": {
        "cycle.with_rider": 0,
        "cycle.without_rider": 0,
        "pedestrian.moving": 0,
        "pedestrian.sitting_lying_down": 0,
        "pedestrian.standing": 0,
        "vehicle.moving": 21339,
        "vehicle.parked": 55626,
        "vehicle.stopped": 11097,
    },
}
def filter_fake_result(detection: Sample):
    """Drop placeholder predictions (confidence < 0) from a nuScenes sample.

    Rebuilds detection.bboxes_3d / labels / confidences in place, keeping
    only entries whose score is non-negative (velocities are carried along).

    NOTE(review): there is no guard for a missing ``bboxes_3d`` or for the
    case where every box is filtered out (an empty list is then passed to
    BBoxes3D) — confirm callers never hit those paths.
    """
    box3d = detection.bboxes_3d
    velocities = detection.bboxes_3d.velocities
    scores = detection.confidences
    labels = detection.labels
    box_list = []
    velocity_list = []
    score_list = []
    label_list = []
    for i in range(box3d.shape[0]):
        # Negative scores mark padded / fake detections.
        if scores[i] < 0:
            continue
        box_list.append(box3d[i])
        velocity_list.append(velocities[i])
        score_list.append(scores[i])
        label_list.append(labels[i])
    detection.bboxes_3d = BBoxes3D(
        np.asarray(box_list), velocities=np.asarray(velocity_list))
    detection.labels = np.asarray(label_list)
    detection.confidences = np.asarray(score_list)
def second_bbox_to_nuscenes_box(pred_sample: Sample):
    """
    Convert a sample's predicted boxes into nuScenes Box objects.

    NOTE: mutates ``pred_sample`` — the yaw column is converted in place
    before the boxes are built.

    This function refers to https://github.com/tianweiy/CenterPoint/blob/master/det3d/datasets/nuscenes/nusc_common.py#L160
    """
    # Yaw convention change: second/lidar yaw -> nuScenes yaw.
    pred_sample.bboxes_3d[:, -1] = -pred_sample.bboxes_3d[:, -1] - np.pi / 2

    boxes = []
    for idx in range(pred_sample.bboxes_3d.shape[0]):
        rotation = Quaternion(
            axis=[0, 0, 1], radians=pred_sample.bboxes_3d[idx, -1])
        vel = (*pred_sample.bboxes_3d.velocities[idx, 0:2], 0.0)
        boxes.append(
            Box(
                pred_sample.bboxes_3d[idx, :3],
                pred_sample.bboxes_3d[idx, 3:6],
                rotation,
                label=pred_sample.labels[idx],
                score=pred_sample.confidences[idx],
                velocity=vel,
            ))
    return boxes
def get_nuscenes_box_attribute(box: Box, label_name: str):
    """Infer the nuScenes attribute string for a predicted box.

    Moving boxes (planar speed > 0.2) of vehicle classes get
    "vehicle.moving" and cycles get "cycle.with_rider"; slow pedestrians
    get "pedestrian.standing" and slow buses "vehicle.stopped". Any other
    case falls back to the most frequent attribute for the class
    (cls_attr_dist).
    """
    attr = None
    speed = np.sqrt(box.velocity[0]**2 + box.velocity[1]**2)
    if speed > 0.2:
        if label_name in [
                "car",
                "construction_vehicle",
                "bus",
                "truck",
                "trailer",
        ]:
            attr = "vehicle.moving"
        elif label_name in ["bicycle", "motorcycle"]:
            attr = "cycle.with_rider"
    else:
        if label_name in ["pedestrian"]:
            attr = "pedestrian.standing"
        elif label_name in ["bus"]:
            attr = "vehicle.stopped"
    if attr is None:
        # Fall back to the class's most frequent attribute.
        attr = max(
            cls_attr_dist[label_name].items(), key=operator.itemgetter(1))[0]
    return attr
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets/nuscenes/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .nuscenes_multiview_det import NuscenesMVDataset
from .nuscenes_pointcloud_det import NuscenesPCDataset
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets/nuscenes/nuscenes_multiview_det.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import numbers
import os
import os.path as osp
import pickle
import random
from collections.abc import Mapping, Sequence
from functools import reduce
from pathlib import Path
from typing import List, Optional, Union
import numpy as np
import paddle
from nuscenes.eval.common.utils import Quaternion, quaternion_yaw
from nuscenes.utils import splits as nuscenes_split
from nuscenes.utils.data_classes import Box as NuScenesBox
from nuscenes.utils.geometry_utils import transform_matrix
from pyquaternion import Quaternion
from tqdm import tqdm
import paddle3d.transforms as T
from paddle3d.apis import manager
from paddle3d.datasets.nuscenes.nuscenes_det import NuscenesDetDataset
from paddle3d.datasets.nuscenes.nuscenes_manager import NuScenesManager
from paddle3d.geometries import BBoxes3D, CoordMode
from paddle3d.sample import Sample, SampleMeta
from paddle3d.transforms import TransformABC
from paddle3d.utils.logger import logger
def is_filepath(x):
    """Return True when x is path-like: either a str or a pathlib.Path."""
    return isinstance(x, (str, Path))
@manager.DATASETS.add_component
class NuscenesMVDataset(NuscenesDetDataset):
"""
Nuscecens dataset for multi-view camera detection task.
"""
DATASET_NAME = "Nuscenes"
def __init__(self,
dataset_root: str,
ann_file: str = None,
mode: str = "train",
transforms: Union[TransformABC, List[TransformABC]] = None,
max_sweeps: int = 10,
class_balanced_sampling: bool = False,
class_names: Union[list, tuple] = None,
queue_length=None,
use_valid_flag=False,
with_velocity=True):
self.mode = mode
self.dataset_root = dataset_root
self.filter_empty_gt = True
self.box_type_3d = 'LiDAR'
self.box_mode_3d = None
self.ann_file = ann_file
self.version = self.VERSION_MAP[self.mode]
self.max_sweeps = max_sweeps
self._build_data()
self.metadata = self.data_infos['metadata']
self.data_infos = list(
sorted(self.data_infos['infos'], key=lambda e: e['timestamp']))
if isinstance(transforms, list):
transforms = T.Compose(transforms)
self.transforms = transforms
if 'train' in self.mode:
self.flag = np.zeros(len(self), dtype=np.uint8)
self.modality = dict(
use_camera=True,
use_lidar=False,
use_radar=False,
use_map=False,
use_external=True,
)
self.with_velocity = with_velocity
self.use_valid_flag = use_valid_flag
self.channel = "LIDAR_TOP"
if class_names is not None:
self.class_names = class_names
else:
self.class_names = list(self.CLASS_MAP.keys())
self.queue_length = queue_length
def __len__(self):
return len(self.data_infos)
def _rand_another(self, idx):
"""Randomly get another item with the same flag.
Returns:
int: Another index of item with the same flag.
"""
pool = np.where(self.flag == self.flag[idx])[0]
return np.random.choice(pool)
def get_ann_info(self, index):
"""Get annotation info according to the given index.
Args:
index (int): Index of the annotation data to get.
Returns:
dict: Annotation information consists of the following keys:
- gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`): \
3D ground truth bboxes
- gt_labels_3d (np.ndarray): Labels of ground truths.
- gt_names (list[str]): Class names of ground truths.
"""
info = self.data_infos[index]
# filter out bbox containing no points
if self.use_valid_flag:
mask = info['valid_flag']
else:
mask = info['num_lidar_pts'] > 0
gt_bboxes_3d = info['gt_boxes'][mask]
gt_names_3d = info['gt_names'][mask]
gt_labels_3d = []
for cat in gt_names_3d:
if cat in self.CLASS_MAP:
# gt_labels_3d.append(self.CLASS_MAP[cat])
gt_labels_3d.append(self.class_names.index(cat))
else:
gt_labels_3d.append(-1)
gt_labels_3d = np.array(gt_labels_3d)
if self.with_velocity:
gt_velocity = info['gt_velocity'][mask]
nan_mask = np.isnan(gt_velocity[:, 0])
gt_velocity[nan_mask] = [0.0, 0.0]
gt_bboxes_3d = np.concatenate([gt_bboxes_3d, gt_velocity], axis=-1)
# the nuscenes box center is [0.5, 0.5, 0.5], we change it to be
# the same as KITTI (0.5, 0.5, 0)
origin = [0.5, 0.5, 0.5]
dst = np.array([0.5, 0.5, 0], dtype=gt_bboxes_3d.dtype)
src = np.array(origin, dtype=gt_bboxes_3d.dtype)
gt_bboxes_3d[:, :3] += gt_bboxes_3d[:, 3:6] * (dst - src)
gt_bboxes_3d = BBoxes3D(
gt_bboxes_3d, coordmode=2, origin=[0.5, 0.5, 0.5])
anns_results = dict(
gt_bboxes_3d=gt_bboxes_3d,
gt_labels_3d=gt_labels_3d,
gt_names=gt_names_3d)
return anns_results
    def get_data_info(self, index):
        """Get data info according to the given index.
        Args:
            index (int): Index of the sample data to get.
        Returns:
            dict: Data information that will be passed to the data \
                preprocessing pipelines. It includes the following keys:
                - sample_idx (str): Sample index.
                - pts_filename (str): Filename of point clouds.
                - sweeps (list[dict]): Infos of sweeps.
                - timestamp (float): Sample timestamp.
                - img_filename (str, optional): Image filename.
                - lidar2img (list[np.ndarray], optional): Transformations \
                    from lidar to different cameras.
                - ann_info (dict): Annotation info.
        """
        info = self.data_infos[index]
        sample = Sample(path=None, modality="multiview")
        sample.sample_idx = info['token']
        sample.meta.id = info['token']
        sample.pts_filename = osp.join(self.dataset_root, info['lidar_path'])
        # Deep copy so the path rewriting below does not mutate the cached
        # entries in self.data_infos.
        sample.sweeps = copy.deepcopy(info['sweeps'])
        if self.queue_length is None:
            # Single-frame mode: turn each sweep's relative data path into
            # an absolute one rooted at the dataset directory.
            for i in range(len(sample.sweeps)):
                for cam_type in sample.sweeps[i].keys():
                    data_path = info['sweeps'][i][cam_type]['data_path']
                    sample.sweeps[i][cam_type]['data_path'] = osp.join(
                        self.dataset_root, data_path)
        # nuScenes timestamps are in microseconds; convert to seconds.
        sample.timestamp = info['timestamp'] / 1e6
        if self.queue_length is not None:
            # Temporal-queue mode: keep ego pose and CAN bus signals so the
            # frame-to-frame deltas can be computed later (see union2one).
            sample.ego2global_translation = info['ego2global_translation']
            sample.ego2global_rotation = info['ego2global_rotation']
            sample.prev_idx = info['prev']
            sample.next_idx = info['next']
            sample.scene_token = info['scene_token']
            sample.can_bus = info['can_bus']
            sample.frame_idx = info['frame_idx']
        if self.modality['use_camera']:
            image_paths = []
            lidar2img_rts = []
            intrinsics = []
            extrinsics = []
            img_timestamp = []
            for cam_type, cam_info in info['cams'].items():
                img_timestamp.append(cam_info['timestamp'] / 1e6)
                image_paths.append(
                    osp.join(self.dataset_root, cam_info['data_path']))
                # obtain lidar to image transformation matrix
                lidar2cam_r = np.linalg.inv(cam_info['sensor2lidar_rotation'])
                lidar2cam_t = cam_info[
                    'sensor2lidar_translation'] @ lidar2cam_r.T
                # Homogeneous 4x4 lidar->camera transform (stored transposed:
                # rotation in the upper-left block, translation in row 3).
                lidar2cam_rt = np.eye(4)
                lidar2cam_rt[:3, :3] = lidar2cam_r.T
                lidar2cam_rt[3, :3] = -lidar2cam_t
                intrinsic = cam_info['cam_intrinsic']
                # Pad the 3x3 intrinsic matrix into homogeneous 4x4 form.
                viewpad = np.eye(4)
                viewpad[:intrinsic.shape[0], :intrinsic.shape[1]] = intrinsic
                lidar2img_rt = (viewpad @ lidar2cam_rt.T)
                intrinsics.append(viewpad)
                # The extrinsics mean the tranformation from lidar to camera.
                # If anyone want to use the extrinsics as sensor to lidar, please
                # use np.linalg.inv(lidar2cam_rt.T) and modify the ResizeCropFlipImage
                # and LoadMultiViewImageFromMultiSweepsFiles.
                extrinsics.append(lidar2cam_rt)
                lidar2img_rts.append(lidar2img_rt)
            sample.update(
                dict(
                    img_timestamp=img_timestamp,
                    img_filename=image_paths,
                    lidar2img=lidar2img_rts,
                    intrinsics=intrinsics,
                    extrinsics=extrinsics))
        if 'train' in self.mode:
            annos = self.get_ann_info(index)
            sample.ann_info = annos
        if self.queue_length is not None:
            # Write the current ego pose into the CAN bus vector:
            # slots 0-2 hold translation, slots 3-6 the rotation quaternion,
            # slot -2 the yaw in radians and slot -1 the yaw in degrees.
            rotation = Quaternion(sample['ego2global_rotation'])
            translation = sample['ego2global_translation']
            can_bus = sample['can_bus']
            can_bus[:3] = translation
            can_bus[3:7] = rotation
            patch_angle = quaternion_yaw(rotation) / np.pi * 180
            if patch_angle < 0:
                patch_angle += 360
            can_bus[-2] = patch_angle / 180 * np.pi
            can_bus[-1] = patch_angle
        return sample
    def __getitem__(self, index):
        """Fetch and transform the sample at ``index``.

        In non-train mode the transformed sample is returned directly.
        In train mode, samples with empty ground truth are re-drawn via
        ``self._rand_another`` when ``self.filter_empty_gt`` is set.
        When ``self.queue_length`` is not None, a short temporal queue of
        frames ending at ``index`` is assembled and merged by
        ``self.union2one``.
        """
        if 'train' not in self.mode:
            sample = self.get_data_info(index)
            # Empty *_fields lists are expected by the transform pipeline.
            sample['img_fields'] = []
            sample['bbox3d_fields'] = []
            sample['pts_mask_fields'] = []
            sample['pts_seg_fields'] = []
            sample['bbox_fields'] = []
            sample['mask_fields'] = []
            sample['seg_fields'] = []
            sample['box_type_3d'] = self.box_type_3d
            sample['box_mode_3d'] = self.box_mode_3d
            sample = self.transforms(sample)
            return sample
        # Train mode: loop until a usable sample (or queue) is produced.
        while True:
            if self.queue_length is None:
                sample = self.get_data_info(index)
                if sample is None:
                    index = self._rand_another(index)
                    continue
                sample['img_fields'] = []
                sample['bbox3d_fields'] = []
                sample['pts_mask_fields'] = []
                sample['pts_seg_fields'] = []
                sample['bbox_fields'] = []
                sample['mask_fields'] = []
                sample['seg_fields'] = []
                sample['box_type_3d'] = self.box_type_3d
                sample['box_mode_3d'] = self.box_mode_3d
                sample = self.transforms(sample)
                # Re-draw when the transformed sample has no ground truth.
                if self.is_train_mode and self.filter_empty_gt and \
                        (sample is None or len(sample['gt_labels_3d']) == 0 ):
                    index = self._rand_another(index)
                    continue
                return sample
            else:
                # Build a temporal queue: pick queue_length - 1 random
                # frames (sorted) out of the preceding window plus the
                # current frame.
                queue = []
                index_list = list(range(index - self.queue_length, index))
                random.shuffle(index_list)
                index_list = sorted(index_list[1:])
                index_list.append(index)
                for i in index_list:
                    # Clamp to 0 so early indices do not wrap around.
                    i = max(0, i)
                    sample = self.get_data_info(i)
                    if sample is None:
                        break
                    sample['img_fields'] = []
                    sample['bbox3d_fields'] = []
                    sample['pts_mask_fields'] = []
                    sample['pts_seg_fields'] = []
                    sample['bbox_fields'] = []
                    sample['mask_fields'] = []
                    sample['seg_fields'] = []
                    sample['box_type_3d'] = self.box_type_3d
                    sample['box_mode_3d'] = self.box_mode_3d
                    sample = self.transforms(sample)
                    # Abort the whole queue if any member has no ground
                    # truth; a new index is then drawn below.
                    if self.filter_empty_gt and \
                            (sample is None or len(sample['gt_labels_3d']) == 0):
                        sample = None
                        break
                    queue.append(sample)
                if sample is None:
                    index = self._rand_another(index)
                    continue
                return self.union2one(queue)
def union2one(self, queue):
imgs_list = [each['img'] for each in queue]
metas_map = SampleMeta()
prev_scene_token = None
prev_pos = None
prev_angle = None
for i, each in enumerate(queue):
metas_map[i] = each['meta']
if metas_map[i]['scene_token'] != prev_scene_token:
metas_map[i]['prev_bev_exists'] = False
prev_scene_token = metas_map[i]['scene_token']
prev_pos = copy.deepcopy(metas_map[i]['can_bus'][:3])
prev_angle = copy.deepcopy(metas_map[i]['can_bus'][-1])
metas_map[i]['can_bus'][:3] = 0
metas_map[i]['can_bus'][-1] = 0
else:
metas_map[i]['prev_bev_exists'] = True
tmp_pos = copy.deepcopy(metas_map[i]['can_bus'][:3])
tmp_angle = copy.deepcopy(metas_map[i]['can_bus'][-1])
metas_map[i]['can_bus'][:3] -= prev_pos
metas_map[i]['can_bus'][-1] -= prev_angle
prev_pos = copy.deepcopy(tmp_pos)
prev_angle = copy.deepcopy(tmp_angle)
queue[-1]['img'] = np.stack(imgs_list)
queue[-1]['meta'] = metas_map
queue = queue[-1]
return queue
    def _build_data(self):
        """Load (or generate and cache) the per-sample annotation infos.

        Resolution order: an explicitly supplied ``self.ann_file``; a
        cached pickle next to the dataset root; otherwise the infos are
        generated from the raw nuScenes tables via
        ``_fill_trainval_infos`` and written back as pickle caches.
        """
        test = 'test' in self.version
        if self.ann_file is not None:
            self.data_infos = pickle.load(open(self.ann_file, 'rb'))
            return
        if test:
            test_ann_cache_file = os.path.join(
                self.dataset_root,
                '{}_annotation_test.pkl'.format(self.DATASET_NAME))
            if os.path.exists(test_ann_cache_file):
                self.data_infos = pickle.load(open(test_ann_cache_file, 'rb'))
                return
        else:
            train_ann_cache_file = os.path.join(
                self.dataset_root,
                '{}_annotation_train.pkl'.format(self.DATASET_NAME))
            val_ann_cache_file = os.path.join(
                self.dataset_root,
                '{}_annotation_val.pkl'.format(self.DATASET_NAME))
            # NOTE(review): only the train cache is probed here; in 'val'
            # mode an existing val cache is not reused — confirm intended.
            if os.path.exists(train_ann_cache_file):
                self.data_infos = pickle.load(open(train_ann_cache_file, 'rb'))
                return
        self.nusc = NuScenesManager.get(
            version=self.version, dataroot=self.dataset_root)
        # Resolve the official scene splits for this dataset version.
        if self.version == 'v1.0-trainval':
            train_scenes = nuscenes_split.train
            val_scenes = nuscenes_split.val
        elif self.version == 'v1.0-test':
            train_scenes = nuscenes_split.test
            val_scenes = []
        elif self.version == 'v1.0-mini':
            train_scenes = nuscenes_split.mini_train
            val_scenes = nuscenes_split.mini_val
        else:
            raise ValueError('unknown nuscenes dataset version')
        # Keep only scenes whose lidar files actually exist on disk, then
        # map scene names to scene tokens.
        available_scenes = get_available_scenes(self.nusc)
        available_scene_names = [s['name'] for s in available_scenes]
        train_scenes = list(
            filter(lambda x: x in available_scene_names, train_scenes))
        val_scenes = list(
            filter(lambda x: x in available_scene_names, val_scenes))
        train_scenes = set([
            available_scenes[available_scene_names.index(s)]['token']
            for s in train_scenes
        ])
        val_scenes = set([
            available_scenes[available_scene_names.index(s)]['token']
            for s in val_scenes
        ])
        if test:
            print('test scene: {}'.format(len(train_scenes)))
        else:
            print('train scene: {}, val scene: {}'.format(
                len(train_scenes), len(val_scenes)))
        train_nusc_infos, val_nusc_infos = _fill_trainval_infos(
            self.nusc,
            train_scenes,
            val_scenes,
            test,
            max_sweeps=self.max_sweeps)
        metadata = dict(version=self.version)
        if test:
            print('test sample: {}'.format(len(train_nusc_infos)))
            data = dict(infos=train_nusc_infos, metadata=metadata)
            pickle.dump(data, open(test_ann_cache_file, 'wb'))
            self.data_infos = data
        else:
            print('train sample: {}, val sample: {}'.format(
                len(train_nusc_infos), len(val_nusc_infos)))
            data = dict(infos=train_nusc_infos, metadata=metadata)
            pickle.dump(data, open(train_ann_cache_file, 'wb'))
            if self.mode == 'train':
                self.data_infos = data
            # NOTE(review): 'data' is mutated in place below, so in train
            # mode self.data_infos['infos'] ends up pointing at the val
            # infos as well — verify this aliasing is intentional.
            data['infos'] = val_nusc_infos
            pickle.dump(data, open(val_ann_cache_file, 'wb'))
            if self.mode == 'val':
                self.data_infos = data
def _filter(self, anno: dict, box: NuScenesBox = None) -> bool:
# filter out objects that are not being scanned
mask = (anno['num_lidar_pts'] + anno['num_radar_pts']) > 0 and \
anno['category_name'] in self.LABEL_MAP and \
self.LABEL_MAP[anno['category_name']] in self.class_names
return mask
    def get_sweeps(self, index: int) -> List[str]:
        """Collect up to ``self.max_sweeps - 1`` preceding lidar sweeps.

        Each sweep dict carries the lidar file path, the time lag to the
        key frame (seconds), and ``ref_from_curr``: the homogeneous
        transform mapping the sweep's sensor frame into the key frame's
        sensor frame. When the sample has no predecessor, the key frame
        itself (or the last found sweep) is repeated as padding.
        """
        sweeps = []
        sample = self.data[index]
        token = sample['data'][self.channel]
        sample_data = self.nusc.get('sample_data', token)
        if self.max_sweeps <= 0:
            return sweeps
        # Homogeneous transform of current sample from ego car coordinate to sensor coordinate
        curr_sample_cs = self.nusc.get("calibrated_sensor",
                                       sample_data["calibrated_sensor_token"])
        curr_sensor_from_car = transform_matrix(
            curr_sample_cs["translation"],
            Quaternion(curr_sample_cs["rotation"]),
            inverse=True)
        # Homogeneous transformation matrix of current sample from global coordinate to ego car coordinate
        curr_sample_pose = self.nusc.get("ego_pose",
                                         sample_data["ego_pose_token"])
        curr_car_from_global = transform_matrix(
            curr_sample_pose["translation"],
            Quaternion(curr_sample_pose["rotation"]),
            inverse=True,
        )
        # Timestamps are in microseconds; convert to seconds.
        curr_timestamp = 1e-6 * sample_data["timestamp"]
        prev_token = sample_data['prev']
        while len(sweeps) < self.max_sweeps - 1:
            if prev_token == "":
                # No earlier sweep exists: pad with the key frame (identity
                # transform) or repeat the most recent sweep found.
                if len(sweeps) == 0:
                    sweeps.append({
                        "lidar_path":
                        osp.join(self.dataset_root, sample_data['filename']),
                        "time_lag":
                        0,
                        "ref_from_curr":
                        None,
                    })
                else:
                    sweeps.append(sweeps[-1])
            else:
                prev_sample_data = self.nusc.get('sample_data', prev_token)
                # Homogeneous transformation matrix of previous sample from ego car coordinate to global coordinate
                prev_sample_pose = self.nusc.get(
                    "ego_pose", prev_sample_data["ego_pose_token"])
                prev_global_from_car = transform_matrix(
                    prev_sample_pose["translation"],
                    Quaternion(prev_sample_pose["rotation"]),
                    inverse=False,
                )
                # Homogeneous transform of previous sample from sensor coordinate to ego car coordinate
                prev_sample_cs = self.nusc.get(
                    "calibrated_sensor",
                    prev_sample_data["calibrated_sensor_token"])
                prev_car_from_sensor = transform_matrix(
                    prev_sample_cs["translation"],
                    Quaternion(prev_sample_cs["rotation"]),
                    inverse=False,
                )
                # Chain: prev sensor -> prev ego -> global -> curr ego
                # -> curr sensor.
                curr_from_pre = reduce(
                    np.dot,
                    [
                        curr_sensor_from_car, curr_car_from_global,
                        prev_global_from_car, prev_car_from_sensor
                    ],
                )
                prev_timestamp = 1e-6 * prev_sample_data["timestamp"]
                time_lag = curr_timestamp - prev_timestamp
                sweeps.append({
                    "lidar_path":
                    osp.join(self.dataset_root, prev_sample_data['filename']),
                    "time_lag":
                    time_lag,
                    "ref_from_curr":
                    curr_from_pre,
                })
                prev_token = prev_sample_data['prev']
        return sweeps
@property
def metric(self):
if not hasattr(self, 'nusc'):
self.nusc = NuScenesManager.get(
version=self.version, dataroot=self.dataset_root)
return super().metric
def collate_fn(self, batch: List):
"""
"""
sample = batch[0]
if isinstance(sample, np.ndarray):
try:
batch = np.stack(batch, axis=0)
return batch
except Exception as e:
return batch
elif isinstance(sample, SampleMeta):
return batch
return super().collate_fn(batch)
def get_available_scenes(nusc):
    """Get available scenes from the input nuscenes class.
    Given the raw data, get the information of available scenes for
    further info generation. A scene is "available" when the lidar file
    of its first sample exists on disk.
    Args:
        nusc (class): Dataset class in the nuScenes dataset.
    Returns:
        available_scenes (list[dict]): List of basic information for the
            available scenes.
    """
    available_scenes = []
    print('total scene num: {}'.format(len(nusc.scene)))
    for scene in nusc.scene:
        scene_token = scene['token']
        scene_rec = nusc.get('scene', scene_token)
        sample_rec = nusc.get('sample', scene_rec['first_sample_token'])
        sd_rec = nusc.get('sample_data', sample_rec['data']['LIDAR_TOP'])
        has_more_frames = True
        scene_not_exist = False
        # Both branches below break, so only the first sample's lidar file
        # is ever checked per scene.
        while has_more_frames:
            lidar_path, boxes, _ = nusc.get_sample_data(sd_rec['token'])
            lidar_path = str(lidar_path)
            if os.getcwd() in lidar_path:
                # path from lyftdataset is absolute path
                lidar_path = lidar_path.split(f'{os.getcwd()}/')[-1]
                # relative path
            if not is_filepath(lidar_path):
                scene_not_exist = True
                break
            else:
                break
        if scene_not_exist:
            continue
        available_scenes.append(scene)
    print('exist scene num: {}'.format(len(available_scenes)))
    return available_scenes
def _fill_trainval_infos(nusc,
                         train_scenes,
                         val_scenes,
                         test=False,
                         max_sweeps=10):
    """Generate the train/val infos from the raw data.
    Args:
        nusc (:obj:`NuScenes`): Dataset class in the nuScenes dataset.
        train_scenes (list[str]): Basic information of training scenes.
        val_scenes (list[str]): Basic information of validation scenes.
        test (bool, optional): Whether use the test mode. In test mode, no
            annotations can be accessed. Default: False.
        max_sweeps (int, optional): Max number of sweeps. Default: 10.
    Returns:
        tuple[list[dict]]: Information of training set and validation set
            that will be saved to the info file.
    """
    train_nusc_infos = []
    val_nusc_infos = []
    msg = "Begin to generate a info of nuScenes dataset."
    for sample_idx in logger.range(len(nusc.sample), msg=msg):
        sample = nusc.sample[sample_idx]
        lidar_token = sample['data']['LIDAR_TOP']
        sd_rec = nusc.get('sample_data', sample['data']['LIDAR_TOP'])
        cs_record = nusc.get('calibrated_sensor',
                             sd_rec['calibrated_sensor_token'])
        pose_record = nusc.get('ego_pose', sd_rec['ego_pose_token'])
        lidar_path, boxes, _ = nusc.get_sample_data(lidar_token)
        assert os.path.exists(lidar_path)
        # Base record: lidar file, calibration, ego pose and timestamp.
        info = {
            'lidar_token': lidar_token,
            'lidar_path': lidar_path,
            'token': sample['token'],
            'sweeps': [],
            'cams': dict(),
            'lidar2ego_translation': cs_record['translation'],
            'lidar2ego_rotation': cs_record['rotation'],
            'ego2global_translation': pose_record['translation'],
            'ego2global_rotation': pose_record['rotation'],
            'timestamp': sample['timestamp'],
        }
        l2e_r = info['lidar2ego_rotation']
        l2e_t = info['lidar2ego_translation']
        e2g_r = info['ego2global_rotation']
        e2g_t = info['ego2global_translation']
        l2e_r_mat = Quaternion(l2e_r).rotation_matrix
        e2g_r_mat = Quaternion(e2g_r).rotation_matrix
        # obtain 6 image's information per frame
        camera_types = [
            'CAM_FRONT',
            'CAM_FRONT_RIGHT',
            'CAM_FRONT_LEFT',
            'CAM_BACK',
            'CAM_BACK_LEFT',
            'CAM_BACK_RIGHT',
        ]
        for cam in camera_types:
            cam_token = sample['data'][cam]
            cam_path, _, cam_intrinsic = nusc.get_sample_data(cam_token)
            cam_info = obtain_sensor2top(nusc, cam_token, l2e_t, l2e_r_mat,
                                         e2g_t, e2g_r_mat, cam)
            cam_info.update(cam_intrinsic=cam_intrinsic)
            info['cams'].update({cam: cam_info})
        # obtain sweeps for a single key-frame
        sd_rec = nusc.get('sample_data', sample['data']['LIDAR_TOP'])
        sweeps = []
        while len(sweeps) < max_sweeps:
            if not sd_rec['prev'] == '':
                sweep = obtain_sensor2top(nusc, sd_rec['prev'], l2e_t,
                                          l2e_r_mat, e2g_t, e2g_r_mat, 'lidar')
                sweeps.append(sweep)
                sd_rec = nusc.get('sample_data', sd_rec['prev'])
            else:
                break
        info['sweeps'] = sweeps
        # obtain annotation
        if not test:
            annotations = [
                nusc.get('sample_annotation', token) for token in sample['anns']
            ]
            # Box geometry in the lidar frame: centers, sizes (w, l, h)
            # and yaw angles.
            locs = np.array([b.center for b in boxes]).reshape(-1, 3)
            dims = np.array([b.wlh for b in boxes]).reshape(-1, 3)
            rots = np.array([b.orientation.yaw_pitch_roll[0]
                             for b in boxes]).reshape(-1, 1)
            velocity = np.array(
                [nusc.box_velocity(token)[:2] for token in sample['anns']])
            # Boxes covered by at least one lidar/radar point are valid.
            valid_flag = np.array(
                [(anno['num_lidar_pts'] + anno['num_radar_pts']) > 0
                 for anno in annotations],
                dtype=bool).reshape(-1)
            # convert velo from global to lidar
            for i in range(len(boxes)):
                velo = np.array([*velocity[i], 0.0])
                velo = velo @ np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(
                    l2e_r_mat).T
                velocity[i] = velo[:2]
            names = [b.name for b in boxes]
            for i in range(len(names)):
                # NuscenesDetDataset.LABEL_MAP
                if names[i] in NuscenesDetDataset.LABEL_MAP:
                    names[i] = NuscenesDetDataset.LABEL_MAP[names[i]]
            names = np.array(names)
            # we need to convert box size to
            # the format of our lidar coordinate system
            # which is x_size, y_size, z_size (corresponding to l, w, h)
            gt_boxes = np.concatenate([locs, dims, -rots - np.pi / 2], axis=1)
            assert len(gt_boxes) == len(
                annotations), f'{len(gt_boxes)}, {len(annotations)}'
            info['gt_boxes'] = gt_boxes
            info['gt_names'] = names
            info['gt_velocity'] = velocity.reshape(-1, 2)
            info['num_lidar_pts'] = np.array(
                [a['num_lidar_pts'] for a in annotations])
            info['num_radar_pts'] = np.array(
                [a['num_radar_pts'] for a in annotations])
            info['valid_flag'] = valid_flag
        # Route the sample into the split its scene belongs to.
        if sample['scene_token'] in train_scenes:
            train_nusc_infos.append(info)
        else:
            val_nusc_infos.append(info)
    return train_nusc_infos, val_nusc_infos
def obtain_sensor2top(nusc,
                      sensor_token,
                      l2e_t,
                      l2e_r_mat,
                      e2g_t,
                      e2g_r_mat,
                      sensor_type='lidar'):
    """Obtain the info with RT matric from general sensor to Top LiDAR.
    Args:
        nusc (class): Dataset class in the nuScenes dataset.
        sensor_token (str): Sample data token corresponding to the
            specific sensor type.
        l2e_t (np.ndarray): Translation from lidar to ego in shape (1, 3).
        l2e_r_mat (np.ndarray): Rotation matrix from lidar to ego
            in shape (3, 3).
        e2g_t (np.ndarray): Translation from ego to global in shape (1, 3).
        e2g_r_mat (np.ndarray): Rotation matrix from ego to global
            in shape (3, 3).
        sensor_type (str, optional): Sensor to calibrate. Default: 'lidar'.
    Returns:
        sweep (dict): Sweep information after transformation.
    """
    sd_rec = nusc.get('sample_data', sensor_token)
    cs_record = nusc.get('calibrated_sensor', sd_rec['calibrated_sensor_token'])
    pose_record = nusc.get('ego_pose', sd_rec['ego_pose_token'])
    data_path = str(nusc.get_sample_data_path(sd_rec['token']))
    if os.getcwd() in data_path:  # path from lyftdataset is absolute path
        data_path = data_path.split(f'{os.getcwd()}/')[-1]  # relative path
    sweep = {
        'data_path': data_path,
        'type': sensor_type,
        'sample_data_token': sd_rec['token'],
        'sensor2ego_translation': cs_record['translation'],
        'sensor2ego_rotation': cs_record['rotation'],
        'ego2global_translation': pose_record['translation'],
        'ego2global_rotation': pose_record['rotation'],
        'timestamp': sd_rec['timestamp']
    }
    l2e_r_s = sweep['sensor2ego_rotation']
    l2e_t_s = sweep['sensor2ego_translation']
    e2g_r_s = sweep['ego2global_rotation']
    e2g_t_s = sweep['ego2global_translation']
    # obtain the RT from sensor to Top LiDAR
    # sweep->ego->global->ego'->lidar
    l2e_r_s_mat = Quaternion(l2e_r_s).rotation_matrix
    e2g_r_s_mat = Quaternion(e2g_r_s).rotation_matrix
    # Rotation chain: sensor -> its ego -> global -> key ego -> key lidar.
    R = (l2e_r_s_mat.T @ e2g_r_s_mat.T) @ (
        np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T)
    # Translation along the same chain, expressed in the key lidar frame.
    T = (l2e_t_s @ e2g_r_s_mat.T + e2g_t_s) @ (
        np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T)
    T -= e2g_t @ (np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T
                  ) + l2e_t @ np.linalg.inv(l2e_r_mat).T
    sweep['sensor2lidar_rotation'] = R.T  # points @ R.T + T
    sweep['sensor2lidar_translation'] = T
    return sweep
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets/nuscenes/nuscenes_det.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, List, Tuple, Union
import numpy as np
import paddle
from nuscenes.utils import splits as nuscenes_split
from pyquaternion import Quaternion
import paddle3d.transforms as T
from paddle3d.datasets import BaseDataset
from paddle3d.datasets.nuscenes.nuscenes_manager import NuScenesManager
from paddle3d.datasets.nuscenes.nuscenes_metric import NuScenesMetric
from paddle3d.geometries import BBoxes2D, BBoxes3D
from paddle3d.sample import Sample
from paddle3d.transforms import TransformABC
from paddle3d.utils.common import generate_tempdir
class NuscenesDetDataset(BaseDataset):
    """Base nuScenes detection dataset.

    Wraps the nuScenes devkit: resolves the dataset version from ``mode``,
    collects the samples of the selected scenes (optionally with
    class-balanced resampling for training), and exposes per-sample 3D
    box annotations through :meth:`load_annotation`.
    """
    # Maps dataset mode to the nuScenes version string.
    VERSION_MAP = {
        'train': 'v1.0-trainval',
        'val': 'v1.0-trainval',
        'trainval': 'v1.0-trainval',
        'test': 'v1.0-test',
        'mini_train': 'v1.0-mini',
        'mini_val': 'v1.0-mini'
    }
    # Maps raw nuScenes category names to the 10 detection class names.
    # Categories not listed here are dropped.
    LABEL_MAP = {
        'human.pedestrian.adult': 'pedestrian',
        'human.pedestrian.child': 'pedestrian',
        'human.pedestrian.police_officer': 'pedestrian',
        'human.pedestrian.construction_worker': 'pedestrian',
        'vehicle.car': 'car',
        'vehicle.motorcycle': 'motorcycle',
        'vehicle.bicycle': 'bicycle',
        'vehicle.bus.bendy': 'bus',
        'vehicle.bus.rigid': 'bus',
        'vehicle.truck': 'truck',
        'vehicle.construction': 'construction_vehicle',
        'vehicle.trailer': 'trailer',
        'movable_object.barrier': 'barrier',
        'movable_object.trafficcone': 'traffic_cone'
    }
    # Default class-name -> integer-label mapping.
    CLASS_MAP = {
        'pedestrian': 0,
        'car': 1,
        'motorcycle': 2,
        'bicycle': 3,
        'bus': 4,
        'truck': 5,
        'construction_vehicle': 6,
        'trailer': 7,
        'barrier': 8,
        'traffic_cone': 9
    }
    CLASS_MAP_REVERSE = {value: key for key, value in CLASS_MAP.items()}
    # nuScenes attribute name -> integer id ('' is the "no attribute" id).
    ATTRIBUTE_MAP = {
        'vehicle.moving': 0,
        'vehicle.stopped': 1,
        'vehicle.parked': 2,
        'cycle.with_rider': 3,
        'cycle.without_rider': 4,
        'pedestrian.sitting_lying_down': 5,
        'pedestrian.standing': 6,
        'pedestrian.moving': 7,
        '': 8
    }
    ATTRIBUTE_MAP_REVERSE = {value: key for key, value in ATTRIBUTE_MAP.items()}
    # Sensor channels that may be selected via the ``channel`` argument.
    SUPPORT_CHANNELS = [
        "RADAR_FRONT", "RADAR_FRONT_LEFT", "RADAR_FRONT_RIGHT",
        "RADAR_BACK_LEFT", "RADAR_BACK_RIGHT", "LIDAR_TOP", "CAM_BACK",
        "CAM_BACK_LEFT", "CAM_BACK_RIGHT", "CAM_FRONT", "CAM_FRONT_LEFT",
        "CAM_FRONT_RIGHT"
    ]
    # Fallback attribute per class for annotations without attribute tokens.
    DEFAULT_ATTRIBUTE_MAP = {
        'car': 'vehicle.parked',
        'pedestrian': 'pedestrian.moving',
        'trailer': 'vehicle.parked',
        'truck': 'vehicle.parked',
        'bus': 'vehicle.moving',
        'motorcycle': 'cycle.without_rider',
        'construction_vehicle': 'vehicle.parked',
        'bicycle': 'cycle.without_rider',
        'barrier': '',
        'traffic_cone': ''
    }

    def __init__(self,
                 dataset_root: str,
                 channel: str,
                 mode: str = "train",
                 transforms: Union[TransformABC, List[TransformABC]] = None,
                 class_balanced_sampling: bool = False,
                 class_names: Union[list, tuple] = None):
        """
        Args:
            dataset_root (str): Root directory of the nuScenes data.
            channel (str): Sensor channel, one of SUPPORT_CHANNELS.
            mode (str): One of 'train', 'val', 'trainval', 'test',
                'mini_train', 'mini_val'. Determines the dataset version.
            transforms: Transform pipeline (list is wrapped in T.Compose).
            class_balanced_sampling (bool): Resample training frames so
                classes are more evenly represented.
            class_names (list|tuple): Subset of classes to train on;
                defaults to all keys of CLASS_MAP.
        """
        super().__init__()
        self.dataset_root = dataset_root
        self.mode = mode.lower()
        self.channel = channel
        self.class_balanced_sampling = class_balanced_sampling
        self.class_names = class_names
        if self.class_names is None:
            self.class_names = list(self.CLASS_MAP.keys())
        if isinstance(transforms, list):
            transforms = T.Compose(transforms)
        self.transforms = transforms
        if self.mode not in [
                'train', 'val', 'trainval', 'test', 'mini_train', 'mini_val'
        ]:
            raise ValueError(
                "mode should be 'train', 'val', 'trainval', 'mini_train', 'mini_val' or 'test', but got {}."
                .format(self.mode))
        if self.channel not in self.SUPPORT_CHANNELS:
            raise ValueError('Only channel {} is supported, but got {}'.format(
                self.SUPPORT_CHANNELS, self.channel))
        self.version = self.VERSION_MAP[self.mode]
        self.nusc = NuScenesManager.get(
            version=self.version, dataroot=self.dataset_root)
        self._build_data(class_balanced_sampling)

    def _build_data(self, class_balanced_sampling):
        """Collect samples of the scenes in this split; optionally
        resample frames so that each class appears with roughly equal
        frequency (training only, and only with multiple classes).

        NOTE(review): the ``class_balanced_sampling`` parameter is not
        read here — ``self.class_balanced_sampling`` is used instead.
        """
        scenes = getattr(nuscenes_split, self.mode)
        self.data = []
        for scene in self.nusc.scene:
            if scene['name'] not in scenes:
                continue
            # Walk the scene's linked sample list from first to last token.
            first_sample_token = scene['first_sample_token']
            last_sample_token = scene['last_sample_token']
            cur_token = first_sample_token
            # NOTE(review): 'first_sample' below is never used.
            first_sample = self.nusc.get('sample', first_sample_token)
            while True:
                sample = self.nusc.get('sample', cur_token)
                self.data.append(sample)
                if cur_token == last_sample_token:
                    break
                cur_token = sample['next']
        if self.class_balanced_sampling and self.mode.lower(
        ) == 'train' and len(self.class_names) > 1:
            # Collect, per class, the frames containing that class, then
            # resample each class's frame list so the class frequencies
            # approach the uniform fraction 1/len(class_names).
            cls_dist = {class_name: [] for class_name in self.class_names}
            for index in range(len(self.data)):
                sample = self.data[index]
                gt_names = []
                for anno in sample['anns']:
                    anno = self.nusc.get('sample_annotation', anno)
                    if not self._filter(anno):
                        continue
                    class_name = self.LABEL_MAP[anno['category_name']]
                    if class_name in self.class_names:
                        gt_names.append(class_name)
                for class_name in set(gt_names):
                    cls_dist[class_name].append(sample)
            num_balanced_samples = sum([len(v) for k, v in cls_dist.items()])
            num_balanced_samples = max(num_balanced_samples, 1)
            balanced_frac = 1.0 / len(self.class_names)
            fracs = [len(v) / num_balanced_samples for k, v in cls_dist.items()]
            sampling_ratios = [balanced_frac / frac for frac in fracs]
            resampling_data = []
            for samples, sampling_ratio in zip(
                    list(cls_dist.values()), sampling_ratios):
                resampling_data.extend(
                    np.random.choice(samples, int(
                        len(samples) * sampling_ratio)).tolist())
            self.data = resampling_data

    def __len__(self):
        # Number of (possibly resampled) key-frame samples.
        return len(self.data)

    def load_annotation(self, index: int, filter: Callable = None
                        ) -> Tuple[np.ndarray, np.ndarray]:
        """Load the 3D box annotations of sample ``index`` in the
        coordinate frame of ``self.channel``.

        Args:
            index (int): Sample index.
            filter (Callable, optional): ``filter(anno, box) -> bool``;
                annotations for which it returns False are dropped.
        Returns:
            tuple: (BBoxes3D with velocities, int32 label array,
                int32 attribute-id array).
        """
        bboxes = []
        labels = []
        velocities = []
        attrs = []
        sample = self.data[index]
        sample_data = self.nusc.get('sample_data', sample['data'][self.channel])
        ego_pose = self.nusc.get('ego_pose', sample_data['ego_pose_token'])
        channel_pose = self.nusc.get('calibrated_sensor',
                                     sample_data['calibrated_sensor_token'])
        for anno in sample['anns']:
            box = self.nusc.get_box(anno)
            box.velocity = self.nusc.box_velocity(box.token)
            # from global-coord to ego-coord
            box.translate(-np.array(ego_pose['translation']))
            box.rotate(Quaternion(ego_pose['rotation']).inverse)
            # from ego-coord to sensor-coord
            box.translate(-np.array(channel_pose['translation']))
            box.rotate(Quaternion(channel_pose['rotation']).inverse)
            anno = self.nusc.get('sample_annotation', anno)
            if not anno[
                    'category_name'] in self.LABEL_MAP:  # also filter ["DontCare", "ignore", "UNKNOWN"]
                continue
            # filter out objects that do not meet the conditions
            if filter and not filter(anno, box):
                continue
            # add velocity
            # loaded velocity may be nan when using nuscenes_devkit<=1.1.9
            # so we reset nan velocity to zero
            velocity = np.array(box.velocity)
            velocity[np.isnan(velocity)] = 0
            velocities.append(velocity[:2])
            # get attribute
            clsname = self.LABEL_MAP[anno['category_name']]
            label = self.class_names.index(clsname)
            if len(anno['attribute_tokens']) == 0:
                attr_name = self.DEFAULT_ATTRIBUTE_MAP[clsname]
            else:
                attr_token = anno['attribute_tokens'][0]
                attr_name = self.nusc.get('attribute', attr_token)['name']
            attrs.append(self.ATTRIBUTE_MAP[attr_name])
            # TODO: Fix me
            x, y, z = box.center
            w, l, h = box.wlh
            #yaw = box.orientation.yaw_pitch_roll[0] #TODO(luoqianhui): check this yaw
            # Recover yaw from the rotated x-axis of the box orientation.
            v = np.dot(box.orientation.rotation_matrix, np.array([1, 0, 0]))
            yaw = np.arctan2(v[1], v[0])
            bbox3d = np.array(
                [x, y, z, w, l, h, -(yaw + np.pi / 2)
                 ],  #TODO(luoqianhui): check this positive sign of yaw
                dtype=np.float32)
            # loaded bounding box may be nan when using nuscenes_devkit<=1.1.9
            # so we reset nan box to zero
            bbox3d[np.isnan(bbox3d)] = 0
            bboxes.append(bbox3d)
            labels.append(label)
        bboxes = BBoxes3D(
            bboxes, origin=(0.5, 0.5, 0.5), velocities=np.array(velocities))
        labels = np.array(labels, dtype=np.int32)
        attrs = np.array(attrs, dtype=np.int32)
        return bboxes, labels, attrs

    def padding_sample(self, samples: List[Sample]):
        """Pad every sample in the batch to the max label count so that
        the per-sample arrays can be stacked: labels/attrs are padded
        with -1, boxes and velocities with zero rows (in place)."""
        # do nothing for sweeps
        if samples[0].labels is None:
            return
        maxlen = max([len(sample.labels) for sample in samples])
        padding_lens = [maxlen - len(sample.labels) for sample in samples]
        for padlen, sample in zip(padding_lens, samples):
            if padlen == 0:
                continue
            _pad_item = np.ones([padlen], np.int32) * -1
            sample.labels = np.append(sample.labels, _pad_item)
            if sample.bboxes_2d is not None:
                _pad_item = np.zeros([padlen, sample.bboxes_2d.shape[1]],
                                     np.float32)
                sample.bboxes_2d = BBoxes2D(
                    np.append(sample.bboxes_2d, _pad_item, axis=0))
            if sample.bboxes_3d is not None:
                _pad_item = np.zeros([padlen, sample.bboxes_3d.shape[1]],
                                     np.float32)
                sample.bboxes_3d = BBoxes3D(
                    np.append(sample.bboxes_3d, _pad_item, axis=0))
            if sample.velocities is not None:
                _pad_item = np.zeros([padlen, 2], np.float32)
                sample.velocities = np.append(
                    sample.velocities, _pad_item, axis=0)
            if sample.attrs is not None:
                _pad_item = np.ones([padlen], np.int32) * -1
                sample.attrs = np.append(sample.attrs, _pad_item)

    @property
    def metric(self):
        # Evaluator bound to this dataset's split, channel and classes.
        return NuScenesMetric(
            nuscense=self.nusc,
            mode=self.mode,
            channel=self.channel,
            class_names=self.class_names,
            attrmap=self.ATTRIBUTE_MAP_REVERSE)

    @property
    def name(self) -> str:
        # Human-readable dataset name.
        return "nuScenes"

    @property
    def labels(self) -> List[str]:
        # Class names used as labels for this dataset instance.
        return self.class_names
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets/nuscenes/nuscenes_metric.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from typing import List
import numpy as np
from nuscenes import NuScenes
from pyquaternion import Quaternion
from paddle3d.datasets.metrics import MetricABC
from paddle3d.geometries import StructureEncoder
from paddle3d.sample import Sample
from paddle3d.utils.common import generate_tempdir
from .nuscenes_utils import (filter_fake_result, get_nuscenes_box_attribute,
second_bbox_to_nuscenes_box)
class NuScenesMetric(MetricABC):
    """Official nuScenes detection metric.

    Accumulates predicted samples, converts them to the nuScenes
    submission format (global coordinates), and evaluates them with the
    devkit's ``NuScenesEval``.
    """

    def __init__(self,
                 nuscense: NuScenes,
                 mode: str,
                 channel: str,
                 class_names: list,
                 attrmap: dict,
                 eval_version='detection_cvpr_2019'):
        self.nusc = nuscense
        self.mode = mode
        self.channel = channel
        self.class_names = class_names
        # attribute id -> nuScenes attribute name (reverse map).
        self.attrmap = attrmap
        self.predictions = []
        from nuscenes.eval.detection.config import config_factory
        self.eval_version = eval_version
        self.eval_detection_configs = config_factory(self.eval_version)

    def _parse_predictions_to_eval_format(self,
                                          predictions: List[Sample]) -> dict:
        """Convert predictions (sensor frame) into the nuScenes result
        dict keyed by sample token, with boxes in global coordinates.
        """
        # Nuscenes eval format:
        # https://www.nuscenes.org/object-detection?externalData=all&mapData=all&modalities=Any
        res = {}
        for pred in predictions:
            filter_fake_result(pred)
            # transform bboxes from second format to nuscenes format
            nus_box_list = second_bbox_to_nuscenes_box(pred)
            # from sensor pose to global pose
            sample = self.nusc.get('sample', pred.meta.id)
            sample_data = self.nusc.get('sample_data',
                                        sample['data'][self.channel])
            ego_pose = self.nusc.get('ego_pose', sample_data['ego_pose_token'])
            channel_pose = self.nusc.get('calibrated_sensor',
                                         sample_data['calibrated_sensor_token'])
            ego_quaternion = Quaternion(ego_pose['rotation'])
            channel_quaternion = Quaternion(channel_pose['rotation'])
            global_box_list = []
            for box in nus_box_list:
                # Move box to ego vehicle coord system
                box.rotate(Quaternion(channel_pose["rotation"]))
                box.translate(np.array(channel_pose["translation"]))
                # filter det in ego.
                # TODO(luoqianhui): where this filter is need?
                # Drop detections beyond the per-class evaluation range.
                cls_range_map = self.eval_detection_configs.class_range
                radius = np.linalg.norm(box.center[:2], 2)
                det_range = cls_range_map[self.class_names[box.label]]
                if radius > det_range:
                    continue
                # Move box to global coord system
                box.rotate(Quaternion(ego_pose["rotation"]))
                box.translate(np.array(ego_pose["translation"]))
                global_box_list.append(box)
            num_boxes = len(global_box_list)
            res[pred.meta.id] = []
            for idx in range(num_boxes):
                box = global_box_list[idx]
                label_name = self.class_names[box.label]
                attr = get_nuscenes_box_attribute(box, label_name)
                res[pred.meta.id].append({
                    'sample_token':
                    pred.meta.id,
                    'translation':
                    box.center.tolist(),
                    'size':
                    box.wlh.tolist(),
                    'rotation':
                    box.orientation.elements.tolist(),
                    'detection_name':
                    label_name,
                    'detection_score':
                    box.score,
                    'velocity':
                    box.velocity[:2].tolist(),
                    'attribute_name':
                    attr
                })
        return res

    def update(self, predictions: List[Sample], **kwargs):
        """Accumulate a batch of predicted samples for later evaluation."""
        self.predictions += predictions

    def compute(self, **kwargs) -> dict:
        """Run the official nuScenes evaluation over the accumulated
        predictions and return the metrics-summary dict."""
        from nuscenes.eval.detection.config import config_factory
        from nuscenes.eval.detection.evaluate import NuScenesEval
        eval_version = 'detection_cvpr_2019'
        eval_config = config_factory(eval_version)
        # Submission metadata: declare which modalities were used.
        dt_annos = {
            'meta': {
                'use_camera': True if self.channel.startswith('CAM') else False,
                'use_lidar': True if self.channel == 'LIDAR_TOP' else False,
                'use_radar': False,
                'use_map': False,
                'use_external': False,
            },
            'results': {}
        }
        dt_annos['results'].update(
            self._parse_predictions_to_eval_format(self.predictions))
        # Write the submission json to a temp dir and let the devkit
        # evaluator produce metrics_summary.json there.
        with generate_tempdir() as tmpdir:
            result_file = os.path.join(tmpdir, 'nuscenes_pred.json')
            with open(result_file, 'w') as file:
                json.dump(dt_annos, file, cls=StructureEncoder)
            evaluator = NuScenesEval(
                self.nusc,
                config=eval_config,
                result_path=result_file,
                eval_set=self.mode,
                output_dir=tmpdir,
                verbose=False,
            )
            metrics_summary = evaluator.main(
                plot_examples=0, render_curves=False)
            metric_file = os.path.join(tmpdir, 'metrics_summary.json')
            with open(metric_file, 'r') as file:
                metrics = json.load(file)
        return metrics
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets/nuscenes/nuscenes_pointcloud_det.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import os.path as osp
from functools import reduce
from typing import List, Optional, Union
import numpy as np
from nuscenes.utils.data_classes import Box as NuScenesBox
from nuscenes.utils.geometry_utils import transform_matrix
from pyquaternion import Quaternion
from paddle3d.apis import manager
from paddle3d.datasets.nuscenes.nuscenes_det import NuscenesDetDataset
from paddle3d.geometries import CoordMode
from paddle3d.sample import Sample
from paddle3d.transforms import TransformABC
@manager.DATASETS.add_component
class NuscenesPCDataset(NuscenesDetDataset):
    """NuScenes point-cloud (LIDAR_TOP) detection dataset.

    Each sample is the keyframe LIDAR_TOP sweep plus up to
    ``max_sweeps - 1`` preceding sweeps, each carrying the homogeneous
    transform mapping its points into the keyframe sensor frame and its
    time lag relative to the keyframe.
    """

    def __init__(self,
                 dataset_root: str,
                 mode: str = "train",
                 transforms: Union[TransformABC, List[TransformABC]] = None,
                 max_sweeps: int = 10,
                 class_balanced_sampling: bool = False,
                 class_names: Union[list, tuple] = None):
        # The sensor channel is fixed to LIDAR_TOP; everything else is
        # forwarded to the generic NuScenes detection base class.
        super().__init__(
            dataset_root=dataset_root,
            channel="LIDAR_TOP",
            mode=mode,
            transforms=transforms,
            class_balanced_sampling=class_balanced_sampling,
            class_names=class_names)
        self.max_sweeps = max_sweeps

    def _filter(self, anno: dict, box: NuScenesBox = None) -> bool:
        # filter out objects that are not being scanned: keep an annotation
        # only if at least one lidar/radar point hit it and its category maps
        # to one of the configured class names.
        mask = (anno['num_lidar_pts'] + anno['num_radar_pts']) > 0 and \
            anno['category_name'] in self.LABEL_MAP and \
            self.LABEL_MAP[anno['category_name']] in self.class_names
        return mask

    def get_sweeps(self, index: int) -> List[str]:
        """Collect up to ``max_sweeps - 1`` sweeps preceding the keyframe.

        Each returned dict holds the sweep's lidar file path, its time lag to
        the keyframe (seconds), and ``ref_from_curr``: the 4x4 homogeneous
        transform from the sweep's sensor frame to the keyframe sensor frame
        (``None`` for the keyframe itself when it has no predecessor).
        """
        sweeps = []
        sample = self.data[index]
        token = sample['data'][self.channel]
        sample_data = self.nusc.get('sample_data', token)
        if self.max_sweeps <= 0:
            return sweeps
        # Homogeneous transform of current sample from ego car coordinate to sensor coordinate
        curr_sample_cs = self.nusc.get("calibrated_sensor",
                                       sample_data["calibrated_sensor_token"])
        curr_sensor_from_car = transform_matrix(
            curr_sample_cs["translation"],
            Quaternion(curr_sample_cs["rotation"]),
            inverse=True)
        # Homogeneous transformation matrix of current sample from global coordinate to ego car coordinate
        curr_sample_pose = self.nusc.get("ego_pose",
                                         sample_data["ego_pose_token"])
        curr_car_from_global = transform_matrix(
            curr_sample_pose["translation"],
            Quaternion(curr_sample_pose["rotation"]),
            inverse=True,
        )
        # nuScenes timestamps are in microseconds; convert to seconds.
        curr_timestamp = 1e-6 * sample_data["timestamp"]
        prev_token = sample_data['prev']
        while len(sweeps) < self.max_sweeps - 1:
            if prev_token == "":
                # No earlier sweep exists: pad with the keyframe itself (first
                # iteration) or repeat the last collected sweep so the output
                # always has max_sweeps - 1 entries.
                if len(sweeps) == 0:
                    sweeps.append({
                        "lidar_path":
                        osp.join(self.dataset_root, sample_data['filename']),
                        "time_lag":
                        0,
                        "ref_from_curr":
                        None,
                    })
                else:
                    sweeps.append(sweeps[-1])
            else:
                prev_sample_data = self.nusc.get('sample_data', prev_token)
                # Homogeneous transformation matrix of previous sample from ego car coordinate to global coordinate
                prev_sample_pose = self.nusc.get(
                    "ego_pose", prev_sample_data["ego_pose_token"])
                prev_global_from_car = transform_matrix(
                    prev_sample_pose["translation"],
                    Quaternion(prev_sample_pose["rotation"]),
                    inverse=False,
                )
                # Homogeneous transform of previous sample from sensor coordinate to ego car coordinate
                prev_sample_cs = self.nusc.get(
                    "calibrated_sensor",
                    prev_sample_data["calibrated_sensor_token"])
                prev_car_from_sensor = transform_matrix(
                    prev_sample_cs["translation"],
                    Quaternion(prev_sample_cs["rotation"]),
                    inverse=False,
                )
                # Chain: prev sensor -> prev ego -> global -> curr ego -> curr
                # sensor, yielding the prev-sweep-to-keyframe transform.
                curr_from_pre = reduce(
                    np.dot,
                    [
                        curr_sensor_from_car, curr_car_from_global,
                        prev_global_from_car, prev_car_from_sensor
                    ],
                )
                prev_timestamp = 1e-6 * prev_sample_data["timestamp"]
                time_lag = curr_timestamp - prev_timestamp
                sweeps.append({
                    "lidar_path":
                    osp.join(self.dataset_root, prev_sample_data['filename']),
                    "time_lag":
                    time_lag,
                    "ref_from_curr":
                    curr_from_pre,
                })
                prev_token = prev_sample_data['prev']
        return sweeps

    def __getitem__(self, index: int) -> Sample:
        """Build one lidar Sample with its sweeps and (non-test) annotations."""
        token = self.data[index]['data'][self.channel]
        sample_data = self.nusc.get('sample_data', token)
        path = os.path.join(self.dataset_root, sample_data['filename'])
        sample = Sample(path=path, modality="lidar")
        sample.meta.id = self.data[index]['token']
        # Attach preceding sweeps with their keyframe-relative transforms.
        for sweep in self.get_sweeps(index):
            sweep_sample = Sample(path=sweep["lidar_path"], modality="lidar")
            sweep_sample.meta.time_lag = sweep["time_lag"]
            sweep_sample.meta.ref_from_curr = sweep["ref_from_curr"]
            sample.sweeps.append(sweep_sample)
        if not self.is_test_mode:
            bboxes_3d, labels, attrs = self.load_annotation(index, self._filter)
            bboxes_3d.coordmode = CoordMode.NuScenesLidar
            sample.bboxes_3d = bboxes_3d
            sample.labels = labels
            sample.attrs = attrs
        if self.transforms:
            sample = self.transforms(sample)
        return sample
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets/waymo/waymo_det.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
import pickle
from typing import Callable, List, Tuple, Union
import numpy as np
import paddle
import paddle3d.transforms as T
from paddle3d.datasets import BaseDataset
from paddle3d.geometries import BBoxes2D, BBoxes3D
from paddle3d.sample import Sample
from paddle3d.transforms import TransformABC
from paddle3d.utils.logger import logger
class WaymoDetDataset(BaseDataset):
    """Base dataset for Waymo 3D detection built on pre-processed pickles.

    Expects ``<dataset_root>/<processed_data_tag>/<seq>/<seq>.pkl`` info
    files produced by the Waymo pre-processing scripts, and a split file
    ``<dataset_root>/ImageSets/<mode>.txt`` listing sequence names.
    """

    def __init__(self,
                 dataset_root: str,
                 sampled_interval: int,
                 mode: str = "train",
                 transforms: Union[TransformABC, List[TransformABC]] = None,
                 class_names: Union[list, tuple] = None,
                 processed_data_tag: str = "waymo_processed_data_v1_3_2"):
        super().__init__()
        self.dataset_root = dataset_root
        self.data_path = os.path.join(self.dataset_root, processed_data_tag)
        self.sampled_interval = sampled_interval
        self.mode = mode.lower()
        if isinstance(transforms, list):
            transforms = T.Compose(transforms)
        self.transforms = transforms
        self.class_names = class_names
        if self.mode not in ['train', 'val', 'test']:
            raise ValueError(
                "mode should be 'train', 'val' or 'test', but got {}.".format(
                    self.mode))
        split_dir = os.path.join(self.dataset_root, "ImageSets",
                                 self.mode + '.txt')
        # Use a context manager so the split-file handle is closed instead of
        # being leaked (the original `open(...).readlines()` never closed it).
        with open(split_dir) as f:
            self.sample_sequence_list = [x.strip() for x in f.readlines()]
        self.infos = []
        self.load_waymo_infos()

    def load_waymo_infos(self):
        """Load per-sequence info pickles and optionally subsample frames."""
        logger.info("Loading Waymo Dataset")
        waymo_infos = []
        num_skipped_infos = 0
        for k in range(len(self.sample_sequence_list)):
            sequence_name = os.path.splitext(self.sample_sequence_list[k])[0]
            info_path = os.path.join(self.data_path, sequence_name,
                                     "{}.pkl".format(sequence_name))
            # Sequences without a pre-processed pickle are skipped, not fatal.
            if not os.path.exists(info_path):
                num_skipped_infos += 1
                continue
            with open(info_path, 'rb') as f:
                infos = pickle.load(f)
                waymo_infos.extend(infos)
        self.infos = waymo_infos
        logger.info("Total skipped sequences {}".format(num_skipped_infos))
        logger.info("Total samples for Waymo dataset: {}".format(
            len(waymo_infos)))
        # Keep every sampled_interval-th frame when subsampling is requested.
        if self.sampled_interval > 1:
            sampled_waymo_infos = []
            for k in range(0, len(self.infos), self.sampled_interval):
                sampled_waymo_infos.append(self.infos[k])
            self.infos = sampled_waymo_infos
            logger.info("Total sampled samples for Waymo dataset: {}".format(
                len(self.infos)))

    def drop_info_with_name(self, info, name):
        """Remove all entries whose ``info['name']`` equals ``name`` from every
        parallel array in ``info`` and return the filtered copy."""
        ret_info = {}
        keep_indices = [i for i, x in enumerate(info['name']) if x != name]
        for key in info.keys():
            ret_info[key] = info[key][keep_indices]
        return ret_info

    def __len__(self):
        return len(self.infos)

    def load_annotation(self, index):
        """Return (gt_boxes_lidar, gt_labels, difficulty) for one frame.

        Drops 'unknown' objects, boxes without lidar points, and classes not
        in ``self.class_names``; labels are indices into ``class_names``.
        """
        info = copy.deepcopy(self.infos[index])
        annos = info["annos"]
        # filter unknown class
        annos = self.drop_info_with_name(annos, name='unknown')
        # filter empty boxes for train
        gt_boxes_lidar = annos['gt_boxes_lidar']
        difficulty = annos['difficulty']
        mask = (annos['num_points_in_gt'] > 0)
        gt_names = annos['name'][mask]
        gt_boxes_lidar = gt_boxes_lidar[mask]
        difficulty = difficulty[mask]
        # filter boxes with given classes
        mask = [i for i, x in enumerate(gt_names) if x in self.class_names]
        mask = np.array(mask, dtype=np.int64)
        gt_names = gt_names[mask]
        gt_boxes_lidar = gt_boxes_lidar[mask]
        difficulty = difficulty[mask]
        gt_labels = np.array([self.class_names.index(n) for n in gt_names],
                             dtype=np.int32)
        return gt_boxes_lidar, gt_labels, difficulty

    @property
    def metric(self):
        # lazy import to avoid tensorflow dependency in other tasks
        from paddle3d.datasets.waymo.waymo_metric import WaymoMetric
        eval_gt_annos = [copy.deepcopy(info['annos']) for info in self.infos]
        return WaymoMetric(
            eval_gt_annos, self.class_names, distance_thresh=1000)

    @property
    def name(self) -> str:
        return "Waymo"

    @property
    def labels(self) -> List[str]:
        return self.class_names
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets/waymo/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .waymo_pointcloud_det import WaymoPCDataset
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets/waymo/waymo_metric.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
import numpy as np
import tensorflow as tf
from google.protobuf import text_format
from waymo_open_dataset.metrics.python import detection_metrics
from waymo_open_dataset.protos import metrics_pb2
from paddle3d.sample import Sample
from paddle3d.utils import box_utils
from paddle3d.utils.logger import logger
tf.get_logger().setLevel('INFO')
class WaymoMetric(tf.test.TestCase):
    """
    AP/APH metric evaluation of the Waymo Dataset.

    Wraps the official Waymo Open Dataset TF1 detection-metric ops.
    NOTE(review): inherits tf.test.TestCase apparently to reuse its
    ``self.session`` helper — confirm no other TestCase machinery is relied on.
    This code is based on:
        <https://github.com/yifanzhang713/IA-SSD/blob/main/pcdet/datasets/waymo/waymo_eval.py>
    """
    # Index into this list == integer object-type id fed to the metric ops.
    WAYMO_CLASSES = ['unknown', 'Vehicle', 'Pedestrian', 'Sign', 'Cyclist']

    def __init__(self, gt_infos, class_names, distance_thresh):
        # gt_infos: per-frame ground-truth annotation dicts.
        # distance_thresh: boxes farther than this (xy-norm, meters) are
        # excluded from both predictions and ground truth.
        self.gt_infos = gt_infos
        self.class_names = class_names
        self.distance_thresh = distance_thresh
        self.predictions = []

    def generate_prediction_infos(self, sample_list: List[Sample]):
        """Convert predicted Samples into per-frame dicts with keys
        'name', 'score', 'boxes_lidar'."""
        prediction_infos = []
        for sample in sample_list:
            # there is no det
            if sample["labels"] is None:
                pred_dict = {
                    'name': np.zeros(0),
                    'score': np.zeros(0),
                    'boxes_lidar': np.zeros([0, 7])
                }
            # there is a det
            else:
                pred_dict = {
                    "name": np.array(self.class_names)[sample["labels"]],
                    "score": np.array(sample["confidences"]),
                    "boxes_lidar": np.array(sample["bboxes_3d"])
                }
            prediction_infos.append(pred_dict)
        return prediction_infos

    def parse_infos_to_eval_format(self,
                                   infos: List[dict],
                                   class_names,
                                   is_gt=True,
                                   is_kitti=True):
        """Flatten per-frame infos into the parallel arrays the Waymo metric
        ops expect.

        Returns:
            (frame_id, boxes3d, obj_type, score, overlap_nlz, difficulty) —
            one entry per box across all frames.
        """
        frame_id, boxes3d, obj_type, score, overlap_nlz, difficulty = [], [], [], [], [], []
        for frame_index, info in enumerate(infos):
            if is_gt:
                box_mask = np.array([n in class_names for n in info['name']],
                                    dtype=np.bool_)
                if 'num_points_in_gt' in info:
                    # Difficulty 0 means "unset": derive LEVEL_1/LEVEL_2 from
                    # the number of lidar points inside the box.
                    zero_difficulty_mask = info['difficulty'] == 0
                    info['difficulty'][(info['num_points_in_gt'] > 5)
                                       & zero_difficulty_mask] = 1
                    info['difficulty'][(info['num_points_in_gt'] <= 5)
                                       & zero_difficulty_mask] = 2
                    nonzero_mask = info['num_points_in_gt'] > 0
                    box_mask = box_mask & nonzero_mask
                else:
                    logger.info(
                        'Please provide the num_points_in_gt for evaluating on Waymo Dataset'
                    )
                    raise NotImplementedError
                num_boxes = box_mask.sum()
                box_name = info['name'][box_mask]
                difficulty.append(info['difficulty'][box_mask])
                # Ground-truth boxes get a constant score of 1.
                score.append(np.ones(num_boxes))
                boxes3d.append(info['gt_boxes_lidar'][box_mask])
            else:
                num_boxes = len(info['boxes_lidar'])
                difficulty.append([0] * num_boxes)
                score.append(info['score'])
                if is_kitti:
                    # Predictions in KITTI lidar format are converted back to
                    # Waymo lidar format before evaluation.
                    info[
                        'boxes_lidar'] = box_utils.boxes3d_kitti_lidar_to_lidar(
                            info['boxes_lidar'])
                boxes3d.append(np.array(info['boxes_lidar']))
                box_name = info['name']
            obj_type += [
                self.WAYMO_CLASSES.index(name)
                for i, name in enumerate(box_name)
            ]
            frame_id.append(np.array([frame_index] * num_boxes))
            overlap_nlz.append(np.zeros(num_boxes))  # set zero currently
        frame_id = np.concatenate(frame_id).reshape(-1).astype(np.int64)
        boxes3d = np.concatenate(boxes3d, axis=0)
        obj_type = np.array(obj_type).reshape(-1)
        score = np.concatenate(score).reshape(-1)
        overlap_nlz = np.concatenate(overlap_nlz).reshape(-1)
        difficulty = np.concatenate(difficulty).reshape(-1).astype(np.int8)
        # Wrap headings into a canonical period before evaluation.
        boxes3d[:, -1] = box_utils.limit_period(
            boxes3d[:, -1], offset=0.5, period=np.pi * 2)
        return frame_id, boxes3d, obj_type, score, overlap_nlz, difficulty

    def mask_by_distance(self, distance_thresh, boxes_3d, *args):
        """Drop boxes whose xy distance exceeds ``distance_thresh`` (+0.5 m
        slack) and apply the same mask to every extra array in ``args``."""
        mask = np.linalg.norm(boxes_3d[:, 0:2], axis=1) < distance_thresh + 0.5
        boxes_3d = boxes_3d[mask]
        ret_ans = [boxes_3d]
        for arg in args:
            ret_ans.append(arg[mask])
        return tuple(ret_ans)

    def build_config(self):
        """Build the Waymo metrics_pb2.Config: per-class IoU thresholds,
        Hungarian matching, 3D boxes, and a 0.00..1.00 score-cutoff sweep."""
        config = metrics_pb2.Config()
        # iou_thresholds are indexed by object type: unknown, Vehicle (0.7),
        # Pedestrian/Sign/Cyclist (0.5).
        config_text = """
        breakdown_generator_ids: OBJECT_TYPE
        difficulties {
        levels:1
        levels:2
        }
        matcher_type: TYPE_HUNGARIAN
        iou_thresholds: 0.0
        iou_thresholds: 0.7
        iou_thresholds: 0.5
        iou_thresholds: 0.5
        iou_thresholds: 0.5
        box_type: TYPE_3D
        """
        for x in range(0, 100):
            config.score_cutoffs.append(x * 0.01)
        config.score_cutoffs.append(1.0)
        text_format.Merge(config_text, config)
        return config

    def build_graph(self, graph):
        """Create placeholders and the detection-metric ops inside ``graph``.

        Returns the dict of (value_op, update_op) pairs produced by
        ``detection_metrics.get_detection_metric_ops``.
        """
        with graph.as_default():
            self._pd_frame_id = tf.compat.v1.placeholder(dtype=tf.int64)
            self._pd_bbox = tf.compat.v1.placeholder(dtype=tf.float32)
            self._pd_type = tf.compat.v1.placeholder(dtype=tf.uint8)
            self._pd_score = tf.compat.v1.placeholder(dtype=tf.float32)
            self._pd_overlap_nlz = tf.compat.v1.placeholder(dtype=tf.bool)
            self._gt_frame_id = tf.compat.v1.placeholder(dtype=tf.int64)
            self._gt_bbox = tf.compat.v1.placeholder(dtype=tf.float32)
            self._gt_type = tf.compat.v1.placeholder(dtype=tf.uint8)
            self._gt_difficulty = tf.compat.v1.placeholder(dtype=tf.uint8)
            metrics = detection_metrics.get_detection_metric_ops(
                config=self.build_config(),
                prediction_frame_id=self._pd_frame_id,
                prediction_bbox=self._pd_bbox,
                prediction_type=self._pd_type,
                prediction_score=self._pd_score,
                prediction_overlap_nlz=self._pd_overlap_nlz,
                ground_truth_bbox=self._gt_bbox,
                ground_truth_type=self._gt_type,
                ground_truth_frame_id=self._gt_frame_id,
                ground_truth_difficulty=self._gt_difficulty,
            )
        return metrics

    def run_eval_ops(
            self,
            sess,
            graph,
            metrics,
            prediction_frame_id,
            prediction_bbox,
            prediction_type,
            prediction_score,
            prediction_overlap_nlz,
            ground_truth_frame_id,
            ground_truth_bbox,
            ground_truth_type,
            ground_truth_difficulty,
    ):
        """Run all metric update ops once, feeding the flattened arrays."""
        sess.run(
            [tf.group([value[1] for value in metrics.values()])],
            feed_dict={
                self._pd_bbox: prediction_bbox,
                self._pd_frame_id: prediction_frame_id,
                self._pd_type: prediction_type,
                self._pd_score: prediction_score,
                self._pd_overlap_nlz: prediction_overlap_nlz,
                self._gt_bbox: ground_truth_bbox,
                self._gt_type: ground_truth_type,
                self._gt_frame_id: ground_truth_frame_id,
                self._gt_difficulty: ground_truth_difficulty,
            },
        )

    def eval_value_ops(self, sess, graph, metrics):
        """Evaluate the value op of every metric; returns {name: [value]}."""
        return {item[0]: sess.run([item[1][0]]) for item in metrics.items()}

    def update(self,
               predictions: List[Sample],
               ground_truths: List[Sample] = None):
        # ground_truths is unused: ground truth comes from gt_infos at
        # construction time.
        self.predictions += predictions

    def compute(self, verbose):
        """Run the Waymo detection metric ops over all accumulated
        predictions and return the AP/APH dict."""
        prediction_infos = self.generate_prediction_infos(self.predictions)
        assert len(prediction_infos) == len(self.gt_infos)
        tf.compat.v1.disable_eager_execution()
        # set is_kitti=True, because iassd's outputs is in kitti format
        pd_frameid, pd_boxes3d, pd_type, pd_score, pd_overlap_nlz, _ = self.parse_infos_to_eval_format(
            prediction_infos, self.class_names, is_gt=False, is_kitti=True)
        gt_frameid, gt_boxes3d, gt_type, gt_score, gt_overlap_nlz, gt_difficulty = self.parse_infos_to_eval_format(
            self.gt_infos, self.class_names, is_gt=True, is_kitti=False)
        # Apply the same distance cutoff to predictions and ground truth.
        pd_boxes3d, pd_frameid, pd_type, pd_score, pd_overlap_nlz = self.mask_by_distance(
            self.distance_thresh, pd_boxes3d, pd_frameid, pd_type, pd_score,
            pd_overlap_nlz)
        gt_boxes3d, gt_frameid, gt_type, gt_score, gt_difficulty = self.mask_by_distance(
            self.distance_thresh, gt_boxes3d, gt_frameid, gt_type, gt_score,
            gt_difficulty)
        logger.info('Number: (pd, %d) VS. (gt, %d)' % (len(pd_boxes3d),
                                                       len(gt_boxes3d)))
        logger.info('Level 1: %d, Level2: %d)' % ((gt_difficulty == 1).sum(),
                                                  (gt_difficulty == 2).sum()))
        graph = tf.Graph()
        metrics = self.build_graph(graph)
        with self.session(graph=graph) as sess:
            sess.run(tf.compat.v1.initializers.local_variables())
            self.run_eval_ops(
                sess,
                graph,
                metrics,
                pd_frameid,
                pd_boxes3d,
                pd_type,
                pd_score,
                pd_overlap_nlz,
                gt_frameid,
                gt_boxes3d,
                gt_type,
                gt_difficulty,
            )
            with tf.compat.v1.variable_scope('detection_metrics', reuse=True):
                aps = self.eval_value_ops(sess, graph, metrics)
        if verbose:
            for k, v in aps.items():
                logger.info("{}: {:.4f}".format(k, v[0]))
        return aps
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets/waymo/waymo_pointcloud_det.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
from typing import List, Tuple, Union
import numpy as np
from paddle3d.apis import manager
from paddle3d.datasets.waymo.waymo_det import WaymoDetDataset
from paddle3d.geometries import BBoxes3D, PointCloud
from paddle3d.sample import Sample
from paddle3d.transforms import TransformABC
from paddle3d.utils import box_utils
from paddle3d.utils.logger import logger
@manager.DATASETS.add_component
class WaymoPCDataset(WaymoDetDataset):
    """Waymo point-cloud detection dataset.

    Loads pre-processed per-frame ``.npy`` point clouds and, in train mode,
    the corresponding 3D boxes converted to the KITTI-lidar convention.
    """

    def __init__(self,
                 dataset_root: str,
                 sampled_interval: int,
                 mode: str = "train",
                 transforms: Union[TransformABC, List[TransformABC]] = None,
                 class_names: Union[list, tuple] = None,
                 processed_data_tag: str = "waymo_processed_data_v1_3_2",
                 disable_nlz_flag: bool = True):
        super().__init__(
            dataset_root=dataset_root,
            sampled_interval=sampled_interval,
            mode=mode,
            transforms=transforms,
            processed_data_tag=processed_data_tag,
            class_names=class_names)
        # When True, points flagged as inside a no-label zone are discarded.
        self.disable_nlz_flag = disable_nlz_flag

    def get_lidar(self, lidar_path):
        """Load one saved sweep and return its (N, 5) feature array.

        The file stores (N, 6): [x, y, z, intensity, elongation, NLZ_flag];
        the flag column is stripped, NLZ points are optionally dropped, and
        intensity is squashed with tanh.
        """
        raw = np.load(
            lidar_path)  # (N, 6) [x, y, z, intensity, elongation, NLZ_flag]
        features = raw[:, 0:5]
        nlz_flag = raw[:, 5]
        if self.disable_nlz_flag:
            features = features[nlz_flag == -1]
        features[:, 3] = np.tanh(features[:, 3])
        return features

    def __getitem__(self, index):
        frame_info = copy.deepcopy(self.infos[index])
        pc_meta = frame_info["point_cloud"]
        lidar_path = os.path.join(self.data_path, pc_meta["lidar_sequence"],
                                  "%04d.npy" % pc_meta["sample_idx"])
        sample = Sample(path=lidar_path, modality='lidar')
        sample.data = PointCloud(self.get_lidar(lidar_path))
        if self.mode == "train":
            # load boxes and labels, labels starts from 0.
            lidar_boxes, box_labels, box_difficulties = self.load_annotation(
                index)
            sample.labels = box_labels
            sample.difficulties = box_difficulties
            # TODO(liuxiao): unify coord system to avoid coord transform
            # convert boxes from [x, y, z, l, w, h, heading] to [x, y, z, w, l, h, yaw], obj_center -> bottom_center.
            # the purpose of this conversion is to reuse some data transform in paddle3d
            lidar_boxes = box_utils.boxes3d_lidar_to_kitti_lidar(lidar_boxes)
            sample.bboxes_3d = BBoxes3D(
                data=lidar_boxes, coordmode=1, origin=[0.5, 0.5, 0])
        if self.transforms:
            sample = self.transforms(sample)
        return sample
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets/waymo/waymo_utils.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This code is heavily based on: <https://github.com/yifanzhang713/IA-SSD/blob/main/pcdet/datasets/waymo/waymo_utils.py>
import os
import pickle
import numpy as np
import tensorflow as tf
from waymo_open_dataset import dataset_pb2
from waymo_open_dataset.utils import (frame_utils, range_image_utils,
transform_utils)
from paddle3d.utils.logger import logger
WAYMO_CLASSES = ['unknown', 'Vehicle', 'Pedestrian', 'Sign', 'Cyclist']
def process_single_sequence(sequence_file,
                            save_path,
                            sampled_interval,
                            use_two_returns=True):
    """Pre-process one Waymo TFRecord sequence.

    For every ``sampled_interval``-th frame, decodes its point cloud to
    ``<save_path>/<sequence>/<frame>.npy`` and collects an info dict
    (metadata, pose, camera shapes, annotations). All infos are pickled to
    ``<sequence>.pkl`` and returned; if that pickle already exists the
    sequence is skipped and the cached infos are returned.

    Returns:
        list of per-frame info dicts (empty if the TFRecord is missing).
    """
    sequence_name = os.path.splitext(os.path.basename(sequence_file))[0]
    if not os.path.exists(sequence_file):
        logger.info("FileNotFoundError: %s" % sequence_file)
        return []
    dataset = tf.data.TFRecordDataset(sequence_file, compression_type='')
    cur_save_dir = os.path.join(save_path, sequence_name)
    if not os.path.exists(cur_save_dir):
        os.makedirs(cur_save_dir)
    pkl_file = os.path.join(cur_save_dir, "%s.pkl" % sequence_name)
    sequence_infos = []
    if os.path.exists(pkl_file):
        # Use a context manager so the pickle handle is closed instead of
        # being leaked (the original `pickle.load(open(...))` never closed it).
        with open(pkl_file, 'rb') as f:
            sequence_infos = pickle.load(f)
        logger.info(
            "Skip sequence since it has been processed before: %s" % pkl_file)
        return sequence_infos
    for cnt, data in enumerate(dataset):
        if cnt % sampled_interval != 0:
            continue
        frame = dataset_pb2.Frame()
        frame.ParseFromString(bytearray(data.numpy()))
        info = {}
        pc_info = {
            "num_features": 5,
            "lidar_sequence": sequence_name,
            "sample_idx": cnt
        }
        info["point_cloud"] = pc_info
        info["frame_id"] = sequence_name + ("_%03d" % cnt)
        info["metadata"] = {
            "context_name": frame.context.name,
            "timestamp_micros": frame.timestamp_micros
        }
        # Record the (height, width) of each of the 5 cameras.
        image_info = {}
        for j in range(5):
            width = frame.context.camera_calibrations[j].width
            height = frame.context.camera_calibrations[j].height
            image_info.update({"image_shape_%d" % j: (height, width)})
        info["image"] = image_info
        # Vehicle-to-global pose of this frame as a 4x4 matrix.
        pose = np.array(frame.pose.transform, dtype=np.float32).reshape(4, 4)
        info["pose"] = pose
        # parse annotations
        annotations = generate_labels(frame)
        info['annos'] = annotations
        # parse scene point data and save it
        num_points_of_each_lidar = save_lidar_points(
            frame,
            os.path.join(cur_save_dir, ("%04d.npy" % cnt)),
            use_two_returns=use_two_returns)
        info['num_points_of_each_lidar'] = num_points_of_each_lidar
        sequence_infos.append(info)
    with open(pkl_file, 'wb') as f:
        pickle.dump(sequence_infos, f)
    logger.info('Infos are saved to (sampled_interval=%d): %s' %
                (sampled_interval, pkl_file))
    return sequence_infos
def drop_info_with_name(info, name):
    """Filter an annotation dict of parallel arrays.

    Removes every entry whose ``info['name']`` equals ``name`` from all
    arrays in ``info`` and returns the filtered dict; ``info`` itself is
    left untouched.
    """
    keep = [idx for idx, entry in enumerate(info['name']) if entry != name]
    return {key: value[keep] for key, value in info.items()}
def generate_labels(frame):
    """Convert the laser (3D) labels of one Waymo frame into numpy arrays.

    Returns a dict of parallel arrays ('name', 'difficulty', 'dimensions',
    'location', 'heading_angles', 'obj_ids', 'tracking_difficulty',
    'num_points_in_gt') plus 'gt_boxes_lidar' of shape (N, 7):
    [x, y, z, length, width, height, heading]. Objects whose class maps to
    'unknown' are dropped. (Unused `speeds`/`accelerations` accumulators
    from the upstream code were removed.)
    """
    obj_name, difficulty, dimensions, locations, heading_angles = [], [], [], [], []
    tracking_difficulty, obj_ids = [], []
    num_points_in_gt = []
    laser_labels = frame.laser_labels
    for i in range(len(laser_labels)):
        box = laser_labels[i].box
        class_ind = laser_labels[i].type
        loc = [box.center_x, box.center_y, box.center_z]
        heading_angles.append(box.heading)
        obj_name.append(WAYMO_CLASSES[class_ind])
        difficulty.append(laser_labels[i].detection_difficulty_level)
        tracking_difficulty.append(laser_labels[i].tracking_difficulty_level)
        dimensions.append([box.length, box.width, box.height])
        locations.append(loc)
        obj_ids.append(laser_labels[i].id)
        num_points_in_gt.append(laser_labels[i].num_lidar_points_in_box)
    annotations = {}
    annotations['name'] = np.array(obj_name)
    annotations['difficulty'] = np.array(difficulty)
    annotations['dimensions'] = np.array(dimensions)
    annotations['location'] = np.array(locations)
    annotations['heading_angles'] = np.array(heading_angles)
    annotations['obj_ids'] = np.array(obj_ids)
    annotations['tracking_difficulty'] = np.array(tracking_difficulty)
    annotations['num_points_in_gt'] = np.array(num_points_in_gt)
    # Remove objects labeled as 'unknown' from every parallel array.
    annotations = drop_info_with_name(annotations, name='unknown')
    if annotations['name'].__len__() > 0:
        gt_boxes_lidar = np.concatenate([
            annotations['location'], annotations['dimensions'],
            annotations['heading_angles'][..., np.newaxis]
        ],
                                        axis=1)
    else:
        gt_boxes_lidar = np.zeros((0, 7))
    annotations['gt_boxes_lidar'] = gt_boxes_lidar
    return annotations
def save_lidar_points(frame, cur_save_path, use_two_returns=True):
    """Decode all lidar returns of one frame and save them as one .npy file.

    The saved float32 array has shape (N, 6): [x, y, z, intensity,
    elongation, NLZ_flag], with points in the vehicle frame. Returns the
    per-lidar point counts (first-return counts per sensor).
    """
    range_images, camera_projections, range_image_top_pose = \
        frame_utils.parse_range_image_and_camera_projection(frame)
    # cp_points (camera projections) are decoded but not saved here.
    points, cp_points, points_in_NLZ_flag, points_intensity, points_elongation = convert_range_image_to_point_cloud(
        frame,
        range_images,
        camera_projections,
        range_image_top_pose,
        ri_index=(0, 1) if use_two_returns else (0, ))
    # 3d points in vehicle frame.
    points_all = np.concatenate(points, axis=0)
    points_in_NLZ_flag = np.concatenate(
        points_in_NLZ_flag, axis=0).reshape(-1, 1)
    points_intensity = np.concatenate(points_intensity, axis=0).reshape(-1, 1)
    points_elongation = np.concatenate(points_elongation, axis=0).reshape(-1, 1)
    num_points_of_each_lidar = [point.shape[0] for point in points]
    save_points = np.concatenate(
        [points_all, points_intensity, points_elongation, points_in_NLZ_flag],
        axis=-1).astype(np.float32)
    np.save(cur_save_path, save_points)
    return num_points_of_each_lidar
def convert_range_image_to_point_cloud(frame,
                                       range_images,
                                       camera_projections,
                                       range_image_top_pose,
                                       ri_index=(0, 1)):
    """Decode range images of every lidar into cartesian point arrays.

    Args:
        frame: Waymo dataset_pb2.Frame.
        range_images: {laser_name: [range_image_return0, return1]}.
        camera_projections: {laser_name: [cp_return0, return1]}.
        range_image_top_pose: per-pixel pose of the TOP lidar range image.
        ri_index: which returns to decode (both by default).

    Returns:
        Five lists, one entry per lidar (sorted by laser name): points,
        camera-projection points, NLZ flags, intensity, elongation.
    """
    calibrations = sorted(
        frame.context.laser_calibrations, key=lambda c: c.name)
    points = []
    cp_points = []
    points_NLZ = []
    points_intensity = []
    points_elongation = []
    frame_pose = tf.convert_to_tensor(
        np.reshape(np.array(frame.pose.transform), [4, 4]))
    # [H, W, 6]
    range_image_top_pose_tensor = tf.reshape(
        tf.convert_to_tensor(range_image_top_pose.data),
        range_image_top_pose.shape.dims)
    # [H, W, 3, 3]
    range_image_top_pose_tensor_rotation = transform_utils.get_rotation_matrix(
        range_image_top_pose_tensor[..., 0],
        range_image_top_pose_tensor[..., 1],
        range_image_top_pose_tensor[..., 2])
    range_image_top_pose_tensor_translation = range_image_top_pose_tensor[...,
                                                                          3:]
    range_image_top_pose_tensor = transform_utils.get_transform(
        range_image_top_pose_tensor_rotation,
        range_image_top_pose_tensor_translation)
    for c in calibrations:
        points_single, cp_points_single, points_NLZ_single, points_intensity_single, points_elongation_single \
            = [], [], [], [], []
        for cur_ri_index in ri_index:
            range_image = range_images[c.name][cur_ri_index]
            # Beam inclinations: compute uniformly from min/max if the
            # calibration does not provide them explicitly.
            if len(c.beam_inclinations) == 0:  # pylint: disable=g-explicit-length-test
                beam_inclinations = range_image_utils.compute_inclination(
                    tf.constant(
                        [c.beam_inclination_min, c.beam_inclination_max]),
                    height=range_image.shape.dims[0])
            else:
                beam_inclinations = tf.constant(c.beam_inclinations)
            beam_inclinations = tf.reverse(beam_inclinations, axis=[-1])
            extrinsic = np.reshape(np.array(c.extrinsic.transform), [4, 4])
            range_image_tensor = tf.reshape(
                tf.convert_to_tensor(range_image.data), range_image.shape.dims)
            # Per-pixel poses exist only for the TOP lidar.
            pixel_pose_local = None
            frame_pose_local = None
            if c.name == dataset_pb2.LaserName.TOP:
                pixel_pose_local = range_image_top_pose_tensor
                pixel_pose_local = tf.expand_dims(pixel_pose_local, axis=0)
                frame_pose_local = tf.expand_dims(frame_pose, axis=0)
            # Channel 0 is range; pixels with range <= 0 carry no return.
            range_image_mask = range_image_tensor[..., 0] > 0
            range_image_NLZ = range_image_tensor[..., 3]
            range_image_intensity = range_image_tensor[..., 1]
            range_image_elongation = range_image_tensor[..., 2]
            range_image_cartesian = range_image_utils.extract_point_cloud_from_range_image(
                tf.expand_dims(range_image_tensor[..., 0], axis=0),
                tf.expand_dims(extrinsic, axis=0),
                tf.expand_dims(tf.convert_to_tensor(beam_inclinations), axis=0),
                pixel_pose=pixel_pose_local,
                frame_pose=frame_pose_local)
            range_image_cartesian = tf.squeeze(range_image_cartesian, axis=0)
            points_tensor = tf.gather_nd(range_image_cartesian,
                                         tf.where(range_image_mask))
            points_NLZ_tensor = tf.gather_nd(
                range_image_NLZ, tf.compat.v1.where(range_image_mask))
            points_intensity_tensor = tf.gather_nd(
                range_image_intensity, tf.compat.v1.where(range_image_mask))
            points_elongation_tensor = tf.gather_nd(
                range_image_elongation, tf.compat.v1.where(range_image_mask))
            # NOTE(review): camera projections are always taken from the first
            # return ([0]) even when decoding the second — confirm intended.
            cp = camera_projections[c.name][0]
            cp_tensor = tf.reshape(tf.convert_to_tensor(cp.data), cp.shape.dims)
            cp_points_tensor = tf.gather_nd(cp_tensor,
                                            tf.where(range_image_mask))
            points_single.append(points_tensor.numpy())
            cp_points_single.append(cp_points_tensor.numpy())
            points_NLZ_single.append(points_NLZ_tensor.numpy())
            points_intensity_single.append(points_intensity_tensor.numpy())
            points_elongation_single.append(points_elongation_tensor.numpy())
        # Merge both returns of this lidar into a single array per feature.
        points.append(np.concatenate(points_single, axis=0))
        cp_points.append(np.concatenate(cp_points_single, axis=0))
        points_NLZ.append(np.concatenate(points_NLZ_single, axis=0))
        points_intensity.append(np.concatenate(points_intensity_single, axis=0))
        points_elongation.append(
            np.concatenate(points_elongation_single, axis=0))
    return points, cp_points, points_NLZ, points_intensity, points_elongation
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets/apollo/apollo_pointcloud_det.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
from paddle3d.apis import manager
from paddle3d.datasets.apollo import apollo_utils
from paddle3d.datasets.apollo.apollo_det import ApolloDetDataset
from paddle3d.sample import Sample
from paddle3d.utils import box_utils
@manager.DATASETS.add_component
class ApolloPCDataset(ApolloDetDataset):
    """
    Point-cloud variant of the Apollo detection dataset: each sample is a
    LiDAR scan (``velodyne/*.bin``) plus, in train mode, its 3D box labels
    converted to the lidar-coordinate convention used by the models.
    """

    def __getitem__(self, index: int) -> Sample:
        """
        Build one Sample: scan path, frame id and (in train mode) 3D boxes,
        labels and optional road plane, then apply the transform pipeline.
        """
        # Frame ids are '<dataset_name>/<frame_id>'; the scan lives under
        # <dataset_root>/<dataset_name>/<training|testing>/velodyne/.
        filename = '{}.bin'.format(self.data[index])
        path = os.path.join(self.dataset_root, filename.split('/')[0],
                            self.dirname, 'velodyne', filename.split('/')[1])
        sample = Sample(path=path, modality="lidar")
        sample.meta.id = self.data[index]
        if self.is_train_mode:
            kitti_records, ignored_kitti_records = self.load_annotation(index)
            # Convert raw records (hwl, geometric center, yaw) to the
            # (wlh, bottom center, rotation_y) convention expected downstream.
            kitti_records = self.adjust_size_center_yaw(kitti_records)
            ignored_kitti_records = self.adjust_size_center_yaw(ignored_kitti_records)
            _, bboxes_3d, cls_names = apollo_utils.lidar_record_to_object(
                kitti_records, show_warn=True)
            _, ignored_bboxes_3d, _ = apollo_utils.lidar_record_to_object(
                ignored_kitti_records, show_warn=False)
            sample.bboxes_3d = bboxes_3d
            if self.create_gt_database:
                # Keep raw class names when building a GT-sampling database.
                sample.labels = np.array(cls_names)
            else:
                # Otherwise store integer indices into self.class_names.
                sample.labels = np.array(
                    [self.class_names.index(name) for name in cls_names], dtype=np.int64)
            sample.ignored_bboxes_3d = ignored_bboxes_3d
            if self.use_road_plane:
                sample.road_plane = self.load_road_plane(index)
        if self.transforms:
            sample = self.transforms(sample)
        if 'path' not in sample:
            sample.path = path
        return sample

    def load_road_plane(self, index):
        """
        Load the ground-plane coefficients of one frame.

        Returns:
            np.ndarray or None: Unit-normalized plane coefficients with the
            normal facing up (rectified camera coordinates), or None when the
            plane file does not exist.
        """
        file_name = '{}.txt'.format(self.data[index])
        # NOTE(review): ``self.base_dir`` is not assigned in this class or in
        # the visible ApolloDetDataset code — confirm it is set elsewhere
        # before relying on road planes.
        plane_file = os.path.join(self.base_dir, 'planes', file_name)
        if not os.path.exists(plane_file):
            return None
        with open(plane_file, 'r') as f:
            lines = f.readlines()
        lines = [float(i) for i in lines[3].split()]
        plane = np.asarray(lines)
        # Ensure normal is always facing up, this is in the rectified camera coordinate
        if plane[1] > 0:
            plane = -plane
        norm = np.linalg.norm(plane[0:3])
        plane = plane / norm
        return plane

    def adjust_size_center_yaw(self, kitti_records):
        """
        Convert label rows in place: hwl -> wlh, geometric center -> bottom
        center, and yaw -> rotation_y wrapped into a single period.
        Empty inputs are returned unchanged.
        """
        if kitti_records.shape[0] == 0:
            return kitti_records
        # hwl -> wlh
        kitti_records[:, 8:11] = kitti_records[:, [9, 10, 8]]
        # geometric center -> bottom center
        kitti_records[:, 13] -= kitti_records[:, 10] / 2
        # yaw -> rotation_y
        rotation_y = -kitti_records[:, -1] - np.pi/2
        rotation_y = box_utils.limit_period(rotation_y, offset=0.5, period=np.pi * 2)
        kitti_records[:, -1] = rotation_y
        return kitti_records
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets/apollo/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .apollo_det import ApolloDetDataset
from .apollo_pointcloud_det import ApolloPCDataset
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets/apollo/apollo_metric.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List
import warnings
import numpy as np
from paddle3d.datasets.kitti.kitti_utils import filter_fake_result
from paddle3d.datasets.metrics import MetricABC
from paddle3d.utils import box_utils
from paddle3d.sample import Sample
from paddle3d.thirdparty import apollo_eval
from paddle3d.utils.logger import logger
class ApolloMetric(MetricABC):
    """
    Evaluation metric for Apollo 3D object detection, reported as KITTI-style
    AP_R40 over BEV and 3D IoU via ``apollo_eval``.

    Args:
        groundtruths (List[np.ndarray]): Per-frame ground-truth label rows in
            KITTI record layout.
        classmap (Dict[int, str]): Mapping from predicted label index to
            class name.
        indexes (List): Frame ids used to order predictions for evaluation.
        eval_class_map (Dict[str, str], optional): Optional mapping that merges
            several raw class names into one evaluation class. May be None.
    """

    def __init__(self, groundtruths: List[np.ndarray], classmap: Dict[int, str],
                 indexes: List, eval_class_map: Dict[str, str] = None):
        self.gt_annos = groundtruths
        self.predictions = []
        self.calibs = []
        self.classmap = classmap
        self.indexes = indexes
        self.eval_class_map = eval_class_map
        # Collect the deduplicated list of classes to evaluate. When no
        # remapping is configured, fall back to the raw classmap values
        # instead of crashing on ``None.values()`` (the rest of this class
        # explicitly supports eval_class_map being None).
        if self.eval_class_map is not None:
            candidates = self.eval_class_map.values()
        else:
            candidates = self.classmap.values()
        self.eval_class = []
        for mapped_class in candidates:
            if mapped_class not in self.eval_class:
                self.eval_class.append(mapped_class)

    def _parse_gt_to_eval_format(self,
                                 groundtruths: List[np.ndarray]) -> List[dict]:
        """
        Convert raw KITTI-style GT rows into the per-frame dict format that
        ``apollo_eval`` expects. Frames without GT produce empty arrays.
        """
        res = []
        for idx, rows in enumerate(groundtruths):
            if rows.size == 0:
                warnings.warn("here is a val frame without gt!")
                res.append({
                    'name': np.zeros([0]),
                    'truncated': np.zeros([0]),
                    'occluded': np.zeros([0]),
                    'alpha': np.zeros([0]),
                    'bbox': np.zeros([0, 4]),
                    'dimensions': np.zeros([0, 3]),
                    'location': np.zeros([0, 3]),
                    'rotation_y': np.zeros([0]),
                    'score': np.zeros([0])
                })
            else:
                # Shift z from geometric center to bottom center.
                # NOTE(review): this assumes column 8 holds the box height at
                # this point — confirm against the label layout.
                rows[:, 13] -= rows[:, 8] / 2
                # Remap raw class names to evaluation classes when configured.
                names = []
                for name in rows[:, 0]:
                    if self.eval_class_map is not None and name in self.eval_class_map:
                        name = self.eval_class_map[name]
                    names.append(name)
                names = np.array(names, dtype=object)
                res.append({
                    'name': names,
                    'truncated': rows[:, 1].astype(np.float64),
                    'occluded': rows[:, 2].astype(np.int64),
                    'alpha': rows[:, 3].astype(np.float64),
                    'bbox': rows[:, 4:8].astype(np.float64),
                    'dimensions': rows[:, [10, 9, 8]].astype(np.float64),
                    'location': rows[:, 11:14].astype(np.float64),
                    'rotation_y': rows[:, 14].astype(np.float64)
                })
        return res

    def _parse_predictions_to_eval_format(
            self, predictions: List[Sample]) -> List[dict]:
        """
        Convert predicted Samples into ``apollo_eval``'s dict format, ordered
        by ``self.indexes``. Predictions without boxes produce empty arrays.
        """
        res = {}
        for pred in predictions:
            filter_fake_result(pred)
            id = pred.meta.id
            if pred.bboxes_3d is None:
                det = {
                    'truncated': np.zeros([0]),
                    'occluded': np.zeros([0]),
                    'alpha': np.zeros([0]),
                    'name': np.zeros([0]),
                    'bbox': np.zeros([0, 4]),
                    'dimensions': np.zeros([0, 3]),
                    'location': np.zeros([0, 3]),
                    'rotation_y': np.zeros([0]),
                    'score': np.zeros([0]),
                }
            else:
                num_boxes = pred.bboxes_3d.shape[0]
                output_names = [self.classmap[label] for label in pred.labels]
                if self.eval_class_map is None:
                    names = output_names
                else:
                    names = np.array(
                        [self.eval_class_map[output_name] for output_name in output_names])
                alpha = pred.get('alpha', np.zeros([num_boxes]))
                bboxes_3d = pred.bboxes_3d
                # Swap columns 3/4 so dimensions follow the evaluator's
                # expected ordering.
                w = bboxes_3d[:, 3].copy()
                l = bboxes_3d[:, 4].copy()
                bboxes_3d[:, 3] = l
                bboxes_3d[:, 4] = w
                # Move the box origin to the bottom center expected by the
                # evaluator.
                if bboxes_3d.origin != [.5, .5, 0]:
                    bboxes_3d[:, :3] += bboxes_3d[:, 3:6] * (
                        np.array([.5, .5, 0]) - np.array(bboxes_3d.origin))
                    bboxes_3d.origin = [.5, .5, 0]
                # Convert lidar yaw to rotation_y and wrap into one period.
                bboxes_3d[:, 6] = -bboxes_3d[:, 6] - np.pi / 2
                bboxes_3d[:, 6] = box_utils.limit_period(
                    bboxes_3d[:, 6], offset=0.5, period=np.pi * 2)
                # No 2D boxes are predicted for lidar models.
                bboxes_2d = np.zeros([num_boxes, 4])
                loc = bboxes_3d[:, :3]
                dim = bboxes_3d[:, 3:6]
                det = {
                    'truncated': np.zeros([num_boxes]),
                    'occluded': np.zeros([num_boxes]),
                    'alpha': alpha,
                    'bbox': bboxes_2d,
                    'name': names,
                    'dimensions': dim,
                    'location': loc,
                    'rotation_y': bboxes_3d[:, 6],
                    'score': pred.confidences,
                }
            res[id] = det
        return [res[idx] for idx in self.indexes]

    def update(self, predictions: List[Sample], ground_truths=None, **kwargs):
        """
        Accumulate a batch of predictions (and optional calibrations).
        """
        self.predictions += predictions
        # ``ground_truths`` defaults to None; guard before the membership test.
        if ground_truths is not None and 'calibs' in ground_truths:
            self.calibs.append(ground_truths['calibs'])

    def compute(self, verbose=False, **kwargs) -> dict:
        """
        Run ``apollo_eval`` over all accumulated predictions.

        Args:
            verbose (bool, optional): Whether to log per-class AP_R40 values.

        Returns:
            dict: Per-class AP_R40 results.

        Raises:
            RuntimeError: If the number of predictions does not match the
                number of ground-truth frames.
        """
        gt_annos = self._parse_gt_to_eval_format(self.gt_annos)
        dt_annos = self._parse_predictions_to_eval_format(self.predictions)
        if len(dt_annos) != len(gt_annos):
            raise RuntimeError(
                'The number of predictions({}) is not equal to the number of GroundTruths({})'
                .format(len(dt_annos), len(gt_annos)))
        metric_r40_dict = apollo_eval(
            gt_annos,
            dt_annos,
            current_classes=list(self.eval_class),
            metric_types=["bev", "3d"],
            recall_type='R40',
            z_axis=2,
            z_center=0.0)
        if verbose:
            for cls, cls_metrics in metric_r40_dict.items():
                logger.info("{}:".format(cls))
                for overlap_thresh, metrics in cls_metrics.items():
                    for metric_type, thresh in zip(["bbox", "bev", "3d"],
                                                   overlap_thresh):
                        if metric_type in metrics:
                            logger.info(
                                "{} AP_R40@{:.0%}: {:.2f} {:.2f} {:.2f}".format(
                                    metric_type.upper().ljust(4), thresh,
                                    *metrics[metric_type]))
        return metric_r40_dict
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets/apollo/apollo_utils.py
|
import numpy as np
from typing import List, Tuple
from paddle3d.geometries import BBoxes2D, BBoxes3D, CoordMode
import warnings
def assess_apollo_object_difficulties(kitti_records: np.ndarray,
                                      distances_thres: List = [20, 50]):
    """Assign a difficulty level to every object by its planar distance.

    Objects closer than ``distances_thres[0]`` are easy (0), those between
    the two thresholds are moderate (1), and the rest are hard (2).

    Args:
        kitti_records (np.ndarray): KITTI-style label rows; columns 11 and 12
            hold the box center x/y.
        distances_thres (List): The two distance boundaries in meters.

    Returns:
        np.ndarray: int32 difficulty per object (empty input yields an empty
        array; the fill value before assignment is -1).
    """
    num_objects = kitti_records.shape[0]
    if num_objects == 0:
        return np.full((num_objects, ), -1, dtype=np.int32)
    distances = np.sqrt((np.square(kitti_records[:, 11]) +
                         np.square(kitti_records[:, 12])).astype(float))
    # Boolean masks describing the three distance bands.
    easy = ~(distances >= distances_thres[0])
    moderate = ~(distances >= distances_thres[1])
    hard = ~(distances <= distances_thres[0])
    difficulties = np.full((num_objects, ), -1, dtype=np.int32)
    # Later assignments win: easy overrides moderate overrides hard.
    difficulties[hard ^ moderate] = 2
    difficulties[easy ^ moderate] = 1
    difficulties[easy] = 0
    return difficulties
# lidar record fields
# type, truncated, occluded, alpha, xmin, ymin, xmax, ymax, dw, dl, dh, lx, ly, lz, rz
def lidar_record_to_object(
        kitti_records: np.ndarray, show_warn: bool) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Build 2D/3D box containers and class names from KITTI-style rows.

    Args:
        kitti_records (np.ndarray): Label rows; may be empty.
        show_warn (bool): Whether to warn when the frame has no objects.

    Returns:
        Tuple: (BBoxes2D, BBoxes3D, class names). Empty frames yield
        zero-length box containers and an empty list of names.
    """
    if kitti_records.shape[0] == 0:
        if show_warn:
            warnings.warn("fake object!")
        empty_3d = BBoxes3D(
            np.zeros([0, 7]),
            origin=[0.5, 0.5, 0.],
            coordmode=CoordMode.KittiLidar,
            rot_axis=2)
        return BBoxes2D(np.zeros([0, 4])), empty_3d, []
    # Columns: 11:14 center xyz, 8:11 dimensions, 14 yaw.
    box_params = np.concatenate(
        [kitti_records[:, 11:14], kitti_records[:, 8:11], kitti_records[:, 14:15]],
        axis=1)
    bboxes_3d = BBoxes3D(
        box_params,
        origin=[0.5, 0.5, 0.],
        coordmode=CoordMode.KittiLidar,
        rot_axis=2)
    return BBoxes2D(kitti_records[:, 4:8]), bboxes_3d, kitti_records[:, 0]
def map_class(src_class: str) -> str:
    """Map a raw (case-insensitive) label to its canonical class name.

    Unknown labels are returned unchanged after emitting a warning.
    """
    info = class_information.get(src_class.lower())
    if info is None:
        warnings.warn(
            "Unknown class: {} ".format(src_class)
        )
        return src_class
    return info['map_class']
# Lookup table keyed by LOWER-CASED raw label name -> canonical class name and
# the [easy, moderate] distance thresholds used to grade difficulty.
# All keys must be lowercase: ``map_class`` always looks up
# ``src_class.lower()``, so a mixed-case key can never match.
class_information = {
    # smallMot
    'smallmot': {'map_class': 'smallMot', 'difficulty_threshold': [20, 40]},
    'midmot': {'map_class': 'smallMot', 'difficulty_threshold': [20, 40]},
    'smallcar': {'map_class': 'smallMot', 'difficulty_threshold': [20, 40]},
    'smallvehicle': {'map_class': 'smallMot', 'difficulty_threshold': [20, 40]},
    # bigMot
    'bigmot': {'map_class': 'bigMot', 'difficulty_threshold': [30, 60]},
    'verybigmot': {'map_class': 'bigMot', 'difficulty_threshold': [30, 60]},
    'truck': {'map_class': 'bigMot', 'difficulty_threshold': [30, 60]},
    'van': {'map_class': 'bigMot', 'difficulty_threshold': [30, 60]},
    'bus': {'map_class': 'bigMot', 'difficulty_threshold': [30, 60]},
    'bigvehicle': {'map_class': 'bigMot', 'difficulty_threshold': [30, 60]},
    # pedestrian
    'pedestrian': {'map_class': 'pedestrian', 'difficulty_threshold': [10, 20]},
    'cluster': {'map_class': 'pedestrian', 'difficulty_threshold': [10, 20]},
    # nonMot
    # Fixed: this key was previously 'nonMot' (mixed case) and therefore
    # unreachable via the lower-cased lookup in ``map_class``.
    'nonmot': {'map_class': 'nonMot', 'difficulty_threshold': [15, 30]},
    'bicyclist': {'map_class': 'nonMot', 'difficulty_threshold': [15, 30]},
    'motorcyclist': {'map_class': 'nonMot', 'difficulty_threshold': [15, 30]},
    'onlybicycle': {'map_class': 'nonMot', 'difficulty_threshold': [10, 20]},
    'motorcycle': {'map_class': 'nonMot', 'difficulty_threshold': [15, 30]},
    'bicycle': {'map_class': 'nonMot', 'difficulty_threshold': [10, 20]},
    'cyclist': {'map_class': 'nonMot', 'difficulty_threshold': [10, 20]},
    'onlytricycle': {'map_class': 'nonMot', 'difficulty_threshold': [20, 35]},
    'tricyclist': {'map_class': 'nonMot', 'difficulty_threshold': [20, 35]},
    # TrafficCone
    'trafficcone': {'map_class': 'TrafficCone', 'difficulty_threshold': [8, 15]},
    'safetybarrier': {'map_class': 'TrafficCone', 'difficulty_threshold': [15, 25]},
    'sign': {'map_class': 'TrafficCone', 'difficulty_threshold': [8, 15]},
    'crashbarrel': {'map_class': 'TrafficCone', 'difficulty_threshold': [10, 20]},
    # others
    'stopbar': {'map_class': 'stopBar', 'difficulty_threshold': [8, 15]},
    'spike': {'map_class': 'spike', 'difficulty_threshold': [4, 8]},
    'smallmovable': {'map_class': 'smallMovable', 'difficulty_threshold': [8, 15]},
    'smallunmovable': {'map_class': 'smallUnmovable', 'difficulty_threshold': [8, 15]},
    'unknown': {'map_class': 'unknown', 'difficulty_threshold': [8, 15]},
    'unknow': {'map_class': 'unknow', 'difficulty_threshold': [8, 15]},
    'others': {'map_class': 'others', 'difficulty_threshold': [8, 15]},
    'other': {'map_class': 'others', 'difficulty_threshold': [8, 15]},
    'accessory': {'map_class': 'accessory', 'difficulty_threshold': [10, 20]},
    'wheelbarrow': {'map_class': 'others', 'difficulty_threshold': [8, 15]},
    'blend': {'map_class': 'others', 'difficulty_threshold': [8, 15]},
    'peopleslightly': {'map_class': 'others', 'difficulty_threshold': [8, 15]},
    'vehicleslightly': {'map_class': 'others', 'difficulty_threshold': [8, 15]},
    'otherslightly': {'map_class': 'others', 'difficulty_threshold': [8, 15]},
    'unknownunmovable': {'map_class': 'others', 'difficulty_threshold': [8, 15]},
    'unknownmovable': {'map_class': 'others', 'difficulty_threshold': [8, 15]}
}
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets/apollo/apollo_det.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import os
from typing import List, Tuple, Union, Dict
import numpy as np
import pandas
from paddle3d import transforms as T
from paddle3d.datasets import BaseDataset
from paddle3d.datasets.apollo.apollo_metric import ApolloMetric
from paddle3d.datasets.apollo import apollo_utils
from paddle3d.transforms import TransformABC
class ApolloDetDataset(BaseDataset):
    """
    Base dataset for Apollo 3D object detection stored in a KITTI-like layout.

    Expected directory layout per sub-dataset::

        <dataset_root>/<dataset_name>/ImageSets/{train,val,trainval,test}.txt
        <dataset_root>/<dataset_name>/{training,testing}/label_2/<frame>.txt

    Args:
        dataset_root (str): Root directory containing the sub-datasets.
        dataset_list (str or List[str], optional): Sub-dataset name(s); a
            single name may be passed as a plain string.
        mode (str, optional): 'train', 'val', 'trainval' or 'test'.
        transforms (TransformABC or List[TransformABC], optional): Transforms
            applied to every sample.
        class_names (list or tuple, optional): Class names to keep; rows with
            other classes become ignored annotations.
        class_balanced_sampling (bool, optional): Re-sample frames so every
            class is approximately equally represented (train mode only).
        use_road_plane (bool, optional): Whether ground-plane files are used.
        distance_threshold (float, optional): GT boxes whose center x/y exceed
            this range are dropped at evaluation time.
        create_gt_database (bool, optional): Keep annotations unfiltered so a
            GT-sampling database can be built.
        eval_class_map (Dict[str, str], optional): Class remapping applied at
            evaluation time.
    """

    def __init__(self,
                 dataset_root: str,
                 dataset_list: Union[str, List[str]] = None,
                 mode: str = "train",
                 transforms: Union[TransformABC, List[TransformABC]] = None,
                 class_names: Union[list, tuple] = None,
                 class_balanced_sampling: bool = False,
                 use_road_plane: bool = False,
                 distance_threshold: float = 80.0,
                 create_gt_database: bool = False,
                 eval_class_map: Dict[str, str] = None):
        super().__init__()
        self.dataset_root = dataset_root
        # Accept a single dataset name passed as a plain string (the
        # annotation allows it; iterating a bare string would split it into
        # characters below).
        if isinstance(dataset_list, str):
            dataset_list = [dataset_list]
        self.dataset_list = dataset_list if dataset_list is not None else []
        self.mode = mode.lower()
        self.distance_threshold = distance_threshold
        self.create_gt_database = create_gt_database
        self.eval_class_map = eval_class_map
        self.class_names = class_names
        self.use_road_plane = use_road_plane
        self.dirname = 'testing' if self.is_test_mode else 'training'
        # ``transforms`` defaults to None; iterating it unguarded would raise.
        if transforms is not None:
            for transform in transforms:
                if isinstance(transform, T.samplingV2.SamplingDatabaseV2):
                    assert transform.class_names == self.class_names, \
                        "dataset's class_name must be same as SamplingDatabaseV2"
            if isinstance(transforms, list):
                transforms = T.Compose(transforms)
        self.transforms = transforms
        if self.mode not in ['train', 'val', 'trainval', 'test']:
            raise ValueError(
                "mode should be 'train', 'val', 'trainval' or 'test', but got {}."
                .format(self.mode))
        # Resolve the split file of every sub-dataset.
        self.imagesets = []
        for dataset_name in self.dataset_list:
            self.imagesets.append(os.path.join(self.dataset_root, dataset_name,
                                               'ImageSets', '{}.txt'.format(self.mode)))
        # Frame ids are stored as '<dataset_name>/<frame_id>'.
        self.data = []
        for dataset_name, split_path in zip(self.dataset_list, self.imagesets):
            with open(split_path) as file:
                data_list = file.read().strip('\n').split('\n')
            for line in data_list:
                line = dataset_name + '/' + line
                self.data.append(line)
        assert self.data != [], 'the data list is empty!'
        if class_balanced_sampling and self.mode == 'train' and len(
                self.class_names) > 1:
            # Oversample frames containing under-represented classes so each
            # class approaches a 1/num_classes share of the frames.
            cls_dist = {class_name: [] for class_name in self.class_names}
            for index in range(len(self.data)):
                file_idx = self.data[index]
                kitti_records, ignored_kitti_records = self.load_annotation(
                    index)
                gt_names = []
                for anno in kitti_records:
                    class_name = anno[0]
                    if class_name in self.class_names:
                        gt_names.append(class_name)
                for class_name in set(gt_names):
                    cls_dist[class_name].append(file_idx)
            num_balanced_samples = sum([len(v) for k, v in cls_dist.items()])
            num_balanced_samples = max(num_balanced_samples, 1)
            balanced_frac = 1.0 / len(self.class_names)
            fracs = [len(v) / num_balanced_samples for k, v in cls_dist.items()]
            sampling_ratios = [balanced_frac / frac for frac in fracs]
            resampling_data = []
            for samples, sampling_ratio in zip(
                    list(cls_dist.values()), sampling_ratios):
                resampling_data.extend(samples)
                if sampling_ratio > 1.:
                    resampling_data.extend(
                        np.random.choice(
                            samples,
                            int(len(samples) * (sampling_ratio - 1.))).tolist())
            self.data = resampling_data

    def __len__(self):
        """Number of frames (after optional class-balanced resampling)."""
        return len(self.data)

    def load_annotation(self, index: int) -> Tuple[np.ndarray, np.ndarray]:
        """
        Load the label file of one frame.

        Returns:
            Tuple[np.ndarray, np.ndarray]: Rows whose (mapped) class is in
            ``class_names``, and the ignored remainder. Either may be empty.
        """
        filename = '{}.txt'.format(self.data[index])
        filename = os.path.join(self.dataset_root, filename.split('/')[0],
                                self.dirname, 'label_2', filename.split('/')[1])
        with open(os.path.join(filename), 'r') as csv_file:
            df = pandas.read_csv(csv_file, sep=' ', header=None)
            array = np.array(df)
        rows = []
        ignored_rows = []
        for row in array:
            # if create gt database, do not filter by class
            if self.create_gt_database:
                rows.append(row)
            else:
                row[0] = apollo_utils.map_class(row[0])
                if row[0] in self.class_names:
                    rows.append(row)
                else:
                    ignored_rows.append(row)
        kitti_records = np.array(rows)
        ignored_kitti_records = np.array(ignored_rows)
        return kitti_records, ignored_kitti_records

    @property
    def metric(self):
        """Build an ApolloMetric over the range-filtered ground truths."""
        gt = []
        for idx in range(len(self)):
            annos = self.load_annotation(idx)
            anno = self.FilterGTOutsideRange(annos[0])
            ignored_anno = self.FilterGTOutsideRange(annos[1])
            if len(anno) > 0 and len(ignored_anno) > 0:
                gt.append(np.concatenate((anno, ignored_anno), axis=0))
            elif len(anno) > 0:
                gt.append(anno)
            else:
                gt.append(ignored_anno)
        return ApolloMetric(
            groundtruths=gt,
            classmap={i: name
                      for i, name in enumerate(self.class_names)},
            indexes=self.data,
            eval_class_map=self.eval_class_map)

    def FilterGTOutsideRange(self, annos):
        """Drop GT rows whose center x/y lies outside ``distance_threshold``."""
        if len(annos) > 0:
            # Columns -4/-3 hold the box center x/y in the record layout.
            mask = (annos[:, -4] >= -self.distance_threshold) & \
                   (annos[:, -4] <= self.distance_threshold) & \
                   (annos[:, -3] >= -self.distance_threshold) & \
                   (annos[:, -3] <= self.distance_threshold)
            annos = annos[mask]
        return annos

    @property
    def name(self) -> str:
        return "Apollo"

    @property
    def labels(self) -> List[str]:
        return self.class_names
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets/semantic_kitti/semantic_kitti_seg.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numbers
from collections.abc import Mapping, Sequence
from pathlib import Path
from typing import List
import numpy as np
import paddle
from paddle3d.apis import manager
from paddle3d.datasets.semantic_kitti.semantic_kitti import \
SemanticKITTIDataset
from paddle3d.sample import Sample
from paddle3d.utils.logger import logger
from .semantic_kitti_metric import SemanticKITTIMetric
__all__ = ["SemanticKITTISegDataset"]
@manager.DATASETS.add_component
class SemanticKITTISegDataset(SemanticKITTIDataset):
    """
    SemanticKITTI dataset for semantic segmentation task.
    """

    def __getitem__(self, index: int) -> Sample:
        """
        Build one Sample: scan path plus (outside test mode) the path of the
        matching ``.label`` file, then apply the transform pipeline.
        """
        sample = Sample(path=self.data[index], modality="lidar")
        if not self.is_test_mode:
            # Labels live in a sibling "labels" directory with the same stem
            # as the scan file but a ".label" extension.
            scan_path = Path(self.data[index])
            label_path = (scan_path.parents[1] / "labels" /
                          scan_path.name).with_suffix(".label")
            sample.labels = label_path
        if self.transforms:
            sample = self.transforms(sample)
        if "proj_mask" in sample.meta:
            # Zero out projected pixels that received no LiDAR return.
            sample.data *= sample.meta.pop("proj_mask")
        return sample

    def collate_fn(self, batch: List):
        """
        Recursively collate a list of samples into batch form.

        Fields named in ``var_len_fields`` vary in length per sample and are
        kept as plain Python lists; everything else is stacked/recursed.
        """
        sample = batch[0]
        if isinstance(sample, np.ndarray):
            batch = np.stack(batch, axis=0)
            return batch
        elif isinstance(sample, paddle.Tensor):
            return paddle.stack(batch, axis=0)
        elif isinstance(sample, numbers.Number):
            batch = np.array(batch)
            return batch
        elif isinstance(sample, (str, bytes)):
            return batch
        elif isinstance(sample, Sample):
            var_len_fields = {"data", "labels", "proj_x", "proj_y"}
            collated_batch = {}
            for key, value in sample.items():
                if value is None:
                    continue
                if key not in var_len_fields or isinstance(
                        value, (Sample, Mapping)):
                    collated_batch[key] = self.collate_fn(
                        [d[key] for d in batch])
                else:
                    collated_batch[key] = [d[key] for d in batch]
            return collated_batch
        elif isinstance(sample, Mapping):
            var_len_fields = {"data", "labels", "proj_x", "proj_y"}
            collated_batch = {}
            for key, value in sample.items():
                if key not in var_len_fields or isinstance(
                        value, (Sample, Mapping)):
                    collated_batch[key] = self.collate_fn(
                        [d[key] for d in batch])
                else:
                    collated_batch[key] = [d[key] for d in batch]
            return collated_batch
        elif isinstance(sample, Sequence):
            sample_fields_num = len(sample)
            if not all(
                    len(sample) == sample_fields_num for sample in iter(batch)):
                raise RuntimeError(
                    "fileds number not same among samples in a batch")
            return [self.collate_fn(fields) for fields in zip(*batch)]
        raise TypeError(
            "batch data can only contains: tensor, numpy.ndarray, "
            "dict, list, number, paddle3d.Sample, but got {}".format(
                type(sample)))

    @property
    def metric(self):
        """Build the IoU metric, ignoring classes flagged in LEARNING_IGNORE."""
        ignore = []
        for cl, ign in self.LEARNING_IGNORE.items():
            if ign:
                x_cl = int(cl)
                ignore.append(x_cl)
                logger.info(
                    "Cross-entropy class {} ignored in IoU evaluation".format(
                        x_cl))
        return SemanticKITTIMetric(len(self.LEARNING_MAP_INV), ignore=ignore)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets/semantic_kitti/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .semantic_kitti_seg import SemanticKITTISegDataset
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets/semantic_kitti/semantic_kitti.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ["SemanticKITTIDataset"]
import os
from glob import glob
from typing import List, Tuple, Union
import numpy as np
from paddle3d import transforms as T
from paddle3d.datasets import BaseDataset
from paddle3d.transforms import TransformABC
class SemanticKITTIDataset(BaseDataset):
    """
    SemanticKITTI dataset.
    Class attributes (`LABELS`, `LEARNING_MAP`, `LEARNING_MAP_INV`, `CONTENT`,
    `LEARNING_IGNORE`, `SEQUENCE_SPLITS`) are from SemanticKITTI dataset official
    configuration. Please refer to:
    <https://github.com/PRBonn/semantic-kitti-api/blob/master/config/semantic-kitti-all.yaml>.
    Args:
        dataset_root (str): Path to the root directory of SemanticKITTI dataset.
        mode (str, optional): The mode of dataset. Default is 'train'.
        sequences (list or tuple, optional): The data sequences of dataset.
            If None, use default sequence splits according to `mode`. Default is None.
        transforms (TransformABC or list[TransformABC], optional): The transforms of dataset. Default is None.
    """
    # Raw label id -> human-readable class name.
    LABELS = {
        0: "unlabeled",
        1: "outlier",
        10: "car",
        11: "bicycle",
        13: "bus",
        15: "motorcycle",
        16: "on-rails",
        18: "truck",
        20: "other-vehicle",
        30: "person",
        31: "bicyclist",
        32: "motorcyclist",
        40: "road",
        44: "parking",
        48: "sidewalk",
        49: "other-ground",
        50: "building",
        51: "fence",
        52: "other-structure",
        60: "lane-marking",
        70: "vegetation",
        71: "trunk",
        72: "terrain",
        80: "pole",
        81: "traffic-sign",
        99: "other-object",
        252: "moving-car",
        253: "moving-bicyclist",
        254: "moving-person",
        255: "moving-motorcyclist",
        256: "moving-on-rails",
        257: "moving-bus",
        258: "moving-truck",
        259: "moving-other-vehicle"
    }
    # Raw label id -> training class id (moving classes folded into static).
    LEARNING_MAP = {
        0: 0,  # "unlabeled"
        1: 0,  # "outlier" mapped to "unlabeled" ------------------------mapped
        10: 1,  # "car"
        11: 2,  # "bicycle"
        13: 5,  # "bus" mapped to "other-vehicle" ------------------------mapped
        15: 3,  # "motorcycle"
        16: 5,  # "on-rails" mapped to "other-vehicle" -------------------mapped
        18: 4,  # "truck"
        20: 5,  # "other-vehicle"
        30: 6,  # "person"
        31: 7,  # "bicyclist"
        32: 8,  # "motorcyclist"
        40: 9,  # "road"
        44: 10,  # "parking"
        48: 11,  # "sidewalk"
        49: 12,  # "other-ground"
        50: 13,  # "building"
        51: 14,  # "fence"
        52: 0,  # "other-structure" mapped to "unlabeled" ----------------mapped
        60: 9,  # "lane-marking" to "road" -------------------------------mapped
        70: 15,  # "vegetation"
        71: 16,  # "trunk"
        72: 17,  # "terrain"
        80: 18,  # "pole"
        81: 19,  # "traffic-sign"
        99: 0,  # "other-object" to "unlabeled" --------------------------mapped
        252:
        1,  # "moving-car" to "car" ----------------------------------mapped
        253:
        7,  # "moving-bicyclist" to "bicyclist" ----------------------mapped
        254:
        6,  # "moving-person" to "person" ----------------------------mapped
        255:
        8,  # "moving-motorcyclist" to "motorcyclist" ----------------mapped
        256:
        5,  # "moving-on-rails" mapped to "other-vehicle" ------------mapped
        257:
        5,  # "moving-bus" mapped to "other-vehicle" -----------------mapped
        258:
        4,  # "moving-truck" to "truck" ------------------------------mapped
        259:
        5,  # "moving-other"-vehicle to "other-vehicle" --------------mapped
    }
    LEARNING_MAP_INV = {  # inverse of previous map
        0: 0,  # "unlabeled", and others ignored
        1: 10,  # "car"
        2: 11,  # "bicycle"
        3: 15,  # "motorcycle"
        4: 18,  # "truck"
        5: 20,  # "other-vehicle"
        6: 30,  # "person"
        7: 31,  # "bicyclist"
        8: 32,  # "motorcyclist"
        9: 40,  # "road"
        10: 44,  # "parking"
        11: 48,  # "sidewalk"
        12: 49,  # "other-ground"
        13: 50,  # "building"
        14: 51,  # "fence"
        15: 70,  # "vegetation"
        16: 71,  # "trunk"
        17: 72,  # "terrain"
        18: 80,  # "pole"
        19: 81,  # "traffic-sign"
    }
    CONTENT = {  # as a ratio with the total number of points
        0: 0.018889854628292943,
        1: 0.0002937197336781505,
        10: 0.040818519255974316,
        11: 0.00016609538710764618,
        13: 2.7879693665067774e-05,
        15: 0.00039838616015114444,
        16: 0.0,
        18: 0.0020633612104619787,
        20: 0.0016218197275284021,
        30: 0.00017698551338515307,
        31: 1.1065903904919655e-08,
        32: 5.532951952459828e-09,
        40: 0.1987493871255525,
        44: 0.014717169549888214,
        48: 0.14392298360372,
        49: 0.0039048553037472045,
        50: 0.1326861944777486,
        51: 0.0723592229456223,
        52: 0.002395131480328884,
        60: 4.7084144280367186e-05,
        70: 0.26681502148037506,
        71: 0.006035012012626033,
        72: 0.07814222006271769,
        80: 0.002855498193863172,
        81: 0.0006155958086189918,
        99: 0.009923127583046915,
        252: 0.001789309418528068,
        253: 0.00012709999297008662,
        254: 0.00016059776092534436,
        255: 3.745553104802113e-05,
        256: 0.0,
        257: 0.00011351574470342043,
        258: 0.00010157861367183268,
        259: 4.3840131989471124e-05,
    }
    # Training class id -> whether it is excluded from loss/IoU computation.
    LEARNING_IGNORE = {
        0: True,  # "unlabeled", and others ignored
        1: False,  # "car"
        2: False,  # "bicycle"
        3: False,  # "motorcycle"
        4: False,  # "truck"
        5: False,  # "other-vehicle"
        6: False,  # "person"
        7: False,  # "bicyclist"
        8: False,  # "motorcyclist"
        9: False,  # "road"
        10: False,  # "parking"
        11: False,  # "sidewalk"
        12: False,  # "other-ground"
        13: False,  # "building"
        14: False,  # "fence"
        15: False,  # "vegetation"
        16: False,  # "trunk"
        17: False,  # "terrain"
        18: False,  # "pole"
        19: False,  # "traffic-sign"
    }
    # Official sequence numbers belonging to each split.
    SEQUENCE_SPLITS = {
        'train': (0, 1, 2, 3, 4, 5, 6, 7, 9, 10),
        'val': (8, ),
        'test': (11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21)
    }

    def __init__(self,
                 dataset_root: str,
                 mode: str = "train",
                 sequences: Union[List[int], Tuple[int], None] = None,
                 transforms: Union[TransformABC, List[TransformABC]] = None):
        super().__init__()
        self.mode = mode
        if isinstance(transforms, list):
            transforms = T.Compose(transforms)
        self.transforms = transforms
        if self.mode not in ['train', 'val', 'trainval', 'test']:
            raise ValueError(
                "mode should be 'train', 'val', 'trainval' or 'test', but got {}."
                .format(self.mode))
        # Explicit sequences override the default per-mode split.
        if sequences is not None:
            self.sequences = sequences
        else:
            self.sequences = self.SEQUENCE_SPLITS[self.mode]
        # get file list
        self.data = []
        for seq in self.sequences:
            seq_dir = os.path.join(dataset_root, 'sequences', '{0:02d}'.format(
                int(seq)))
            scans = sorted(glob(os.path.join(seq_dir, 'velodyne', '*.bin')))
            self.data.extend(scans)

    def __len__(self):
        """Number of LiDAR scans across the selected sequences."""
        return len(self.data)

    @staticmethod
    def build_remap_lut():
        """
        Make lookup table for mapping
        """
        maxkey = max(SemanticKITTIDataset.LEARNING_MAP.keys())
        # +100 hack making lut bigger just in case there are unknown labels
        remap_lut = np.zeros((maxkey + 100), dtype=np.int32)
        remap_lut[list(SemanticKITTIDataset.LEARNING_MAP.keys())] = list(
            SemanticKITTIDataset.LEARNING_MAP.values())
        return remap_lut

    @property
    def name(self) -> str:
        """Human-readable dataset name."""
        return "SemanticKITTI"

    @property
    def labels(self) -> List[str]:
        """Class names in training-id order (via LEARNING_MAP_INV)."""
        num_classes = len(self.LEARNING_MAP_INV)
        class_names = [
            self.LABELS[self.LEARNING_MAP_INV[i]] for i in range(num_classes)
        ]
        return class_names
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/datasets/semantic_kitti/semantic_kitti_metric.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List
import numpy as np
import paddle
from paddle3d.datasets.metrics import MetricABC
from paddle3d.sample import Sample
from paddle3d.utils.logger import logger
from .semantic_kitti import SemanticKITTIDataset
__all__ = ["SemanticKITTIMetric"]
class SemanticKITTIMetric(MetricABC):
    """
    IoU evaluation of semantic segmentation task on SemanticKITTI dataset, with Paddle as backend.

    Please refer to:
    <https://github.com/PRBonn/semantic-kitti-api/blob/master/auxiliary/np_ioueval.py>.

    Args:
        num_classes (int): The number of classes.
        ignore (List[int], optional): Classes indices that are ignored during
            evaluation. Defaults to None, meaning no class is ignored.
    """

    def __init__(self, num_classes: int, ignore: List[int] = None):
        # classes
        self.num_classes = num_classes

        # Robustness fix: the original iterated over ``ignore`` directly and
        # crashed with TypeError when it was left as the default None.
        if ignore is None:
            ignore = []

        # What to include and ignore from the means
        include = [n for n in range(self.num_classes) if n not in ignore]
        logger.info("[IOU EVAL] IGNORED CLASSES: {}".format(ignore))
        logger.info("[IOU EVAL] INCLUDED CLASSES: {}".format(include))
        self.ignore = paddle.to_tensor(ignore, dtype="int64")
        self.include = paddle.to_tensor(include, dtype="int64")

        # reset the class counters
        self.reset()

    def num_classes(self):
        # NOTE(review): this method is shadowed by the instance attribute
        # ``self.num_classes`` assigned in __init__, so it is unreachable on
        # instances; kept as-is only to preserve the class interface.
        return self.num_classes

    def reset(self):
        """Clear the accumulated confusion matrix."""
        self.conf_matrix = paddle.zeros((self.num_classes, self.num_classes),
                                        dtype="int64")

    def update(self, predictions: List[Sample],
               ground_truths: Dict):  # x=preds, y=targets
        """Accumulate one batch of predictions into the confusion matrix.

        Args:
            predictions (List[Sample]): Per-sample predicted labels.
            ground_truths (Dict): Batch dict whose "labels" entry holds the
                per-sample ground-truth labels.
        """
        for pd_sample, gt in zip(predictions, ground_truths["labels"]):
            pd = pd_sample.labels
            if isinstance(pd, np.ndarray):
                pd = paddle.to_tensor(pd, dtype="int64")
            if isinstance(gt, np.ndarray):
                gt = paddle.to_tensor(gt, dtype="int64")

            # sizes should be matching
            pd_row = pd.reshape([-1])  # de-batchify
            gt_row = gt.reshape([-1])  # de-batchify

            # check
            assert (pd_row.shape == gt_row.shape)

            # idxs are labels and predictions
            idxs = paddle.stack([pd_row, gt_row], axis=-1)
            updates = paddle.ones([idxs.shape[0]], dtype="int64")

            # make confusion matrix (cols = gt, rows = pred)
            self.conf_matrix = paddle.scatter_nd_add(self.conf_matrix, idxs,
                                                     updates)

    def getStats(self):
        """Return per-class (tp, fp, fn) vectors from the confusion matrix."""
        # remove fp from confusion on the ignore classes cols
        conf = self.conf_matrix.clone().astype("float64")
        conf[:, self.ignore] = 0

        # get the clean stats
        tp = paddle.diag(conf, offset=0)
        fp = conf.sum(axis=1) - tp
        fn = conf.sum(axis=0) - tp
        return tp, fp, fn

    def getIoU(self):
        """Return (mean IoU over included classes, per-class IoU for ALL classes)."""
        tp, fp, fn = self.getStats()
        intersection = tp
        union = tp + fp + fn + 1e-15
        iou = intersection / union
        iou_mean = (intersection[self.include] / union[self.include]).mean()
        return iou_mean, iou  # returns "iou mean", "iou per class" ALL CLASSES

    def getacc(self):
        """Return overall accuracy computed over the included classes."""
        tp, fp, fn = self.getStats()
        total_tp = tp.sum()
        total = tp[self.include].sum() + fp[self.include].sum() + 1e-15
        acc_mean = total_tp / total
        return acc_mean  # returns "acc mean"

    def compute(self, verbose=False) -> dict:
        """Compute the final metrics, optionally logging per-class IoU.

        Args:
            verbose (bool): When True, log the mean accuracy, mean IoU and
                per-class IoU of every non-ignored class.

        Returns:
            dict: mean_acc, mean_iou and class_iou tensors.
        """
        m_accuracy = self.getacc()
        m_jaccard, class_jaccard = self.getIoU()

        if verbose:
            logger.info("Acc avg {:.3f}".format(float(m_accuracy)))
            logger.info("IoU avg {:.3f}".format(float(m_jaccard)))
            for i, jacc in enumerate(class_jaccard):
                if i not in self.ignore:
                    logger.info(
                        'IoU of class {i:} [{class_str:}] = {jacc:.3f}'.format(
                            i=i,
                            class_str=SemanticKITTIDataset.LABELS[
                                SemanticKITTIDataset.LEARNING_MAP_INV[i]],
                            jacc=float(jacc)))

        return dict(
            mean_acc=m_accuracy, mean_iou=m_jaccard, class_iou=class_jaccard)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/utils/grid.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
import paddle.nn as nn
from PIL import Image
def create_meshgrid3d(depth,
                      height,
                      width,
                      normalized_coordinates=True,
                      dtype=None):
    """Generate a coordinate grid for a 3D volume.

    Args:
        depth: the volume depth (D).
        height: the volume height (rows, H).
        width: the volume width (cols, W).
        normalized_coordinates: if True, rescale coordinates from pixel
            indices to the range [-1, 1].
        dtype: paddle dtype of the generated coordinates.

    Return:
        grid tensor with shape :math:`(1, D, H, W, 3)`.
        NOTE(review): the last axis stores coordinates in the (z, x, y)
        order produced by the meshgrid stacking below — confirm against
        callers before assuming an (x, y, z) layout.
    """
    xs = paddle.linspace(0, width - 1, width, dtype=dtype)
    ys = paddle.linspace(0, height - 1, height, dtype=dtype)
    zs = paddle.linspace(0, depth - 1, depth, dtype=dtype)
    # Fix TracerWarning
    # Map index i in [0, n-1] onto [-1, 1] via (i/(n-1) - 0.5) * 2.
    if normalized_coordinates:
        xs = (xs / (width - 1) - 0.5) * 2
        ys = (ys / (height - 1) - 0.5) * 2
        zs = (zs / (depth - 1) - 0.5) * 2
    # generate grid by stacking coordinates
    base_grid = paddle.stack(paddle.meshgrid([zs, xs, ys]), axis=-1)  # DxWxHx3
    # Swap the W and H axes so the result is 1xDxHxWx3.
    return base_grid.transpose([0, 2, 1, 3]).unsqueeze(0)  # 1xDxHxWx3
def normalize_coords(coords, shape):
    """
    This code is based on https://github.com/TRAILab/CaDDN/blob/5a96b37f16b3c29dd2509507b1cdfdff5d53c558/pcdet/utils/grid_utils.py#L4

    Rescale grid coordinates into the range [-1, 1].

    Args:
        coords: Coordinates in grid
        shape: Grid shape [H, W]

    Returns:
        norm_coords: Normalized coordinates in grid
    """
    span_min = -1
    span_max = 1
    # Reverse the shape ordering so it lines up with the coordinate axes.
    reversed_shape = paddle.flip(shape, axis=[0])
    # Pixel indices run over [0, shape - 1]; map that span onto [-1, 1].
    norm_coords = coords / (reversed_shape - 1) * (span_max -
                                                   span_min) + span_min
    return norm_coords
class GridMask(nn.Layer):
    """
    This class is modified from https://github.com/fundamentalvision/BEVFormer/blob/master/projects/mmdet3d_plugin/models/utils/grid_mask.py#L70

    GridMask augmentation: zeroes out a regular grid of stripes on the
    input feature map during training.

    Args:
        use_h: mask stripes along the height axis.
        use_w: mask stripes along the width axis.
        rotate: exclusive upper bound for the random mask rotation (degrees).
        offset: when True, masked pixels get random values in [-1, 1)
            instead of zeros.
        ratio: fraction of each grid period that is masked (via ``self.l``).
        mode: 0 keeps the generated mask, 1 inverts it.
        prob: probability of applying the augmentation per call.
    """

    def __init__(self,
                 use_h,
                 use_w,
                 rotate=1,
                 offset=False,
                 ratio=0.5,
                 mode=0,
                 prob=1.):
        super(GridMask, self).__init__()
        self.use_h = use_h
        self.use_w = use_w
        self.rotate = rotate
        self.offset = offset
        self.ratio = ratio
        self.mode = mode
        # st_prob keeps the configured probability; prob is the live value
        # that set_prob() ramps over training.
        self.st_prob = prob
        self.prob = prob

    def set_prob(self, epoch, max_epoch):
        # Linearly ramp the apply-probability from 0 to st_prob over training.
        self.prob = self.st_prob * epoch / max_epoch  #+ 1.#0.5

    def forward(self, x):
        """Apply a randomly placed, rotated grid mask to ``x`` of shape (N, C, H, W)."""
        #np.random.seed(0)
        if np.random.rand() > self.prob or not self.training:
            return x
        n, c, h, w = x.shape
        x = x.reshape([-1, h, w])
        # Oversize the mask by 1.5x so it still covers the image after rotation.
        hh = int(1.5 * h)
        ww = int(1.5 * w)
        #np.random.seed(0)
        # d is the grid period; self.l is the masked stripe width per period.
        d = np.random.randint(2, h)
        self.l = min(max(int(d * self.ratio + 0.5), 1), d - 1)
        mask = np.ones((hh, ww), np.float32)
        #np.random.seed(0)
        # Random phase offsets for the stripe pattern.
        st_h = np.random.randint(d)
        #np.random.seed(0)
        st_w = np.random.randint(d)
        if self.use_h:
            for i in range(hh // d):
                s = d * i + st_h
                t = min(s + self.l, hh)
                mask[s:t, :] *= 0
        if self.use_w:
            for i in range(ww // d):
                s = d * i + st_w
                t = min(s + self.l, ww)
                mask[:, s:t] *= 0
        #np.random.seed(0)
        # NOTE(review): with the default rotate=1 this is always 0 degrees.
        r = np.random.randint(self.rotate)
        mask = Image.fromarray(np.uint8(mask))
        mask = mask.rotate(r)
        mask = np.asarray(mask)
        # Center-crop the oversized mask back to the input resolution.
        mask = mask[(hh - h) // 2:(hh - h) // 2 +
                    h, (ww - w) // 2:(ww - w) // 2 + w]

        mask = paddle.to_tensor(mask, dtype=x.dtype)
        if self.mode == 1:
            mask = 1 - mask
        mask = mask.expand_as(x)
        if self.offset:
            #np.random.seed(0)
            # Fill masked pixels with random values in [-1, 1).
            offset = paddle.to_tensor(
                2 * (np.random.rand(h, w) - 0.5), dtype=x.dtype)
            x = x * mask + offset * (1 - mask)
        else:
            x = x * mask

        return x.reshape([n, c, h, w])
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/utils/box.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
def boxes_iou_normal(boxes_a, boxes_b):
    """
    This code is based on https://github.com/TRAILab/CaDDN/blob/5a96b37f16b3c29dd2509507b1cdfdff5d53c558/pcdet/utils/box_utils.py#L238

    Pairwise IoU between two sets of axis-aligned 2D boxes.

    Args:
        boxes_a: (N, 4) [x1, y1, x2, y2]
        boxes_b: (M, 4) [x1, y1, x2, y2]

    Returns:
        iou: (N, M) pairwise intersection-over-union.
    """
    assert boxes_a.shape[1] == boxes_b.shape[1] == 4
    # Broadcast into an (N, M) grid of intersection rectangles.
    left = paddle.maximum(boxes_a[:, 0, None], boxes_b[None, :, 0])
    right = paddle.minimum(boxes_a[:, 2, None], boxes_b[None, :, 2])
    top = paddle.maximum(boxes_a[:, 1, None], boxes_b[None, :, 1])
    bottom = paddle.minimum(boxes_a[:, 3, None], boxes_b[None, :, 3])
    # Clamp to zero so disjoint boxes contribute no overlap.
    inter_w = paddle.clip(right - left, min=0)
    inter_h = paddle.clip(bottom - top, min=0)
    intersection = inter_w * inter_h
    area_a = (boxes_a[:, 2] - boxes_a[:, 0]) * (boxes_a[:, 3] - boxes_a[:, 1])
    area_b = (boxes_b[:, 2] - boxes_b[:, 0]) * (boxes_b[:, 3] - boxes_b[:, 1])
    # Guard the union against division by zero for degenerate boxes.
    union = paddle.clip(
        area_a[:, None] + area_b[None, :] - intersection, min=1e-6)
    return intersection / union
def boxes3d_lidar_to_aligned_bev_boxes(boxes3d):
    """
    This code is based on https://github.com/TRAILab/CaDDN/blob/5a96b37f16b3c29dd2509507b1cdfdff5d53c558/pcdet/utils/box_utils.py#L261

    Project rotated 3D lidar boxes to axis-aligned BEV rectangles.

    Args:
        boxes3d: (N, 7 + C) [x, y, z, dx, dy, dz, heading] in lidar coordinate

    Returns:
        aligned_bev_boxes: (N, 4) [x1, y1, x2, y2] in the above lidar coordinate
    """
    # Distance of the heading from the nearest multiple of pi, giving a
    # value in [0, pi/2].
    rot_angle = paddle.abs(boxes3d[:, 6] -
                           paddle.floor(boxes3d[:, 6] / np.pi + 0.5) * np.pi)
    # Below 45 degrees keep (dx, dy); otherwise the box is closer to a
    # 90-degree rotation, so swap the extents to (dy, dx).
    choose_dims = paddle.where(
        rot_angle[:, None] < np.pi / 4,
        paddle.gather(boxes3d, index=paddle.to_tensor([3, 4]), axis=1),
        paddle.gather(boxes3d, index=paddle.to_tensor([4, 3]), axis=1))
    # Expand half-extents around the (x, y) center to get corner form.
    aligned_bev_boxes = paddle.concat(
        [boxes3d[:, 0:2] - choose_dims / 2, boxes3d[:, 0:2] + choose_dims / 2],
        axis=1)
    return aligned_bev_boxes
def boxes3d_nearest_bev_iou(boxes_a, boxes_b):
    """
    This code is based on https://github.com/TRAILab/CaDDN/blob/5a96b37f16b3c29dd2509507b1cdfdff5d53c558/pcdet/utils/box_utils.py#L275

    Pairwise BEV IoU of 3D boxes after snapping each box to its nearest
    axis-aligned orientation.

    Args:
        boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
        boxes_b: (N, 7) [x, y, z, dx, dy, dz, heading]

    Returns:
        (N, M) pairwise IoU of the aligned BEV rectangles.
    """
    bev_a = boxes3d_lidar_to_aligned_bev_boxes(boxes_a)
    bev_b = boxes3d_lidar_to_aligned_bev_boxes(boxes_b)
    return boxes_iou_normal(bev_a, bev_b)
def normalize_bbox(bboxes, pc_range):
    """
    This function is modified from https://github.com/fundamentalvision/BEVFormer/blob/master/projects/mmdet3d_plugin/core/bbox/util.py

    Encode boxes [x, y, z, w, l, h, rot(, vx, vy)] into the regression
    space: log-sized dims and sin/cos-encoded rotation. ``pc_range`` is
    accepted for interface compatibility but not used here.
    """
    center_x = bboxes[..., 0:1]
    center_y = bboxes[..., 1:2]
    center_z = bboxes[..., 2:3]
    log_w = bboxes[..., 3:4].log()
    log_l = bboxes[..., 4:5].log()
    log_h = bboxes[..., 5:6].log()
    yaw = bboxes[..., 6:7]

    parts = [
        center_x, center_y, log_w, log_l, center_z, log_h,
        yaw.sin(),
        yaw.cos()
    ]
    # Boxes with more than 7 channels also carry a (vx, vy) velocity.
    if bboxes.shape[-1] > 7:
        parts.append(bboxes[..., 7:8])
        parts.append(bboxes[..., 8:9])
    return paddle.concat(parts, axis=-1)
def denormalize_bbox(normalized_bboxes, pc_range):
    """
    This function is modified from https://github.com/fundamentalvision/BEVFormer/blob/master/projects/mmdet3d_plugin/core/bbox/util.py

    Inverse of ``normalize_bbox``: recover [cx, cy, cz, w, l, h, rot(, vx, vy)]
    from the network's regression layout
    (cx, cy, log w, log l, cz, log h, sin rot, cos rot(, vx, vy)).
    ``pc_range`` is accepted for interface compatibility but not used here.
    """
    # rotation
    rot_sine = normalized_bboxes[..., 6:7]
    rot_cosine = normalized_bboxes[..., 7:8]
    rot = paddle.atan2(rot_sine, rot_cosine)

    # center in the bev
    cx = normalized_bboxes[..., 0:1]
    cy = normalized_bboxes[..., 1:2]
    cz = normalized_bboxes[..., 4:5]

    # size (stored in log space)
    w = normalized_bboxes[..., 2:3].exp()
    l = normalized_bboxes[..., 3:4].exp()
    h = normalized_bboxes[..., 5:6].exp()

    if normalized_bboxes.shape[-1] > 8:
        # velocity
        # Bug fix: the original sliced ``[:, 8:9]`` (axis 0), which picks the
        # wrong axis for batched inputs with more than 2 dims; every other
        # field here uses ellipsis indexing on the last axis.
        vx = normalized_bboxes[..., 8:9]
        vy = normalized_bboxes[..., 9:10]
        denormalized_bboxes = paddle.concat([cx, cy, cz, w, l, h, rot, vx, vy],
                                            axis=-1)
    else:
        denormalized_bboxes = paddle.concat([cx, cy, cz, w, l, h, rot], axis=-1)
    return denormalized_bboxes
def bbox_overlaps(bboxes1, bboxes2, mode='iou', eps=1e-6):
    """Compute pairwise overlap (IoU / IoF / GIoU) between two box sets.

    Args:
        bboxes1: (..., N, 4) boxes as [x1, y1, x2, y2] (or empty).
        bboxes2: (..., M, 4) boxes as [x1, y1, x2, y2] (or empty).
        mode (str): 'iou', 'iof' (intersection normalized by bboxes1's area)
            or 'giou'.
        eps (float): Small constant guarding division by zero.

    Returns:
        Tensor of shape (..., N, M) with the requested overlap measure.
    """
    assert mode in ['iou', 'iof', 'giou'], f'Unsupported mode {mode}'
    # Either the boxes are empty or the length of boxes' last dimension is 4
    assert (bboxes1.shape[-1] == 4 or bboxes1.shape[0] == 0)
    assert (bboxes2.shape[-1] == 4 or bboxes2.shape[0] == 0)

    # Batch dim must be the same
    # Batch dim: (B1, B2, ... Bn)
    assert bboxes1.shape[:-2] == bboxes2.shape[:-2]
    batch_shape = bboxes1.shape[:-2]

    rows = bboxes1.shape[-2]
    cols = bboxes2.shape[-2]

    if rows * cols == 0:
        # Bug fix: the original returned
        # ``paddle.to_tensor(batch_shape + (rows, cols))`` which adds a list
        # to a tuple (TypeError), and even with matching types it would
        # build a tensor CONTAINING the shape values rather than an empty
        # result tensor of that shape.
        return paddle.zeros(batch_shape + [rows, cols])

    area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * (
        bboxes1[..., 3] - bboxes1[..., 1])
    area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * (
        bboxes2[..., 3] - bboxes2[..., 1])

    lt = paddle.maximum(bboxes1[..., :, None, :2],
                        bboxes2[..., None, :, :2])  # [B, rows, cols, 2]
    rb = paddle.minimum(bboxes1[..., :, None, 2:],
                        bboxes2[..., None, :, 2:])  # [B, rows, cols, 2]

    wh = paddle.clip(rb - lt, min=0)
    overlap = wh[..., 0] * wh[..., 1]

    if mode in ['iou', 'giou']:
        union = area1[..., None] + area2[..., None, :] - overlap
    else:
        # 'iof' normalizes by the first set's area only.
        union = area1[..., None]
    if mode == 'giou':
        enclosed_lt = paddle.minimum(bboxes1[..., :, None, :2],
                                     bboxes2[..., None, :, :2])
        enclosed_rb = paddle.maximum(bboxes1[..., :, None, 2:],
                                     bboxes2[..., None, :, 2:])

    eps = paddle.to_tensor([eps])
    union = paddle.maximum(union, eps)
    ious = overlap / union
    if mode in ['iou', 'iof']:
        return ious
    # calculate gious
    enclose_wh = paddle.clip(enclosed_rb - enclosed_lt, min=0)
    enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]
    enclose_area = paddle.maximum(enclose_area, eps)
    gious = ious - (enclose_area - union) / enclose_area
    return gious
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/utils/checkpoint.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Union
from urllib.parse import unquote, urlparse
import filelock
import paddle
from paddle3d.env import PRETRAINED_HOME, TMP_HOME
from paddle3d.utils.download import download_with_progress
from paddle3d.utils.logger import logger
from paddle3d.utils.xarfile import unarchive_with_progress
def load_pretrained_model_from_url(model: paddle.nn.Layer,
                                   url: str,
                                   overwrite: bool = False):
    """Download a pretrained weight file from ``url`` (cached under
    PRETRAINED_HOME) and load it into ``model``.

    Args:
        model: Model that receives the parameters.
        url: Remote location of the weight file.
        overwrite: When True, discard any cached copy and re-download.
    """
    pretrained_model = unquote(url)
    savename = pretrained_model.split('/')[-1]
    savedir = os.path.join(PRETRAINED_HOME, savename.split('.')[0])
    os.makedirs(savedir, exist_ok=True)
    savepath = os.path.join(savedir, savename)

    if os.path.exists(savepath) and not overwrite:
        logger.warning(
            "There is a file with the same name locally, we directly load the local file"
        )
    else:
        # Bug fix: the original called os.remove(savepath) whenever
        # ``overwrite`` was set, raising FileNotFoundError when no cached
        # copy exists yet.
        if overwrite and os.path.exists(savepath):
            logger.warning(
                "There is a file with the same name locally, we will delete the file."
            )
            os.remove(savepath)

        # Add file lock to prevent multi-process download
        with filelock.FileLock(os.path.join(TMP_HOME, savename)):
            if not os.path.exists(savepath):
                with logger.progressbar(
                        "download pretrained model from {}".format(url)) as bar:
                    for _, ds, ts in download_with_progress(url, savedir):
                        bar.update(float(ds) / ts)

    #TODO: unzip the file if it is a compress one
    load_pretrained_model_from_path(model, savepath)
def load_pretrained_model_from_path(model: paddle.nn.Layer, path: str):
    """Load a serialized parameter file from ``path`` into ``model``."""
    para_state_dict = paddle.load(path)
    load_pretrained_model_from_state_dict(model, para_state_dict)
def load_pretrained_model_from_state_dict(model: paddle.nn.Layer,
                                          state_dict: dict):
    """Copy matching parameters from ``state_dict`` into ``model``.

    Parameters missing from the state dict or whose shapes disagree are
    skipped with a warning; everything else is loaded and a summary is
    logged.
    """
    model_state_dict = model.state_dict()
    loaded_count = 0
    for name in model_state_dict.keys():
        if name not in state_dict:
            logger.warning("{} is not in pretrained model".format(name))
            continue
        pretrained_shape = list(state_dict[name].shape)
        actual_shape = list(model_state_dict[name].shape)
        if pretrained_shape != actual_shape:
            logger.warning(
                "[SKIP] Shape of pretrained params {} doesn't match.(Pretrained: {}, Actual: {})"
                .format(name, state_dict[name].shape,
                        model_state_dict[name].shape))
            continue
        model_state_dict[name] = state_dict[name]
        loaded_count += 1
    model.set_dict(model_state_dict)
    logger.info("There are {}/{} variables loaded into {}.".format(
        loaded_count, len(model_state_dict), model.__class__.__name__))
def load_pretrained_model(model: paddle.nn.Layer,
                          pretrained_model: Union[dict, str]):
    """Dispatch pretrained-weight loading by argument type: a state dict,
    a URL, or a filesystem path."""
    if isinstance(pretrained_model, dict):
        load_pretrained_model_from_state_dict(model, pretrained_model)
        return

    if not isinstance(pretrained_model, str):
        raise TypeError('Unsupported pretrained model type {}'.format(
            type(pretrained_model)))

    # A string with a network location is treated as a URL; otherwise it
    # must be an existing local path.
    if urlparse(pretrained_model).netloc:
        load_pretrained_model_from_url(model, pretrained_model)
    elif os.path.exists(pretrained_model):
        load_pretrained_model_from_path(model, pretrained_model)
    else:
        raise ValueError('{} is neither a valid path nor a valid URL.'.format(
            pretrained_model))
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/utils/amp_utils.py
|
from collections.abc import Mapping, Sequence
from typing import List
import paddle
def dtype2float32(src_tensors):
    """Recursively cast every non-float32 paddle.Tensor in a nested
    structure to float32.

    Args:
        src_tensors: A paddle.Tensor, or an arbitrarily nested Sequence /
            Mapping containing tensors; non-tensor leaves pass through
            unchanged.

    Returns:
        The same structure with all tensors cast to float32 (Mappings are
        rebuilt as plain dicts, matching the original behavior).
    """
    if isinstance(src_tensors,
                  paddle.Tensor) and src_tensors.dtype != 'float32':
        return src_tensors.astype('float32')
    # Bug fix: str/bytes are Sequences, so the original recursed into each
    # character and rebuilt strings incorrectly; return them untouched.
    if isinstance(src_tensors, (str, bytes)):
        return src_tensors
    if isinstance(src_tensors, Sequence):
        # NOTE(review): type(...)([...]) assumes the sequence type accepts a
        # single iterable argument; namedtuples would not — confirm callers.
        return type(src_tensors)([dtype2float32(x) for x in src_tensors])
    if isinstance(src_tensors, Mapping):
        return {key: dtype2float32(x) for key, x in src_tensors.items()}
    return src_tensors
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/utils/timer.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
class Timer:
    """Tracks elapsed time, smoothed per-iteration speed and ETA for a
    fixed-length loop.

    Args:
        iters: Total number of iterations; 0 means unknown (no ETA).
        momentum: Smoothing factor for the exponential moving average of
            the per-iteration duration.
    """

    def __init__(self, iters: int = 0, momentum: float = 0.5):
        self.iters = iters
        self.cur_iter = 0
        self.start_time = time.time()
        self.elasped_time = 0
        self.last_time = None
        self._moving_speed = None
        self.momentum = momentum

    def step(self):
        """Record the completion of one iteration."""
        self.cur_iter += 1
        now = time.time()
        if self.last_time is not None:
            iter_duration = now - self.last_time
            # Exponential moving average smooths per-iteration jitter.
            if self._moving_speed is None:
                self._moving_speed = iter_duration
            else:
                self._moving_speed = (self._moving_speed * self.momentum +
                                      (1 - self.momentum) * iter_duration)
            self.elasped_time += iter_duration
        self.last_time = now

    @staticmethod
    def _format_hms(seconds):
        # Render a duration in seconds as zero-padded HH:MM:SS.
        fields = []
        for power in range(2, -1, -1):
            fields.append(int(seconds / 60**power))
            seconds %= 60**power
        return "{:0>2}:{:0>2}:{:0>2}".format(*fields)

    @property
    def ela(self):
        """Wall-clock time since construction, formatted HH:MM:SS."""
        return self._format_hms(time.time() - self.start_time)

    @property
    def speed(self):
        """Average seconds per completed iteration (0 before any step)."""
        if self.cur_iter == 0:
            return 0
        return self.elasped_time / self.cur_iter

    @property
    def eta(self):
        """Estimated remaining time as HH:MM:SS, or '--:--:--' if unknown."""
        if self.iters == 0 or self._moving_speed is None:
            return "--:--:--"
        remaining_iter = max(self.iters - self.cur_iter, 0)
        return self._format_hms(int(remaining_iter * self._moving_speed))
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/utils/download.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Generator
from urllib.parse import urlparse
import requests
def download(url: str, path: str = None) -> str:
    '''Download a file

    Args:
        url (str) : url to be downloaded
        path (str, optional) : path to store downloaded products, default is current work directory

    Returns:
        str: local path of the downloaded file.

    Examples:
        .. code-block:: python

            url = 'https://xxxxx.xx/xx.tar.gz'
            download(url, path='./output')
    '''
    # Drain the progress generator; after the loop ``savename`` holds the
    # local save path yielded by download_with_progress.
    for savename, _, _ in download_with_progress(url, path):
        ...

    return savename
def download_with_progress(url: str,
                           path: str = None) -> Generator[str, int, int]:
    '''Download a file and return the downloading progress -> Generator[filename, download_size, total_size]

    Args:
        url (str) : url to be downloaded
        path (str, optional) : path to store downloaded products, default is current work directory

    Examples:
        .. code-block:: python

            url = 'https://xxxxx.xx/xx.tar.gz'
            for filename, download_size, total_size in download_with_progress(url, path='./output'):
                print(filename, download_size, total_size)
    '''
    path = os.getcwd() if not path else path
    if not os.path.exists(path):
        os.makedirs(path)

    parse_result = urlparse(url)
    savename = parse_result.path.split('/')[-1]
    savename = os.path.join(path, savename)

    res = requests.get(url, stream=True)
    # Bug fix: fail fast on HTTP errors instead of silently saving the
    # error page body to disk as the downloaded file.
    res.raise_for_status()
    # Robustness fix: 'content-length' may be absent (e.g. chunked
    # transfer); the original crashed with int(None). A zero total means
    # "unknown size" — callers should guard before dividing by it.
    content_length = res.headers.get('content-length')
    total_size = int(content_length) if content_length is not None else 0
    download_size = 0
    with open(savename, 'wb') as _file:
        for data in res.iter_content(chunk_size=4096):
            _file.write(data)
            download_size += len(data)
            yield savename, download_size, total_size
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/utils/box_coder.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn.functional as F
from paddle3d.apis import manager
from paddle3d.utils.box import denormalize_bbox, normalize_bbox
class ResidualCoder(object):
    """
    This code is based on https://github.com/TRAILab/CaDDN/blob/5a96b37f16b3c29dd2509507b1cdfdff5d53c558/pcdet/utils/box_coder_utils.py#L5

    Encodes/decodes 3D boxes as residuals relative to anchor boxes.

    Args:
        code_size (int): Number of encoded channels; 7 by default, one more
            when the angle is sin/cos-encoded.
        encode_angle_by_sincos (bool): When True, store the heading residual
            as (cos, sin) deltas instead of a raw angle difference.
    """

    def __init__(self, code_size=7, encode_angle_by_sincos=False, **kwargs):
        super().__init__()
        self.code_size = code_size
        self.encode_angle_by_sincos = encode_angle_by_sincos
        if self.encode_angle_by_sincos:
            self.code_size += 1

    def encode_paddle(self, boxes, anchors):
        """
        Args:
            boxes: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
            anchors: (N, 7 + C) [x, y, z, dx, dy, dz, heading or *[cos, sin], ...]

        Returns:
            (N, code_size) encoded residuals.
        """
        # NOTE: clips the size channels in place on the input tensors to
        # avoid log(0) below.
        anchors[:, 3:6] = paddle.clip(anchors[:, 3:6], min=1e-5)
        boxes[:, 3:6] = paddle.clip(boxes[:, 3:6], min=1e-5)

        # NOTE(review): paddle.split(x, 7, ...) divides into 7 equal
        # sections, which requires the channel count to be divisible by 7 —
        # confirm behavior for inputs carrying extra channels (C > 0).
        xa, ya, za, dxa, dya, dza, ra, *cas = paddle.split(anchors, 7, axis=-1)
        xg, yg, zg, dxg, dyg, dzg, rg, *cgs = paddle.split(boxes, 7, axis=-1)

        # Center residuals are normalized by the anchor's BEV diagonal.
        diagonal = paddle.sqrt(dxa**2 + dya**2)
        xt = (xg - xa) / diagonal
        yt = (yg - ya) / diagonal
        zt = (zg - za) / dza
        # Size residuals are encoded in log space.
        dxt = paddle.log(dxg / dxa)
        dyt = paddle.log(dyg / dya)
        dzt = paddle.log(dzg / dza)
        if self.encode_angle_by_sincos:
            rt_cos = paddle.cos(rg) - paddle.cos(ra)
            rt_sin = paddle.sin(rg) - paddle.sin(ra)
            rts = [rt_cos, rt_sin]
        else:
            rts = [rg - ra]

        cts = [g - a for g, a in zip(cgs, cas)]
        return paddle.concat([xt, yt, zt, dxt, dyt, dzt, *rts, *cts], axis=-1)

    def decode_paddle(self, box_encodings, anchors):
        """
        Args:
            box_encodings: (B, N, 7 + C) or (N, 7 + C) [x, y, z, dx, dy, dz, heading or *[cos, sin], ...]
            anchors: (B, N, 7 + C) or (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]

        Returns:
            Decoded boxes in the same layout as ``anchors``.
        """
        xa, ya, za, dxa, dya, dza, ra, *cas = paddle.split(anchors, 7, axis=-1)
        # NOTE(review): both branches split into 7 sections, but with
        # encode_angle_by_sincos the encodings carry 8 channels, which does
        # not divide evenly by 7 — confirm paddle.split semantics here.
        if not self.encode_angle_by_sincos:
            xt, yt, zt, dxt, dyt, dzt, rt, *cts = paddle.split(
                box_encodings, 7, axis=-1)
        else:
            xt, yt, zt, dxt, dyt, dzt, cost, sint, *cts = paddle.split(
                box_encodings, 7, axis=-1)

        # Invert the diagonal-normalized center residuals.
        diagonal = paddle.sqrt(dxa**2 + dya**2)
        xg = xt * diagonal + xa
        yg = yt * diagonal + ya
        zg = zt * dza + za

        # Invert the log-space size residuals.
        dxg = paddle.exp(dxt) * dxa
        dyg = paddle.exp(dyt) * dya
        dzg = paddle.exp(dzt) * dza

        if self.encode_angle_by_sincos:
            rg_cos = cost + paddle.cos(ra)
            rg_sin = sint + paddle.sin(ra)
            rg = paddle.atan2(rg_sin, rg_cos)
        else:
            rg = rt + ra

        cgs = [t + a for t, a in zip(cts, cas)]
        return paddle.concat([xg, yg, zg, dxg, dyg, dzg, rg, *cgs], axis=-1)
@manager.BBOX_CODERS.add_component
class NMSFreeCoder(object):
    """Bbox coder for NMS-free detector.

    This class is modified from https://github.com/fundamentalvision/BEVFormer/blob/master/projects/mmdet3d_plugin/core/bbox/coders/nms_free_coder.py

    Args:
        point_cloud_range (list[float]): Range of point cloud.
        voxel_size (list[float], optional): Voxel size; stored but unused
            in this implementation. Default: None.
        post_center_range (list[float]): Limit of the center.
            Default: None.
        max_num (int): Max number to be kept. Default: 100.
        score_threshold (float): Threshold to filter boxes based on score.
            Default: None.
        num_classes (int): Number of foreground classes. Default: 10.
    """

    def __init__(self,
                 point_cloud_range,
                 voxel_size=None,
                 post_center_range=None,
                 max_num=100,
                 score_threshold=None,
                 num_classes=10):
        self.point_cloud_range = point_cloud_range
        self.voxel_size = voxel_size
        self.post_center_range = post_center_range
        self.max_num = max_num
        self.score_threshold = score_threshold
        self.num_classes = num_classes

    def decode_single(self, cls_scores, bbox_preds):
        """Decode bboxes.

        Args:
            cls_scores (Tensor): Outputs from the classification head, \
                shape [num_query, cls_out_channels]. Note \
                cls_out_channels should includes background.
            bbox_preds (Tensor): Outputs from the regression \
                head with normalized coordinate format (cx, cy, w, l, cz, h, rot_sine, rot_cosine, vx, vy). \
                Shape [num_query, 9].
        Returns:
            dict: Decoded boxes, scores and labels for one sample.
        """
        max_num = self.max_num

        cls_scores = F.sigmoid(cls_scores)
        # Flatten (num_query, num_classes) and take the top-k joint scores;
        # the flat index encodes both the query index and the class label.
        scores, indexs = cls_scores.reshape([-1]).topk(max_num)
        labels = indexs % self.num_classes
        bbox_index = indexs // self.num_classes
        bbox_preds = bbox_preds[bbox_index]

        final_box_preds = denormalize_bbox(bbox_preds, self.point_cloud_range)
        final_scores = scores
        final_preds = labels

        # use score threshold
        if self.score_threshold is not None:
            thresh_mask = final_scores > self.score_threshold
            tmp_score = self.score_threshold
            # Relax the threshold by 10% at a time until at least one box
            # survives; below 0.01 give up and keep everything.
            while thresh_mask.sum() == 0:
                tmp_score *= 0.9
                if tmp_score < 0.01:
                    thresh_mask = final_scores > -1
                    break
                thresh_mask = final_scores >= tmp_score

        if self.post_center_range is not None:
            self.post_center_range = paddle.to_tensor(self.post_center_range)
            # Keep only boxes whose centers fall inside the post-center range.
            mask = (final_box_preds[..., :3] >=
                    self.post_center_range[:3]).all(1)
            mask &= (final_box_preds[..., :3] <=
                     self.post_center_range[3:]).all(1)

            if self.score_threshold:
                mask &= thresh_mask

            boxes3d = final_box_preds[mask]
            scores = final_scores[mask]

            labels = final_preds[mask]
            predictions_dict = {
                'bboxes': boxes3d,
                'scores': scores,
                'labels': labels
            }
        else:
            raise NotImplementedError(
                'Need to reorganize output as a batch, only '
                'support post_center_range is not None for now!')
        return predictions_dict

    def decode(self, preds_dicts):
        """Decode bboxes.

        Args:
            preds_dicts (dict): Holds 'all_cls_scores' of shape \
                [nb_dec, bs, num_query, cls_out_channels] and \
                'all_bbox_preds' of shape [nb_dec, bs, num_query, 9]; only \
                the last decoder layer's outputs are decoded.
        Returns:
            list[dict]: Decoded boxes, one dict per batch sample.
        """
        all_cls_scores = preds_dicts['all_cls_scores'][-1]
        all_bbox_preds = preds_dicts['all_bbox_preds'][-1]

        batch_size = all_cls_scores.shape[0]
        predictions_list = []
        for i in range(batch_size):
            predictions_list.append(
                self.decode_single(all_cls_scores[i], all_bbox_preds[i]))
        return predictions_list
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/utils/__init__.py
|
from .amp_utils import dtype2float32
from .box import *
from .box_coder import *
from .box_utils import *
from .checkpoint import *
from .common import *
from .depth import *
from .download import *
from .grid import *
from .logger import *
from .timer import *
from .transform import *
from .xarfile import *
from .transform3d import *
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/utils/logger.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import functools
import logging
import sys
import threading
import time
from typing import Iterable
import colorlog
loggers = {}
log_config = {
'DEBUG': {
'level': 10,
'color': 'purple'
},
'INFO': {
'level': 20,
'color': 'cyan'
},
'WARNING': {
'level': 30,
'color': 'yellow'
},
'ERROR': {
'level': 40,
'color': 'red'
},
'CRITICAL': {
'level': 50,
'color': 'bold_red'
}
}
class Logger(object):
    '''Default logger in Paddle3D

    Args:
        name(str) : Logger name, default is 'Paddle3D'
    '''

    def __init__(self, name: str = None):
        name = 'Paddle3D' if not name else name
        self.logger = logging.getLogger(name)

        # Register one logging level per entry in log_config and expose
        # convenience methods (e.g. self.INFO / self.info) bound to it.
        for key, conf in log_config.items():
            logging.addLevelName(conf['level'], key)
            self.__dict__[key] = functools.partial(self.__call__, conf['level'])
            self.__dict__[key.lower()] = functools.partial(
                self.__call__, conf['level'])

        self.handler = logging.StreamHandler()
        self.handler.setFormatter(self.format)

        self.logger.addHandler(self.handler)
        self.logger.setLevel(logging.DEBUG)
        # Avoid double printing through ancestor loggers.
        self.logger.propagate = False
        self._enabled = True

    @property
    def format(self):
        # Use colored output only when attached to an interactive terminal.
        if sys.stdout.isatty():
            color_format = '%(log_color)s%(asctime)-15s%(reset)s - %(levelname)8s - %(message)s'
            log_colors = {
                key: conf['color']
                for key, conf in log_config.items()
            }
            return colorlog.ColoredFormatter(
                color_format, log_colors=log_colors)

        normal_format = '%(asctime)-15s - %(levelname)8s - %(message)s'
        return logging.Formatter(normal_format)

    def disable(self):
        """Silence all subsequent log output."""
        self._enabled = False

    def enable(self):
        """Re-enable log output after ``disable``."""
        self._enabled = True

    @property
    def enabled(self) -> bool:
        return self._enabled

    def __call__(self, log_level: int, msg: str):
        if not self.enabled:
            return

        self.logger.log(log_level, msg)

    @contextlib.contextmanager
    def use_terminator(self, terminator: str):
        """Temporarily replace the handler's line terminator (e.g. '\\r')."""
        old_terminator = self.handler.terminator
        self.handler.terminator = terminator
        yield
        self.handler.terminator = old_terminator

    @contextlib.contextmanager
    def processing(self, msg: str, flush_interval: float = 0.1):
        '''
        Continuously print a progress bar with rotating special effects.

        Args:
            msg(str): Message to be printed.
            flush_interval(float): Rotation interval. Default to 0.1.
        '''
        end = False

        def _printer():
            # Spinner loop run on a daemon thread until the context exits.
            index = 0
            flags = ['\\', '|', '/', '-']
            while not end:
                flag = flags[index % len(flags)]
                with self.use_terminator('\r'):
                    self.info('{}: {}'.format(msg, flag))
                time.sleep(flush_interval)
                index += 1
            self.info('{}'.format(msg))

        if sys.stdout.isatty():
            t = threading.Thread(target=_printer)
            t.daemon = True
            t.start()
        else:
            self.info('{}'.format(msg))
        yield
        end = True

    @contextlib.contextmanager
    def progressbar(self, msg: str, flush_interval: float = 0.1):
        """Context manager yielding a ProgressBar that reports through this logger."""
        self.info(msg)
        bar = ProgressBar(logger=self, flush_interval=flush_interval)
        yield bar

        # Force a final 100% line when the context exits.
        bar._end = True
        bar.update(1)

    def range(self, stop: int, msg: str):
        """Like ``range(stop)`` but drives a progress bar while iterating."""
        with self.progressbar(msg) as bar:
            for idx in range(stop):
                bar.update(float(idx) / stop)
                yield idx

    def enumerate(self, iterable: Iterable, msg: str):
        """Like ``enumerate`` but drives a progress bar while iterating."""
        totalnum = len(iterable)

        with self.progressbar(msg) as bar:
            for idx, item in enumerate(iterable):
                bar.update(float(idx) / totalnum)
                yield idx, item
class ProgressBar(object):
    '''
    Progress bar printer.

    Args:
        logger(Logger): Logger used to emit the bar lines.
        flush_interval(float): Minimum seconds between terminal redraws.
            Default is 0.1.

    Examples:
        .. code-block:: python

            bar = ProgressBar(logger)
            for i in range(100):
                bar.update(i / 100)
            # the terminal line keeps refreshing in place until 100%:
            # [##################################################] 100.00%
    '''

    def __init__(self, logger: Logger, flush_interval: float = 0.1):
        self.logger = logger
        self.flush_interval = flush_interval
        # Timestamp of the last redraw, used to rate-limit terminal output.
        self.last_flush_time = time.time()
        # Set by the owner when progress is complete; forces a final redraw.
        self._end = False

    def update(self, progress: float):
        '''
        Redraw the bar for the given progress.

        Args:
            progress: Fraction completed, from 0.0 to 1.0.
        '''
        filled = '#' * int(progress * 50)
        msg = '[{:<50}] {:.2f}%'.format(filled, progress * 100)
        elapsed = time.time() - self.last_flush_time
        should_draw = (elapsed >= self.flush_interval
                       and sys.stdout.isatty()) or self._end
        if should_draw:
            # '\r' terminator redraws over the previous bar line.
            with self.logger.use_terminator('\r'):
                self.logger.info(msg)
            self.last_flush_time = time.time()
        if self._end:
            # Emit a bare newline so the next log line starts cleanly.
            self.logger.info('')
logger = Logger()
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/utils/box_utils.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
def limit_period(val, offset: float = 0.5, period: float = np.pi):
    """Wrap ``val`` into the interval [-offset*period, (1-offset)*period).

    Args:
        val: Scalar or ndarray of periodic quantities (typically angles).
        offset: Fraction of ``period`` shifting the target interval. Default 0.5.
        period: Length of the period. Default pi.

    Returns:
        Wrapped value(s), same shape as ``val``.
    """
    whole_periods = np.floor(val / period + offset)
    return val - whole_periods * period
def boxes3d_kitti_lidar_to_lidar(boxes3d_lidar):
    """
    convert boxes from [x, y, z, w, l, h, yaw] to [x, y, z, l, w, h, heading], bottom_center -> obj_center

    Note: the z column of the input array is shifted in place.
    """
    w = boxes3d_lidar[:, 3:4]
    l = boxes3d_lidar[:, 4:5]
    h = boxes3d_lidar[:, 5:6]
    yaw = boxes3d_lidar[:, 6:7]
    # Lift the reference point from the box bottom to its geometric center.
    boxes3d_lidar[:, 2] += h[:, 0] / 2
    heading = -(yaw + np.pi / 2)
    return np.concatenate([boxes3d_lidar[:, 0:3], l, w, h, heading], axis=-1)
def boxes3d_lidar_to_kitti_lidar(boxes3d_lidar):
    """
    convert boxes from [x, y, z, l, w, h, heading] to [x, y, z, w, l, h, yaw], obj_center -> bottom_center

    Note: the z column of the input array is shifted in place.
    """
    l = boxes3d_lidar[:, 3:4]
    w = boxes3d_lidar[:, 4:5]
    h = boxes3d_lidar[:, 5:6]
    heading = boxes3d_lidar[:, 6:7]
    # Drop the reference point from the box center to its bottom face.
    boxes3d_lidar[:, 2] -= h[:, 0] / 2
    yaw = -(heading + np.pi / 2)
    return np.concatenate([boxes3d_lidar[:, 0:3], w, l, h, yaw], axis=-1)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/utils/shm_utils.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
SIZE_UNIT = ['K', 'M', 'G', 'T']
SHM_QUERY_CMD = 'df -h'
SHM_KEY = 'shm'
SHM_DEFAULT_MOUNT = '/dev/shm'
# [ shared memory size check ]
# In detection models, image/target data occupies a lot of memory, and
# will occupy lots of shared memory in multi-process DataLoader, we use
# following code to get shared memory size and perform a size check to
# disable shared memory use if shared memory size is not enough.
# Shared memory getting process as follows:
# 1. use `df -h` get all mount info
# 2. pick up spaces whose mount info contains 'shm'
# 3. if 'shm' space number is only 1, return its size
# 4. if there are multiple 'shm' space, try to find the default mount
# directory '/dev/shm' is Linux-like system, otherwise return the
# biggest space size.
def _parse_size_in_M(size_str):
    """Convert a ``df -h`` style size string (e.g. '64M', '7.8G', '2GB') to MB."""
    if size_str.endswith('B'):
        # Two-character suffix like 'MB': the unit letter precedes the 'B'.
        num, unit = size_str[:-2], size_str[-2]
    else:
        num, unit = size_str[:-1], size_str[-1]
    assert unit in SIZE_UNIT, \
        "unknown shm size unit {}".format(unit)
    # SIZE_UNIT is ordered K, M, G, T; the index relative to 'M' (index 1)
    # gives the power of 1024 converting the value into megabytes.
    scale = 1024 ** (SIZE_UNIT.index(unit) - 1)
    return float(num) * scale
def _get_shared_memory_size_in_M():
    """Return the shared-memory mount size in MB, or None when unknown.

    Queries mount info via ``df -h`` and picks the 'shm' entries; with
    several candidates, prefers the default '/dev/shm' mount, otherwise
    the largest one.

    Returns:
        float or None: shared memory size in MB, or None when it cannot be
        determined (``df`` unavailable or no shm mount present).
    """
    try:
        df_infos = os.popen(SHM_QUERY_CMD).readlines()
    except Exception:
        # The old bare ``except:`` also swallowed KeyboardInterrupt and
        # SystemExit; only genuine errors should degrade to "unknown".
        return None

    shm_infos = []
    for df_info in df_infos:
        info = df_info.strip()
        if SHM_KEY in info:
            shm_infos.append(info.split())

    if not shm_infos:
        return None
    if len(shm_infos) == 1:
        # Column 3 of ``df -h`` output is the available size.
        return _parse_size_in_M(shm_infos[0][3])

    default_mount_infos = [
        si for si in shm_infos if si[-1] == SHM_DEFAULT_MOUNT
    ]
    if default_mount_infos:
        return _parse_size_in_M(default_mount_infos[0][3])
    return max(_parse_size_in_M(si[3]) for si in shm_infos)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/utils/common.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import tempfile
from paddle3d.env import TMP_HOME
@contextlib.contextmanager
def generate_tempfile(directory: str = None, **kwargs):
    '''Yield a named temporary file created under ``directory`` (TMP_HOME when omitted).'''
    # TMP_HOME is only consulted when no directory is supplied.
    target_dir = directory or TMP_HOME
    with tempfile.NamedTemporaryFile(dir=target_dir, **kwargs) as file:
        yield file
@contextlib.contextmanager
def generate_tempdir(directory: str = None, **kwargs):
    '''Yield a temporary directory created under ``directory`` (TMP_HOME when omitted).'''
    # TMP_HOME is only consulted when no directory is supplied.
    target_dir = directory or TMP_HOME
    with tempfile.TemporaryDirectory(dir=target_dir, **kwargs) as _dir:
        yield _dir
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/utils/xarfile.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tarfile
import zipfile
from typing import Callable, Generator, List
import rarfile
class XarInfo(object):
    '''Informational class which holds the details about an archive member given by a XarFile.'''

    def __init__(self, _xarinfo, arctype='tar'):
        # Wrapped member object: tarfile.TarInfo, zipfile.ZipInfo or
        # rarfile.RarInfo depending on ``arctype``.
        self._info = _xarinfo
        self.arctype = arctype

    @property
    def name(self) -> str:
        '''Member name inside the archive.'''
        # tar members expose ``name``; zip/rar members expose ``filename``.
        return self._info.name if self.arctype == 'tar' else self._info.filename

    @property
    def size(self) -> int:
        '''Uncompressed size of the member in bytes.'''
        return self._info.size if self.arctype == 'tar' else self._info.file_size
class XarFile(object):
    '''
    The XarFile Class provides an interface to tar/rar/zip archives.

    Args:
        name(str) : file or directory name to be archived
        mode(str) : specifies the mode in which the file is opened, it must be:

            ======== ==============================================================================================
            Charater Meaning
            -------- ----------------------------------------------------------------------------------------------
            'r'      open for reading
            'w'      open for writing, truncating the file first, file will be saved according to the arctype field
            'a'      open for writing, appending to the end of the file if it exists
            ======== ===============================================================================================

        arctype(str) : archive type, support ['tar' 'rar' 'zip' 'tar.gz' 'tar.bz2' 'tar.xz' 'tgz' 'txz'], if
            the mode is 'w' or 'a', the default is 'tar', if the mode is 'r', it will be based on actual
            archive type of file
    '''

    def __init__(self, name: str, mode: str, arctype: str = 'tar', **kwargs):
        # if mode is 'w', adjust mode according to arctype field
        if mode == 'w':
            if arctype in ['tar.gz', 'tgz']:
                mode = 'w:gz'
                self.arctype = 'tar'
            elif arctype == 'tar.bz2':
                mode = 'w:bz2'
                self.arctype = 'tar'
            elif arctype in ['tar.xz', 'txz']:
                mode = 'w:xz'
                self.arctype = 'tar'
            else:
                self.arctype = arctype
        # if mode is 'r', detect the actual archive type of the file
        elif mode == 'r':
            if tarfile.is_tarfile(name):
                self.arctype = 'tar'
                mode = 'r:*'
            elif zipfile.is_zipfile(name):
                self.arctype = 'zip'
            elif rarfile.is_rarfile(name):
                self.arctype = 'rar'
        elif mode == 'a':
            self.arctype = arctype
        else:
            raise RuntimeError('Unsupported mode {}'.format(mode))

        if self.arctype in ['tar.gz', 'tar.bz2', 'tar.xz', 'tar', 'tgz', 'txz']:
            self._archive_fp = tarfile.open(name, mode, **kwargs)
        elif self.arctype == 'zip':
            self._archive_fp = zipfile.ZipFile(name, mode, **kwargs)
        elif self.arctype == 'rar':
            self._archive_fp = rarfile.RarFile(name, mode, **kwargs)
        else:
            raise RuntimeError('Unsupported archive type {}'.format(
                self.arctype))

    def __del__(self):
        # Guard: __init__ may have raised before _archive_fp was assigned.
        fp = getattr(self, '_archive_fp', None)
        if fp is not None:
            fp.close()

    def __enter__(self):
        return self

    def __exit__(self, exit_exception, exit_value, exit_traceback):
        # Always release the underlying file handle, then return False so
        # any in-flight exception propagates with its original traceback.
        # (The old code re-instantiated the exception, losing the traceback,
        # and leaked the handle on the error path.)
        self._archive_fp.close()
        return False

    def add(self,
            name: str,
            arcname: str = None,
            recursive: bool = True,
            exclude: Callable = None):
        '''
        Add the file `name' to the archive. `name' may be any type of file (directory, fifo, symbolic link, etc.).
        If given, `arcname' specifies an alternative name for the file in the archive. Directories are added
        recursively by default. This can be avoided by setting `recursive' to False. `exclude' is a function that
        should return True for each filename to be excluded.

        Note: `arcname' is only honored for tar archives.
        '''
        if self.arctype == 'tar':
            # tarfile's ``filter`` expects a TarInfo -> TarInfo/None mapping,
            # not a boolean predicate: adapt ``exclude`` (True means "drop").
            tar_filter = None
            if exclude is not None:
                tar_filter = (
                    lambda tarinfo: None if exclude(tarinfo.name) else tarinfo)
            self._archive_fp.add(name, arcname, recursive, filter=tar_filter)
        else:
            self._archive_fp.write(name)
            if not recursive or not os.path.isdir(name):
                return

            items = []
            for _d, _sub_ds, _files in os.walk(name):
                items += [os.path.join(_d, _file) for _file in _files]
                items += [os.path.join(_d, _sub_d) for _sub_d in _sub_ds]

            for item in items:
                # Skip members for which ``exclude`` returns True (the old
                # test was inverted and kept *only* the excluded files).
                if exclude and exclude(item):
                    continue
                self._archive_fp.write(item)

    def extract(self, name: str, path: str):
        '''Extract a file from the archive to the specified path.'''
        return self._archive_fp.extract(name, path)

    def extractall(self, path: str):
        '''Extract all files from the archive to the specified path.'''
        return self._archive_fp.extractall(path)

    def getnames(self) -> List[str]:
        '''Return a list of file names in the archive.'''
        if self.arctype == 'tar':
            return self._archive_fp.getnames()
        return self._archive_fp.namelist()

    def getxarinfo(self, name: str) -> 'List[XarInfo]':
        '''Return the instance of XarInfo given 'name'.'''
        if self.arctype == 'tar':
            return XarInfo(self._archive_fp.getmember(name), self.arctype)
        return XarInfo(self._archive_fp.getinfo(name), self.arctype)
def open(name: str, mode: str = 'w', **kwargs) -> XarFile:
    '''
    Open a xar archive for reading, writing or appending. Return
    an appropriate XarFile class.

    Note: this deliberately shadows the builtin ``open`` within this module;
    code below this definition must not rely on the builtin by that name.
    '''
    return XarFile(name, mode, **kwargs)
def archive(filename: str,
            recursive: bool = True,
            exclude: Callable = None,
            arctype: str = 'tar') -> str:
    '''
    Archive a file or directory

    Args:
        filename(str) : file or directory path to be archived
        recursive(bool) : whether to recursively archive directories
        exclude(Callable) : function that should return True for each filename to be excluded
        arctype(str) : archive type, support ['tar' 'rar' 'zip' 'tar.gz' 'tar.bz2' 'tar.xz' 'tgz' 'txz']

    Returns:
        str: archived file path, written to the current working directory as
        '<basename>.<arctype>'

    Examples:
        .. code-block:: python

            # produces 'FILE.tar.gz' in the current directory
            archive('/PATH/TO/FILE', arctype='tar.gz')
    '''
    basename = os.path.splitext(os.path.basename(filename))[0]
    savename = '{}.{}'.format(basename, arctype)
    # ``open`` here is this module's xarfile.open, not the builtin.
    with open(savename, mode='w', arctype=arctype) as file:
        file.add(filename, recursive=recursive, exclude=exclude)
    return savename
def unarchive(name: str, path: str):
    '''
    Extract every member of an archive into a directory.

    Args:
        name(str) : archive file to be unarchived
        path(str) : destination directory

    Examples:
        .. code-block:: python

            unarchive('/PATH/TO/FILE', path='./output')
    '''
    # ``open`` is this module's xarfile.open, which auto-detects the format.
    with open(name, mode='r') as archive_fp:
        archive_fp.extractall(path)
def unarchive_with_progress(name: str, path: str) -> Generator[str, int, int]:
    '''
    Unarchive a file, yielding (filename, extracted_size, total_size) after
    each member so callers can render a progress indicator.

    Args:
        name(str) : archive file to be unarchived
        path(str) : destination directory

    Examples:
        .. code-block:: python

            for filename, extract_size, total_size in unarchive_with_progress('test.tar.gz', path='./output'):
                print(filename, extract_size, total_size)
    '''
    with open(name, mode='r') as archive_fp:
        members = archive_fp.getnames()
        total_size = sum(archive_fp.getxarinfo(m).size for m in members)
        extracted = 0
        for member in members:
            archive_fp.extract(member, path)
            extracted += archive_fp.getxarinfo(member).size
            yield member, extracted, total_size
def is_xarfile(file: str) -> bool:
'''Return True if xarfile supports specific file, otherwise False'''
_x_func = [zipfile.is_zipfile, tarfile.is_tarfile, rarfile.is_rarfile]
for _f in _x_func:
if _f(file):
return True
return False
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/utils/depth.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import paddle
def bin_depths(depth_map, mode, depth_min, depth_max, num_bins, target=False):
    """
    This code is based on https://github.com/TRAILab/CaDDN/blob/5a96b37f16b3c29dd2509507b1cdfdff5d53c558/pcdet/utils/depth_utils.py#L4

    Converts depth map into bin indices
    Args:
        depth_map: Depth Map
        mode [string]: Discretiziation mode (See https://arxiv.org/pdf/2005.13423.pdf for more details)
            UD: Uniform discretiziation
            LID: Linear increasing discretiziation
            SID: Spacing increasing discretiziation
        depth_min [float]: Minimum depth value
        depth_max [float]: Maximum depth value
        num_bins [int]: Number of depth bins
        target [bool]: Whether the depth bins indices will be used for a target tensor in loss comparison
    Returns:
        indices [Tensor(H, W)]: Depth bin indices
    """
    if mode == "UD":
        # Uniform bins: direct linear mapping.
        bin_size = (depth_max - depth_min) / num_bins
        indices = ((depth_map - depth_min) / bin_size)
    elif mode == "LID":
        # Bin width grows linearly with depth; invert the closed form of
        # the cumulative bin edges to recover the (fractional) bin index.
        bin_size = 2 * (depth_max - depth_min) / (num_bins * (1 + num_bins))
        indices = -0.5 + 0.5 * paddle.sqrt(1 + 8 *
                                           (depth_map - depth_min) / bin_size)
    elif mode == "SID":
        # Logarithmic (spacing-increasing) discretization.
        indices = num_bins * (paddle.log(1 + depth_map) - math.log(1 + depth_min)) / \
            (math.log(1 + depth_max) - math.log(1 + depth_min))
    else:
        raise NotImplementedError
    if target:
        # Remove indicies outside of bounds: out-of-range or non-finite
        # entries are routed to the overflow bin ``num_bins``.
        mask = (indices < 0) | (indices >
                                num_bins) | (~paddle.isfinite(indices))
        # NOTE(review): the in-place assignment below appears superseded by
        # the paddle.where two lines down (which recomputes the same
        # replacement) -- confirm it is redundant before removing.
        indices[mask] = num_bins
        sub_val = paddle.full(shape=mask.shape, fill_value=num_bins)
        indices_ = paddle.where(mask, sub_val, indices)
        # Convert to integer
        indices = indices_.cast("int64")
    return indices
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/utils/transform.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn.functional as F
def convert_points_to_homogeneous(points):
    """Function that converts points from Euclidean to homogeneous space.

    Appends a constant 1.0 coordinate along the last axis.

    Args:
        points: the points to be transformed with shape :math:`(*, N, D)`.
            Only rank-3 and rank-5 tensors are supported (the pad op needs
            an explicit data format).

    Returns:
        the points in homogeneous coordinates :math:`(*, N, D+1)`.

    Raises:
        ValueError: for any other rank (the old code crashed with an
            unhelpful UnboundLocalError on ``data_format`` instead).
    """
    rank = len(list(paddle.shape(points)))
    if rank == 3:
        data_format = "NCL"
    elif rank == 5:
        data_format = "NCDHW"
    else:
        raise ValueError(
            "convert_points_to_homogeneous expects a rank-3 or rank-5 "
            "tensor, got rank {}".format(rank))
    # Pad the last axis with a constant 1.0 (the homogeneous coordinate).
    return paddle.nn.functional.pad(
        points, [0, 1], "constant", 1.0, data_format=data_format)
def convert_points_from_homogeneous_3d(points, eps=1e-8):
    """Convert homogeneous 4-vectors to Euclidean 3D points.

    Args:
        points: tensor of shape :math:`(B, N, 4)` (last axis: x, y, z, w).
        eps: threshold below which |w| is treated as zero to avoid
            division by zero.

    Returns:
        tensor of shape :math:`(B, N, 3)`.
    """
    w = points[..., 3:]
    # Where |w| is effectively zero, leave the coordinates unscaled instead
    # of dividing by a vanishing denominator.
    safe = paddle.abs(w) > eps
    scale = paddle.where(safe, 1.0 / (w + eps), paddle.ones_like(w))
    return points[..., :3] * scale
def convert_points_from_homogeneous_2d(points, eps=1e-8):
    """Convert homogeneous 3-vectors to Euclidean 2D points.

    Args:
        points: tensor of shape :math:`(B, N, 3)` (last axis: x, y, w).
        eps: threshold below which |w| is treated as zero to avoid
            division by zero.

    Returns:
        tensor of shape :math:`(B, N, 2)`.
    """
    w = points[..., 2:]
    # Where |w| is effectively zero, leave the coordinates unscaled instead
    # of dividing by a vanishing denominator.
    safe = paddle.abs(w) > eps
    scale = paddle.where(safe, 1.0 / (w + eps), paddle.ones_like(w))
    return points[..., :2] * scale
def project_to_image(project, points):
    """
    Project points to image
    Args:
        project [paddle.tensor(..., 3, 4)]: Projection matrix
        points [paddle.Tensor(..., 3)]: 3D points
    Returns:
        points_img [paddle.Tensor(..., 2)]: Points in image
        points_depth [paddle.Tensor(...)]: Depth of each point
    """
    # Flatten all leading dims so points become (B, N, 3) for the matmul.
    shape_inp = list(paddle.shape(points))
    shape_inp_len = len(shape_inp)
    points = points.reshape(
        [-1, shape_inp[shape_inp_len - 2], shape_inp[shape_inp_len - 1]])
    # Record the homogeneous width (D+1) in the cached input shape.
    shape_inp[shape_inp_len - 1] += 1
    points = convert_points_to_homogeneous(points)
    # (B, N, 4) -> (B, 4, N) so the projection can left-multiply.
    points = points.reshape([shape_inp[0], -1,
                             shape_inp[-1]]).transpose([0, 2, 1])
    project_shape = project.shape
    project = project.reshape(
        [project_shape[0], project_shape[-2], project_shape[-1]])
    # Transform points to image and get depths
    points_shape = points.shape
    points_t = project @ points
    points_t = points_t.transpose([0, 2, 1])
    points_t_shape = points_t.shape
    # NOTE(review): this reshape indexes shape_inp[1..3], i.e. it assumes
    # ``points`` was originally rank-4 -- confirm all callers satisfy that.
    points_t = points_t.reshape([
        points_t_shape[0], shape_inp[1], shape_inp[2], shape_inp[3],
        points_t_shape[-1]
    ])
    points_img = convert_points_from_homogeneous_2d(points_t)
    project = project.reshape(project_shape).unsqueeze(axis=1)
    # Depth is the last homogeneous component minus the projection's (2, 3)
    # entry (removing the translation already baked into the matrix).
    points_depth = points_t[..., -1] - project[..., 2, 3]
    return points_img, points_depth
def transform_points_3d(trans_01, points_1):
    r"""Function that applies transformations to a set of points.
    Args:
        trans_01 (Tensor): tensor for transformations of shape
            :math:`(B, D+1, D+1)`.
        points_1 (Tensor): tensor of points of shape :math:`(B, N, D)`.
    Returns:
        Tensor: tensor of N-dimensional points.
    Shape:
        - Output: :math:`(B, N, D)`
    """
    # We reshape to BxNxD in case we get more dimensions, e.g., MxBxNxD
    shape_inp = list(paddle.shape(points_1))
    dim_num = len(shape_inp)
    points_1 = points_1.reshape([-1, shape_inp[dim_num - 2], 3])
    trans_01 = trans_01.reshape([-1, 4, 4])
    # We expand trans_01 to match the dimensions needed for bmm
    trans_01 = trans_01.tile(
        [paddle.shape(points_1)[0] // paddle.shape(trans_01)[0], 1, 1])
    # to homogeneous
    points_1_h = convert_points_to_homogeneous(points_1)  # BxNxD+1
    # transform coordinates: row vectors times the transposed matrix.
    points_0_h = paddle.matmul(points_1_h, trans_01.transpose([0, 2, 1]))
    # NOTE(review): squeezing axis -1 of a (B, N, 4) tensor is a no-op when
    # that axis has size 4 -- confirm whether this line is vestigial.
    points_0_h = paddle.squeeze(points_0_h, axis=-1)
    # to euclidean
    points_0 = convert_points_from_homogeneous_3d(points_0_h)  # BxNxD
    # reshape to the input shape
    points_0_shape = paddle.shape(points_0)
    points_0_shape_len = len(list(points_0_shape))
    shape_inp[dim_num - 2] = points_0_shape[points_0_shape_len - 2]
    shape_inp[dim_num - 1] = points_0_shape[points_0_shape_len - 1]
    points_0 = points_0.reshape(shape_inp)
    return points_0
def quaternion_to_matrix(quaternions):
    """
    Convert rotations given as quaternions to rotation matrices.
    Args:
        quaternions: quaternions with real part first,
            as tensor of shape (..., 4).
    Returns:
        Rotation matrices as tensor of shape (..., 3, 3).
    """
    r, i, j, k = paddle.unbind(quaternions, -1)
    # 2 / |q|^2 normalizes the products below, so non-unit quaternions still
    # produce proper rotation matrices.
    two_s = 2.0 / (quaternions * quaternions).sum(-1)
    # Standard quaternion-to-rotation formula, rows written out flat and
    # reshaped to (..., 3, 3) at the end.
    o = paddle.stack(
        (
            1 - two_s * (j * j + k * k),
            two_s * (i * j - k * r),
            two_s * (i * k + j * r),
            two_s * (i * j + k * r),
            1 - two_s * (i * i + k * k),
            two_s * (j * k - i * r),
            two_s * (i * k - j * r),
            two_s * (j * k + i * r),
            1 - two_s * (i * i + j * j),
        ),
        -1,
    )
    return o.reshape(quaternions.shape[:-1] + [3, 3])
def _copysign(a, b):
    """
    Return a tensor whose elements have the absolute value of ``a`` and the
    sign of ``b`` -- like the standard copysign floating-point operation,
    but without special handling of negative zero and NaN.

    Args:
        a: source tensor.
        b: tensor whose signs will be used, of the same shape as a.

    Returns:
        Tensor of the same shape as a with the signs of b.
    """
    sign_mismatch = (a < 0) != (b < 0)
    return paddle.where(sign_mismatch, -a, a)
def _sqrt_positive_part(x):
    """
    Return sqrt(max(0, x)), with a zero subgradient where x <= 0
    (negative inputs are clamped before the square root).
    """
    clamped = paddle.where(x > 0, x, paddle.zeros_like(x))
    return paddle.sqrt(clamped)
def matrix_to_quaternion(matrix):
    """
    Convert rotations given as rotation matrices to quaternions.

    Args:
        matrix: Rotation matrices as tensor of shape (..., 3, 3).

    Returns:
        quaternions with real part first, as tensor of shape (..., 4).

    Raises:
        ValueError: if the trailing two dims are not (3, 3).
    """
    if matrix.shape[-1] != 3 or matrix.shape[-2] != 3:
        # Old message had a stray 'f' baked into the text
        # ("shape f[...]"); fixed here.
        raise ValueError(f"Invalid rotation matrix shape {matrix.shape}.")
    m00 = matrix[..., 0, 0]
    m11 = matrix[..., 1, 1]
    m22 = matrix[..., 2, 2]
    # Magnitudes of the four components come from the diagonal (trace terms).
    o0 = 0.5 * _sqrt_positive_part(1 + m00 + m11 + m22)
    x = 0.5 * _sqrt_positive_part(1 + m00 - m11 - m22)
    y = 0.5 * _sqrt_positive_part(1 - m00 + m11 - m22)
    z = 0.5 * _sqrt_positive_part(1 - m00 - m11 + m22)
    # Signs of the imaginary parts come from the skew-symmetric part.
    o1 = _copysign(x, matrix[..., 2, 1] - matrix[..., 1, 2])
    o2 = _copysign(y, matrix[..., 0, 2] - matrix[..., 2, 0])
    o3 = _copysign(z, matrix[..., 1, 0] - matrix[..., 0, 1])
    return paddle.stack((o0, o1, o2, o3), -1)
def bbox_cxcywh_to_xyxy(bbox):
    """Convert bbox coordinates from (cx, cy, w, h) to (x1, y1, x2, y2).
    This function is modified from https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/transforms.py#L245
    Args:
        bbox (Tensor): Shape (n, 4) for bboxes.
    Returns:
        Tensor: Converted bboxes.
    """
    cx, cy, w, h = bbox.split((1, 1, 1, 1), axis=-1)
    half_w = 0.5 * w
    half_h = 0.5 * h
    corners = [cx - half_w, cy - half_h, cx + half_w, cy + half_h]
    return paddle.concat(corners, axis=-1)
def bbox_xyxy_to_cxcywh(bbox):
    """Convert bbox coordinates from (x1, y1, x2, y2) to (cx, cy, w, h).
    This function is modified from https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/transforms.py#L259
    Args:
        bbox (Tensor): Shape (n, 4) for bboxes.
    Returns:
        Tensor: Converted bboxes.
    """
    x1, y1, x2, y2 = bbox.split((1, 1, 1, 1), axis=-1)
    converted = [(x1 + x2) / 2, (y1 + y2) / 2, x2 - x1, y2 - y1]
    return paddle.concat(converted, axis=-1)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/utils/transform3d.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import warnings
from typing import Optional
import paddle
class Transform3d:
"""
A Transform3d object encapsulates a batch of N 3D transformations, and knows
how to transform points and normal vectors.
This code is based on https://github.com/facebookresearch/pytorch3d/blob/46cb5aaaae0cd40f729fd41a39c0c9a232b484c0/pytorch3d/transforms/transform3d.py#L20
"""
def __init__(self, dtype='float32', matrix=None):
"""
Args:
dtype: The data type of the transformation matrix.
to be used if `matrix = None`.
matrix: A tensor of shape (4, 4) or of shape (minibatch, 4, 4)
representing the 4x4 3D transformation matrix.
If `None`, initializes with identity using
the specified `dtype`.
"""
if matrix is None:
self._matrix = paddle.eye(4, dtype=dtype).reshape([1, 4, 4])
else:
if len(matrix.shape) not in (2, 3):
raise ValueError(
'"matrix" has to be a 2- or a 3-dimensional tensor.')
if matrix.shape[-2] != 4 or matrix.shape[-1] != 4:
raise ValueError(
'"matrix" has to be a tensor of shape (minibatch, 4, 4)')
self._matrix = matrix.reshape([-1, 4, 4])
self._transforms = [] # store transforms to compose
self._lu = None
def __len__(self):
return self.get_matrix().shape[0]
def compose(self, *others):
"""
Return a new Transform3d with the tranforms to compose stored as
an internal list.
Args:
*others: Any number of Transform3d objects
Returns:
A new Transform3d with the stored transforms
"""
out = Transform3d()
out._matrix = self._matrix.clone()
for other in others:
if not isinstance(other, Transform3d):
msg = "Only possible to compose Transform3d objects; got %s"
raise ValueError(msg % type(other))
out._transforms = self._transforms + list(others)
return out
def get_matrix(self):
"""
Return a matrix which is the result of composing this transform
with others stored in self.transforms. Where necessary transforms
are broadcast against each other.
For example, if self.transforms contains transforms t1, t2, and t3, and
given a set of points x, the following should be true:
.. code-block:: python
y1 = t1.compose(t2, t3).transform(x)
y2 = t3.transform(t2.transform(t1.transform(x)))
y1.get_matrix() == y2.get_matrix()
Returns:
A transformation matrix representing the composed inputs.
"""
composed_matrix = self._matrix.clone()
if len(self._transforms) > 0:
for other in self._transforms:
other_matrix = other.get_matrix()
composed_matrix = _broadcast_bmm(composed_matrix, other_matrix)
return composed_matrix
def _get_matrix_inverse(self):
"""
Return the inverse of self._matrix.
"""
return paddle.inverse(self._matrix)
def inverse(self, invert_composed: bool = False):
"""
Returns a new Transform3D object that represents an inverse of the
current transformation.
Args:
invert_composed:
- True: First compose the list of stored transformations
and then apply inverse to the result. This is
potentially slower for classes of transformations
with inverses that can be computed efficiently
(e.g. rotations and translations).
- False: Invert the individual stored transformations
independently without composing them.
Returns:
A new Transform3D object contaning the inverse of the original
transformation.
"""
tinv = Transform3d()
if invert_composed:
# first compose then invert
tinv._matrix = paddle.inverse(self.get_matrix())
else:
# self._get_matrix_inverse() implements efficient inverse
# of self._matrix
i_matrix = self._get_matrix_inverse()
# 2 cases:
if len(self._transforms) > 0:
# a) Either we have a non-empty list of transforms:
# Here we take self._matrix and append its inverse at the
# end of the reverted _transforms list. After composing
# the transformations with get_matrix(), this correctly
# right-multiplies by the inverse of self._matrix
# at the end of the composition.
tinv._transforms = [
t.inverse() for t in reversed(self._transforms)
]
last = Transform3d()
last._matrix = i_matrix
tinv._transforms.append(last)
else:
# b) Or there are no stored transformations
# we just set inverted matrix
tinv._matrix = i_matrix
return tinv
def stack(self, *others):
transforms = [self] + list(others)
matrix = paddle.concat([t._matrix for t in transforms], axis=0)
out = Transform3d()
out._matrix = matrix
return out
def transform_points(self, points, eps: Optional[float] = None):
"""
Use this transform to transform a set of 3D points. Assumes row major
ordering of the input points.
Args:
points: Tensor of shape (P, 3) or (N, P, 3)
eps: If eps!=None, the argument is used to clamp the
last coordinate before peforming the final division.
The clamping corresponds to:
last_coord := (last_coord.sign() + (last_coord==0)) *
paddle.clamp(last_coord.abs(), eps),
i.e. the last coordinates that are exactly 0 will
be clamped to +eps.
Returns:
points_out: points of shape (N, P, 3) or (P, 3) depending
on the dimensions of the transform
"""
points_batch = points.clone()
if len(points_batch.shape) == 2:
points_batch = points_batch[None] # (P, 3) -> (1, P, 3)
if len(points_batch.shape) != 3:
msg = "Expected points to have dim = 2 or dim = 3: got shape %r"
raise ValueError(msg % repr(points.shape))
N, P, _3 = points_batch.shape
ones = paddle.ones([N, P, 1], dtype=points.dtype)
points_batch = paddle.concat([points_batch, ones], axis=2)
composed_matrix = self.get_matrix()
points_out = _broadcast_bmm(points_batch, composed_matrix)
denom = points_out[..., 3:] # denominator
if eps is not None:
denom_sign = denom.sign() + (denom == 0.0).cast(denom.dtype)
denom = denom_sign * paddle.clip(denom.abs(), eps)
points_out = points_out[..., :3] / denom
# When transform is (1, 4, 4) and points is (P, 3) return
# points_out of shape (P, 3)
if points_out.shape[0] == 1 and len(points.shape) == 2:
points_out = points_out.reshape(points.shape)
return points_out
def transform_normals(self, normals):
"""
Use this transform to transform a set of normal vectors.
Args:
normals: Tensor of shape (P, 3) or (N, P, 3)
Returns:
normals_out: Tensor of shape (P, 3) or (N, P, 3) depending
on the dimensions of the transform
"""
if len(normals.shape) not in [2, 3]:
msg = "Expected normals to have dim = 2 or dim = 3: got shape %r"
raise ValueError(msg % (normals.shape, ))
composed_matrix = self.get_matrix()
# TODO: inverse is bad! Solve a linear system instead
mat = composed_matrix[:, :3, :3]
normals_out = _broadcast_bmm(normals,
mat.transpose([0, 2, 1]).inverse())
# When transform is (1, 4, 4) and normals is (P, 3) return
# normals_out of shape (P, 3)
if normals_out.shape[0] == 1 and len(normals.shape) == 2:
normals_out = normals_out.reshape(normals.shape)
return normals_out
def translate(self, *args, **kwargs):
    """Compose this transform with a Translate built from the given args."""
    translation = Translate(*args, **kwargs)
    return self.compose(translation)
def clone(self):
    """
    Deep copy of Transforms object. All internal tensors are cloned
    individually.

    Returns:
        new Transforms object.
    """
    duplicate = Transform3d()
    if self._lu is not None:
        duplicate._lu = [piece.clone() for piece in self._lu]
    duplicate._matrix = self._matrix.clone()
    duplicate._transforms = [tfm.clone() for tfm in self._transforms]
    return duplicate
def to(self, copy: bool = False, dtype=None):
    """
    Match functionality of paddle.cast().

    Args:
        copy: Boolean indicator whether or not to clone self. Default False.
        dtype: If not None, casts the internal tensor variables
            to a given paddle.dtype.

    Returns:
        Transform3d object.
    """
    # Fast path: no copy requested and the dtype already matches.
    if dtype == self.dtype and not copy:
        return self
    converted = self.clone()
    converted._matrix = self._matrix.to(dtype=dtype)
    for tfm in converted._transforms:
        tfm.to(copy=copy, dtype=dtype)
    return converted
class Translate(Transform3d):
    """3D translation transform.

    This code is based on https://github.com/facebookresearch/pytorch3d/blob/46cb5aaaae0cd40f729fd41a39c0c9a232b484c0/pytorch3d/transforms/transform3d.py#L525
    """

    def __init__(self, x, y=None, z=None, dtype='float32'):
        """
        Create a new Transform3d representing 3D translations.

        Option I: Translate(xyz, dtype='float32')
            xyz should be a tensor of shape (N, 3)

        Option II: Translate(x, y, z, dtype='float32')
            Here x, y, and z will be broadcast against each other and
            concatenated to form the translation. Each can be:
                - A python scalar
                - A paddle scalar
                - A 1D paddle tensor
        """
        super().__init__()
        xyz = _handle_input(x, y, z, dtype, "Translate")
        batch = xyz.shape[0]
        # Row-vector convention: the translation occupies the last row.
        mat = paddle.eye(4, dtype=dtype).reshape([1, 4, 4]).tile([batch, 1, 1])
        mat[:, 3, :3] = xyz
        self._matrix = mat

    def _get_matrix_inverse(self):
        """
        Return the inverse of self._matrix (negated translation row).
        """
        mask = paddle.ones([1, 4, 4], dtype=self._matrix.dtype)
        mask[0, 3, :3] = -1.0
        return self._matrix * mask
class Rotate(Transform3d):
    """
    This code is based on https://github.com/facebookresearch/pytorch3d/blob/46cb5aaaae0cd40f729fd41a39c0c9a232b484c0/pytorch3d/transforms/transform3d.py#L615
    """

    def __init__(self, R, dtype='float32', orthogonal_tol: float = 1e-5):
        """
        Create a new Transform3d representing 3D rotation using a rotation
        matrix as the input.

        Args:
            R: a tensor of shape (3, 3) or (N, 3, 3)
            orthogonal_tol: tolerance for the test of the orthogonality of R
        """
        super().__init__()
        if len(R.shape) == 2:
            # Promote a single matrix to a batch of one.
            R = R[None]
        if R.shape[-2:] != [3, 3]:
            raise ValueError(
                "R must have shape (3, 3) or (N, 3, 3); got %s" % repr(R.shape))
        R = R.cast(dtype=dtype)
        _check_valid_rotation_matrix(R, tol=orthogonal_tol)
        batch = R.shape[0]
        mat = paddle.eye(4, dtype=dtype).reshape([1, 4, 4]).tile([batch, 1, 1])
        mat[:, :3, :3] = R
        self._matrix = mat

    def _get_matrix_inverse(self):
        """
        Return the inverse of self._matrix (transpose of a rotation).
        """
        return self._matrix.transpose([0, 2, 1])
def _handle_coord(c, dtype):
    """
    Helper function for _handle_input.

    Args:
        c: Python scalar, paddle scalar, or 1D paddle.tensor
        dtype: dtype used when converting a non-tensor input

    Returns:
        c_vec: 1D paddle.tensor
    """
    if not paddle.is_tensor(c):
        c = paddle.to_tensor(c, dtype=dtype)
    # A 0-dim (scalar) tensor is promoted to a length-1 vector.
    return c.reshape([1]) if len(c.shape) == 0 else c
def _handle_input(x, y, z, dtype, name: str, allow_singleton: bool = False):
    """
    Helper function to handle parsing logic for building transforms. The output
    is always a tensor of shape (N, 3), but there are several types of allowed
    input.

    Case I: Single Matrix
        In this case x is a tensor of shape (N, 3), and y and z are None. Here
        just return x.

    Case II: Vectors and Scalars
        In this case each of x, y, and z can be one of the following
            - Python scalar
            - Paddle scalar
            - Paddle tensor of shape (N, 1) or (1, 1)
        In this case x, y and z are broadcast to tensors of shape (N, 1)
        and concatenated to a tensor of shape (N, 3)

    Case III: Singleton (only if allow_singleton=True)
        In this case y and z are None, and x can be one of the following:
            - Python scalar
            - Paddle scalar
            - Paddle tensor of shape (N, 1) or (1, 1)
        Here x will be duplicated 3 times, and we return a tensor of shape (N, 3)

    Returns:
        xyz: Tensor of shape (N, 3)

    This code is based on https://github.com/facebookresearch/pytorch3d/blob/46cb5aaaae0cd40f729fd41a39c0c9a232b484c0/pytorch3d/transforms/transform3d.py#L716
    """
    # If x is actually a tensor of shape (N, 3) then just return it
    if paddle.is_tensor(x) and len(x.shape) == 2:
        if x.shape[1] != 3:
            msg = "Expected tensor of shape (N, 3); got %r (in %s)"
            raise ValueError(msg % (x.shape, name))
        if y is not None or z is not None:
            msg = "Expected y and z to be None (in %s)" % name
            raise ValueError(msg)
        return x
    if allow_singleton and y is None and z is None:
        y = x
        z = x
    # Convert all to 1D tensors
    xyz = [_handle_coord(c, dtype) for c in [x, y, z]]
    # Broadcast and concatenate
    sizes = [c.shape[0] for c in xyz]
    N = max(sizes)
    for c in xyz:
        if c.shape[0] != 1 and c.shape[0] != N:
            msg = "Got non-broadcastable sizes %r (in %s)" % (sizes, name)
            raise ValueError(msg)
    # BUGFIX: paddle.Tensor.expand takes the target shape as a list/tuple
    # (see the `expand([N, -1, -1])` usage elsewhere in this module);
    # passing a bare int is a torch-ism that is invalid under paddle.
    xyz = [c.expand([N]) for c in xyz]
    xyz = paddle.stack(xyz, axis=1)
    return xyz
def _broadcast_bmm(a, b):
    """
    Batch multiply two matrices and broadcast if necessary.

    Args:
        a: paddle tensor of shape (P, K) or (M, P, K)
        b: paddle tensor of shape (N, K, K)

    Returns:
        a and b broadcast multiplied. The output batch dimension is max(N, M).

    To broadcast transforms across a batch dimension if M != N then
    expect that either M = 1 or N = 1. The tensor with batch dimension 1 is
    expanded to have shape N or M.

    This code is based on https://github.com/facebookresearch/pytorch3d/blob/46cb5aaaae0cd40f729fd41a39c0c9a232b484c0/pytorch3d/transforms/transform3d.py#L802
    """
    if len(a.shape) == 2:
        a = a[None]
    if len(a) != len(b):
        if not ((len(a) == 1) or (len(b) == 1)):
            msg = "Expected batch dim for bmm to be equal or 1; got %r, %r"
            raise ValueError(msg % (a.shape, b.shape))
        # BUGFIX: paddle.Tensor.expand expects a shape list/tuple, not
        # torch-style varargs (cf. `expand([N, -1, -1])` usage below in
        # _check_valid_rotation_matrix).
        if len(a) == 1:
            a = a.expand([len(b), -1, -1])
        if len(b) == 1:
            b = b.expand([len(a), -1, -1])
    return a.bmm(b)
def _check_valid_rotation_matrix(R, tol: float = 1e-7):
    """
    Determine if R is a valid rotation matrix by checking it satisfies the
    following conditions: ``RR^T = I and det(R) = 1``

    Args:
        R: an (N, 3, 3) matrix
        tol: absolute tolerance for the orthogonality check

    Returns:
        None

    Emits a warning if R is an invalid rotation matrix.

    This code is based on https://github.com/facebookresearch/pytorch3d/blob/46cb5aaaae0cd40f729fd41a39c0c9a232b484c0/pytorch3d/transforms/transform3d.py#L831
    """
    batch = R.shape[0]
    identity = paddle.eye(3, dtype=R.dtype).reshape([1, 3, 3]).expand(
        [batch, -1, -1])
    is_orthogonal = paddle.allclose(
        R.bmm(R.transpose([0, 2, 1])), identity, atol=tol)
    det_R = paddle.linalg.det(R)
    has_unit_det = paddle.allclose(det_R, paddle.ones_like(det_R))
    if not (is_orthogonal and has_unit_det):
        warnings.warn("R is not a valid rotation matrix")
    return
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .backbones import *
from .classification import *
from .common import *
from .detection import *
from .heads import *
from .layers import *
from .losses import *
from .middle_encoders import *
from .necks import *
from .optimizers import *
from .point_encoders import *
from .segmentation import *
from .transformers import *
from .voxel_encoders import *
from .voxelizers import *
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/classification/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .paconv import PAConv
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/classification
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/classification/paconv/paconv.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle3d.apis import manager
from paddle3d.models.layers import constant_init, kaiming_normal_init
from paddle3d.ops import assign_score_withk
from paddle3d.utils.logger import logger
from .score_net import ScoreNet
@manager.MODELS.add_component
class PAConv(nn.Layer):
    """PAConv point-cloud classification network.

    Follows the DGCNN pipeline but replaces every EdgeConv with a PAConv
    operator: each of the four layers owns a bank of weight matrices
    ('matrice1'..'matrice4') that are assembled per-neighbor using scores
    predicted by a ScoreNet from relative point coordinates, then applied
    through the CUDA op `assign_score_withk`. The classifier head emits
    40 logits.

    Args:
        k_neighbors: neighborhood size used by the kNN search.
        calc_scores: score normalization for every ScoreNet,
            'softmax' or 'sigmoid'.
        num_matrices: number of weight matrices in each of the 4 banks.
        dropout: dropout probability in the classifier head.
    """

    def __init__(self,
                 k_neighbors=20,
                 calc_scores='softmax',
                 num_matrices=(8, 8, 8, 8),
                 dropout=0.5):
        super(PAConv, self).__init__()
        if calc_scores not in ['softmax', 'sigmoid']:
            raise ValueError(
                "Unsupported calc scores type {}".format(calc_scores))
        self.k = k_neighbors
        self.calc_scores = calc_scores
        # Custom CUDA op that assembles weight banks with predicted scores.
        self.assign_score_withk = assign_score_withk.assign_score_withk
        self.m1, self.m2, self.m3, self.m4 = num_matrices
        # One ScoreNet per layer; input is the 6-dim per-neighbor geometry
        # (coord difference concatenated with coord, see get_scorenet_input).
        self.scorenet1 = ScoreNet(6, self.m1, hidden_unit=[16])
        self.scorenet2 = ScoreNet(6, self.m2, hidden_unit=[16])
        self.scorenet3 = ScoreNet(6, self.m3, hidden_unit=[16])
        self.scorenet4 = ScoreNet(6, self.m4, hidden_unit=[16])
        i1 = 3  # channel dim of input_1st
        o1 = i2 = 64  # channel dim of output_1st and input_2nd
        o2 = i3 = 64  # channel dim of output_2st and input_3rd
        o3 = i4 = 128  # channel dim of output_3rd and input_4th
        o4 = 256  # channel dim of output_4th
        # Each weight bank is initialized as (m, 2*cin, cout), Kaiming-
        # initialized, then flattened to (2*cin, m*cout) so a single matmul
        # applies all m matrices at once (see feat_trans_dgcnn).
        params = paddle.zeros(shape=[self.m1, i1 * 2, o1], dtype='float32')
        kaiming_normal_init(params, nonlinearity='relu')
        params = paddle.transpose(params,
                                  [1, 0, 2]).reshape([i1 * 2, self.m1 * o1])
        matrice1 = paddle.create_parameter(
            shape=[i1 * 2, self.m1 * o1],
            dtype='float32',
            default_initializer=nn.initializer.Assign(params))
        self.add_parameter('matrice1', matrice1)
        params = paddle.zeros(shape=[self.m2, i2 * 2, o2], dtype='float32')
        kaiming_normal_init(params, nonlinearity='relu')
        params = paddle.transpose(params,
                                  [1, 0, 2]).reshape([i2 * 2, self.m2 * o2])
        matrice2 = paddle.create_parameter(
            shape=[i2 * 2, self.m2 * o2],
            dtype='float32',
            default_initializer=nn.initializer.Assign(params))
        self.add_parameter('matrice2', matrice2)
        # NOTE(review): banks 3/4 use create_parameter for the scratch
        # tensor where banks 1/2 use paddle.zeros; the Kaiming init
        # overwrites the values either way, so behavior matches.
        params = paddle.create_parameter(
            shape=[self.m3, i3 * 2, o3], dtype='float32')
        kaiming_normal_init(params, nonlinearity='relu')
        params = paddle.transpose(params,
                                  [1, 0, 2]).reshape([i3 * 2, self.m3 * o3])
        matrice3 = paddle.create_parameter(
            shape=[i3 * 2, self.m3 * o3],
            dtype='float32',
            default_initializer=nn.initializer.Assign(params))
        self.add_parameter('matrice3', matrice3)
        params = paddle.create_parameter(
            shape=[self.m4, i4 * 2, o4], dtype='float32')
        kaiming_normal_init(params, nonlinearity='relu')
        params = paddle.transpose(params,
                                  [1, 0, 2]).reshape([i4 * 2, self.m4 * o4])
        matrice4 = paddle.create_parameter(
            shape=[i4 * 2, self.m4 * o4],
            dtype='float32',
            default_initializer=nn.initializer.Assign(params))
        self.add_parameter('matrice4', matrice4)
        self.bn1 = nn.BatchNorm1D(o1)
        self.bn2 = nn.BatchNorm1D(o2)
        self.bn3 = nn.BatchNorm1D(o3)
        self.bn4 = nn.BatchNorm1D(o4)
        self.bn5 = nn.BatchNorm1D(1024)
        # Fuses the concatenated per-layer features (64+64+128+256=512)
        # into a 1024-dim embedding.
        self.conv5 = nn.Sequential(
            nn.Conv1D(512, 1024, kernel_size=1, bias_attr=False), self.bn5)
        # Classifier head: 2048 = max-pool(1024) ++ avg-pool(1024).
        self.linear1 = nn.Linear(2048, 512, bias_attr=False)
        self.bn11 = nn.BatchNorm1D(512)
        self.dp1 = nn.Dropout(p=dropout)
        self.linear2 = nn.Linear(512, 256, bias_attr=False)
        self.bn22 = nn.BatchNorm1D(256)
        self.dp2 = nn.Dropout(p=dropout)
        self.linear3 = nn.Linear(256, 40)
        self.apply(self.weight_init)

    def weight_init(self, m):
        # Kaiming init for linear/conv weights, zero bias; BN gets
        # weight=1 / bias=0. Applied recursively by self.apply.
        if isinstance(m, paddle.nn.Linear):
            kaiming_normal_init(m.weight, reverse=True)
            if m.bias is not None:
                constant_init(m.bias, value=0)
        elif isinstance(m, paddle.nn.Conv2D):
            kaiming_normal_init(m.weight)
            if m.bias is not None:
                constant_init(m.bias, value=0)
        elif isinstance(m, paddle.nn.Conv1D):
            kaiming_normal_init(m.weight)
            if m.bias is not None:
                constant_init(m.bias, value=0)
        elif isinstance(m, paddle.nn.BatchNorm2D):
            constant_init(m.weight, value=1)
            constant_init(m.bias, value=0)
        elif isinstance(m, paddle.nn.BatchNorm1D):
            constant_init(m.weight, value=1)
            constant_init(m.bias, value=0)

    def knn(self, x, k):
        # x: (B, C, N). Computes pairwise NEGATIVE squared distances via
        # -(|a|^2 + |b|^2 - 2ab), so topk returns the k nearest points.
        B, _, N = x.shape
        inner = -2 * paddle.matmul(x.transpose([0, 2, 1]), x)
        xx = paddle.sum(x**2, axis=1, keepdim=True)
        pairwise_distance = -xx - inner - xx.transpose([0, 2, 1])
        _, idx = pairwise_distance.topk(
            k=k, axis=-1)  # (batch_size, num_points, k)
        return idx, pairwise_distance

    def get_scorenet_input(self, x, idx, k):
        """(neighbor, neighbor-center)"""
        # Gathers each point's k neighbors and returns the ScoreNet input:
        # relative offset concatenated with the absolute neighbor coords.
        batch_size = x.shape[0]
        num_points = x.shape[2]
        x = x.reshape([batch_size, -1, num_points])
        # Flatten (batch, point) into a single gather index space.
        idx_base = paddle.arange(0, batch_size).reshape([-1, 1, 1]) * num_points
        idx = idx + idx_base
        idx = idx.reshape([-1])
        _, num_dims, _ = x.shape
        x = paddle.transpose(x, [0, 2, 1])
        neighbor = x.reshape([batch_size * num_points, -1])
        neighbor = paddle.gather(neighbor, idx, axis=0)
        neighbor = neighbor.reshape([batch_size, num_points, k, num_dims])
        x = x.reshape([batch_size, num_points, 1, num_dims]).tile([1, 1, k, 1])
        xyz = paddle.concat((neighbor - x, neighbor),
                            axis=3).transpose([0, 3, 1, 2])  # b,6,n,k
        return xyz

    def feat_trans_dgcnn(self, point_input, kernel, m):
        """transforming features using weight matrices"""
        # kernel is the flattened (2*cin, m*cout) bank. The neighbor path
        # tiles features to 2*cin; the center path uses only the first cin
        # rows of the bank.
        B, _, N = point_input.shape  # b, 2cin, n
        point_output = paddle.matmul(
            point_input.transpose([0, 2, 1]).tile([1, 1, 2]),
            kernel).reshape([B, N, m, -1])  # b,n,m,cout
        center_output = paddle.matmul(
            point_input.transpose([0, 2, 1]),
            kernel[:point_input.shape[1]]).reshape([B, N, m, -1])  # b,n,m,cout
        return point_output, center_output

    def get_loss(self, pred, label):
        # Label-smoothed cross entropy (smoothing factor eps = 0.2).
        label = paddle.reshape(
            label, [-1])  # gold is the groundtruth label in the dataloader
        eps = 0.2
        n_class = pred.shape[
            1]  # the number of feature_dim of the output, which is output channels
        one_hot = F.one_hot(label, n_class)
        one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)
        log_prb = F.log_softmax(pred, axis=1)
        loss = -(one_hot * log_prb).sum(axis=1).mean()
        losses = {"loss": loss}
        return losses

    def forward(self, inputs):
        # inputs['data']: point cloud, transposed below to (B, 3, N).
        # Returns a loss dict in training, predictions otherwise.
        x = inputs['data']
        label = None
        if 'labels' in inputs.keys():
            label = inputs['labels']
        x = paddle.transpose(x, [0, 2, 1])
        B, C, N = x.shape
        idx, _ = self.knn(
            x, k=self.k
        )  # different with DGCNN, the knn search is only in 3D space
        xyz = self.get_scorenet_input(
            x, idx=idx, k=self.k
        )  # ScoreNet input: 3D coord difference concat with coord: b,6,n,k
        # replace all the DGCNN-EdgeConv with PAConv:
        """CUDA implementation of PAConv: (presented in the supplementary material of the paper)"""
        """feature transformation:"""
        point1, center1 = self.feat_trans_dgcnn(
            point_input=x, kernel=self.matrice1, m=self.m1)  # b,n,m1,o1
        score1 = self.scorenet1(
            xyz, calc_scores=self.calc_scores, bias_attr=0.5)
        """assemble with scores:"""
        point1 = self.assign_score_withk(
            scores=score1, points=point1, centers=center1,
            knn_idx=idx)  # b,o1,n
        point1 = F.relu(self.bn1(point1))
        point2, center2 = self.feat_trans_dgcnn(
            point_input=point1, kernel=self.matrice2, m=self.m2)
        score2 = self.scorenet2(
            xyz, calc_scores=self.calc_scores, bias_attr=0.5)
        point2 = self.assign_score_withk(
            scores=score2, points=point2, centers=center2, knn_idx=idx)
        point2 = F.relu(self.bn2(point2))
        point3, center3 = self.feat_trans_dgcnn(
            point_input=point2, kernel=self.matrice3, m=self.m3)
        score3 = self.scorenet3(
            xyz, calc_scores=self.calc_scores, bias_attr=0.5)
        point3 = self.assign_score_withk(
            scores=score3, points=point3, centers=center3, knn_idx=idx)
        point3 = F.relu(self.bn3(point3))
        point4, center4 = self.feat_trans_dgcnn(
            point_input=point3, kernel=self.matrice4, m=self.m4)
        score4 = self.scorenet4(
            xyz, calc_scores=self.calc_scores, bias_attr=0.5)
        point4 = self.assign_score_withk(
            scores=score4, points=point4, centers=center4, knn_idx=idx)
        point4 = F.relu(self.bn4(point4))
        # Multi-scale fusion: concat all four layer outputs channel-wise.
        point = paddle.concat((point1, point2, point3, point4), axis=1)
        point = F.relu(self.conv5(point))
        # Global max + average pooling over the point dimension.
        point11 = F.adaptive_max_pool1d(point, 1).reshape([B, -1])
        point22 = F.adaptive_avg_pool1d(point, 1).reshape([B, -1])
        point = paddle.concat((point11, point22), 1)
        point = F.relu(self.bn11(self.linear1(point)))
        point = self.dp1(point)
        point = F.relu(self.bn22(self.linear2(point)))
        point = self.dp2(point)
        point = self.linear3(point)
        if self.training:
            loss = self.get_loss(point, label)
            return loss
        else:
            # In export mode return softmax probabilities; otherwise the
            # raw logits wrapped in a dict.
            if not getattr(self, "in_export_mode", False):
                return {'preds': point}
            else:
                return F.softmax(point, axis=-1)

    def export(self, save_dir: str, input_shape=(1, 1024, 3), **kwargs):
        # Converts the model to static graph and saves it for inference.
        self.in_export_mode = True
        save_path = os.path.join(save_dir, 'paconv')
        paddle.jit.to_static(
            self,
            input_spec=[{
                'data':
                paddle.static.InputSpec(shape=input_shape, dtype='float32')
            }])
        paddle.jit.save(self, save_path)
        logger.info("Exported model is saved in {}".format(save_dir))
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/classification
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/classification/paconv/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .paconv import PAConv
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/classification
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/classification/paconv/score_net.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
class ScoreNet(nn.Layer):
    """Small MLP (stack of 1x1 Conv2D layers) that predicts per-neighbor
    assembly scores for the PAConv weight banks.

    Input: (B, in_channel, N, K) neighbor geometry features.
    Output: (B, N, K, out_channel) normalized scores.
    """

    def __init__(self, in_channel, out_channel, hidden_unit=[16],
                 last_bn=False):
        # in_channel: channels of the geometry input (6 for PAConv).
        # out_channel: number of weight matrices to score (m).
        # hidden_unit: widths of the hidden 1x1 conv layers; empty/None
        #     collapses the net to a single conv.
        # last_bn: whether the output layer is followed by BatchNorm
        #     (and therefore has no conv bias).
        super(ScoreNet, self).__init__()
        self.hidden_unit = hidden_unit
        self.last_bn = last_bn
        self.mlp_convs_hidden = nn.LayerList()
        self.mlp_bns_hidden = nn.LayerList()
        if hidden_unit is None or len(hidden_unit) == 0:
            # Degenerate case: one conv straight from input to output.
            self.mlp_convs_nohidden = nn.Conv2D(
                in_channel, out_channel, 1, bias_attr=not last_bn)
            if self.last_bn:
                self.mlp_bns_nohidden = nn.BatchNorm2D(out_channel)
        else:
            # convs and bns are appended in lockstep so index i pairs them.
            self.mlp_convs_hidden.append(
                nn.Conv2D(in_channel, hidden_unit[0], 1,
                          bias_attr=False))  # from in_channel to first hidden
            self.mlp_bns_hidden.append(nn.BatchNorm2D(hidden_unit[0]))
            for i in range(1, len(hidden_unit)
                           ):  # from 2nd hidden to next hidden to last hidden
                self.mlp_convs_hidden.append(
                    nn.Conv2D(
                        hidden_unit[i - 1], hidden_unit[i], 1, bias_attr=False))
                self.mlp_bns_hidden.append(nn.BatchNorm2D(hidden_unit[i]))
            self.mlp_convs_hidden.append(
                nn.Conv2D(
                    hidden_unit[-1], out_channel, 1,
                    bias_attr=not last_bn))  # from last hidden to out_channel
            self.mlp_bns_hidden.append(nn.BatchNorm2D(out_channel))

    def forward(self, xyz, calc_scores='softmax', bias_attr=0):
        """Predict assembly scores from neighbor geometry.

        Args:
            xyz: (B, in_channel, N, K) tensor.
            calc_scores: 'softmax' (normalize across the matrix axis) or
                'sigmoid'.
            bias_attr: constant added to the normalized scores.

        Returns:
            (B, N, K, out_channel) scores.
        """
        B, _, N, K = xyz.shape
        scores = xyz
        if self.hidden_unit is None or len(self.hidden_unit) == 0:
            if self.last_bn:
                scores = self.mlp_bns_nohidden(self.mlp_convs_nohidden(scores))
            else:
                scores = self.mlp_convs_nohidden(scores)
        else:
            for i, conv in enumerate(self.mlp_convs_hidden):
                if i == len(self.mlp_convs_hidden
                            ) - 1:  # if the output layer, no ReLU
                    if self.last_bn:
                        bn = self.mlp_bns_hidden[i]
                        scores = bn(conv(scores))
                    else:
                        scores = conv(scores)
                else:
                    bn = self.mlp_bns_hidden[i]
                    scores = F.relu(bn(conv(scores)))
        if calc_scores == 'softmax':
            scores = F.softmax(
                scores, axis=1
            ) + bias_attr  # B*m*N*K, where bias may bring larger gradient
        elif calc_scores == 'sigmoid':
            scores = F.sigmoid(scores) + bias_attr  # B*m*N*K
        else:
            raise ValueError('Not Implemented!')
        # (B, m, N, K) -> (B, N, K, m) for the assign_score_withk op.
        scores = paddle.transpose(scores, [0, 2, 3, 1])
        return scores
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/losses/disentangled_box3d_loss.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from paddle import nn
from .smooth_l1_loss import smooth_l1_loss
from paddle3d.utils import transform3d as t3d
from paddle3d.utils.transform import quaternion_to_matrix
# Sign pattern of the 8 corners of an axis-aligned unit box in the object
# frame; row r gives the sign of axis r for each corner (columns). Scaled
# by the half-dimensions in DisentangledBox3DLoss.corners.
BOX3D_CORNER_MAPPING = [[1, 1, 1, 1, -1, -1, -1, -1],
                        [1, -1, -1, 1, 1, -1, -1, 1],
                        [1, 1, -1, -1, 1, 1, -1, -1]]
def homogenize_points(xy):
    """
    Append a homogeneous coordinate of 1.0 to the last dimension.

    Args:
        xy (paddle.Tensor): xy coordinates, shape=(N, ..., 2)
            E.g., (N, 2) or (N, K, 2) or (N, H, W, 2)

    Returns:
        paddle.Tensor: 1.0 appended to the last dimension. shape=(N, ..., 3)
            E.g, (N, 3) or (N, K, 3) or (N, H, W, 3).
    """
    # Pad1D pads the trailing axis; the temporary leading axis makes the
    # layer accept an arbitrary number of input dimensions.
    one_pad = nn.Pad1D(padding=[0, 1], mode='constant', value=1.0)
    batched = xy.unsqueeze(0)
    return one_pad(batched).squeeze(0)
def unproject_points2d(points2d, inv_K, scale=1.0):
    """
    Unproject 2D image points into 3D camera-frame rays/points.

    Args:
        points2d (paddle.Tensor): xy coordinates. shape=(N, ..., 2)
            E.g., (N, 2) or (N, K, 2) or (N, H, W, 2)
        inv_K (paddle.Tensor): Inverted intrinsics; shape=(N, 3, 3)
        scale (float): Scaling factor, default: 1.0

    Returns:
        paddle.Tensor: Unprojected 3D point. shape=(N, ..., 3)
            E.g., (N, 3) or (N, K, 3) or (N, H, W, 3)
    """
    homo = homogenize_points(points2d)
    out_shape = homo.shape
    # Flatten to column vectors for the batched matmul with inv_K.
    flat = homo.reshape([-1, 3]).unsqueeze(-1)  # (N, 3, 1)
    rays = paddle.matmul(inv_K, flat)  # (N, 3, 3) x (N, 3, 1) -> (N, 3, 1)
    return rays.reshape(out_shape) * scale
class DisentangledBox3DLoss(nn.Layer):
    """
    Disentangled 3D-box corner loss: each prediction group (quaternion,
    projected center, depth, size) is scored by substituting only that
    group into the target box and comparing the resulting 3D corners,
    isolating each group's gradient.

    This code is based on https://github.com/TRI-ML/dd3d/blob/da25b614a29344830c96c2848c02a15b35380c4b/tridet/modeling/dd3d/disentangled_box3d_loss.py#L13

    Args:
        smooth_l1_loss_beta: transition point of the smooth-L1 corner loss.
        max_loss_per_group: upper clip bound on each group's per-corner loss.
    """

    def __init__(self, smooth_l1_loss_beta=0.05, max_loss_per_group=20.0):
        super(DisentangledBox3DLoss, self).__init__()
        self.smooth_l1_loss_beta = smooth_l1_loss_beta
        self.max_loss_per_group = max_loss_per_group

    def forward(self,
                box3d_pred,
                box3d_targets,
                locations,
                inv_intrinsics,
                weights=None):
        """
        Args:
            box3d_pred: (..., 10) predicted boxes laid out as
                [quat(4), proj_ctr(2), depth(1), size(3)].
            box3d_targets: (..., 10) target boxes, same layout.
            locations: unused here; kept for interface compatibility.
            inv_intrinsics: (N, 3, 3) inverted camera intrinsics.
            weights: optional per-box weights; when given, per-box mean
                corner losses are weight-summed instead of averaged.

        Returns:
            tuple(dict, paddle.Tensor): per-group losses keyed
            "loss_box3d_<group>", and the detached fully-entangled mean L1
            distance over the 24 corner coordinates per box.
        """
        box3d_pred = box3d_pred.cast('float32')
        box3d_targets = box3d_targets.cast('float32')
        target_corners = self.corners(
            box3d_targets[..., :4], box3d_targets[..., 4:6],
            box3d_targets[..., 6:7], box3d_targets[..., 7:], inv_intrinsics)
        disentangled_losses = {}
        # Component boundaries within the 10-dim box encoding.
        index = [0, 4, 6, 7, 10]
        for i, component_key in enumerate(["quat", "proj_ctr", "depth",
                                           "size"]):
            # Replace only this component group with the prediction.
            disentangled_boxes = box3d_targets.clone()
            disentangled_boxes[..., index[i]:index[
                i + 1]] = box3d_pred[..., index[i]:index[i + 1]]
            pred_corners = self.corners(
                disentangled_boxes[..., :4], disentangled_boxes[..., 4:6],
                disentangled_boxes[..., 6:7], disentangled_boxes[..., 7:],
                inv_intrinsics)
            loss = smooth_l1_loss(
                pred_corners, target_corners, beta=self.smooth_l1_loss_beta)
            # NOTE: removed a dead `paddle.abs(pred_corners - target_corners)`
            # computation here whose result was never used.
            # Bound the loss
            loss = loss.clip(max=self.max_loss_per_group)
            if weights is not None:
                loss = paddle.sum(loss.reshape([-1, 24]).mean(axis=1) * weights)
            else:
                loss = loss.reshape([-1, 24]).mean()
            disentangled_losses["loss_box3d_" + component_key] = loss
        # Fully-entangled corner distance, detached (monitoring signal).
        pred_corners = self.corners(box3d_pred[..., :4], box3d_pred[..., 4:6],
                                    box3d_pred[..., 6:7], box3d_pred[..., 7:],
                                    inv_intrinsics)
        entangled_l1_dist = (
            target_corners - pred_corners).detach().abs().reshape(
                [-1, 24]).mean(axis=1)
        return disentangled_losses, entangled_l1_dist

    def corners(self, quat, proj_ctr, depth, size, inv_intrinsics):
        """Decode (quat, projected center, depth, size) into the 8 box
        corners in the camera frame, shape (N, 8, 3)."""
        # Back-project the 2D center along its camera ray to the given depth.
        ray = unproject_points2d(proj_ctr, inv_intrinsics)
        tvec = ray * depth
        translation = t3d.Translate(tvec)
        R = quaternion_to_matrix(quat)
        rotation = t3d.Rotate(R=R.transpose(
            [0, 2, 1]))  # Need to transpose to make it work.
        tfm = rotation.compose(translation)
        _corners = 0.5 * paddle.to_tensor(BOX3D_CORNER_MAPPING).T
        lwh = paddle.stack([size[:, 1], size[:, 0], size[:, 2]],
                           -1)  # wlh -> lwh
        corners_in_obj_frame = lwh.unsqueeze(1) * _corners.unsqueeze(0)
        corners3d = tfm.transform_points(corners_in_obj_frame)
        return corners3d
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/losses/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .focal_loss import (FastFocalLoss, FocalLoss, MultiFocalLoss,
SigmoidFocalClassificationLoss, sigmoid_focal_loss,
WeightedFocalLoss)
from .reg_loss import RegLoss, L1Loss
from .iou_loss import IOULoss, GIoULoss
from .smooth_l1_loss import smooth_l1_loss
from .disentangled_box3d_loss import DisentangledBox3DLoss, unproject_points2d
from .weight_loss import (WeightedCrossEntropyLoss, WeightedSmoothL1Loss,
get_corner_loss_lidar)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/losses/focal_loss.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn.functional as F
from paddle import nn
from paddle3d.apis import manager
from paddle3d.models.layers.layer_libs import _transpose_and_gather_feat
from paddle3d.models.losses.utils import weight_reduce_loss
class FocalLoss(nn.Layer):
    """Penalty-reduced pixel-wise focal loss over a heatmap target where
    peak locations equal exactly 1 (CornerNet-style formulation).
    """

    def __init__(self, alpha=2, beta=4):
        # alpha: focusing exponent on the prediction.
        # beta: penalty-reduction exponent on (1 - target) for negatives.
        super().__init__()
        self.alpha = alpha
        self.beta = beta

    def forward(self, prediction, target):
        """forward

        Args:
            prediction (paddle.Tensor): model prediction
            target (paddle.Tensor): ground truth

        Returns:
            paddle.Tensor: focal loss
        """
        pos_mask = (target == 1).astype("float32")
        neg_mask = (target < 1).astype("float32")
        neg_weights = paddle.pow(1 - target, self.beta)
        pos_term = paddle.log(prediction) * paddle.pow(
            1 - prediction, self.alpha) * pos_mask
        neg_term = paddle.log(1 - prediction) * paddle.pow(
            prediction, self.alpha) * neg_weights * neg_mask
        num_pos = pos_mask.sum()
        pos_sum = pos_term.sum()
        neg_sum = neg_term.sum()
        # Normalize by the positive count; fall back to the raw negative
        # term when there are no positives.
        if num_pos == 0:
            return -neg_sum
        return -(pos_sum + neg_sum) / num_pos
class FastFocalLoss(nn.Layer):
    '''
    This function refers to https://github.com/tianweiy/CenterPoint/blob/cb25e870b271fe8259e91c5d17dcd429d74abc91/det3d/models/losses/centernet_loss.py#L26.
    '''

    def __init__(self):
        super(FastFocalLoss, self).__init__()

    def forward(self, out, target, ind, mask, cat):
        '''
        Arguments:
          out, target: B x C x H x W
          ind, mask: B x M
          cat (category id for peaks): B x M
        '''
        mask = mask.cast('float32')
        # Negative term over the whole heatmap, penalty-reduced by
        # (1 - target)^4 near the Gaussian peaks.
        gt = paddle.pow(1 - target, 4)
        neg_loss = paddle.log(1 - out) * paddle.pow(out, 2) * gt
        neg_loss = neg_loss.sum()
        # Gather the prediction at each annotated peak location.
        pos_pred_pix = _transpose_and_gather_feat(out, ind)  # B x M x C
        # Build (batch, peak, class) index triples for gather_nd:
        # bs_ind enumerates the batch dim, m_ind the peak dim.
        bs_ind = []
        for i in range(pos_pred_pix.shape[0]):
            bs_idx = paddle.full(
                shape=[1, pos_pred_pix.shape[1], 1],
                fill_value=i,
                dtype=ind.dtype)
            bs_ind.append(bs_idx)
        bs_ind = paddle.concat(bs_ind, axis=0)
        m_ind = []
        for i in range(pos_pred_pix.shape[1]):
            m_idx = paddle.full(
                shape=[pos_pred_pix.shape[0], 1, 1],
                fill_value=i,
                dtype=ind.dtype)
            m_ind.append(m_idx)
        m_ind = paddle.concat(m_ind, axis=1)
        cat = paddle.concat([bs_ind, m_ind, cat.unsqueeze(2)], axis=-1)
        pos_pred = pos_pred_pix.gather_nd(cat)  # B x M
        # Positive focal term, masked to valid peaks only.
        num_pos = mask.sum()
        pos_loss = paddle.log(pos_pred) * paddle.pow(1 - pos_pred, 2) * \
            mask
        pos_loss = pos_loss.sum()
        if num_pos == 0:
            return -neg_loss
        return -(pos_loss + neg_loss) / num_pos
class MultiFocalLoss(nn.Layer):
    """Focal loss class
    (multi-class variant: softmax over the class axis with integer-label
    targets; returns the unreduced per-element loss map).
    """

    def __init__(self, alpha=2, beta=4):
        # alpha: scale factor on the log-probability term.
        # beta: exponent of the (1 - p) modulating weight.
        super().__init__()
        self.alpha = alpha
        self.beta = beta
        # Guards against log(0) and exact one-hot zeros.
        self.eps = 1e-6

    def forward(self, prediction, target):
        """forward

        Args:
            prediction (paddle.Tensor): model prediction, shape (N, C, ...)
            target (paddle.Tensor): integer ground-truth labels, shape (N, ...)

        Returns:
            paddle.Tensor: focal loss per element, shape (N, ...)
        """
        n = prediction.shape[0]
        out_size = [n] + prediction.shape[2:]
        if target.shape[1:] != prediction.shape[2:]:
            raise ValueError(
                f'Expected target size {out_size}, got {target.shape}')
        # compute softmax over the classes axis
        input_soft = F.softmax(prediction, axis=1) + self.eps
        # create the labels one hot tensor
        target_one_hot = F.one_hot(
            target, num_classes=prediction.shape[1]).cast(
                prediction.dtype) + self.eps
        # one_hot puts the class axis last; build a permutation that moves
        # it to axis 1 so the layout matches prediction (N, C, ...).
        new_shape = [0, len(target_one_hot.shape) - 1
                     ] + [i for i in range(1,
                                           len(target_one_hot.shape) - 1)]
        target_one_hot = target_one_hot.transpose(new_shape)
        # compute the actual focal loss
        weight = paddle.pow(-input_soft + 1.0, self.beta)
        focal = -self.alpha * weight * paddle.log(input_soft)
        loss = paddle.sum(target_one_hot * focal, axis=1)
        # loss = paddle.einsum('bc...,bc...->b...', target_one_hot, focal)
        return loss
class SigmoidFocalClassificationLoss(nn.Layer):
    """
    Sigmoid focal loss with anchor-wise weighting.

    This code is based on https://github.com/TRAILab/CaDDN/blob/5a96b37f16b3c29dd2509507b1cdfdff5d53c558/pcdet/utils/loss_utils.py#L14
    """

    def __init__(self, gamma=2.0, alpha=0.25):
        """
        Args:
            gamma: Weighting parameter to balance loss for hard and easy examples.
            alpha: Weighting parameter to balance loss for positive and negative examples.
        """
        super(SigmoidFocalClassificationLoss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma

    def sigmoid_cross_entropy_with_logits(self, prediction, target):
        """ Implementation for sigmoid_cross_entropy_with_logits:
            max(x, 0) - x * z + log(1 + exp(-abs(x)))
        Args:
            prediction: (B, #anchors, #classes) float tensor.
                Predicted logits for each class
            target: (B, #anchors, #classes) float tensor.
                One-hot encoded classification targets
        Returns:
            loss: (B, #anchors, #classes) float tensor.
                Sigmoid cross entropy loss without reduction
        """
        # Numerically stable BCE-with-logits formulation.
        loss = paddle.clip(prediction, min=0) - prediction * target + \
            paddle.log1p(paddle.exp(-paddle.abs(prediction)))
        return loss

    def forward(self, prediction, target, weights):
        """
        Args:
            prediction: (B, #anchors, #classes) float tensor.
                Predicted logits for each class
            target: (B, #anchors, #classes) float tensor.
                One-hot encoded classification targets
            weights: (B, #anchors) float tensor.
                Anchor-wise weights.
        Returns:
            weighted_loss: (B, #anchors, #classes) float tensor after weighting.
        """
        pred_sigmoid = F.sigmoid(prediction)
        # alpha balances positives vs negatives; pt is the "miss" probability.
        alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
        pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
        focal_weight = alpha_weight * paddle.pow(pt, self.gamma)
        bce_loss = self.sigmoid_cross_entropy_with_logits(prediction, target)
        loss = focal_weight * bce_loss
        # Idiom fix: len(x.shape) instead of calling x.shape.__len__().
        if len(weights.shape) == 2 or \
                (len(weights.shape) == 1 and len(target.shape) == 2):
            weights = weights.unsqueeze(-1)
        assert len(weights.shape) == len(loss.shape)
        return loss * weights
def sigmoid_focal_loss(inputs, targets, alpha=-1, gamma=2, reduction="none"):
    """
    Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
    This code is based on https://github.com/facebookresearch/fvcore/blob/6a5360691be65c76188ed99b57ccbbf5fc19924a/fvcore/nn/focal_loss.py#L7

    Args:
        inputs: A float tensor of arbitrary shape.
                The predictions for each example.
        targets: A float tensor with the same shape as inputs. Stores the binary
                 classification label for each element in inputs
                (0 for the negative class and 1 for the positive class).
        alpha: (optional) Weighting factor in range (0,1) to balance
                positive vs negative examples. Default = -1 (no weighting).
        gamma: Exponent of the modulating factor (1 - p_t) to
               balance easy vs hard examples.
        reduction: 'none' | 'mean' | 'sum'
                 'none': No reduction will be applied to the output.
                 'mean': The output will be averaged.
                 'sum': The output will be summed.
    Returns:
        Loss tensor with the reduction option applied.
    """
    inputs = inputs.cast('float32')
    targets = targets.cast('float32')
    probs = F.sigmoid(inputs)
    ce = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
    # p_t: probability assigned to the ground-truth class of each element.
    p_t = probs * targets + (1 - probs) * (1 - targets)
    loss = ce * (1 - p_t)**gamma
    if alpha >= 0:
        # Class-balancing factor.
        loss = (alpha * targets + (1 - alpha) * (1 - targets)) * loss
    if reduction == "sum":
        return loss.sum()
    if reduction == "mean":
        return loss.mean()
    return loss
@manager.LOSSES.add_component
class WeightedFocalLoss(nn.Layer):
    """`Focal Loss <https://arxiv.org/abs/1708.02002>`_

    This code is modified from https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/losses/focal_loss.py#L160

    Args:
        use_sigmoid (bool, optional): Whether to the prediction is
            used for sigmoid or softmax. Defaults to True.
        gamma (float, optional): The gamma for calculating the modulating
            factor. Defaults to 2.0.
        alpha (float, optional): A balanced form for Focal Loss.
            Defaults to 0.25.
        reduction (str, optional): The method used to reduce the loss into
            a scalar. Defaults to 'mean'. Options are "none", "mean" and
            "sum".
        loss_weight (float, optional): Weight of loss. Defaults to 1.0.
    """

    def __init__(self,
                 use_sigmoid=True,
                 gamma=2.0,
                 alpha=0.25,
                 reduction='mean',
                 loss_weight=1.0):
        super(WeightedFocalLoss, self).__init__()
        assert use_sigmoid is True, 'Only sigmoid focal loss supported now.'
        self.use_sigmoid = use_sigmoid
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Forward function.

        Args:
            pred (paddle.Tensor): The prediction.
            target (paddle.Tensor): The learning label of the prediction.
            weight (paddle.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to average
                the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Options are "none", "mean" and "sum".

        Returns:
            paddle.Tensor: The calculated loss
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (reduction_override
                     if reduction_override else self.reduction)
        if not self.use_sigmoid:
            raise NotImplementedError
        num_classes = pred.shape[1]
        # One-hot with an extra "background" column, then drop it: labels
        # equal to num_classes map to an all-zero (negative) row.
        onehot = F.one_hot(target, num_classes=num_classes + 1)
        onehot = onehot[:, :num_classes]
        return self.loss_weight * py_sigmoid_focal_loss(
            pred,
            onehot,
            weight,
            gamma=self.gamma,
            alpha=self.alpha,
            reduction=reduction,
            avg_factor=avg_factor)
def py_sigmoid_focal_loss(pred,
                          target,
                          weight=None,
                          gamma=2.0,
                          alpha=0.25,
                          reduction='mean',
                          avg_factor=None):
    """paddle version of `Focal Loss <https://arxiv.org/abs/1708.02002>`_.
    This function is modified from https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/losses/focal_loss.py#L12
    """
    probs = F.sigmoid(pred)
    target = target.astype(pred.dtype)
    # pt: probability mass on the wrong side for each entry.
    pt = (1 - probs) * target + probs * (1 - target)
    focal_weight = (alpha * target + (1 - alpha) * (1 - target)) * pt.pow(gamma)
    ce = F.binary_cross_entropy_with_logits(pred, target, reduction='none')
    loss = ce * focal_weight
    if weight is not None:
        if weight.shape != loss.shape:
            if weight.shape[0] == loss.shape[0]:
                # Common case: per-prior weight (num_priors,), broadcast
                # over the class axis.
                weight = weight.reshape([-1, 1])
            else:
                # Flattened per-prior-per-class weight, e.g. FSAF:
                # (num_priors * num_class,) vs loss (num_priors, num_class).
                assert weight.numel() == loss.numel()
                weight = weight.reshape([loss.shape[0], -1])
        assert weight.ndim == loss.ndim
    return weight_reduce_loss(loss, weight, reduction, avg_factor)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/losses/utils.py
|
import functools
import paddle
def reduce_loss(loss, reduction):
    """Reduce loss as specified.
    This function is modified from https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/losses/utils.py#L9

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.

    Raises:
        ValueError: If ``reduction`` is not one of the supported options.
    """
    # none: 0, elementwise_mean:1, sum: 2
    if reduction == 'none':
        return loss
    elif reduction == 'mean':
        return loss.mean()
    elif reduction == 'sum':
        return loss.sum()
    # Previously an unknown reduction fell through and silently returned
    # None; fail fast instead so the mistake surfaces at the call site.
    raise ValueError(f'Invalid reduction: {reduction!r}, expected one of '
                     '"none", "mean", "sum"')


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.
    This function is modified from https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/losses/utils.py#L30

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Avarage factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.

    Raises:
        ValueError: If ``avg_factor`` is combined with a reduction other
            than "mean" or "none".
    """
    # if weight is specified, apply element-wise weight
    if weight is not None:
        loss = loss * weight
    # if avg_factor is not specified, just reduce the loss
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    else:
        # if reduction is mean, then average the loss by avg_factor
        if reduction == 'mean':
            loss = loss.sum() / avg_factor
        # if reduction is 'none', then do nothing, otherwise raise an error
        elif reduction != 'none':
            raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/losses/reg_loss.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle3d.apis import manager
from paddle3d.models.layers.layer_libs import _transpose_and_gather_feat
from paddle3d.models.losses.utils import weight_reduce_loss
class RegLoss(nn.Layer):
    """Masked L1 regression loss over gathered center predictions.

    This code is based on https://github.com/tianweiy/CenterPoint/blob/cb25e870b271fe8259e91c5d17dcd429d74abc91/det3d/models/losses/centernet_loss.py#L6
    """

    def __init__(self):
        super(RegLoss, self).__init__()

    def forward(self, output, mask, ind, target):
        # Gather predictions at the target indices.
        pred = _transpose_and_gather_feat(output, ind)
        valid = mask.cast('float32').unsqueeze(2)
        # Zero out invalid slots on both sides before differencing;
        # normalize by the number of valid slots (eps avoids div-by-zero).
        raw = F.l1_loss(pred * valid, target * valid, reduction='none')
        normalized = raw / (valid.sum() + 1e-4)
        # Reduce over batch and target slots, keeping the code axis.
        return normalized.transpose([2, 1, 0]).sum(axis=2).sum(axis=1)
def l1_loss(pred, target, weight=None, reduction='mean', avg_factor=None):
    """L1 loss.
    This function is modified from https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/losses/smooth_l1_loss.py#L37

    Args:
        pred (paddle.Tensor): The prediction.
        target (paddle.Tensor): The learning target of the prediction.
        weight (paddle.Tensor, optional): Element-wise weights.
        reduction (str): "none", "mean" or "sum".
        avg_factor (float, optional): Divisor used with reduction "mean".

    Returns:
        paddle.Tensor: Calculated loss
    """
    assert pred.shape == target.shape and target.numel() > 0
    elementwise = paddle.abs(pred - target)
    return weight_reduce_loss(elementwise, weight, reduction, avg_factor)
@manager.LOSSES.add_component
class L1Loss(nn.Layer):
    """L1 loss.
    This class is modified from https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/losses/smooth_l1_loss.py#L108

    Args:
        reduction (str, optional): The method to reduce the loss.
            Options are "none", "mean" and "sum".
        loss_weight (float, optional): The weight of loss.
    """

    def __init__(self, reduction='mean', loss_weight=1.0):
        super(L1Loss, self).__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Forward function.

        Args:
            pred (paddle.Tensor): The prediction.
            target (paddle.Tensor): The learning target of the prediction.
            weight (paddle.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to average
                the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (reduction_override
                     if reduction_override else self.reduction)
        raw = l1_loss(
            pred, target, weight, reduction=reduction, avg_factor=avg_factor)
        return self.loss_weight * raw
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/losses/weight_loss.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle3d.apis import manager
from paddle3d.models.common import boxes_to_corners_3d
class WeightedCrossEntropyLoss(nn.Layer):
    """Anchor-wise weighted cross-entropy loss on one-hot targets.

    This code is based on https://github.com/TRAILab/CaDDN/blob/5a96b37f16b3c29dd2509507b1cdfdff5d53c558/pcdet/utils/loss_utils.py#L187
    """

    def __init__(self):
        super(WeightedCrossEntropyLoss, self).__init__()

    def forward(self, prediction, target, weights):
        """
        Args:
            input: (B, #anchors, #classes) float tensor.
                Predited logits for each class.
            target: (B, #anchors, #classes) float tensor.
                One-hot classification targets.
            weights: (B, #anchors) float tensor.
                Anchor-wise weights.

        Returns:
            loss: (B, #anchors) float tensor.
                Weighted cross entropy loss without reduction
        """
        # Convert one-hot targets to class indices for cross_entropy.
        labels = target.argmax(axis=-1)
        per_anchor = F.cross_entropy(prediction, labels, reduction='none')
        return per_anchor * weights
@manager.LOSSES.add_component
class WeightedSmoothL1Loss(nn.Layer):
    """
    This code is based on https://github.com/TRAILab/CaDDN/blob/5a96b37f16b3c29dd2509507b1cdfdff5d53c558/pcdet/utils/loss_utils.py#L80
                  | 0.5 * x ** 2 / beta   if abs(x) < beta
    smoothl1(x) = |
                  | abs(x) - 0.5 * beta   otherwise,
    where x = input - target.
    """

    def __init__(self, beta=1.0 / 9.0, code_weights=None):
        """
        Args:
            beta: Scalar float.
                L1 to L2 change point.
                For beta values < 1e-5, L1 loss is computed.
            code_weights: (#codes) float list if not None.
                Code-wise weights.
        """
        super(WeightedSmoothL1Loss, self).__init__()
        self.beta = beta
        # Bug fix: code_weights defaults to None, but the previous code
        # called paddle.to_tensor(code_weights) unconditionally, which
        # fails for None. forward() already treats None as "no code-wise
        # weighting" (upstream CaDDN guards this the same way).
        if code_weights is None:
            self.code_weights = None
        else:
            self.code_weights = paddle.to_tensor(code_weights)

    @staticmethod
    def smooth_l1_loss(diff, beta):
        """Element-wise smooth-L1 of ``diff`` with transition point ``beta``."""
        if beta < 1e-5:
            # Degenerate beta: the quadratic zone vanishes, plain L1.
            loss = paddle.abs(diff)
        else:
            n_diff = paddle.abs(diff)
            loss = paddle.where(n_diff < beta, 0.5 * n_diff**2 / beta,
                                n_diff - 0.5 * beta)
        return loss

    def forward(self, input, target, weights=None):
        """
        Args:
            input: (B, #anchors, #codes) float tensor.
                Ecoded predicted locations of objects.
            target: (B, #anchors, #codes) float tensor.
                Regression targets.
            weights: (B, #anchors) float tensor if not None.

        Returns:
            loss: (B, #anchors) float tensor.
                Weighted smooth l1 loss without reduction.
        """
        target = paddle.where(paddle.isnan(target), input,
                              target)  # ignore nan targets
        diff = input - target
        # code-wise weighting
        if self.code_weights is not None:
            diff = diff * self.code_weights.reshape([1, 1, -1])
        loss = self.smooth_l1_loss(diff, self.beta)
        # anchor-wise weighting
        if weights is not None:
            assert weights.shape[0] == loss.shape[0] and weights.shape[
                1] == loss.shape[1]
            loss = loss * weights.unsqueeze(-1)
        return loss
def get_corner_loss_lidar(pred_bbox3d, gt_bbox3d):
    """
    Args:
        pred_bbox3d: (N, 7) float Tensor.
        gt_bbox3d: (N, 7) float Tensor.

    Returns:
        corner_loss: (N) float Tensor.
    """
    assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]
    pred_corners = boxes_to_corners_3d(pred_bbox3d)
    gt_corners = boxes_to_corners_3d(gt_bbox3d)
    # Heading is ambiguous up to pi: also compare against the gt box
    # rotated by pi and keep the smaller per-corner distance.
    gt_flipped = gt_bbox3d.clone()
    gt_flipped[:, 6] += np.pi
    gt_corners_flipped = boxes_to_corners_3d(gt_flipped)
    # (N, 8) per-corner distance under the better of the two headings.
    corner_dist = paddle.minimum(
        paddle.linalg.norm(pred_corners - gt_corners, axis=2),
        paddle.linalg.norm(pred_corners - gt_corners_flipped, axis=2))
    # (N, 8) -> (N,) smooth-L1 averaged over the 8 corners.
    per_corner = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)
    return per_corner.mean(axis=1)
@manager.LOSSES.add_component
class WeightedL1Loss(nn.Layer):
    """Element-wise L1 loss with optional per-element weighting.
    """

    def __init__(self, reduction='mean', loss_weight=1.0):
        """
        Args:
            reduction: "none", "mean" or "sum".
            loss_weight: Scalar multiplier applied to the final loss.
        """
        super(WeightedL1Loss, self).__init__()
        self.loss_weight = loss_weight
        self.reduction = reduction
        # Unreduced L1; reduction is applied manually after weighting.
        self.loss = nn.L1Loss(reduction='none')

    def forward(self, input, target, weight=None):
        """
        Args:
            input: (B, #anchors, #codes) float tensor.
                Ecoded predicted locations of objects.
            target: (B, #anchors, #codes) float tensor.
                Regression targets.
            weights: (B, #anchors) float tensor if not None.

        Returns:
            Weighted L1 loss after the configured reduction.
        """
        raw = self.loss(input, target)
        if weight is not None:
            raw *= weight
        if self.reduction == 'sum':
            raw = raw.sum()
        elif self.reduction == 'mean':
            raw = raw.mean()
        return raw * self.loss_weight
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/losses/iou_loss.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from paddle import nn
import paddle.nn.functional as F
from paddle3d.apis import manager
from paddle3d.models.losses.utils import weight_reduce_loss
from paddle3d.utils.box import bbox_overlaps
class IOULoss(nn.Layer):
    """
    Intersetion Over Union (IoU) loss
    This code is based on https://github.com/aim-uofa/AdelaiDet/blob/master/adet/layers/iou_loss.py
    """

    def __init__(self, loc_loss_type='iou'):
        """
        Args:
            loc_loss_type: str, supports three IoU computations: 'iou', 'linear_iou', 'giou'.
        """
        super(IOULoss, self).__init__()
        self.loc_loss_type = loc_loss_type

    def forward(self, pred, target, weight=None):
        """
        Args:
            pred: Nx4 predicted bounding boxes
            target: Nx4 target bounding boxes
            weight: N loss weight for each instance
        """
        # Boxes are encoded as (left, top, right, bottom) distances from a
        # shared reference point, so widths/heights are sums of distances.
        p_l, p_t = pred[:, 0], pred[:, 1]
        p_r, p_b = pred[:, 2], pred[:, 3]
        t_l, t_t = target[:, 0], target[:, 1]
        t_r, t_b = target[:, 2], target[:, 3]
        target_area = (t_l + t_r) * (t_t + t_b)
        pred_area = (p_l + p_r) * (p_t + p_b)
        w_intersect = paddle.minimum(p_l, t_l) + paddle.minimum(p_r, t_r)
        h_intersect = paddle.minimum(p_b, t_b) + paddle.minimum(p_t, t_t)
        # Enclosing box extents for the GIoU penalty term.
        enclose_w = paddle.maximum(p_l, t_l) + paddle.maximum(p_r, t_r)
        enclose_h = paddle.maximum(p_b, t_b) + paddle.maximum(p_t, t_t)
        enclose_area = enclose_w * enclose_h
        area_intersect = w_intersect * h_intersect
        area_union = target_area + pred_area - area_intersect
        # +1 smoothing keeps the ratio finite for degenerate boxes.
        ious = (area_intersect + 1.0) / (area_union + 1.0)
        gious = ious - (enclose_area - area_union) / enclose_area
        if self.loc_loss_type == 'iou':
            losses = -paddle.log(ious)
        elif self.loc_loss_type == 'linear_iou':
            losses = 1 - ious
        elif self.loc_loss_type == 'giou':
            losses = 1 - gious
        else:
            raise NotImplementedError
        if weight is None:
            return losses.sum()
        return (losses * weight).sum()
def giou_loss(pred, target, weight, eps=1e-7, reduction='mean',
              avg_factor=None):
    r"""`Generalized Intersection over Union: A Metric and A Loss for Bounding
    Box Regression <https://arxiv.org/abs/1902.09630>`_.
    This function is modified from https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/losses/iou_loss.py#L102

    Args:
        pred (paddle.Tensor): Predicted bboxes of format (x1, y1, x2, y2),
            shape (n, 4).
        target (paddle.Tensor): Corresponding gt bboxes, shape (n, 4).
        eps (float): Eps to avoid log(0).

    Return:
        Tensor: Loss tensor.
    """
    # Aligned GIoU per box pair, mapped into a loss in [0, 2].
    overlaps = bbox_overlaps(pred, target, mode='giou', is_aligned=True,
                             eps=eps)
    return weight_reduce_loss(1 - overlaps, weight, reduction, avg_factor)
@manager.LOSSES.add_component
class GIoULoss(nn.Layer):
    '''
    This class is modified from https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/losses/iou_loss.py#L358
    '''

    def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):
        super(GIoULoss, self).__init__()
        self.eps = eps
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None,
                **kwargs):
        # Shortcut: an all-zero weight makes the loss exactly zero, but
        # keep pred in the expression so gradients can still flow.
        if weight is not None and not paddle.any(weight > 0):
            if pred.dim() == weight.dim() + 1:
                weight = weight.unsqueeze([1])
            return (pred * weight).sum()  # 0
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (reduction_override
                     if reduction_override else self.reduction)
        if weight is not None and weight.dim() > 1:
            # TODO: remove this in the future
            # giou_loss is per-box (n,); collapse (n, 4) weights to (n,).
            assert weight.shape == pred.shape
            weight = weight.mean(-1)
        return self.loss_weight * giou_loss(
            pred,
            target,
            weight,
            eps=self.eps,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/losses/smooth_l1_loss.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
def smooth_l1_loss(input, target, beta, reduction="none"):
    """
    Smooth L1 loss defined in the Fast R-CNN paper as:
                  | 0.5 * x ** 2 / beta   if abs(x) < beta
    smoothl1(x) = |
                  | abs(x) - 0.5 * beta   otherwise,
    where x = input - target.

    Args:
        input (Tensor): input tensor of any shape
        target (Tensor): target value tensor with the same shape as input
        beta (float): L1 to L2 change point.
            For beta values < 1e-5, L1 loss is computed.
        reduction: 'none' | 'mean' | 'sum'
                 'none': No reduction will be applied to the output.
                 'mean': The output will be averaged.
                 'sum': The output will be summed.

    Returns:
        The loss with the reduction option applied.
    This code is based on https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
    """
    beta = paddle.to_tensor(beta).cast(input.dtype)
    if beta < 1e-5:
        # Degenerate beta: quadratic zone vanishes, plain L1 (also avoids
        # dividing by ~0 below).
        loss = paddle.abs(input - target)
    else:
        n = paddle.abs(input - target)
        # Bug fix: the quadratic branch must be scaled by 1/beta (see the
        # formula above and the fvcore reference); without it the loss is
        # discontinuous at |x| == beta.
        loss = paddle.where(n < beta, 0.5 * n**2 / beta, n - 0.5 * beta)
    if reduction == "mean":
        loss = loss.mean()
    elif reduction == "sum":
        loss = loss.sum()
    return loss
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/layers/pyramid_pool.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn.functional as F
from paddle import nn
from paddle3d.models.layers import ConvBNReLU, SeparableConvBNReLU
class ASPPModule(nn.Layer):
    """
    Atrous Spatial Pyramid Pooling.
    Args:
        aspp_ratios (tuple): The dilation rate using in ASSP module.
        in_channels (int): The number of input channels.
        out_channels (int): The number of output channels.
        align_corners (bool): An argument of F.interpolate. It should be set to False when the output size of feature
            is even, e.g. 1024x512, otherwise it is True, e.g. 769x769.
        use_sep_conv (bool, optional): If using separable conv in ASPP module. Default: False.
        image_pooling (bool, optional): If augmented with image-level features. Default: False
    """
    def __init__(self,
                 aspp_ratios,
                 in_channels,
                 out_channels,
                 align_corners,
                 use_sep_conv=False,
                 image_pooling=False,
                 data_format='NCHW',
                 bias_attr=True):
        super().__init__()
        self.align_corners = align_corners
        self.data_format = data_format
        self.aspp_blocks = nn.LayerList()
        # One conv branch per dilation ratio: a 1x1 conv for ratio 1,
        # otherwise a 3x3 dilated conv with padding == ratio so the
        # spatial size is preserved.
        for ratio in aspp_ratios:
            if use_sep_conv and ratio > 1:
                conv_func = SeparableConvBNReLU
            else:
                conv_func = ConvBNReLU
            block = conv_func(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=1 if ratio == 1 else 3,
                dilation=ratio,
                padding=0 if ratio == 1 else ratio,
                data_format=data_format,
                bias_attr=bias_attr)
            self.aspp_blocks.append(block)
        out_size = len(self.aspp_blocks)
        if image_pooling:
            # Optional global-context branch: image-level average pooling
            # followed by a 1x1 conv.
            self.global_avg_pool = nn.Sequential(
                nn.AdaptiveAvgPool2D(
                    output_size=(1, 1), data_format=data_format),
                ConvBNReLU(
                    in_channels,
                    out_channels,
                    kernel_size=1,
                    bias_attr=bias_attr,
                    data_format=data_format))
            out_size += 1
        self.image_pooling = image_pooling
        # Projection conv that fuses the concatenated branch outputs back
        # to out_channels.
        self.conv_bn_relu = ConvBNReLU(
            in_channels=out_channels * out_size,
            out_channels=out_channels,
            kernel_size=1,
            data_format=data_format,
            bias_attr=bias_attr)
        self.dropout = nn.Dropout(p=0.1)  # drop rate
    def forward(self, x):
        """Run every ASPP branch on ``x``, concatenate, and fuse.
        Args:
            x (paddle.Tensor): Input feature map, layout per ``data_format``.
        Returns:
            paddle.Tensor: Fused feature map with ``out_channels`` channels.
        """
        outputs = []
        # Channel axis and spatial shape depend on the tensor layout.
        if self.data_format == 'NCHW':
            interpolate_shape = paddle.shape(x)[2:]
            axis = 1
        else:
            interpolate_shape = paddle.shape(x)[1:3]
            axis = -1
        for block in self.aspp_blocks:
            y = block(x)
            outputs.append(y)
        if self.image_pooling:
            # Upsample the 1x1 pooled features back to the input size
            # before concatenation.
            img_avg = self.global_avg_pool(x)
            img_avg = F.interpolate(
                img_avg,
                interpolate_shape,
                mode='bilinear',
                align_corners=self.align_corners,
                data_format=self.data_format)
            outputs.append(img_avg)
        x = paddle.concat(outputs, axis=axis)
        x = self.conv_bn_relu(x)
        x = self.dropout(x)
        return x
class PPModule(nn.Layer):
    """
    Pyramid pooling module originally in PSPNet.
    Args:
        in_channels (int): The number of intput channels to pyramid pooling module.
        out_channels (int): The number of output channels after pyramid pooling module.
        bin_sizes (tuple, optional): The out size of pooled feature maps. Default: (1, 2, 3, 6).
        dim_reduction (bool, optional): A bool value represents if reducing dimension after pooling. Default: True.
        align_corners (bool): An argument of F.interpolate. It should be set to False when the output size of feature
            is even, e.g. 1024x512, otherwise it is True, e.g. 769x769.
    """
    def __init__(self, in_channels, out_channels, bin_sizes, dim_reduction,
                 align_corners):
        super().__init__()
        self.bin_sizes = bin_sizes
        inter_channels = in_channels
        if dim_reduction:
            inter_channels = in_channels // len(bin_sizes)
        # we use dimension reduction after pooling mentioned in original implementation.
        self.stages = nn.LayerList([
            self._make_stage(in_channels, inter_channels, size)
            for size in bin_sizes
        ])
        # Fusion conv over the input concatenated with all pooled branches.
        self.conv_bn_relu2 = ConvBNReLU(
            in_channels=in_channels + inter_channels * len(bin_sizes),
            out_channels=out_channels,
            kernel_size=3,
            padding=1)
        self.align_corners = align_corners
    def _make_stage(self, in_channels, out_channels, size):
        """
        Create one pooling layer.
        In our implementation, we adopt the same dimension reduction as the original paper that might be
        slightly different with other implementations.
        After pooling, the channels are reduced to 1/len(bin_sizes) immediately, while some other implementations
        keep the channels to be same.
        Args:
            in_channels (int): The number of intput channels to pyramid pooling module.
            size (int): The out size of the pooled layer.
        Returns:
            conv (Tensor): A tensor after Pyramid Pooling Module.
        """
        prior = nn.AdaptiveAvgPool2D(output_size=(size, size))
        conv = ConvBNReLU(
            in_channels=in_channels, out_channels=out_channels, kernel_size=1)
        return nn.Sequential(prior, conv)
    def forward(self, input):
        """Apply each pyramid stage, upsample, concat with input, and fuse.
        Args:
            input (paddle.Tensor): NCHW feature map.
        Returns:
            paddle.Tensor: Fused NCHW feature map with ``out_channels``
            channels.
        """
        cat_layers = []
        for stage in self.stages:
            x = stage(input)
            # Upsample pooled features back to the input resolution.
            x = F.interpolate(
                x,
                paddle.shape(input)[2:],
                mode='bilinear',
                align_corners=self.align_corners)
            cat_layers.append(x)
        # Reverse branch order (largest bin first) and prepend the input.
        cat_layers = [input] + cat_layers[::-1]
        cat = paddle.concat(cat_layers, axis=1)
        out = self.conv_bn_relu2(cat)
        return out
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/layers/layer_libs.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle3d.models import layers
from paddle3d.ops import iou3d_nms_cuda
from .param_init import (constant_init, kaiming_normal_init,
kaiming_uniform_init, normal_init, reset_parameters,
uniform_init, xavier_uniform_init)
def sigmoid_hm(hm_features):
    """Apply a clipped sigmoid to heatmap logits.

    Args:
        hm_features (paddle.Tensor): raw heatmap logits

    Returns:
        paddle.Tensor: probabilities clipped to [1e-4, 1 - 1e-4] so that
        downstream log() terms stay finite.
    """
    return F.sigmoid(hm_features).clip(min=1e-4, max=1 - 1e-4)
def nms_hm(heat_map, kernel=3):
    """Max-pooling based NMS on a class heatmap.

    Args:
        heat_map (paddle.Tensor): pred cls heatmap
        kernel (int, optional): max_pool kernel size. Defaults to 3.

    Returns:
        paddle.Tensor: heatmap with non-peak positions zeroed out.
    """
    pad = (kernel - 1) // 2
    pooled = F.max_pool2d(
        heat_map, kernel_size=(kernel, kernel), stride=1, padding=pad)
    # Keep only positions that are their own local maximum.
    keep = (pooled == heat_map).astype("float32")
    return heat_map * keep
def select_topk(heat_map, K=100):
    """
    Args:
        heat_map: heat_map in [N, C, H, W]
        K: top k samples to be selected
        score: detection threshold
    Returns:
        dict with per-image top-K scores, flat spatial indices, class ids
        and x/y coordinates (each shaped [N, K]).
    """
    #batch, c, height, width = paddle.shape(heat_map)
    batch, c = heat_map.shape[:2]
    height = paddle.shape(heat_map)[2]
    width = paddle.shape(heat_map)[3]
    # First select topk scores in all classes and batchs
    # [N, C, H, W] -----> [N, C, H*W]
    heat_map = paddle.reshape(heat_map, (batch, c, -1))
    # Both in [N, C, K]
    topk_scores_all, topk_inds_all = paddle.topk(heat_map, K)
    # topk_inds_all = topk_inds_all % (height * width) # todo: this seems redudant
    # Decode flat spatial indices into (y, x) pixel coordinates.
    topk_ys = (topk_inds_all // width).astype("float32")
    topk_xs = (topk_inds_all % width).astype("float32")
    # Select topK examples across channel
    # [N, C, K] -----> [N, C*K]
    topk_scores_all = paddle.reshape(topk_scores_all, (batch, -1))
    # Both in [N, K]
    topk_scores, topk_inds = paddle.topk(topk_scores_all, K)
    # Indices address the flattened [C*K] axis, so integer division by K
    # recovers the channel (class) id.
    topk_clses = (topk_inds // K).astype("float32")
    # First expand it as 3 dimension
    # Re-gather the per-class results at the cross-class top-K positions.
    topk_inds_all = paddle.reshape(
        _gather_feat(paddle.reshape(topk_inds_all, (batch, -1, 1)), topk_inds),
        (batch, K))
    topk_ys = paddle.reshape(
        _gather_feat(paddle.reshape(topk_ys, (batch, -1, 1)), topk_inds),
        (batch, K))
    topk_xs = paddle.reshape(
        _gather_feat(paddle.reshape(topk_xs, (batch, -1, 1)), topk_inds),
        (batch, K))
    return dict({
        "topk_score": topk_scores,
        "topk_inds_all": topk_inds_all,
        "topk_clses": topk_clses,
        "topk_ys": topk_ys,
        "topk_xs": topk_xs
    })
def _gather_feat(feat, ind, mask=None):
    """Select specific indexs on featuremap

    Args:
        feat: all results in 3 dimensions
        ind: positive index
        mask: optional boolean mask applied to the gathered features

    Returns:
        gathered features; flattened to (-1, channel) when ``mask`` is given
    """
    channel = feat.shape[-1]
    ind = ind.unsqueeze(-1).expand((ind.shape[0], ind.shape[1], channel))
    feat = gather(feat, ind)
    if mask is not None:
        mask = mask.unsqueeze(2).expand_as(feat)
        feat = feat[mask]
        # Bug fix: `.view` is a torch API left over from the port; use
        # paddle's reshape (matching the sibling helper inside
        # _transpose_and_gather_feat, which uses reshape([-1, dim])).
        feat = feat.reshape([-1, channel])
    return feat
def _transpose_and_gather_feat(feat, ind):
    """Gather features at flat spatial indices from an NCHW feature map.
    Args:
        feat: (B, C, H, W) feature map.
        ind: (B, K) flat indices into the H*W axis.
    Returns:
        gathered features, channel-last per gathered position.
    """
    def _gather_feat(feat, ind, mask=None):
        # feat: (B, H*W, C); ind: (B, K)
        dim = feat.shape[2]
        ind = ind.unsqueeze(2)
        # Build matching batch indices so gather_nd can address
        # (batch, position) coordinate pairs.
        bs_ind = paddle.arange(ind.shape[0], dtype=ind.dtype)
        bs_ind = paddle.tile(bs_ind, repeat_times=[1, ind.shape[1], 1])
        bs_ind = bs_ind.transpose([2, 1, 0])
        ind = paddle.concat([bs_ind, ind], axis=-1)
        feat = feat.gather_nd(ind)
        feat = feat.reshape(feat.shape[0:2] + [dim])
        if mask is not None:
            mask = mask.unsqueeze(2).expand_as(feat)
            feat = feat[mask]
            feat = feat.reshape([-1, dim])
        return feat
    # (B, C, H, W) -> (B, H*W, C) so axis 1 lines up with the flat index.
    feat = feat.transpose([0, 2, 3, 1])
    feat = feat.reshape([feat.shape[0], -1, feat.shape[3]])
    feat = _gather_feat(feat, ind)
    return feat
def gather(feature: paddle.Tensor, ind: paddle.Tensor):
    """Simplified version of torch.gather. Always gather based on axis 1.

    Args:
        feature: all results in 3 dimensions, such as [n, h * w, c]
        ind: positive index in 3 dimensions, such as [n, k, 1]

    Returns:
        gather feature
    """
    batch_size, num_inds = ind.shape[0], ind.shape[1]
    # Build a per-row batch-index column [n, k, 1] so that gather_nd can
    # address (batch, position) pairs.
    batch_idx = paddle.arange(batch_size, dtype=ind.dtype)
    batch_idx = batch_idx.reshape([-1, 1, 1]).expand([batch_size, num_inds, 1])
    full_idx = paddle.concat([batch_idx, ind], axis=-1)
    return feature.gather_nd(full_idx)
def select_point_of_interest(batch, index, feature_maps):
    """Select POI (point of interest) features on a feature map.

    Args:
        batch: batch size
        index: in point format ([N, K, 2] as (x, y)) or flat-index format
        feature_maps: regression feature map in [N, C, H, W]

    Returns:
        Tensor: gathered features, [N, K, C]
    """
    width = feature_maps.shape[3]
    if len(index.shape) == 3:
        # Convert (x, y) point coordinates into flat row-major indices.
        index = index[:, :, 1] * width + index[:, :, 0]
    flat_index = paddle.reshape(index, (batch, -1)).unsqueeze(-1)
    # [N, C, H, W] -> [N, H, W, C] -> [N, H*W, C] so axis 1 is spatial.
    nhwc = paddle.transpose(feature_maps, (0, 2, 3, 1))
    flattened = paddle.reshape(nhwc, (batch, -1, nhwc.shape[-1]))
    # Select specific features based on the POIs.
    return gather(flattened, flat_index)
def rotate_nms_pcdet(boxes,
                     scores,
                     thresh,
                     pre_max_size=None,
                     post_max_size=None):
    """Rotated NMS using the pcdet CUDA kernel.

    :param boxes: (N, 5) [x, y, z, l, w, h, theta]
    :param scores: (N)
    :param thresh: IoU threshold for suppression
    :return: indices (into the original boxes) of the kept boxes
    """
    # transform back to pcdet's coordinate:
    # swap columns 3 and 4 (l/w) and keep the heading column last.
    index = paddle.to_tensor(
        [0, 1, 2, 4, 3, 5, int(boxes.shape[-1]) - 1], dtype='int32')
    boxes = paddle.index_select(boxes, index=index, axis=-1)
    #boxes = boxes[:, [0, 1, 2, 4, 3, 5, -1]]
    # Heading convention change for pcdet: negate and rotate by -pi/2.
    boxes[:, -1] = -boxes[:, -1] - np.pi / 2
    order = scores.argsort(0, descending=True)
    if pre_max_size is not None:
        # Keep only the highest-scoring boxes before running NMS.
        order = order[:pre_max_size]
    boxes = boxes[order]
    # TODO(luoqianhui): when order.shape is (1,),
    # boxes[order].shape is (7,) but supposed to be (1, 7),
    # so we add a reshape op
    boxes = boxes.reshape([-1, 7])
    # Custom CUDA rotated-IoU NMS; returns kept positions and their count.
    keep, num_out = iou3d_nms_cuda.nms_gpu(boxes, thresh)
    # Map kept positions back to original (score-sorted) box indices.
    selected = order[keep[:num_out]]
    if post_max_size is not None:
        selected = selected[:post_max_size]
    return selected
def inverse_sigmoid(x, eps=1e-5):
    """Inverse of the sigmoid function (logit), numerically stabilized.

    Args:
        x (Tensor): Input, clamped into [0, 1] before the transform.
        eps (float): Lower clamp applied to both numerator and denominator
            to avoid log(0) / division by zero.

    Returns:
        Tensor: log(x / (1 - x)) computed on the clamped input.
    """
    clamped = x.clip(min=0, max=1)
    numerator = clamped.clip(min=eps)
    denominator = (1 - clamped).clip(min=eps)
    return paddle.log(numerator / denominator)
def init_layer_use_config(param, cfg, *args, **kargs):
    # Initialize ``param`` with the initializer function named in
    # ``cfg['type']``; remaining cfg entries become keyword arguments.
    # No-op when the layer has no such parameter (e.g. bias-free conv).
    if param is None:
        return
    assert 'type' in cfg
    cfg_ = copy.deepcopy(cfg)
    init_type = cfg_.pop('type')
    # SECURITY NOTE(review): ``eval`` executes arbitrary code if the config
    # is attacker-controlled; config files must come from a trusted source.
    eval(init_type)(param, *args, **kargs, **cfg_)
def conv_layer_from_config(cfg, *args, **kargs):
    """Build a convolution layer from an optional config dict.

    ``cfg['type']`` names a class in ``paddle.nn`` (default ``Conv2D``);
    an optional ``cfg['init_cfg']`` selects a weight initializer, otherwise
    the PyTorch-style default reset is applied.
    """
    if cfg is None:
        conv_type, extra_kwargs, init_cfg = 'Conv2D', {}, None
    else:
        assert 'type' in cfg
        extra_kwargs = copy.deepcopy(cfg)
        conv_type = extra_kwargs.pop('type')
        init_cfg = extra_kwargs.pop('init_cfg', None)
    conv_layer = getattr(nn, conv_type)(*args, **kargs, **extra_kwargs)
    if init_cfg is None:
        # Default: kaiming-uniform weight reset plus fan-in bias bound.
        reset_parameters(conv_layer)
    else:
        init_layer_use_config(conv_layer.weight, init_cfg)
        if conv_layer.bias is not None:
            constant_init(conv_layer.bias, value=0)
    return conv_layer
def norm_layer_from_config(cfg, *args, **kargs):
    """Build a normalization layer named by ``cfg['type']`` from paddle.nn."""
    assert 'type' in cfg
    norm_kwargs = copy.deepcopy(cfg)
    layer_cls = getattr(nn, norm_kwargs.pop('type'))
    return layer_cls(*args, **kargs, **norm_kwargs)
def act_layer_from_config(cfg):
    """Build an activation layer named by ``cfg['type']`` from paddle.nn."""
    assert 'type' in cfg
    act_kwargs = copy.deepcopy(cfg)
    layer_cls = getattr(nn, act_kwargs.pop('type'))
    return layer_cls(**act_kwargs)
class ConvBNReLU(nn.Layer):
    """Conv2D -> BatchNorm2D -> ReLU block."""

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 padding='same',
                 **kwargs):
        super().__init__()
        self._conv = nn.Conv2D(
            in_channels, out_channels, kernel_size, padding=padding, **kwargs)
        # Forward the layout flag to batch norm so NHWC inputs stay NHWC.
        data_format = kwargs.get('data_format', 'NCHW')
        self._batch_norm = nn.BatchNorm2D(out_channels, data_format=data_format)
        self._relu = nn.ReLU()

    def forward(self, x):
        # conv -> bn -> relu, as the class name says.
        return self._relu(self._batch_norm(self._conv(x)))
class ConvBN(nn.Layer):
    """Conv2D -> BatchNorm2D block (no activation)."""

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 padding='same',
                 **kwargs):
        super().__init__()
        self._conv = nn.Conv2D(
            in_channels, out_channels, kernel_size, padding=padding, **kwargs)
        # Forward the layout flag to batch norm so NHWC inputs stay NHWC.
        data_format = kwargs.get('data_format', 'NCHW')
        self._batch_norm = nn.BatchNorm2D(out_channels, data_format=data_format)

    def forward(self, x):
        return self._batch_norm(self._conv(x))
class SeparableConvBNReLU(nn.Layer):
    """Depthwise-separable convolution: depthwise ConvBN + pointwise ConvBNReLU."""

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 padding='same',
                 pointwise_bias=None,
                 **kwargs):
        super().__init__()
        # Depthwise stage: one filter per input channel (groups == channels).
        self.depthwise_conv = ConvBN(
            in_channels,
            out_channels=in_channels,
            kernel_size=kernel_size,
            padding=padding,
            groups=in_channels,
            **kwargs)
        data_format = kwargs.get('data_format', 'NCHW')
        # Pointwise (1x1) stage mixes channels. Attribute name keeps the
        # historical spelling for checkpoint/state-dict compatibility.
        self.piontwise_conv = ConvBNReLU(
            in_channels,
            out_channels,
            kernel_size=1,
            groups=1,
            data_format=data_format,
            bias_attr=pointwise_bias)

    def forward(self, x):
        depthwise_out = self.depthwise_conv(x)
        return self.piontwise_conv(depthwise_out)
class ConvNormActLayer(nn.Layer):
    """Conv -> optional Norm -> optional Act block built from config dicts.

    Args:
        in_channels (int): Input channels of the convolution.
        out_channels (int): Output channels of the convolution.
        kernel_size (int|tuple): Convolution kernel size.
        stride (int|tuple): Convolution stride. Default: 1.
        padding (int|tuple): Convolution padding. Default: 0.
        dilation (int|tuple): Convolution dilation. Default: 1.
        groups (int): Convolution groups. Default: 1.
        bias (bool): Whether the convolution has a bias term. Default: True.
        conv_cfg (dict, optional): Config selecting the conv layer type.
        norm_cfg (dict, optional): Config for the norm layer; skipped if None.
        act_cfg (dict, optional): Config for the activation; default ReLU.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True,
                 conv_cfg=None,
                 norm_cfg=None,
                 act_cfg=dict(type='ReLU')):
        super(ConvNormActLayer, self).__init__()
        # BUGFIX: in paddle, bias_attr=False disables the bias while
        # bias_attr=None creates a default one. The previous expression
        # ``bias if bias else None`` mapped bias=False to None, silently
        # keeping the bias. bias=True still yields the default bias param.
        bias_attr = None if bias else False
        self.conv = conv_layer_from_config(
            conv_cfg,
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            dilation,
            groups,
            bias_attr=bias_attr)
        self.norm_cfg = norm_cfg
        if self.norm_cfg is not None:
            self.norm = norm_layer_from_config(norm_cfg)
        self.act_cfg = act_cfg
        if self.act_cfg is not None:
            self.act = act_layer_from_config(act_cfg)

    def forward(self, x):
        # Apply conv, then the optional norm/act stages in order.
        x = self.conv(x)
        if self.norm_cfg is not None:
            x = self.norm(x)
        if self.act_cfg is not None:
            x = self.act(x)
        return x
class NormedLinear(nn.Linear):
    """Normalized Linear Layer.

    Args:
        tempeature (float, optional): Tempeature term. Default to 20.
        power (int, optional): Power term. Default to 1.0.
        eps (float, optional): The minimal value of divisor to
            keep numerical stability. Default to 1e-6.
    """

    def __init__(self, *args, tempearture=20, power=1.0, eps=1e-6, **kwargs):
        super(NormedLinear, self).__init__(*args, **kwargs)
        self.tempearture = tempearture
        self.power = power
        self.eps = eps
        self.init_weights()

    def init_weights(self):
        normal_init(self.weight, mean=0, std=0.01)
        if self.bias is not None:
            # BUGFIX: this module's constant_init has signature
            # ``constant_init(param, **kwargs)``, so the old positional call
            # ``constant_init(self.bias, 0)`` raised TypeError.
            constant_init(self.bias, value=0)

    def forward(self, x):
        # BUGFIX: paddle's Tensor.norm takes ``axis=``; the torch-style
        # ``dim=`` keyword is invalid in paddle.
        # NOTE(review): paddle nn.Linear stores weight as
        # [in_features, out_features], so axis=0 normalizes each output
        # column (the equivalent of mmdet's dim=1 on [out, in] weights) —
        # confirm against training results.
        weight_ = self.weight / (
            self.weight.norm(axis=0, keepdim=True).pow(self.power) + self.eps)
        x_ = x / (x.norm(axis=1, keepdim=True).pow(self.power) + self.eps)
        x_ = x_ * self.tempearture
        return F.linear(x_, weight_, self.bias)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/layers/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .layer_norm import *
from .layer_libs import *
from .param_init import *
from .petr_transformer import (PETRDNTransformer, PETRTransformer,
PETRTransformerDecoder,
PETRTransformerDecoderLayer)
from .positional_encoding import SinePositionalEncoding3D
from .pyramid_pool import *
from .normalization import *
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/layers/param_init.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import warnings
import numpy as np
import paddle
import paddle.nn as nn
from paddle3d.utils.logger import logger
def constant_init(param, **kwargs):
    """
    Initialize the `param` with constants.

    Args:
        param (Tensor): Tensor that needs to be initialized.

    Examples:
        from paddle3d.models.layers import param_init
        import paddle.nn as nn

        linear = nn.Linear(2, 4)
        param_init.constant_init(linear.weight, value=2.0)
        print(linear.weight.numpy())
        # result is [[2. 2. 2. 2.], [2. 2. 2. 2.]]
    """
    # Build the constant initializer and apply it to the parameter in place.
    nn.initializer.Constant(**kwargs)(param, param.block)
def normal_init(param, **kwargs):
    """
    Initialize the `param` with a Normal distribution.

    Args:
        param (Tensor): Tensor that needs to be initialized.

    Examples:
        from paddle3d.models.layers import param_init
        import paddle.nn as nn

        linear = nn.Linear(2, 4)
        param_init.normal_init(linear.weight, loc=0.0, scale=1.0)
    """
    # Build the normal initializer and apply it to the parameter in place.
    nn.initializer.Normal(**kwargs)(param, param.block)
def uniform_init(param, a, b):
    """
    Modify tensor in place with values drawn from U(a, b).

    Args:
        param (paddle.Tensor): paddle Tensor
        a (float|int): min value.
        b (float|int): max value.

    Return:
        tensor
    """
    # Delegate to the shared no-grad in-place uniform filler.
    return _no_grad_uniform_(param, a, b)
def xavier_normal_init(tensor, gain=1, reverse=False):
    """Fill ``tensor`` in place with Xavier/Glorot normal values.

    std = gain * sqrt(2 / (fan_in + fan_out)); ``reverse`` flips the
    fan computation for [in, out]-ordered weights (e.g. paddle Linear).
    """
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor, reverse=reverse)
    xavier_std = gain * math.sqrt(2.0 / float(fan_in + fan_out))
    return _no_grad_normal_(tensor, 0., xavier_std)
def kaiming_normal_init(tensor,
                        a=0,
                        mode='fan_in',
                        nonlinearity='leaky_relu',
                        reverse=False):
    """
    Modify tensor in place using the kaiming-normal method.

    Args:
        tensor (paddle.Tensor): paddle Tensor
        a (float): negative slope used by 'leaky_relu' gain. Default 0.
        mode (str): ['fan_in', 'fan_out'], 'fan_in' default
        nonlinearity (str): nonlinearity method name
        reverse (bool): tensor data format order, False by default as
            [fout, fin, ...].

    Return:
        tensor
    """
    if 0 in tensor.shape:
        logger.warning("Initializing zero-element tensors is a no-op")
        return tensor
    fan = _calculate_correct_fan(tensor, mode, reverse)
    gain = _calculate_gain(nonlinearity, a)
    std = gain / math.sqrt(fan)
    with paddle.no_grad():
        initializer = paddle.nn.initializer.Normal(mean=0, std=std)
        initializer(tensor)
    # BUGFIX: the docstring (and the zero-element early exit) promise the
    # tensor is returned, but this path previously returned None.
    return tensor
def kaiming_uniform_init(param,
                         a=0,
                         mode='fan_in',
                         nonlinearity='leaky_relu',
                         reverse=False):
    """
    Modify tensor in place using the kaiming-uniform method.

    Args:
        param (paddle.Tensor): paddle Tensor
        a (float): negative slope used by 'leaky_relu' gain. Default 0.
        mode (str): ['fan_in', 'fan_out'], 'fan_in' default
        nonlinearity (str): nonlinearity method name
        reverse (bool): tensor data format order, False by default as
            [fout, fin, ...].

    Return:
        tensor
    """
    fan = _calculate_correct_fan(param, mode, reverse)
    gain = _calculate_gain(nonlinearity, a)
    # Uniform bound: sqrt(3) * std gives U(-k, k) with the same variance
    # as N(0, std).
    bound = math.sqrt(3.0) * (gain / math.sqrt(fan))
    return _no_grad_uniform_(param, -bound, bound)
def xavier_uniform_init(param, gain=1., reverse=False):
    """
    Modify tensor in place using the xavier-uniform method.

    Args:
        param (paddle.Tensor): paddle Tensor
        gain (float): a factor applied to std. Default: 1.

    Return:
        tensor
    """
    fan_in, fan_out = _calculate_fan_in_and_fan_out(param, reverse=reverse)
    xavier_std = gain * math.sqrt(2.0 / float(fan_in + fan_out))
    # Uniform bound derived from the target standard deviation.
    bound = math.sqrt(3.0) * xavier_std
    return _no_grad_uniform_(param, -bound, bound)
def _calculate_fan_in_and_fan_out(tensor, reverse=False):
"""
Calculate (fan_in, _fan_out) for tensor
Args:
tensor (Tensor): paddle.Tensor
reverse (bool: False): tensor data format order, False by default as [fout, fin, ...].
e.g. : conv.weight [cout, cin, kh, kw] is False; linear.weight [cin, cout] is True
Return:
Tuple[fan_in, fan_out]
"""
if tensor.ndim < 2:
raise ValueError(
"Fan in and fan out can not be computed for tensor with fewer than 2 dimensions"
)
if reverse:
num_input_fmaps, num_output_fmaps = tensor.shape[0], tensor.shape[1]
else:
num_input_fmaps, num_output_fmaps = tensor.shape[1], tensor.shape[0]
receptive_field_size = 1
if tensor.ndim > 2:
receptive_field_size = np.prod(tensor.shape[2:])
fan_in = num_input_fmaps * receptive_field_size
fan_out = num_output_fmaps * receptive_field_size
return fan_in, fan_out
# reference: https://pytorch.org/docs/stable/_modules/torch/nn/init.html
def _calculate_correct_fan(tensor, mode, reverse=False):
    """Return fan_in or fan_out of ``tensor`` according to ``mode``."""
    mode = mode.lower()
    valid_modes = ['fan_in', 'fan_out']
    if mode not in valid_modes:
        raise ValueError("Mode {} not supported, please use one of {}".format(
            mode, valid_modes))
    fans = dict(zip(valid_modes, _calculate_fan_in_and_fan_out(tensor,
                                                               reverse)))
    return fans[mode]
def _calculate_gain(nonlinearity, param=None):
linear_fns = [
'linear', 'conv1d', 'conv2d', 'conv3d', 'conv_transpose1d',
'conv_transpose2d', 'conv_transpose3d'
]
if nonlinearity in linear_fns or nonlinearity == 'sigmoid':
return 1
elif nonlinearity == 'tanh':
return 5.0 / 3
elif nonlinearity == 'relu':
return math.sqrt(2.0)
elif nonlinearity == 'leaky_relu':
if param is None:
negative_slope = 0.01
elif not isinstance(param, bool) and isinstance(
param, int) or isinstance(param, float):
# True/False are instances of int, hence check above
negative_slope = param
else:
raise ValueError(
"negative_slope {} not a valid number".format(param))
return math.sqrt(2.0 / (1 + negative_slope**2))
elif nonlinearity == 'selu':
return 3.0 / 4
else:
raise ValueError("Unsupported nonlinearity {}".format(nonlinearity))
def _no_grad_uniform_(tensor, a, b):
    """In-place fill of ``tensor`` with U(a, b); returns the tensor."""
    with paddle.no_grad():
        new_values = paddle.uniform(
            shape=tensor.shape, dtype=tensor.dtype, min=a, max=b)
        tensor.set_value(new_values)
    return tensor
def _no_grad_normal_(tensor, mean, std):
    """In-place fill of ``tensor`` with N(mean, std); returns the tensor."""
    with paddle.no_grad():
        new_values = paddle.normal(mean, std, shape=tensor.shape)
        tensor.set_value(new_values)
    return tensor
def reset_parameters(m, reverse=False):
    """PyTorch-style default (kaiming-uniform) reset for a layer's weight/bias.

    Skips layers without a trainable 2D+ weight. For nn.Linear the fan
    computation is reversed because paddle stores its weight as [in, out].
    """
    if not hasattr(m, 'weight') or m.weight.ndim < 2:
        return
    if isinstance(m, nn.Linear):
        reverse = True
    kaiming_uniform_init(m.weight, a=math.sqrt(5), reverse=reverse)
    if m.bias is not None:
        # Bias bound follows torch's default: U(-1/sqrt(fan_in), 1/sqrt(fan_in)).
        fan_in, _ = _calculate_fan_in_and_fan_out(m.weight, reverse=reverse)
        bound = 1 / math.sqrt(fan_in)
        _no_grad_uniform_(m.bias, -bound, bound)
def init_bias_by_prob(prob):
    """Return the bias value whose sigmoid equals ``prob`` (focal-loss init)."""
    return float(-np.log((1 - prob) / prob))
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/layers/petr_transformer.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
# Copyright (c) 2022 megvii-model. All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from DETR3D (https://github.com/WangYueFt/detr3d)
# Copyright (c) 2021 Wang, Yue
# ------------------------------------------------------------------------
# Modified from mmdetection3d (https://github.com/open-mmlab/mmdetection3d)
# Copyright (c) OpenMMLab. All rights reserved.
# ------------------------------------------------------------------------
import copy
import math
import warnings
from typing import Sequence
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.distributed.fleet.utils import recompute
from paddle3d.apis import manager
from paddle3d.models.layers.param_init import (constant_init,
xavier_uniform_init)
from paddle3d.models.voxel_encoders.pillar_encoder import build_norm_layer
from .transformer_layers import (FFN, BaseTransformerLayer, MultiHeadAttention,
TransformerLayerSequence)
@manager.MODELS.add_component
class PETRTransformer(nn.Layer):
    """Implements the DETR transformer.
    Following the official DETR implementation.
    See `paper: End-to-End Object Detection with Transformers
    <https://arxiv.org/pdf/2005.12872>`_ for details.
    """

    def __init__(self,
                 decoder_embed_dims,
                 encoder=None,
                 decoder=None,
                 init_cfg=None,
                 cross=False):
        super(PETRTransformer, self).__init__()
        # NOTE(review): ``encoder`` is stored but not used by forward();
        # PETR feeds the flattened features straight to the decoder.
        self.encoder = encoder
        self.decoder = decoder
        self.embed_dims = decoder_embed_dims  # self.decoder.embed_dims
        self.init_cfg = init_cfg
        self.cross = cross

    def init_weights(self):
        # follow the official DETR to init parameters
        for m in self.sublayers():
            if hasattr(m, 'weight') and m.weight.dim() > 1:
                reverse = False
                if isinstance(m, nn.Linear):
                    # paddle Linear stores weight as [in, out]; flip the
                    # fan computation accordingly.
                    reverse = True
                xavier_uniform_init(m.weight, reverse=reverse)
            if hasattr(m, 'bias') and m.bias is not None:
                constant_init(m.bias, value=0)

    def forward(self, x, mask, query_embed, pos_embed, reg_branch=None):
        """Forward function for `Transformer`.
        Args:
            x (Tensor): Input query with shape [bs, c, h, w] where
                c = embed_dims.
            mask (Tensor): The key_padding_mask used for encoder and decoder,
                with shape [bs, h, w].
            query_embed (Tensor): The query embedding for decoder, with shape
                [num_query, c].
            pos_embed (Tensor): The positional encoding for encoder and
                decoder, with the same shape as `x`.
        Returns:
            tuple[Tensor]: results of decoder containing the following tensor.
                - out_dec: Output from decoder. If return_intermediate_dec \
                      is True output has shape [num_dec_layers, bs,
                      num_query, embed_dims], else has shape [1, bs, \
                      num_query, embed_dims].
                - memory: Output results from encoder, with shape \
                      [bs, embed_dims, h, w].
        """
        bs, n, c, h, w = x.shape
        # [bs, n, c, h, w] -> [n*h*w, bs, c]
        memory = x.transpose([1, 3, 4, 0, 2]).reshape([-1, bs, c])
        # [bs, n, c, h, w] -> [n*h*w, bs, c]
        pos_embed = pos_embed.transpose([1, 3, 4, 0, 2]).reshape([-1, bs, c])
        # [num_query, dim] -> [num_query, bs, dim]
        query_embed = query_embed.unsqueeze(1).tile([1, bs, 1])
        # [bs, n, h, w] -> [bs, n*h*w]
        mask = mask.reshape([bs, -1])
        # Decoder queries start from zeros; learned content comes from
        # query_pos plus attention over the memory.
        target = paddle.zeros_like(query_embed)
        out_dec = self.decoder(
            query=target,
            key=memory,
            value=memory,
            key_pos=pos_embed,
            query_pos=query_embed,
            key_padding_mask=mask,
            reg_branch=reg_branch,
        )
        # [num_layers, num_query, bs, dim] -> [num_layers, bs, num_query, dim]
        out_dec = out_dec.transpose([0, 2, 1, 3])
        # Restore memory to [bs, n, c, h, w] for downstream use.
        memory = memory.reshape([n, h, w, bs, c]).transpose([3, 0, 4, 1, 2])
        return out_dec, memory
@manager.MODELS.add_component
class PETRDNTransformer(nn.Layer):
    """Implements the DETR transformer.
    Following the official DETR implementation, this module copy-paste
    """

    def __init__(self, embed_dims, encoder=None, decoder=None, cross=False):
        super(PETRDNTransformer, self).__init__()
        # NOTE(review): ``encoder`` is stored but not used by forward().
        self.encoder = encoder
        self.decoder = decoder
        self.embed_dims = embed_dims
        self.cross = cross

    def init_weights(self):
        # follow the official DETR to init parameters
        for m in self.sublayers():
            if hasattr(m, 'weight') and m.weight.dim() > 1:
                reverse = False
                if isinstance(m, nn.Linear):
                    # paddle Linear stores weight as [in, out].
                    reverse = True
                xavier_uniform_init(m.weight, reverse=reverse)

    def forward(self,
                x,
                mask,
                query_embed,
                pos_embed,
                attn_masks=None,
                reg_branch=None):
        """Forward function for `Transformer`.

        Args:
            x (Tensor): Input features, [bs, n, c, h, w].
            mask (Tensor): Key padding mask, [bs, n, h, w].
            query_embed (Tensor): Query embeddings (denoising + matching),
                [bs, num_query, dim].
            pos_embed (Tensor): Positional encoding, same shape as ``x``.
            attn_masks (Tensor, optional): Self-attention mask separating
                denoising groups from matching queries.
            reg_branch (optional): Regression branch forwarded to the decoder.

        Returns:
            tuple[Tensor]: decoder outputs [num_layers, bs, num_query, dim]
            and memory reshaped back to [bs, n, c, h, w].
        """
        bs, n, c, h, w = x.shape
        memory = x.transpose([1, 3, 4, 0, 2]).reshape(
            [-1, bs, c])  # [bs, n, c, h, w] -> [n*h*w, bs, c]
        pos_embed = pos_embed.transpose([1, 3, 4, 0, 2]).reshape(
            [-1, bs, c])  # [bs, n, c, h, w] -> [n*h*w, bs, c]
        query_embed = query_embed.transpose(
            [1, 0, 2])  # [num_query, dim] -> [num_query, bs, dim]
        mask = mask.reshape([bs, -1])  # [bs, n, h, w] -> [bs, n*h*w]
        target = paddle.zeros_like(query_embed)
        # out_dec: [num_layers, num_query, bs, dim]
        out_dec = self.decoder(
            query=target,
            key=memory,
            value=memory,
            key_pos=pos_embed,
            query_pos=query_embed,
            key_padding_mask=mask,
            attn_masks=[attn_masks, None],
            reg_branch=reg_branch,
        )
        # -> [num_layers, bs, num_query, dim]
        out_dec = out_dec.transpose([0, 2, 1, 3])
        memory = memory.reshape([n, h, w, bs, c]).transpose([3, 0, 4, 1, 2])
        return out_dec, memory
@manager.MODELS.add_component
class PETRTransformerDecoderLayer(BaseTransformerLayer):
    """Implements decoder layer in DETR transformer.

    A thin wrapper over BaseTransformerLayer that validates PETR's 6-op
    operation order and optionally wraps the forward pass in recompute
    (gradient checkpointing) during training to save activation memory.
    """

    def __init__(self,
                 attns,
                 feedforward_channels,
                 ffn_dropout=0.0,
                 operation_order=None,
                 act_cfg=dict(type='ReLU', inplace=True),
                 norm_cfg=dict(type='LayerNorm'),
                 ffn_num_fcs=2,
                 use_recompute=True,
                 **kwargs):
        super(PETRTransformerDecoderLayer, self).__init__(
            attns=attns,
            feedforward_channels=feedforward_channels,
            ffn_dropout=ffn_dropout,
            operation_order=operation_order,
            act_cfg=act_cfg,
            norm_cfg=norm_cfg,
            ffn_num_fcs=ffn_num_fcs,
            **kwargs)
        # PETR decoder layers must use exactly six operations drawn from
        # {self_attn, norm, cross_attn, ffn}.
        assert len(operation_order) == 6
        assert set(operation_order) == set(
            ['self_attn', 'norm', 'cross_attn', 'ffn'])
        self.use_recompute = use_recompute

    def _forward(
            self,
            query,
            key=None,
            value=None,
            query_pos=None,
            key_pos=None,
            attn_masks=None,
            query_key_padding_mask=None,
            key_padding_mask=None,
    ):
        """Forward function for `TransformerCoder`.

        Returns:
            Tensor: forwarded results with shape [num_query, bs, embed_dims].
        """
        # Delegate the actual attention/FFN sequencing to the base class.
        x = super(PETRTransformerDecoderLayer, self).forward(
            query,
            key=key,
            value=value,
            query_pos=query_pos,
            key_pos=key_pos,
            attn_masks=attn_masks,
            query_key_padding_mask=query_key_padding_mask,
            key_padding_mask=key_padding_mask,
        )
        return x

    def forward(self,
                query,
                key=None,
                value=None,
                query_pos=None,
                key_pos=None,
                attn_masks=None,
                query_key_padding_mask=None,
                key_padding_mask=None,
                **kwargs):
        """Forward function for `TransformerCoder`.

        Returns:
            Tensor: forwarded results with shape [num_query, bs, embed_dims].
        """
        if self.use_recompute and self.training:
            # Trade compute for memory: recompute activations on backward.
            x = recompute(
                self._forward,
                query,
                key,
                value,
                query_pos,
                key_pos,
                attn_masks,
                query_key_padding_mask,
                key_padding_mask,
            )
        else:
            x = self._forward(
                query,
                key=key,
                value=value,
                query_pos=query_pos,
                key_pos=key_pos,
                attn_masks=attn_masks,
                query_key_padding_mask=query_key_padding_mask,
                key_padding_mask=key_padding_mask)
        return x
@manager.MODELS.add_component
class PETRMultiheadAttention(nn.Layer):
    """A wrapper for ``paddle.nn.MultiheadAttention``.

    This module implements MultiheadAttention with identity connection,
    and positional encoding is also passed as input.
    """

    def __init__(self,
                 embed_dims,
                 num_heads,
                 attn_drop=0.,
                 proj_drop=0.,
                 drop_prob=0.,
                 init_cfg=None,
                 batch_first=True,
                 **kwargs):
        super(PETRMultiheadAttention, self).__init__()
        self.embed_dims = embed_dims
        self.num_heads = num_heads
        # NOTE(review): ``batch_first`` is stored but the transposes in
        # ``forward`` are applied unconditionally — confirm intent.
        self.batch_first = batch_first
        self.attn = nn.MultiHeadAttention(embed_dims, num_heads, attn_drop,
                                          **kwargs)
        self.proj_drop = nn.Dropout(proj_drop)
        self.dropout = nn.Dropout(
            drop_prob) if drop_prob > 0. else nn.Identity()

    def forward(self,
                query,
                key=None,
                value=None,
                identity=None,
                query_pos=None,
                key_pos=None,
                attn_mask=None,
                key_padding_mask=None,
                **kwargs):
        """Forward function for `MultiheadAttention`.

        Inputs are [num_query, bs, embed_dims]; positional encodings are
        added to query/key before attention and the (pre-positional) query
        is used as the identity/residual connection.
        """
        if key is None:
            key = query
        if value is None:
            value = key
        if identity is None:
            identity = query
        if key_pos is None:
            if query_pos is not None:
                # use query_pos if key_pos is not available
                if query_pos.shape == key.shape:
                    key_pos = query_pos
                else:
                    warnings.warn(f'position encoding of key is'
                                  f'missing in {self.__class__.__name__}.')
        if query_pos is not None:
            query = query + query_pos
        if key_pos is not None:
            key = key + key_pos
        # [num_query, bs, dims] -> [bs, num_query, dims] for paddle's
        # batch-first MultiHeadAttention. (CLEANUP: removed a dead
        # ``if True:`` guard that unconditionally wrapped these lines.)
        query = query.transpose([1, 0, 2])
        key = key.transpose([1, 0, 2])
        value = value.transpose([1, 0, 2])
        if key_padding_mask is None:
            if attn_mask is not None:
                # torch-style masks mark disallowed positions; paddle masks
                # mark allowed ones — invert.
                attn_mask = ~attn_mask
            out = self.attn(
                query=query,
                key=key,
                value=value,
                attn_mask=attn_mask,
            )
        else:
            if attn_mask is None:
                # Convert the key padding mask into a broadcastable
                # attention mask: [bs, 1, 1, num_key].
                attn_mask = ~key_padding_mask
                attn_mask = attn_mask.unsqueeze(1).unsqueeze(1)
                out = self.attn(
                    query=query,
                    key=key,
                    value=value,
                    attn_mask=attn_mask,
                )
            else:
                raise NotImplementedError('key_padding_mask is not None')
        # Back to [num_query, bs, dims]. (CLEANUP: removed a second dead
        # ``if True:`` guard.)
        out = out.transpose([1, 0, 2])
        return identity + self.dropout(self.proj_drop(out))
class PETRTransformerEncoder(TransformerLayerSequence):
    """TransformerEncoder of DETR.

    Args:
        post_norm (nn.Layer): normalization layer. Default:
            `LN`. Only used when `self.pre_norm` is `True`
    """

    def __init__(self, *args, post_norm=None, **kwargs):
        super(PETRTransformerEncoder, self).__init__(*args, **kwargs)
        if post_norm is not None:
            self.post_norm = post_norm
        else:
            # A pre-norm encoder requires a final norm; fail loudly if it
            # was configured without one.
            # NOTE(review): the concatenated f-strings render with no space
            # after the comma — awkward message text, but harmless.
            assert not self.pre_norm, f'Use prenorm in ' \
                f'{self.__class__.__name__},' \
                f'Please specify post_norm_cfg'
            self.post_norm = None

    def forward(self, *args, **kwargs):
        """Forward function for `TransformerCoder`.

        Returns:
            Tensor: forwarded results with shape [num_query, bs, embed_dims].
        """
        x = super(PETRTransformerEncoder, self).forward(*args, **kwargs)
        if self.post_norm is not None:
            # Final normalization after the layer stack (post-norm variant).
            x = self.post_norm(x)
        return x
@manager.MODELS.add_component
class PETRTransformerDecoder(TransformerLayerSequence):
    """Implements the decoder in DETR transformer.

    When ``return_intermediate`` is True, the output of every decoder layer
    (post-normalized when a post-norm exists) is stacked along axis 0 for
    per-layer auxiliary losses.
    """

    def __init__(self,
                 *args,
                 post_norm_cfg=dict(type='LN'),
                 return_intermediate=False,
                 **kwargs):
        super(PETRTransformerDecoder, self).__init__(*args, **kwargs)
        self.return_intermediate = return_intermediate
        if post_norm_cfg is not None:
            # TODO hard code: post_norm_cfg is ignored and LayerNorm is
            # always used.
            self.post_norm = nn.LayerNorm(self.embed_dims)
        else:
            self.post_norm = None

    def forward(self, query, *args, **kwargs):
        """Forward function for `TransformerDecoder`.

        Returns [1, num_query, bs, dims] when intermediates are not
        requested, otherwise [num_layers, num_query, bs, dims].
        """
        if not self.return_intermediate:
            x = super().forward(query, *args, **kwargs)
            # NOTE(review): truthiness check (``if self.post_norm:``) — a
            # layer instance is always truthy, so this equals the explicit
            # ``is not None`` test used below.
            if self.post_norm:
                # [None] adds the leading layer axis.
                x = self.post_norm(x)[None]
            return x
        intermediate = []
        for layer in self.layers:
            query = layer(query, *args, **kwargs)
            if self.return_intermediate:
                if self.post_norm is not None:
                    intermediate.append(self.post_norm(query))
                else:
                    intermediate.append(query)
        return paddle.stack(intermediate)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/layers/positional_encoding.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
# Copyright (c) 2022 megvii-model. All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from mmdetection (https://github.com/open-mmlab/mmdetection)
# Copyright (c) OpenMMLab. All rights reserved.
# ------------------------------------------------------------------------
import math
import numpy as np
import paddle
import paddle.nn as nn
from paddle3d.apis import manager
@manager.MIDDLE_ENCODERS.add_component
class SinePositionalEncoding3D(nn.Layer):
    """Position encoding with sine and cosine functions.
    See `End-to-End Object Detection with Transformers
    <https://arxiv.org/pdf/2005.12872>`_ for details.
    Args:
        num_feats (int): The feature dimension for each position
            along x-axis or y-axis. Note the final returned dimension
            for each position is 2 times of this value.
        temperature (int, optional): The temperature used for scaling
            the position embedding. Defaults to 10000.
        normalize (bool, optional): Whether to normalize the position
            embedding. Defaults to False.
        scale (float, optional): A scale factor that scales the position
            embedding. The scale will be used only when `normalize` is True.
            Defaults to 2*pi.
        eps (float, optional): A value added to the denominator for
            numerical stability. Defaults to 1e-6.
        offset (float): offset add to embed when do the normalization.
            Defaults to 0.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
    """

    def __init__(self,
                 num_feats,
                 temperature=10000,
                 normalize=False,
                 scale=2 * math.pi,
                 eps=1e-6,
                 offset=0.,
                 init_cfg=None):
        super(SinePositionalEncoding3D, self).__init__()
        if normalize:
            assert isinstance(scale, (float, int)), 'when normalize is set,' \
                'scale should be provided and in float or int type, ' \
                f'found {type(scale)}'
        self.num_feats = num_feats
        self.temperature = temperature
        self.normalize = normalize
        self.scale = scale
        self.eps = eps
        self.offset = offset

    def forward(self, mask):
        """Forward function for `SinePositionalEncoding`.
        Args:
            mask (Tensor): ByteTensor mask. Non-zero values representing
                ignored positions, while zero values means valid positions
                for this image. Shape [bs, n, h, w] (camera axis included).
        Returns:
            pos (Tensor): Returned position embedding with shape
                [bs, n, num_feats*3, h, w].
        """
        # For convenience of exporting to ONNX, it's required to convert
        # `masks` from bool to int.
        mask = mask.astype('int32')
        # logical_not
        not_mask = 1 - mask
        # Cumulative sums over camera (axis 1), height (2) and width (3)
        # give each valid cell's 1-based position along that axis.
        n_embed = not_mask.cumsum(1, dtype='float32')
        y_embed = not_mask.cumsum(2, dtype='float32')
        x_embed = not_mask.cumsum(3, dtype='float32')
        if self.normalize:
            # Scale positions to [0, scale] using the last (max) cumsum value.
            n_embed = (n_embed + self.offset) / \
                (n_embed[:, -1:, :, :] + self.eps) * self.scale
            y_embed = (y_embed + self.offset) / \
                (y_embed[:, :, -1:, :] + self.eps) * self.scale
            x_embed = (x_embed + self.offset) / \
                (x_embed[:, :, :, -1:] + self.eps) * self.scale
        # Geometric frequency ladder: temperature^(2*floor(i/2)/num_feats).
        dim_t = paddle.arange(self.num_feats, dtype='int32')
        dim_t = self.temperature**(2 * (dim_t // 2) / self.num_feats)
        pos_n = n_embed[:, :, :, :, None] / dim_t
        pos_x = x_embed[:, :, :, :, None] / dim_t
        pos_y = y_embed[:, :, :, :, None] / dim_t
        B, N, H, W = mask.shape
        # Interleave sin on even channels and cos on odd channels.
        pos_n = paddle.stack(
            (pos_n[:, :, :, :, 0::2].sin(), pos_n[:, :, :, :, 1::2].cos()),
            axis=4).reshape([B, N, H, W, self.num_feats])
        pos_x = paddle.stack(
            (pos_x[:, :, :, :, 0::2].sin(), pos_x[:, :, :, :, 1::2].cos()),
            axis=4).reshape([B, N, H, W, self.num_feats])
        pos_y = paddle.stack(
            (pos_y[:, :, :, :, 0::2].sin(), pos_y[:, :, :, :, 1::2].cos()),
            axis=4).reshape([B, N, H, W, self.num_feats])
        # Concatenate (n, y, x) encodings and move channels before H, W.
        pos = paddle.concat((pos_n, pos_y, pos_x),
                            axis=4).transpose([0, 1, 4, 2, 3])
        return pos
@manager.MIDDLE_ENCODERS.add_component
class LearnedPositionalEncoding3D(nn.Layer):
    """Position embedding with learnable embedding weights.
    Args:
        num_feats (int): The feature dimension for each position
            along x-axis or y-axis. The final returned dimension for
            each position is 2 times of this value.
        row_num_embed (int, optional): The dictionary size of row embeddings.
            Default 50.
        col_num_embed (int, optional): The dictionary size of col embeddings.
            Default 50.
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

    def __init__(
            self,
            num_feats,
            row_num_embed=50,
            col_num_embed=50,
    ):
        super(LearnedPositionalEncoding3D, self).__init__()
        self.row_embed = nn.Embedding(row_num_embed, num_feats)
        self.col_embed = nn.Embedding(col_num_embed, num_feats)
        self.num_feats = num_feats
        self.row_num_embed = row_num_embed
        self.col_num_embed = col_num_embed

    def forward(self, mask):
        """Forward function for `LearnedPositionalEncoding`.
        Args:
            mask (Tensor): ByteTensor mask. Non-zero values representing
                ignored positions, while zero values means valid positions
                for this image. Shape [bs, h, w].
        Returns:
            pos (Tensor): Returned position embedding with shape
                [bs, num_feats*2, h, w].
        """
        h, w = mask.shape[-2:]
        # BUGFIX: the previous body used torch-only APIs that fail in paddle
        # (``paddle.arange(..., device=...)``, ``Tensor.repeat``,
        # ``paddle.concat(..., dim=...)``). Rewritten with paddle
        # equivalents (plain arange, ``tile``, ``axis``).
        x = paddle.arange(w)
        y = paddle.arange(h)
        x_embed = self.col_embed(x)  # [w, num_feats]
        y_embed = self.row_embed(y)  # [h, num_feats]
        # Broadcast column embeddings over rows and row embeddings over
        # columns, concat along features, then reshape to [bs, 2C, h, w].
        pos = paddle.concat(
            (x_embed.unsqueeze(0).tile([h, 1, 1]),
             y_embed.unsqueeze(1).tile([1, w, 1])),
            axis=-1).transpose([2, 0, 1]).unsqueeze(0).tile(
                [mask.shape[0], 1, 1, 1])
        return pos
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/layers/normalization.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from paddle import nn
class Scale(nn.Layer):
    """Multiplies its input by a single learnable scalar parameter.

    This code is based on https://github.com/aim-uofa/AdelaiDet/blob/master/adet/modeling/fcos/fcos.py#L20
    """

    def __init__(self, init_value=1.0):
        super(Scale, self).__init__()
        # Seed a 1-element tensor and register it as a trainable parameter.
        initial = paddle.to_tensor([init_value])
        self.scale = paddle.create_parameter(
            shape=initial.shape,
            dtype='float32',
            default_initializer=nn.initializer.Assign(initial))

    def forward(self, input):
        # Elementwise product with the learned scalar.
        return input * self.scale
class Offset(nn.Layer):
    """Adds a single learnable scalar bias to its input.

    This code is based on https://github.com/TRI-ML/dd3d/blob/main/tridet/layers/normalization.py#L21
    """

    def __init__(self, init_value=0.):
        super(Offset, self).__init__()
        # Seed a 1-element tensor and register it as a trainable parameter.
        initial = paddle.to_tensor([init_value])
        self.bias = paddle.create_parameter(
            shape=initial.shape,
            dtype='float32',
            default_initializer=nn.initializer.Assign(initial))

    def forward(self, input):
        # Elementwise addition of the learned scalar bias.
        return input + self.bias
class LayerListDial(nn.LayerList):
    """A LayerList that cycles through its members on successive calls.

    Each forward() applies the sub-layer at the current position and then
    advances the position (wrapping around), so the i-th call within a
    round uses the i-th sub-layer.

    This code is based on https://github.com/aim-uofa/AdelaiDet/blob/master/adet/modeling/fcos/fcos.py#L29
    """

    def __init__(self, layers=None):
        super(LayerListDial, self).__init__(layers)
        self.cur_position = 0

    def forward(self, x):
        out = self[self.cur_position](x)
        # Advance the dial, wrapping back to the first layer at the end.
        self.cur_position = (self.cur_position + 1) % len(self)
        return out
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/layers/layer_norm.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
def group_norm(out_channels):
    """Create a GroupNorm layer for the given channel count.

    Uses 32 groups when the channel count is divisible by 32 and falls
    back to 16 groups otherwise.

    Args:
        out_channels (int): Number of channels to normalize.

    Returns:
        nn.Layer: A ``nn.GroupNorm`` instance.
    """
    groups = 32 if out_channels % 32 == 0 else 16
    return nn.GroupNorm(groups, out_channels)
class FrozenBatchNorm2d(nn.Layer):
    """
    BatchNorm2d where the batch statistics and the affine parameters are fixed.
    It contains non-trainable buffers called
    "weight" and "bias", "running_mean", "running_var",
    initialized to perform identity transformation.
    The pre-trained backbone models from Caffe2 only contain "weight" and "bias",
    which are computed from the original four parameters of BN.
    The affine transform `x * weight + bias` will perform the equivalent
    computation of `(x - running_mean) / sqrt(running_var) * weight + bias`.
    When loading a backbone model from Caffe2, "running_mean" and "running_var"
    will be left unchanged as identity transformation.
    Other pre-trained backbone models may contain all 4 parameters.
    The forward is implemented by `F.batch_norm(..., training=False)`.
    This code is based on https://github.com/facebookresearch/detectron2/blob/32b61e64c76118b2e9fc2237f283a8e9c938bd16/detectron2/layers/batch_norm.py#L13
    """

    def __init__(self, num_features, eps=1e-5):
        super().__init__()
        self.num_features = num_features
        self.eps = eps
        # Non-trainable buffers; the defaults perform an identity transform.
        self.register_buffer("weight", paddle.ones([num_features]))
        self.register_buffer("bias", paddle.zeros([num_features]))
        self.register_buffer("_mean", paddle.zeros([num_features]))
        # Variance is initialized to (1 - eps) so that sqrt(var + eps) == 1
        # exactly, keeping the default transform a true identity.
        self.register_buffer("_variance", paddle.ones([num_features]) - eps)

    def forward(self, x):
        # In Paddle, stop_gradient == False means gradients are required.
        if not x.stop_gradient:
            # When gradients are needed, F.batch_norm will use extra memory
            # because its backward op computes gradients for weight/bias as well.
            # Fold the frozen statistics into a per-channel scale and bias
            # and apply them as a plain affine transform instead.
            scale = self.weight * (self._variance + self.eps).rsqrt()
            bias = self.bias - self._mean * scale
            scale = scale.reshape([1, -1, 1, 1])
            bias = bias.reshape([1, -1, 1, 1])
            out_dtype = x.dtype  # may be half
            return x * scale.cast(out_dtype) + bias.cast(out_dtype)
        else:
            # When gradients are not needed, F.batch_norm is a single fused op
            # and provide more optimization opportunities.
            return F.batch_norm(
                x,
                self._mean,
                self._variance,
                self.weight,
                self.bias,
                training=False,
                epsilon=self.eps,
            )

    def __repr__(self):
        return "FrozenBatchNorm2d(num_features={}, eps={})".format(
            self.num_features, self.eps)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/layers/transformer_layers.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
# Copyright (c) 2022 megvii-model. All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from DETR3D (https://github.com/WangYueFt/detr3d)
# Copyright (c) 2021 Wang, Yue
# ------------------------------------------------------------------------
# Modified from mmdetection3d (https://github.com/open-mmlab/mmdetection3d)
# Copyright (c) OpenMMLab. All rights reserved.
# ------------------------------------------------------------------------
import copy
import math
import warnings
from typing import Sequence
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.distributed.fleet.utils import recompute
from paddle3d.apis import manager
from paddle3d.models.layers.layer_libs import act_layer_from_config
from paddle3d.models.layers.param_init import (constant_init,
xavier_uniform_init)
class FFN(nn.Layer):
    """Implements feed-forward networks (FFNs) with identity connection.

    Args:
        embed_dims (int): Input/output feature dimension. Default: 256.
        feedforward_channels (int): Hidden width of the FFN. Default: 1024.
        num_fcs (int): Number of fully-connected layers, at least 2.
            Default: 2.
        act_cfg (dict): Config of the activation used between hidden layers.
        ffn_drop (float): Dropout probability inside the FFN. Default: 0.
        dropout_prob (float): Dropout applied to the FFN output before the
            residual addition. Default: 0.
        add_identity (bool): Whether to add the input (or ``identity``) to
            the output. Default: True.
        init_cfg (dict, optional): Unused here; kept for config compatibility.
    """

    def __init__(self,
                 embed_dims=256,
                 feedforward_channels=1024,
                 num_fcs=2,
                 act_cfg=dict(type='ReLU'),
                 ffn_drop=0.,
                 dropout_prob=0.,
                 add_identity=True,
                 init_cfg=None,
                 **kwargs):
        super(FFN, self).__init__()
        assert num_fcs >= 2, 'num_fcs should be no less ' \
            f'than 2. got {num_fcs}.'
        self.embed_dims = embed_dims
        self.feedforward_channels = feedforward_channels
        self.num_fcs = num_fcs
        self.act_cfg = act_cfg
        self.act_layer = act_layer_from_config(act_cfg)
        layers = []
        in_channels = embed_dims
        # (num_fcs - 1) hidden stages of Linear + activation + dropout ...
        for _ in range(num_fcs - 1):
            layers.append(
                nn.Sequential(
                    nn.Linear(in_channels, feedforward_channels),
                    self.act_layer, nn.Dropout(ffn_drop)))
            in_channels = feedforward_channels
        # ... followed by a projection back to embed_dims.
        layers.append(nn.Linear(feedforward_channels, embed_dims))
        layers.append(nn.Dropout(ffn_drop))
        self.layers = nn.Sequential(*layers)
        self.dropout_layer = nn.Dropout(
            dropout_prob) if dropout_prob else nn.Identity()
        self.add_identity = add_identity

    def forward(self, x, identity=None):
        """Forward function for `FFN`.

        The function would add x to the output tensor if residue is None.
        """
        out = self.layers(x)
        if not self.add_identity:
            # Bug fix: previously returned `x` (the raw input) here, which
            # silently discarded the FFN computation whenever add_identity
            # was False. Return the FFN output instead.
            return self.dropout_layer(out)
        if identity is None:
            identity = x
        return identity + self.dropout_layer(out)
class BaseTransformerLayer(nn.Layer):
    """Base `TransformerLayer` for vision transformer.

    Args:
        attns (list[nn.Layer]): Attention modules, consumed in the order
            that 'self_attn' / 'cross_attn' entries appear in
            ``operation_order``.
        ffn_cfgs (dict or list[dict]): Config(s) used to build the FFN(s).
        operation_order (tuple[str]): Execution order, built only from
            'self_attn', 'norm', 'ffn' and 'cross_attn'.
        norm_cfg (dict): Config for the normalization layers (currently a
            LayerNorm is always instantiated; see TODO below).
        init_cfg (dict, optional): Unused here; kept for config compatibility.
        batch_first (bool): Whether inputs are (batch, num_query, embed_dims).
    """

    def __init__(self,
                 attns=None,
                 ffn_cfgs=dict(
                     type='FFN',
                     embed_dims=256,
                     feedforward_channels=1024,
                     num_fcs=2,
                     ffn_drop=0.,
                     act_cfg=dict(type='ReLU'),
                 ),
                 operation_order=None,
                 norm_cfg=dict(type='LayerNorm'),
                 init_cfg=None,
                 batch_first=True,
                 **kwargs):
        super(BaseTransformerLayer, self).__init__()
        self.batch_first = batch_first
        # Legacy keyword aliases kept for config backward compatibility.
        if 'feedforward_channels' in kwargs:
            ffn_cfgs['feedforward_channels'] = kwargs['feedforward_channels']
        if 'ffn_dropout' in kwargs:
            ffn_cfgs['ffn_drop'] = kwargs['ffn_dropout']
        assert set(operation_order) & set(
            ['self_attn', 'norm', 'ffn', 'cross_attn']) == \
            set(operation_order), f'The operation_order of' \
            f' {self.__class__.__name__} should ' \
            f'contains all four operation type ' \
            f"{['self_attn', 'norm', 'ffn', 'cross_attn']}"
        num_attn = operation_order.count('self_attn') + operation_order.count(
            'cross_attn')
        assert num_attn == len(attns), f'The length ' \
            f'of attn_cfg {num_attn} is ' \
            f'not consistent with the number of attention' \
            f'in operation_order {operation_order}.'
        self.num_attn = num_attn
        self.operation_order = operation_order
        self.norm_cfg = norm_cfg
        # Pre-norm architecture when the sequence starts with 'norm'.
        self.pre_norm = operation_order[0] == 'norm'
        self.attentions = nn.LayerList()
        index = 0
        for operation_name in operation_order:
            if operation_name in ['self_attn', 'cross_attn']:
                attns[index].batch_first = self.batch_first
                # Some custom attentions used as `self_attn`
                # or `cross_attn` can have different behavior.
                attns[index].operation_name = operation_name
                self.attentions.append(attns[index])
                index += 1
        self.embed_dims = self.attentions[0].embed_dims
        self.ffns = nn.LayerList()
        num_ffns = operation_order.count('ffn')
        if isinstance(ffn_cfgs, dict):
            ffn_cfgs = [copy.deepcopy(ffn_cfgs) for _ in range(num_ffns)]
        assert len(ffn_cfgs) == num_ffns
        for ffn_index in range(num_ffns):
            if 'embed_dims' not in ffn_cfgs[ffn_index]:
                # Bug fix: the original wrote `ffn_cfgs['embed_dims'] = ...`,
                # indexing the *list* with a string key (TypeError at
                # runtime). The default belongs in this entry's dict.
                ffn_cfgs[ffn_index]['embed_dims'] = self.embed_dims
            else:
                assert ffn_cfgs[ffn_index]['embed_dims'] == self.embed_dims
            self.ffns.append(FFN(**ffn_cfgs[ffn_index]))
        self.norms = nn.LayerList()
        num_norms = operation_order.count('norm')
        for _ in range(num_norms):
            # TODO hard code
            self.norms.append(nn.LayerNorm(self.embed_dims))

    def forward(self,
                query,
                key=None,
                value=None,
                query_pos=None,
                key_pos=None,
                attn_masks=None,
                query_key_padding_mask=None,
                key_padding_mask=None,
                **kwargs):
        """Forward function for `TransformerDecoderLayer`.

        Runs the configured operations in ``operation_order``, threading
        the query through attention / norm / FFN stages. ``identity``
        tracks the residual input fed to each attention/FFN stage when
        pre-norm is enabled.
        """
        norm_index = 0
        attn_index = 0
        ffn_index = 0
        identity = query
        if attn_masks is None:
            attn_masks = [None for _ in range(self.num_attn)]
        elif isinstance(attn_masks, paddle.Tensor):
            # A single mask is broadcast to every attention stage.
            attn_masks = [
                copy.deepcopy(attn_masks) for _ in range(self.num_attn)
            ]
            warnings.warn(f'Use same attn_mask in all attentions in '
                          f'{self.__class__.__name__} ')
        else:
            assert len(attn_masks) == self.num_attn, f'The length of ' \
                f'attn_masks {len(attn_masks)} must be equal ' \
                f'to the number of attention in ' \
                f'operation_order {self.num_attn}'
        for layer in self.operation_order:
            if layer == 'self_attn':
                # Self-attention: keys and values are the query itself.
                temp_key = temp_value = query
                query = self.attentions[attn_index](
                    query,
                    temp_key,
                    temp_value,
                    identity if self.pre_norm else None,
                    query_pos=query_pos,
                    key_pos=query_pos,
                    attn_mask=attn_masks[attn_index],
                    key_padding_mask=query_key_padding_mask,
                    **kwargs)
                attn_index += 1
                identity = query
            elif layer == 'norm':
                query = self.norms[norm_index](query)
                norm_index += 1
            elif layer == 'cross_attn':
                query = self.attentions[attn_index](
                    query,
                    key,
                    value,
                    identity if self.pre_norm else None,
                    query_pos=query_pos,
                    key_pos=key_pos,
                    attn_mask=attn_masks[attn_index],
                    key_padding_mask=key_padding_mask,
                    **kwargs)
                attn_index += 1
                identity = query
            elif layer == 'ffn':
                query = self.ffns[ffn_index](
                    query, identity if self.pre_norm else None)
                ffn_index += 1
        return query
class TransformerLayerSequence(nn.Layer):
    """Base class for TransformerEncoder and TransformerDecoder in vision
    transformer.

    Holds ``num_layers`` transformer layers (the supplied template plus
    deep copies of it) and applies them sequentially.

    Args:
        transformerlayers: paddle.nn.Layer. Default: None.
        num_layers (int): The number of `TransformerLayer`. Default: None.
    """

    def __init__(self, transformerlayers=None, num_layers=None):
        super(TransformerLayerSequence, self).__init__()
        self.num_layers = num_layers
        self.layers = nn.LayerList()
        # The first slot holds the template itself; every subsequent slot
        # is an independent deep copy of it.
        self.layers.append(transformerlayers)
        for _ in range(num_layers - 1):
            self.layers.append(copy.deepcopy(transformerlayers))
        # Mirror properties of the contained layers for convenience.
        self.embed_dims = self.layers[0].embed_dims
        self.pre_norm = self.layers[0].pre_norm

    def forward(self,
                query,
                key,
                value,
                query_pos=None,
                key_pos=None,
                attn_masks=None,
                query_key_padding_mask=None,
                key_padding_mask=None,
                **kwargs):
        """Forward function for `TransformerCoder`.

        Feeds each layer's output query into the next layer; all other
        arguments are passed through unchanged.
        """
        out = query
        for layer in self.layers:
            out = layer(
                out,
                key,
                value,
                query_pos=query_pos,
                key_pos=key_pos,
                attn_masks=attn_masks,
                query_key_padding_mask=query_key_padding_mask,
                key_padding_mask=key_padding_mask,
                **kwargs)
        return out
@manager.MODELS.add_component
class MultiHeadAttention(nn.Layer):
    """A wrapper for ``paddle.nn.MultiheadAttention``.

    Adds positional-encoding handling, a residual (identity) connection
    and output dropout around Paddle's attention op.
    """

    def __init__(self,
                 embed_dims,
                 num_heads,
                 attn_drop=0.,
                 proj_drop=0.,
                 drop_prob=0.,
                 batch_first=True,
                 **kwargs):
        # embed_dims: total embedding dimension split across `num_heads`.
        # attn_drop: dropout inside the attention op; proj_drop: dropout on
        # its output; drop_prob: dropout before the residual addition.
        super(MultiHeadAttention, self).__init__()
        self.embed_dims = embed_dims
        self.num_heads = num_heads
        # only support batch first
        # NOTE(review): the `batch_first` argument is ignored and the flag
        # is forced to True here — confirm whether callers rely on passing
        # False.
        self.batch_first = True
        self.attn = nn.MultiHeadAttention(embed_dims, num_heads, attn_drop,
                                          **kwargs)
        self.proj_drop = nn.Dropout(proj_drop)
        self.dropout_layer = nn.Dropout(
            drop_prob) if drop_prob > 0 else nn.Identity()

    def forward(self,
                query,
                key=None,
                value=None,
                identity=None,
                query_pos=None,
                key_pos=None,
                attn_mask=None,
                key_padding_mask=None,
                **kwargs):
        """Forward function for `MultiHeadAttention`.

        `key` defaults to `query`, `value` defaults to `key`, and
        `identity` (the residual input) defaults to `query`. Positional
        encodings, when given, are added to query/key before attention.
        Raises NotImplementedError if `key_padding_mask` is provided.
        """
        if key is None:
            key = query
        if value is None:
            value = key
        if identity is None:
            identity = query
        if key_pos is None:
            if query_pos is not None:
                # use query_pos if key_pos is not available
                if query_pos.shape == key.shape:
                    key_pos = query_pos
                else:
                    warnings.warn(f'position encoding of key is'
                                  f'missing in {self.__class__.__name__}.')
        if query_pos is not None:
            query = query + query_pos
        if key_pos is not None:
            key = key + key_pos
        # paddle only support batch first
        # NOTE(review): inputs appear to arrive sequence-first
        # (num_query, bs, dim) and are transposed here into batch-first
        # layout for Paddle's nn.MultiHeadAttention — confirm with callers.
        if self.batch_first:
            query = query.transpose([1, 0, 2])
            key = key.transpose([1, 0, 2])
            value = value.transpose([1, 0, 2])
        if key_padding_mask is None:
            if attn_mask is not None:
                # NOTE(review): the mask convention is inverted here,
                # presumably to translate between "True = ignore" and
                # Paddle's mask semantics — verify against mask producers.
                attn_mask = ~attn_mask
            out = self.attn(
                query=query,
                key=key,
                value=value,
                attn_mask=attn_mask,
            )
        else:
            raise NotImplementedError(
                'key_padding_mask is not None not support now')
        # Transpose the result back to the caller's layout.
        if self.batch_first:
            out = out.transpose([1, 0, 2])
        return identity + self.dropout_layer(self.proj_drop(out))
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/segmentation/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .squeezesegv3 import *
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/segmentation
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/segmentation/squeezesegv3/squeezesegv3.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import os
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle3d.apis import manager
from paddle3d.models.base import add_export_args
from paddle3d.models.layers import param_init
from paddle3d.sample import Sample
from paddle3d.utils import checkpoint
from paddle3d.utils.logger import logger
__all__ = ["SqueezeSegV3"]
@manager.MODELS.add_component
class SqueezeSegV3(nn.Layer):
    """
    The SqueezeSegV3 implementation based on PaddlePaddle.
    Please refer to:
        Xu, Chenfeng, et al. “SqueezeSegV3: Spatially-Adaptive Convolution for Efficient Point-Cloud Segmentation.”
        CoRR, vol. abs/2004.01803, 2020, https://arxiv.org/abs/2004.01803.
    Args:
        backbone (paddle.nn.Layer): Backbone network.
        loss (paddle.nn.Layer): Loss layer.
        num_classes (int): Number of classes.
        pretrained (str): Path to pretrained model.
    """

    def __init__(self,
                 backbone: paddle.nn.Layer,
                 loss: paddle.nn.Layer,
                 num_classes: int = 20,
                 pretrained: str = None):
        super().__init__()
        self.backbone = backbone
        self.loss = loss
        # One 1x1 (or 3x3 for the finest scale) classification head per
        # backbone output scale; channel counts match the backbone features.
        self.heads = nn.LayerList([
            nn.Conv2D(256, num_classes, 1, padding=0),
            nn.Conv2D(256, num_classes, 1, padding=0),
            nn.Conv2D(128, num_classes, 1, padding=0),
            nn.Conv2D(64, num_classes, 1, padding=0),
            nn.Conv2D(32, num_classes, 3, padding=1)
        ])
        self.pretrained = pretrained
        self.init_weight()
        self.sync_bn = True

    def forward(self, samples):
        """Training: multi-scale loss dict. Eval: per-point predictions."""
        # Stack the per-sample range images into a batch tensor.
        range_images = paddle.stack(samples["data"], axis=0)
        feature_list = self.backbone(range_images)
        if self.training:
            # Compute softmax logits at every scale; the loss layer
            # aggregates them against (resized) labels.
            logits_list = []
            for head, feat in zip(self.heads, feature_list):
                logits = head(feat)
                logits = F.softmax(logits, axis=1)
                logits_list.append(logits)
            loss = self.loss(logits_list, paddle.stack(
                samples['labels'], axis=0))
            return {"loss": loss}
        else:
            # TODO(will-jl944): support multi-card evaluation and prediction
            # Only the finest-scale head is used for inference.
            logits = self.heads[-1](feature_list[-1])
            prediction = paddle.argmax(logits, axis=1)
            # de-batchify
            ret = []
            for batch_idx, pred in enumerate(prediction):
                sample = Sample(
                    path=samples["path"][batch_idx],
                    modality=samples["modality"][batch_idx])
                # Map range-image predictions back to the original points
                # via the stored projection indices.
                sample.labels = pred[samples["meta"]["proj_y"][batch_idx],
                                     samples["meta"]["proj_x"][batch_idx]]
                ret.append(sample)
            return {"preds": ret}

    def init_weight(self):
        """Load pretrained weights, or Kaiming-initialize conv layers."""
        if self.pretrained is not None:
            checkpoint.load_pretrained_model(self, self.pretrained)
        else:
            for layer in self.sublayers():
                if isinstance(layer, (nn.Conv2D, nn.Conv2DTranspose)):
                    param_init.kaiming_uniform_init(
                        layer.weight, a=math.sqrt(5))
                    if layer.bias is not None:
                        # Uniform bias init bounded by 1/sqrt(fan_in),
                        # mirroring the default torch conv initialization.
                        fan_in, _ = param_init._calculate_fan_in_and_fan_out(
                            layer.weight)
                        if fan_in != 0:
                            bound = 1 / math.sqrt(fan_in)
                            param_init.uniform_init(layer.bias, -bound, bound)

    def export_forward(self, samples):
        """Inference-only forward used for static-graph export."""
        range_images = samples
        feature_list = self.backbone(range_images)
        logits = self.heads[-1](feature_list[-1])
        prediction = paddle.argmax(logits, axis=1)
        return prediction

    @add_export_args('--input_shape', nargs='+', type=int, required=True)
    def export(self, save_dir: str, input_shape: list = None, **kwargs):
        """Export the model as a static-graph inference program.

        `input_shape` may be one int (square HxW), two ints (H, W), or a
        full 4-D shape; shorter forms are padded with batch/channel dims.
        """
        self.forward = self.export_forward
        save_path = os.path.join(save_dir, 'squeezesegv3')
        if input_shape is None:
            raise ValueError("input_shape must be provided!")
        elif len(input_shape) == 1:
            shape = [
                None, self.backbone.in_channels, input_shape[0], input_shape[0]
            ]
        elif len(input_shape) == 2:
            shape = [None, self.backbone.in_channels] + input_shape
        else:
            shape = input_shape
        input_spec = [paddle.static.InputSpec(shape=shape, dtype="float32")]
        paddle.jit.to_static(self, input_spec=input_spec)
        paddle.jit.save(self, save_path)
        logger.info("Exported model is saved in {}".format(save_dir))
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/segmentation
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/segmentation/squeezesegv3/squeezesegv3_loss.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle3d.apis import manager
from paddle3d.datasets.semantic_kitti.semantic_kitti import \
SemanticKITTIDataset
__all__ = ["SSGLossComputation"]
@manager.LOSSES.add_component
class SSGLossComputation(nn.Layer):
    """
    Loss layer of SqueezeSegV3.
    Args:
        num_classes: Number of classes.
        epsilon_w: Epsilon for weight normalization.
        ignore_index: Index of ignored class.
    """

    def __init__(self,
                 num_classes: int,
                 epsilon_w: float,
                 ignore_index: int = None):
        super().__init__()
        # Map raw SemanticKITTI label ids into the training label space.
        remap_lut = SemanticKITTIDataset.build_remap_lut()
        # Accumulate per-class frequency in the remapped label space.
        content = paddle.zeros([num_classes], dtype="float32")
        for cl, freq in SemanticKITTIDataset.CONTENT.items():
            x_cl = remap_lut[cl]
            content[x_cl] += freq
        # Inverse-frequency class weights; epsilon_w prevents blow-up for
        # classes with (near-)zero frequency.
        weight = 1. / (content + epsilon_w)
        # `None in range(...)` is False, so this safely skips when no
        # ignore_index is configured.
        if ignore_index in range(num_classes):
            weight[ignore_index] = 0.
        self.loss_func = nn.NLLLoss(weight, ignore_index=ignore_index)

    def forward(self, logits_list, target):
        """Sum weighted NLL losses over multi-scale softmax outputs.

        Args:
            logits_list (list): Per-scale class probabilities (already
                softmax-ed by the model).
            target (Tensor): Ground-truth label map.

        Returns:
            Tensor: Sum of the per-scale losses.
        """
        loss_list = []
        for logits in logits_list:
            # NLLLoss expects log-probabilities; clip avoids log(0).
            # Targets are resized (nearest) to each scale's spatial size.
            loss = self.loss_func(
                paddle.log(paddle.clip(logits, min=1e-8)),
                F.interpolate(target, logits.shape[-2:],
                              mode="nearest").squeeze(axis=1))
            loss_list.append(loss)
        return sum(loss_list)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/segmentation
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/segmentation/squeezesegv3/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .squeezesegv3 import *
from .squeezesegv3_loss import *
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/necks/fpn.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import paddle
from paddle import nn
import paddle.nn.functional as F
from paddle3d.models.layers import FrozenBatchNorm2d, param_init
from paddle3d.apis import manager
__all__ = ["FPN", "LastLevelP6P7", "LastLevelP6"]
@manager.NECKS.add_component
class FPN(nn.Layer):
    """
    This module implements :paper:`FPN`.
    It creates pyramid features built on top of some input feature maps.
    This code is based on https://github.com/facebookresearch/detectron2/blob/333efcb6d0b60d7cceb7afc91bd96315cf211b0a/detectron2/modeling/backbone/fpn.py#L17
    """

    def __init__(self,
                 in_strides,
                 in_channels,
                 out_channel,
                 norm="",
                 top_block=None,
                 fuse_type="sum"):
        """
        Args:
            in_strides(list): strides list
            in_channels(list): channel count of each input feature map.
            out_channel (int): number of channels in the output feature maps.
            norm (str): the normalization to use: "BN", "FrozenBN", or ""
                for no normalization (convs then keep their bias).
            top_block (nn.Layer or None): if provided, an extra operation will
                be performed on the output of the last (smallest resolution)
                FPN output, and the result will extend the result list. The top_block
                further downsamples the feature map. It must have an attribute
                "num_levels", meaning the number of extra FPN levels added by
                this block, and "in_feature", which is a string representing
                its input feature (e.g., p5).
            fuse_type (str): types for fusing the top down features and the lateral
                ones. It can be "sum" (default), which sums up element-wise; or "avg",
                which takes the element-wise mean of the two.
        """
        super(FPN, self).__init__()
        _assert_strides_are_log2_contiguous(in_strides)
        lateral_convs = []
        output_convs = []
        # A conv bias is only useful when no normalization follows it.
        use_bias = norm == ""
        for idx, in_channel in enumerate(in_channels):
            if norm == "BN":
                lateral_norm = nn.BatchNorm2D(out_channel)
                output_norm = nn.BatchNorm2D(out_channel)
            elif norm == "FrozenBN":
                lateral_norm = FrozenBatchNorm2d(out_channel)
                output_norm = FrozenBatchNorm2d(out_channel)
            elif norm == "":
                # Bug fix: the default norm="" previously fell into the
                # NotImplementedError branch even though `use_bias` was
                # computed for it. Empty string now means "no norm layer",
                # matching the detectron2 reference implementation.
                lateral_norm = None
                output_norm = None
            else:
                raise NotImplementedError(
                    "Unsupported norm type: {!r}".format(norm))
            lateral_conv = [
                nn.Conv2D(
                    in_channel, out_channel, kernel_size=1, bias_attr=use_bias)
            ]
            if lateral_norm is not None:
                lateral_conv.append(lateral_norm)
            output_conv = [
                nn.Conv2D(
                    out_channel,
                    out_channel,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    bias_attr=use_bias)
            ]
            if output_norm is not None:
                output_conv.append(output_norm)
            stage = int(math.log2(in_strides[idx]))
            self.add_sublayer("fpn_lateral{}".format(stage),
                              nn.Sequential(*lateral_conv))
            self.add_sublayer("fpn_output{}".format(stage),
                              nn.Sequential(*output_conv))
            lateral_convs.append(nn.Sequential(*lateral_conv))
            output_convs.append(nn.Sequential(*output_conv))
        # Place convs into top-down order (from low to high resolution)
        # to make the top-down computation in forward clearer.
        self.lateral_convs = lateral_convs[::-1]
        self.output_convs = output_convs[::-1]
        self.top_block = top_block
        # Return feature names are "p<stage>", like ["p2", "p3", ..., "p6"]
        self._out_feature_strides = {
            "p{}".format(int(math.log2(s))): s
            for s in in_strides
        }
        # top block output feature maps.
        if self.top_block is not None:
            for s in range(stage, stage + self.top_block.num_levels):
                self._out_feature_strides["p{}".format(s + 1)] = 2**(s + 1)
        self._out_features = list(self._out_feature_strides.keys())
        self._out_feature_channels = {
            k: out_channel
            for k in self._out_features
        }
        assert fuse_type in {"avg", "sum"}
        self._fuse_type = fuse_type

    def _init_weights(self):
        # Bug fix: lateral_convs/output_convs are plain Python lists (the
        # reversed copies built in __init__), which have no .sublayers();
        # iterate each contained nn.Sequential instead.
        for seq in list(self.lateral_convs) + list(self.output_convs):
            for l in seq.sublayers():
                if isinstance(l, nn.Conv2D):
                    param_init.kaiming_uniform_init(l.weight, a=1)
                    if l.bias is not None:  # depth head may not have bias.
                        param_init.constant_init(l.bias, value=0.0)

    def forward(self, x):
        """
        Args:
            input (dict[str->Tensor]): mapping feature map name (e.g., "res5") to
                feature map tensor for each feature level in high to low resolution order.
        Returns:
            dict[str->Tensor]:
                mapping from feature map name to FPN feature map tensor
                in high to low resolution order. Returned feature names follow the FPN
                paper convention: "p<stage>", where stage has stride = 2 ** stage e.g.,
                ["p2", "p3", ..., "p6"].
        """
        results = []
        # Start from the coarsest input feature map.
        prev_features = self.lateral_convs[0](x[-1])
        results.append(self.output_convs[0](prev_features))
        # Reverse feature maps into top-down order (from low to high resolution)
        for idx, (lateral_conv, output_conv) in enumerate(
                zip(self.lateral_convs, self.output_convs)):
            if idx > 0:
                features = x[-idx - 1]
                # Upsample the coarser level and fuse with the lateral conv.
                top_down_features = F.interpolate(
                    prev_features, scale_factor=2.0, mode="nearest")
                lateral_features = lateral_conv(features)
                prev_features = lateral_features + top_down_features
                if self._fuse_type == "avg":
                    prev_features /= 2
                results.insert(0, output_conv(prev_features))
        if self.top_block is not None:
            top_block_in_feature = results[self._out_features.index(
                self.top_block.in_feature)]
            results.extend(self.top_block(top_block_in_feature))
        assert len(self._out_features) == len(results)
        return {f: res for f, res in zip(self._out_features, results)}
def _assert_strides_are_log2_contiguous(strides):
"""
Assert that each stride is 2x times its preceding stride, i.e. "contiguous in log2".
"""
for i, stride in enumerate(strides[1:], 1):
assert stride == 2 * strides[
i - 1], "Strides {} {} are not log2 contiguous".format(
stride, strides[i - 1])
@manager.NECKS.add_component
class LastLevelP6P7(nn.Layer):
    """
    This module is used in RetinaNet to generate extra layers, P6 and P7 from
    C5 feature.
    """

    def __init__(self, in_channels, out_channels, in_feature="res5"):
        super().__init__()
        self.num_levels = 2
        self.in_feature = in_feature
        # Two stride-2 3x3 convs, each halving the spatial resolution.
        self.p6 = nn.Conv2D(in_channels, out_channels, 3, 2, 1)
        self.p7 = nn.Conv2D(out_channels, out_channels, 3, 2, 1)
        self._init_weights()

    def _init_weights(self):
        for conv in (self.p6, self.p7):
            param_init.kaiming_uniform_init(conv.weight, a=1)
            if conv.bias is not None:  # depth head may not have bias.
                param_init.constant_init(conv.bias, value=0.0)

    def forward(self, c5):
        p6 = self.p6(c5)
        # P7 is computed from a ReLU-activated P6.
        p7 = self.p7(F.relu(p6))
        return [p6, p7]
@manager.NECKS.add_component
class LastLevelP6(nn.Layer):
    """
    This module is used in FCOS to generate extra layers
    """

    def __init__(self, in_channels, out_channels, in_feature="res5"):
        super().__init__()
        self.num_levels = 1
        self.in_feature = in_feature
        # Single stride-2 3x3 conv halving the spatial resolution.
        self.p6 = nn.Conv2D(in_channels, out_channels, 3, 2, 1)
        self._init_weights()

    def _init_weights(self):
        conv = self.p6
        param_init.kaiming_uniform_init(conv.weight, a=1)
        if conv.bias is not None:  # depth head may not have bias.
            param_init.constant_init(conv.bias, value=0.0)

    def forward(self, x):
        # Produce the extra P6 level from the input feature map.
        return [self.p6(x)]
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/necks/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import second_fpn
from . import fpn
from .cp_fpn import CPFPN
from .second_fpn import *
from .fpn import *
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/necks/cp_fpn.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
# Copyright (c) 2022 megvii-model. All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from mmdetection (https://github.com/open-mmlab/mmdetection)
# Copyright (c) OpenMMLab. All rights reserved.
# ------------------------------------------------------------------------
import paddle.nn as nn
import paddle.nn.functional as F
from paddle3d.apis import manager
from paddle3d.models.layers.layer_libs import ConvNormActLayer
# This FPN remove the unused parameters which can used with checkpoint
# (use_recompute = True in Backbone)
@manager.NECKS.add_component
class CPFPN(nn.Layer):
    """Checkpoint-friendly Feature Pyramid Network.

    Variant of the standard FPN that keeps only the first (i == 0) 3x3
    output conv in ``fpn_convs``; the remaining pyramid levels pass the
    lateral feature through unchanged. Removing those unused parameters
    allows the backbone to run with recompute/checkpointing (see the
    module-level note above the class).

    Args:
        in_channels (list[int]): Channels of each input feature map.
        out_channels (int): Channels of every output feature map.
        num_outs (int): Number of output pyramid levels.
        start_level (int): Index of the first backbone level to use.
        end_level (int): Index of the last backbone level to use
            (-1 means use all levels from ``start_level``).
        add_extra_convs (bool | str): Whether/where to add extra stride-2
            convs for levels beyond the backbone: 'on_input', 'on_lateral'
            or 'on_output'. ``True`` is normalized to 'on_input'.
        relu_before_extra_convs (bool): Apply ReLU before each extra conv.
        no_norm_on_lateral (bool): Disable normalization on lateral convs.
        conv_cfg (dict): Conv layer config passed to ``ConvNormActLayer``.
        norm_cfg (dict | None): Norm layer config.
        act_cfg (dict | None): Activation config.
        upsample_cfg (dict): Kwargs forwarded to ``F.interpolate`` in the
            top-down pathway (either ``mode``+``size`` or ``scale_factor``).
    """

    def __init__(
            self,
            in_channels,
            out_channels,
            num_outs,
            start_level=0,
            end_level=-1,
            add_extra_convs=False,
            relu_before_extra_convs=False,
            no_norm_on_lateral=False,
            conv_cfg=dict(
                type='Conv2D', init_cfg=dict(type='xavier_uniform_init')),
            norm_cfg=None,
            act_cfg=None,
            upsample_cfg=dict(mode='nearest'),
    ):
        super(CPFPN, self).__init__()
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)
        self.num_outs = num_outs
        self.relu_before_extra_convs = relu_before_extra_convs
        self.no_norm_on_lateral = no_norm_on_lateral
        self.fp16_enabled = False
        # Copy so later mutation of the caller's dict cannot change behavior.
        self.upsample_cfg = upsample_cfg.copy()
        if end_level == -1:
            self.backbone_end_level = self.num_ins
            assert num_outs >= self.num_ins - start_level
        else:
            # if end_level < inputs, no extra level is allowed
            self.backbone_end_level = end_level
            assert end_level <= len(in_channels)
            assert num_outs == end_level - start_level
        self.start_level = start_level
        self.end_level = end_level
        self.add_extra_convs = add_extra_convs
        assert isinstance(add_extra_convs, (str, bool))
        if isinstance(add_extra_convs, str):
            # Extra_convs_source choices: 'on_input', 'on_lateral', 'on_output'
            assert add_extra_convs in ('on_input', 'on_lateral', 'on_output')
        elif add_extra_convs:  # True
            self.add_extra_convs = 'on_input'
        self.lateral_convs = nn.LayerList()
        self.fpn_convs = nn.LayerList()
        for i in range(self.start_level, self.backbone_end_level):
            # 1x1 lateral conv to project every backbone level to out_channels.
            l_conv = ConvNormActLayer(
                in_channels[i],
                out_channels,
                1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg if not self.no_norm_on_lateral else None,
                act_cfg=act_cfg)
            self.lateral_convs.append(l_conv)
            # Only the first level gets a 3x3 output conv — the "checkpoint"
            # trimming that distinguishes CPFPN from a regular FPN.
            if i == 0:
                fpn_conv = ConvNormActLayer(
                    out_channels,
                    out_channels,
                    3,
                    padding=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg)
                self.fpn_convs.append(fpn_conv)
        # add extra conv layers (e.g., RetinaNet)
        extra_levels = num_outs - self.backbone_end_level + self.start_level
        if self.add_extra_convs and extra_levels >= 1:
            for i in range(extra_levels):
                if i == 0 and self.add_extra_convs == 'on_input':
                    in_channels = self.in_channels[self.backbone_end_level - 1]
                else:
                    in_channels = out_channels
                # Stride-2 3x3 conv producing one additional pyramid level.
                extra_fpn_conv = ConvNormActLayer(
                    in_channels,
                    out_channels,
                    3,
                    stride=2,
                    padding=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg)
                self.fpn_convs.append(extra_fpn_conv)

    def forward(self, inputs):
        """Forward function.

        Args:
            inputs (list/tuple of Tensor): One feature map per entry of
                ``self.in_channels``, ordered from high to low resolution.

        Returns:
            tuple of Tensor: ``num_outs`` pyramid feature maps.
        """
        assert len(inputs) == len(self.in_channels)
        # build laterals
        laterals = [
            lateral_conv(inputs[i + self.start_level])
            for i, lateral_conv in enumerate(self.lateral_convs)
        ]
        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            # In some cases, fixing `scale factor` (e.g. 2) is preferred, but
            # it cannot co-exist with `size` in `F.interpolate`.
            if 'scale_factor' in self.upsample_cfg:
                laterals[i - 1] += F.interpolate(laterals[i],
                                                **self.upsample_cfg)
            else:
                prev_shape = laterals[i - 1].shape[2:]
                laterals[i - 1] += F.interpolate(
                    laterals[i], size=prev_shape, **self.upsample_cfg)
        # build outputs
        # part 1: from original levels
        # Only level 0 is refined by a 3x3 conv; other levels pass through.
        outs = [
            self.fpn_convs[i](laterals[i]) if i == 0 else laterals[i]
            for i in range(used_backbone_levels)
        ]
        # part 2: add extra levels
        if self.num_outs > len(outs):
            # use max pool to get more levels on top of outputs
            # (e.g., Faster R-CNN, Mask R-CNN)
            if not self.add_extra_convs:
                for i in range(self.num_outs - used_backbone_levels):
                    outs.append(F.max_pool2d(outs[-1], 1, stride=2))
            # add conv layers on top of original feature maps (RetinaNet)
            else:
                if self.add_extra_convs == 'on_input':
                    extra_source = inputs[self.backbone_end_level - 1]
                elif self.add_extra_convs == 'on_lateral':
                    extra_source = laterals[-1]
                elif self.add_extra_convs == 'on_output':
                    extra_source = outs[-1]
                else:
                    raise NotImplementedError
                # NOTE(review): fpn_convs holds one conv for level 0 plus the
                # extra convs, so this index is only valid when
                # used_backbone_levels == 1; with more backbone levels and
                # add_extra_convs set it would select the wrong (or a missing)
                # conv. Inherited from the upstream CPFPN — confirm configs
                # never exercise this path with used_backbone_levels > 1.
                outs.append(self.fpn_convs[used_backbone_levels](extra_source))
                for i in range(used_backbone_levels + 1, self.num_outs):
                    if self.relu_before_extra_convs:
                        outs.append(self.fpn_convs[i](F.relu(outs[-1])))
                    else:
                        outs.append(self.fpn_convs[i](outs[-1]))
        return tuple(outs)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/necks/second_fpn.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based on https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/models/necks/second_fpn.py
Ths copyright of mmdetection3d is as follows:
Apache-2.0 license [see LICENSE for details].
"""
import math
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle import ParamAttr
from paddle.nn.initializer import Constant, Normal, Uniform
from paddle3d.apis import manager
from paddle3d.models.backbones.second_backbone import build_conv_layer
from paddle3d.models.voxel_encoders.pillar_encoder import build_norm_layer
__all__ = ['SecondFPN']
def build_upsample_layer(in_channels,
                         out_channels,
                         kernel_size,
                         stride=1,
                         padding=0,
                         bias=True,
                         distribution="uniform"):
    """Build a ``nn.Conv2DTranspose`` upsample layer with initialized weights.

    Args:
        in_channels (int): Input channel count.
        out_channels (int): Output channel count.
        kernel_size (int): Square kernel size of the transposed conv.
        stride (int): Upsampling stride. Default: 1.
        padding (int): Zero padding. Default: 0.
        bias (bool): Whether the layer has a learnable bias. Default: True.
        distribution (str): ``"uniform"`` for a fan-in based uniform init,
            anything else for a fan-out based normal init. Default: "uniform".

    Returns:
        nn.Conv2DTranspose: The configured transposed-convolution layer.
    """
    if distribution == "uniform":
        # Fan-in based bound, applied to weights and (optionally) bias.
        bound = 1 / math.sqrt(in_channels)
        weight_init = Uniform(-bound, bound)
        bias_init = Uniform(-bound, bound) if bias else None
    else:
        # MSRA-style normal init scaled by the layer's fan-out.
        fan_out = out_channels * kernel_size**2
        weight_init = Normal(0, math.sqrt(2) / math.sqrt(fan_out))
        bias_init = Constant(0.) if bias else None

    param_attr = ParamAttr(initializer=weight_init)
    # bias_attr=False disables the bias term entirely when bias is False.
    bias_attr = ParamAttr(
        initializer=bias_init) if bias_init is not None else False

    return nn.Conv2DTranspose(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        weight_attr=param_attr,
        bias_attr=bias_attr)
class ChannelPool(nn.Layer):
    """Compress the channel dimension into a 2-channel (max, mean) map."""

    def forward(self, x):
        # Per-pixel max and mean over channels, each kept as a 1-channel map.
        max_map = paddle.max(x, 1).unsqueeze(1)
        mean_map = paddle.mean(x, 1).unsqueeze(1)
        return paddle.concat((max_map, mean_map), axis=1)
class SpatialGate(nn.Layer):
    """Spatial attention gate: rescales features by a sigmoid spatial mask.

    Pools channels to a 2-channel (max, mean) map, runs a 7x7 conv + BN to
    produce a single-channel logit map, and multiplies the input by its
    sigmoid (broadcast over channels).
    """

    def __init__(self):
        super(SpatialGate, self).__init__()
        ksize = 7
        bn_cfg = dict(type='BatchNorm2D', eps=1e-5, momentum=0.01)
        self.compress = ChannelPool()
        attention_conv = build_conv_layer(
            in_channels=2,
            out_channels=1,
            kernel_size=ksize,
            padding=(ksize - 1) // 2,  # "same" padding for odd kernels
            stride=1,
            bias=False)
        self.spatial = nn.Sequential(attention_conv,
                                     build_norm_layer(bn_cfg, 1, False, False))

    def forward(self, x):
        # 1-channel attention logits from the pooled (max, mean) descriptor.
        attn_logits = self.spatial(self.compress(x))
        # Sigmoid mask broadcasts across the channel dimension.
        return x * F.sigmoid(attn_logits)
@manager.NECKS.add_component
class SecondFPN(nn.Layer):
    """SECOND-style FPN neck: upsample each input level and concatenate.

    Each input feature map goes through one "deblock" (transposed conv or
    strided conv, then BN + ReLU, optionally a SpatialGate); the resulting
    maps — which all share the same spatial size by construction of the
    strides — are concatenated along the channel axis.

    Args:
        in_channels (list[int]): Channels of each input feature map.
        out_channels (list[int]): Channels produced by each deblock.
        upsample_strides (list[int|float]): Per-level resampling factor.
            Values > 1 upsample via Conv2DTranspose; values < 1 downsample
            via a strided conv with stride ``round(1 / stride)``.
        use_conv_for_no_stride (bool): For stride == 1, use a plain conv
            instead of a transposed conv.
        use_spatial_attn_before_concat (bool): Append a SpatialGate to each
            deblock before concatenation.
    """

    def __init__(self,
                 in_channels=[128, 128, 256],
                 out_channels=[256, 256, 256],
                 upsample_strides=[1, 2, 4],
                 use_conv_for_no_stride=False,
                 use_spatial_attn_before_concat=False):
        super(SecondFPN, self).__init__()
        # One deblock per level: the three lists must be parallel.
        assert len(out_channels) == len(upsample_strides) == len(in_channels)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.upsample_strides = upsample_strides
        norm_cfg = dict(type='BatchNorm2D', eps=1e-3, momentum=0.01)
        deblocks = []
        for i, out_channel in enumerate(out_channels):
            stride = upsample_strides[i]
            if stride > 1 or (stride == 1 and not use_conv_for_no_stride):
                # Upsample (or keep resolution) with a transposed conv.
                upsample_layer = build_upsample_layer(
                    in_channels=in_channels[i],
                    out_channels=out_channel,
                    kernel_size=upsample_strides[i],
                    stride=upsample_strides[i],
                    bias=False)
            else:
                # Fractional stride (< 1) means downsampling: invert it to
                # get an integer conv stride. stride == 1 lands here only
                # when use_conv_for_no_stride is set.
                stride = round(1 / stride)
                upsample_layer = build_conv_layer(
                    in_channels=in_channels[i],
                    out_channels=out_channel,
                    kernel_size=stride,
                    stride=stride,
                    bias=False)
            deblock = nn.Sequential(upsample_layer,
                                    build_norm_layer(norm_cfg, out_channel),
                                    nn.ReLU())
            if use_spatial_attn_before_concat:
                # Sublayer name is its index so ordering inside the
                # Sequential stays consistent.
                deblock.add_sublayer(str(len(deblock)), SpatialGate())
            deblocks.append(deblock)
        self.deblocks = nn.LayerList(deblocks)

    def forward(self, x):
        """Apply each deblock to its level and concat along channels.

        Args:
            x (list/tuple of Tensor): One feature map per deblock.

        Returns:
            Tensor: Channel-wise concatenation of all deblock outputs
            (or the single output when there is only one level).
        """
        ups = [deblock(x[i]) for i, deblock in enumerate(self.deblocks)]
        if len(ups) > 1:
            out = paddle.concat(ups, axis=1)
        else:
            out = ups[0]
        return out
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .class_heads import *
from .dense_heads import *
from .fcos_heads import *
from .roi_heads import *
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.