repo_id
stringlengths 19
138
| file_path
stringlengths 32
200
| content
stringlengths 1
12.9M
| __index_level_0__
int64 0
0
|
|---|---|---|---|
apollo_public_repos/apollo-model-centerpoint/deploy/pointpillars/cpp
|
apollo_public_repos/apollo-model-centerpoint/deploy/pointpillars/cpp/custom_ops/iou3d_nms_api.cpp
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <vector>
#include "iou3d_cpu.h"
#include "iou3d_nms.h"
#include "paddle/include/experimental/ext_all.h"
std::vector<paddle::DataType> BoxesIouBevCpuInferDtype(
    paddle::DataType boxes_a_dtype, paddle::DataType boxes_b_dtype) {
  // The output IoU matrix uses the element type of the first box tensor.
  std::vector<paddle::DataType> out_dtypes;
  out_dtypes.push_back(boxes_a_dtype);
  return out_dtypes;
}
std::vector<std::vector<int64_t>> BoxesIouBevCpuInferShape(
    std::vector<int64_t> boxes_a_shape, std::vector<int64_t> boxes_b_shape) {
  // Pairwise IoU: one row per box in A, one column per box in B.
  const int64_t num_a = boxes_a_shape[0];
  const int64_t num_b = boxes_b_shape[0];
  return {{num_a, num_b}};
}
std::vector<paddle::DataType> NmsInferDtype(paddle::DataType boxes_dtype) {
  // Kept indices and the kept-box count are int64 regardless of box dtype.
  const auto index_dtype = paddle::DataType::INT64;
  return {index_dtype, index_dtype};
}
std::vector<std::vector<int64_t>> NmsInferShape(
    std::vector<int64_t> boxes_shape) {
  // "keep" holds up to one index per input box; "num_to_keep" is a
  // single-element count tensor.
  const int64_t num_boxes = boxes_shape[0];
  return {{num_boxes}, {1}};
}
std::vector<paddle::DataType> NmsNormalInferDtype(
    paddle::DataType boxes_dtype) {
  // Same contract as NmsInferDtype: int64 indices plus an int64 count.
  const auto index_dtype = paddle::DataType::INT64;
  return {index_dtype, index_dtype};
}
std::vector<std::vector<int64_t>> NmsNormalInferShape(
    std::vector<int64_t> boxes_shape) {
  // Mirrors NmsInferShape: per-box index slots plus a scalar count.
  const int64_t num_boxes = boxes_shape[0];
  return {{num_boxes}, {1}};
}
std::vector<paddle::DataType> BoxesIouBevGpuInferDtype(
    paddle::DataType boxes_a_dtype, paddle::DataType boxes_b_dtype) {
  // GPU variant shares the CPU contract: output follows boxes_a's dtype.
  std::vector<paddle::DataType> out_dtypes;
  out_dtypes.push_back(boxes_a_dtype);
  return out_dtypes;
}
std::vector<std::vector<int64_t>> BoxesIouBevGpuInferShape(
    std::vector<int64_t> boxes_a_shape, std::vector<int64_t> boxes_b_shape) {
  // IoU matrix shape: [#boxes in A, #boxes in B].
  const int64_t num_a = boxes_a_shape[0];
  const int64_t num_b = boxes_b_shape[0];
  return {{num_a, num_b}};
}
std::vector<paddle::DataType> BoxesOverlapBevGpuInferDtype(
    paddle::DataType boxes_a_dtype, paddle::DataType boxes_b_dtype) {
  // Overlap-area output inherits the dtype of the first box tensor.
  std::vector<paddle::DataType> out_dtypes;
  out_dtypes.push_back(boxes_a_dtype);
  return out_dtypes;
}
std::vector<std::vector<int64_t>> BoxesOverlapBevGpuInferShape(
    std::vector<int64_t> boxes_a_shape, std::vector<int64_t> boxes_b_shape) {
  // Pairwise overlap matrix: [#boxes in A, #boxes in B].
  const int64_t num_a = boxes_a_shape[0];
  const int64_t num_b = boxes_b_shape[0];
  return {{num_a, num_b}};
}
// Register the CPU BEV-IoU op: two box tensors in, one IoU matrix out.
PD_BUILD_OP(boxes_iou_bev_cpu)
    // Fix: the second input name carried a stray leading space
    // (" boxes_b_tensor"), which leaks into the generated op proto.
    .Inputs({"boxes_a_tensor", "boxes_b_tensor"})
    .Outputs({"ans_iou_tensor"})
    .SetKernelFn(PD_KERNEL(boxes_iou_bev_cpu))
    .SetInferDtypeFn(PD_INFER_DTYPE(BoxesIouBevCpuInferDtype))
    .SetInferShapeFn(PD_INFER_SHAPE(BoxesIouBevCpuInferShape));
// Register the GPU BEV-IoU op: two box tensors in, one IoU matrix out.
PD_BUILD_OP(boxes_iou_bev_gpu)
    // Fix: removed stray leading space from the second input name.
    .Inputs({"boxes_a_tensor", "boxes_b_tensor"})
    .Outputs({"ans_iou_tensor"})
    .SetKernelFn(PD_KERNEL(boxes_iou_bev_gpu))
    .SetInferDtypeFn(PD_INFER_DTYPE(BoxesIouBevGpuInferDtype))
    .SetInferShapeFn(PD_INFER_SHAPE(BoxesIouBevGpuInferShape));
// Register the GPU BEV-overlap op: two box tensors in, overlap matrix out.
PD_BUILD_OP(boxes_overlap_bev_gpu)
    // Fix: removed stray leading space from the second input name.
    .Inputs({"boxes_a", "boxes_b"})
    .Outputs({"ans_overlap"})
    .SetKernelFn(PD_KERNEL(boxes_overlap_bev_gpu))
    .SetInferDtypeFn(PD_INFER_DTYPE(BoxesOverlapBevGpuInferDtype))
    .SetInferShapeFn(PD_INFER_SHAPE(BoxesOverlapBevGpuInferShape));
// Register the GPU rotated-NMS op. The overlap threshold arrives as a float
// attribute; outputs are "keep" (one int64 slot per input box, see
// NmsInferShape) and "num_to_keep" (a single int64 count).
PD_BUILD_OP(nms_gpu)
    .Inputs({"boxes"})
    .Outputs({"keep", "num_to_keep"})
    .Attrs({"nms_overlap_thresh: float"})
    .SetKernelFn(PD_KERNEL(nms_gpu))
    .SetInferDtypeFn(PD_INFER_DTYPE(NmsInferDtype))
    .SetInferShapeFn(PD_INFER_SHAPE(NmsInferShape));
// Register the GPU axis-aligned ("normal") NMS op. Same interface as nms_gpu:
// float threshold attribute, int64 "keep" indices and int64 "num_to_keep".
PD_BUILD_OP(nms_normal_gpu)
    .Inputs({"boxes"})
    .Outputs({"keep", "num_to_keep"})
    .Attrs({"nms_overlap_thresh: float"})
    // Consistency: builder calls reordered to match the other registrations
    // in this file (Kernel, InferDtype, InferShape) — functionally identical.
    .SetKernelFn(PD_KERNEL(nms_normal_gpu))
    .SetInferDtypeFn(PD_INFER_DTYPE(NmsNormalInferDtype))
    .SetInferShapeFn(PD_INFER_SHAPE(NmsNormalInferShape));
| 0
|
apollo_public_repos/apollo-model-centerpoint/deploy/pointpillars/cpp/cmake
|
apollo_public_repos/apollo-model-centerpoint/deploy/pointpillars/cpp/cmake/external/boost.cmake
|
# Download-only ExternalProject that vendors Boost headers and exposes them
# through the `boost` target and BOOST_INCLUDE_DIR / Boost_INCLUDE_DIR.
include(ExternalProject)
set(BOOST_PROJECT "extern_boost")
# To release PaddlePaddle as a pip package, we have to follow the
# manylinux1 standard, which features as old Linux kernels and
# compilers as possible and recommends CentOS 5. Indeed, the earliest
# CentOS version that works with NVIDIA CUDA is CentOS 6. And a new
# version of boost, say, 1.66.0, doesn't build on CentOS 6. We
# checked that the devtools package of CentOS 6 installs boost 1.41.0.
# So we use 1.41.0 here.
set(BOOST_VER "1.41.0")
set(BOOST_TAR "boost_1_41_0" CACHE STRING "" FORCE)
# NOTE(review): plain-HTTP download with no URL_HASH — the archive is not
# integrity-checked; consider https + URL_HASH if the mirror supports it.
set(BOOST_URL "http://paddlepaddledeps.bj.bcebos.com/${BOOST_TAR}.tar.gz" CACHE STRING "" FORCE)
MESSAGE(STATUS "BOOST_TAR: ${BOOST_TAR}, BOOST_URL: ${BOOST_URL}")
set(BOOST_SOURCES_DIR ${THIRD_PARTY_PATH}/boost)
set(BOOST_DOWNLOAD_DIR "${BOOST_SOURCES_DIR}/src/${BOOST_PROJECT}")
# Header-only usage: the extracted archive root is the include dir.
set(BOOST_INCLUDE_DIR "${BOOST_DOWNLOAD_DIR}" CACHE PATH "boost include directory." FORCE)
set_directory_properties(PROPERTIES CLEAN_NO_CUSTOM 1)
include_directories(${BOOST_INCLUDE_DIR})
# Download + extract only; no configure/build/install steps are run.
ExternalProject_Add(
    ${BOOST_PROJECT}
    ${EXTERNAL_PROJECT_LOG_ARGS}
    DOWNLOAD_DIR ${BOOST_DOWNLOAD_DIR}
    URL ${BOOST_URL}
    DOWNLOAD_NO_PROGRESS 1
    PREFIX ${BOOST_SOURCES_DIR}
    CONFIGURE_COMMAND ""
    BUILD_COMMAND ""
    INSTALL_COMMAND ""
    UPDATE_COMMAND ""
)
# Old CMake (and non-Windows builds) cannot attach dependencies to an
# INTERFACE library, so a dummy static library is used as the anchor target.
if (${CMAKE_VERSION} VERSION_LESS "3.3.0" OR NOT WIN32)
    set(dummyfile ${CMAKE_CURRENT_BINARY_DIR}/boost_dummy.c)
    file(WRITE ${dummyfile} "const char *dummy = \"${dummyfile}\";")
    add_library(boost STATIC ${dummyfile})
else()
    add_library(boost INTERFACE)
endif()
add_dependencies(boost ${BOOST_PROJECT})
set(Boost_INCLUDE_DIR ${BOOST_INCLUDE_DIR})
| 0
|
apollo_public_repos/apollo-model-centerpoint/deploy/paconv
|
apollo_public_repos/apollo-model-centerpoint/deploy/paconv/python/infer.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import time
from os import path as osp
import h5py
import numpy as np
import paddle
import paddle.nn.functional as F
from paddle import inference
from paddle.inference import Config, create_predictor
from paddle3d.ops.assign_score_withk import assign_score_withk
def parse_args():
    """Parse command-line arguments for the PAConv inference script.

    Returns:
        argparse.Namespace with model/params paths, input file and
        precision/benchmark switches.
    """

    def str2bool(v):
        # Truthy spellings accepted on the command line; anything else is False.
        return v.lower() in ("true", "t", "1")

    parser = argparse.ArgumentParser("Paddle3D Inference model script")
    # general params
    parser.add_argument('-c', '--config', type=str,
                        default='configs/example.yaml', help='config file path')
    parser.add_argument("-i", "--input_file", type=str, help="input file path")
    parser.add_argument("--model_file", type=str)
    parser.add_argument("--params_file", type=str)
    # params for predict
    parser.add_argument("-b", "--batch_size", type=int, default=1)
    parser.add_argument("--use_gpu", type=str2bool, default=True)
    parser.add_argument("--precision", type=str, default="fp32")
    parser.add_argument("--ir_optim", type=str2bool, default=True)
    parser.add_argument("--gpu_mem", type=int, default=8000)
    parser.add_argument("--enable_benchmark", type=str2bool, default=False)
    parser.add_argument("--enable_mkldnn", type=str2bool, default=False)
    parser.add_argument("--cpu_threads", type=int, default=None)
    return parser.parse_args()
def create_paddle_predictor(args):
    """Build an inference Config from CLI args and create its predictor.

    GPU mode reserves args.gpu_mem MB on card 0; CPU mode optionally enables
    MKL-DNN and a custom math-library thread count.

    Returns:
        (config, predictor) tuple.
    """
    config = Config(args.model_file, args.params_file)

    if args.use_gpu:
        config.enable_use_gpu(args.gpu_mem, 0)
    else:
        config.disable_gpu()
        if args.cpu_threads:
            config.set_cpu_math_library_num_threads(args.cpu_threads)
        if args.enable_mkldnn:
            # cache 10 different shapes for mkldnn to avoid memory leak
            config.set_mkldnn_cache_capacity(10)
            config.enable_mkldnn()
            if args.precision == "fp16":
                config.enable_mkldnn_bfloat16()

    # config.disable_glog_info()
    config.switch_ir_optim(args.ir_optim)  # default true
    config.enable_memory_optim()
    # use zero copy feed/fetch instead of feed/fetch ops
    config.switch_use_feed_fetch_ops(False)
    return config, create_predictor(config)
def parse_file_paths(input_path: str) -> list:
    """Expand input_path into a list of input files.

    A file path is returned as a single-element list; a directory is scanned
    (non-recursively) for files whose names end in ".h5".
    """
    if osp.isfile(input_path):
        return [input_path]
    return [
        osp.join(input_path, name)
        for name in os.listdir(input_path)
        if name.endswith(".h5")
    ]
def postprocess(input_file, output, print_output=True):
    """Average per-clip logits per sample and report the top-1 class.

    Args:
        input_file: list whose first element holds the per-sample labels;
            only its length (the sample count N) is used here.
        output: list of network outputs; output[0] is a [B, num_cls] array.
        print_output: when True, print top-k class and score for each sample.
    """
    top_k = 1
    logits = output[0]  # [B, num_cls]
    num_samples = len(input_file[0])
    if logits.shape[0] != num_samples:
        # Multiple temporal clips per sample: fold to [N, T, C], then average
        # the clip axis down to one prediction per sample.
        clips = logits.shape[0] // num_samples
        logits = logits.reshape([num_samples] + [clips] + list(logits.shape[1:]))
        logits = logits.mean(axis=1)  # [N, C]
    for idx in range(num_samples):
        top_classes = np.argpartition(logits[idx], -top_k)[-top_k:]
        top_classes = top_classes[np.argsort(-logits[idx, top_classes])]
        top_scores = logits[idx, top_classes]
        if print_output:
            for rank in range(top_k):
                print("\ttop-{0} class: {1}".format(rank + 1, top_classes[rank]))
                print("\ttop-{0} score: {1}".format(rank + 1, top_scores[rank]))
def main():
    """Run PAConv inference over one .h5 file or a directory of .h5 files,
    optionally wrapped in auto_log benchmarking."""
    args = parse_args()
    model_name = 'PAConv'
    print(f"Inference model({model_name})...")
    # InferenceHelper = build_inference_helper(cfg.INFERENCE)
    inference_config, predictor = create_paddle_predictor(args)
    # get input_tensor and output_tensor
    input_names = predictor.get_input_names()
    output_names = predictor.get_output_names()
    input_tensor_list = []
    output_tensor_list = []
    for item in input_names:
        input_tensor_list.append(predictor.get_input_handle(item))
    for item in output_names:
        output_tensor_list.append(predictor.get_output_handle(item))
    files = parse_file_paths(args.input_file)
    if args.enable_benchmark:
        num_warmup = 3
        # instantiate auto log
        import auto_log
        pid = os.getpid()
        # NOTE(review): save_path ends in ".lpg" — looks like a typo for
        # ".log"; confirm before relying on the log location.
        autolog = auto_log.AutoLogger(
            model_name="PAConv",
            model_precision=args.precision,
            batch_size=args.batch_size,
            data_shape="dynamic",
            save_path="./output/auto_log.lpg",
            inference_config=inference_config,
            pids=pid,
            process_name=None,
            gpu_ids=0 if args.use_gpu else None,
            time_keys=['preprocess_time', 'inference_time', 'postprocess_time'],
            warmup=num_warmup)
        test_num = 10
        # Benchmark mode replaces the file list with the same input repeated.
        files = [args.input_file for _ in range(test_num)]
    # Inferencing process
    batch_num = args.batch_size
    for st_idx in range(0, len(files), batch_num):
        ed_idx = min(st_idx + batch_num, len(files))
        # auto log start
        if args.enable_benchmark:
            autolog.times.start()
        data_list = []
        for i in range(st_idx, ed_idx):
            f = h5py.File(files[i], mode='r')
            data = f['data'][:].astype('float32')
            labels = f['label'][:].astype('int64')
            f.close()
            # keep only the first 1024 points of each sample
            data = data[:, :1024, :]
            data_list.append(data)
        data_list = np.concatenate(data_list)
        # Pre process batched input
        batched_inputs = [data_list]
        # get pre process time cost
        if args.enable_benchmark:
            autolog.times.stamp()
        # run inference
        for i in range(len(input_tensor_list)):
            input_tensor_list[i].copy_from_cpu(batched_inputs[i])
        predictor.run()
        batched_outputs = []
        for j in range(len(output_tensor_list)):
            batched_outputs.append(output_tensor_list[j].copy_to_cpu())
        # get inference process time cost
        if args.enable_benchmark:
            autolog.times.stamp()
        # NOTE(review): `labels` is whatever the LAST file of the batch
        # produced — confirm batch_size > 1 is intended to work here.
        postprocess([labels], batched_outputs, not args.enable_benchmark)
        # get post process time cost
        if args.enable_benchmark:
            autolog.times.end(stamp=True)
    # report benchmark log if enabled
    if args.enable_benchmark:
        autolog.report()
if __name__ == "__main__":
    main()
| 0
|
apollo_public_repos/apollo-model-centerpoint/deploy/voxel_rcnn
|
apollo_public_repos/apollo-model-centerpoint/deploy/voxel_rcnn/python/infer.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import cv2
import numpy as np
import paddle
from paddle.inference import Config, create_predictor
from paddle3d.ops.voxelize import hard_voxelize
from paddle3d.ops.pointnet2_ops import voxel_query_wrapper, grouping_operation, farthest_point_sample
from paddle3d.ops.iou3d_nms_cuda import nms_gpu
def parse_args():
    """Parse command-line arguments for Voxel R-CNN inference.

    Returns:
        argparse.Namespace with model paths, the lidar input, and
        TensorRT-related switches.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_file", type=str, required=True,
        help="Model filename, Specify this when your model is a combined model.")
    parser.add_argument(
        "--params_file", type=str, required=True,
        help="Parameter filename, Specify this when your model is a combined model.")
    parser.add_argument(
        '--lidar_file', type=str, required=True, help='The lidar path.')
    parser.add_argument(
        "--num_point_dim", type=int, default=4,
        help="Dimension of a point in the lidar file.")
    parser.add_argument(
        "--point_cloud_range", dest='point_cloud_range', nargs='+',
        type=float, default=None,
        help="Range of point cloud for voxelize operation.")
    parser.add_argument("--gpu_id", type=int, default=0, help="GPU card id.")
    parser.add_argument(
        "--use_trt", type=int, default=0,
        help="Whether to use tensorrt to accelerate when using gpu.")
    parser.add_argument(
        "--trt_precision", type=int, default=0,
        help="Precision type of tensorrt, 0: kFloat32, 1: kHalf.")
    parser.add_argument(
        "--trt_use_static", type=int, default=0,
        help="Whether to load the tensorrt graph optimization from a disk path.")
    parser.add_argument(
        "--trt_static_dir", type=str,
        help="Path of a tensorrt graph optimization directory.")
    parser.add_argument(
        "--collect_shape_info", type=int, default=0,
        help="Whether to collect dynamic shape before using tensorrt.")
    parser.add_argument(
        "--dynamic_shape_file", type=str, default="",
        help="Path of a dynamic shape file for tensorrt.")
    return parser.parse_args()
def read_point(file_path, num_point_dim):
    """Load a lidar .bin file of float32 values shaped [-1, num_point_dim]
    and keep only the first 4 columns of each point."""
    raw = np.fromfile(file_path, np.float32)
    all_dims = raw.reshape(-1, num_point_dim)
    return all_dims[:, :4]
def filter_points_outside_range(points, point_cloud_range):
    """Drop points whose x/y fall outside the configured bounds.

    point_cloud_range is ordered [xmin, ymin, zmin, xmax, ymax, zmax];
    only x and y are filtered here — z passes through untouched.
    """
    bounds = np.asarray(point_cloud_range, dtype=np.float32)
    in_x = (points[:, 0] >= bounds[0]) & (points[:, 0] <= bounds[3])
    in_y = (points[:, 1] >= bounds[1]) & (points[:, 1] <= bounds[4])
    return points[in_x & in_y]
def preprocess(file_path, num_point_dim, point_cloud_range):
    """Read a lidar file and clip its points to the configured x/y range."""
    raw_points = read_point(file_path, num_point_dim)
    return filter_points_outside_range(raw_points, point_cloud_range)
def init_predictor(model_file,
                   params_file,
                   gpu_id=0,
                   use_trt=False,
                   trt_precision=0,
                   trt_use_static=False,
                   trt_static_dir=None,
                   collect_shape_info=False,
                   dynamic_shape_file=None):
    """Create a Paddle inference predictor on GPU, optionally with TensorRT.

    Args:
        model_file: path of the combined inference model file.
        params_file: path of the combined inference params file.
        gpu_id: GPU card id to run on.
        use_trt: enable the TensorRT subgraph engine when truthy.
        trt_precision: 0 -> Float32, 1 -> Half.
        trt_use_static: reuse a serialized TensorRT engine from trt_static_dir.
        trt_static_dir: cache directory for the serialized TensorRT engine.
        collect_shape_info: when truthy, record dynamic shape ranges into
            dynamic_shape_file instead of consuming them.
        dynamic_shape_file: path of the dynamic shape file for TensorRT.

    Returns:
        A paddle.inference predictor.
    """
    config = Config(model_file, params_file)
    config.enable_memory_optim()
    config.enable_use_gpu(1000, gpu_id)  # 1000 MB initial GPU memory pool
    if use_trt:
        precision_mode = paddle.inference.PrecisionType.Float32
        if trt_precision == 1:
            precision_mode = paddle.inference.PrecisionType.Half
        config.enable_tensorrt_engine(
            workspace_size=1 << 30,
            max_batch_size=1,
            min_subgraph_size=40,
            precision_mode=precision_mode,
            use_static=trt_use_static,
            use_calib_mode=False)
        if collect_shape_info:
            # First pass: dump observed tensor shape ranges to disk.
            config.collect_shape_range_info(dynamic_shape_file)
        else:
            # Later passes: feed the tuned shape ranges back to TensorRT.
            config.enable_tuned_tensorrt_dynamic_shape(dynamic_shape_file, True)
        if trt_use_static:
            config.set_optim_cache_dir(trt_static_dir)
    predictor = create_predictor(config)
    return predictor
def parse_result(box3d_lidar, label_preds, scores):
    """Print one line per valid detection (score >= 0)."""
    num_boxes, _dims = box3d_lidar.shape
    for idx in range(num_boxes):
        # filter fake results: score = -1
        if scores[idx] < 0:
            continue
        print(
            "Score: {} Label: {} Box(x_c, y_c, z_c, w, l, h, -rot): {} {} {} {} {} {} {}"
            .format(scores[idx], label_preds[idx],
                    box3d_lidar[idx, 0], box3d_lidar[idx, 1],
                    box3d_lidar[idx, 2], box3d_lidar[idx, 3],
                    box3d_lidar[idx, 4], box3d_lidar[idx, 5],
                    box3d_lidar[idx, 6]))
def run(predictor, points):
    """Feed `points` to the predictor's "data" input, run inference, and
    return (box3d_lidar, label_preds, scores) copied back to host memory.

    NOTE(review): if the model exposes no input named "data", or fewer than
    three outputs, the return statement raises NameError on the unbound
    locals — confirm the exported model always matches this layout.
    """
    # copy img data to input tensor
    input_names = predictor.get_input_names()
    for i, name in enumerate(input_names):
        if name == "data":
            input_tensor = predictor.get_input_handle(name)
            input_tensor.reshape(points.shape)
            input_tensor.copy_from_cpu(points.copy())
    # do the inference
    predictor.run()
    results = []  # NOTE(review): never used — dead local
    # get out data from output tensor; outputs are positional:
    # 0 -> boxes, 1 -> scores, 2 -> labels
    output_names = predictor.get_output_names()
    for i, name in enumerate(output_names):
        output_tensor = predictor.get_output_handle(name)
        if i == 0:
            box3d_lidar = output_tensor.copy_to_cpu()
        elif i == 1:
            scores = output_tensor.copy_to_cpu()
        elif i == 2:
            label_preds = output_tensor.copy_to_cpu()
    return box3d_lidar, label_preds, scores
def main(args):
    """End-to-end driver: build the predictor, preprocess the lidar file,
    run inference, and print the detections."""
    predictor = init_predictor(args.model_file, args.params_file, args.gpu_id,
                               args.use_trt, args.trt_precision,
                               args.trt_use_static, args.trt_static_dir,
                               args.collect_shape_info, args.dynamic_shape_file)
    # Load points and clip them to the configured x/y range.
    points = preprocess(args.lidar_file, args.num_point_dim,
                        args.point_cloud_range)
    box3d_lidar, label_preds, scores = run(predictor, points)
    parse_result(box3d_lidar, label_preds, scores)
if __name__ == '__main__':
    args = parse_args()
    main(args)
| 0
|
apollo_public_repos/apollo-model-centerpoint/deploy/voxel_rcnn
|
apollo_public_repos/apollo-model-centerpoint/deploy/voxel_rcnn/cpp/main.cc
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gflags/gflags.h>
#include <glog/logging.h>

#include <chrono>
#include <cmath>
#include <cstdlib>
#include <fstream>
#include <functional>
#include <iostream>
#include <memory>
#include <numeric>
#include <sstream>
#include <string>
#include <vector>

#include "paddle/include/paddle_inference_api.h"
using paddle_infer::Config;
using paddle_infer::CreatePredictor;
using paddle_infer::Predictor;
DEFINE_string(model_file, "", "Path of a inference model");
DEFINE_string(params_file, "", "Path of a inference params");
DEFINE_string(lidar_file, "", "Path of a lidar file to be predicted");
DEFINE_int32(num_point_dim, 4, "Dimension of a point in the lidar file");
DEFINE_string(point_cloud_range, "",
"Range of point cloud for voxelize operation");
DEFINE_int32(gpu_id, 0, "GPU card id");
DEFINE_int32(use_trt, 0,
"Whether to use tensorrt to accelerate when using gpu");
DEFINE_int32(trt_precision, 0,
"Precision type of tensorrt, 0: kFloat32, 1: kHalf");
DEFINE_int32(
trt_use_static, 0,
"Whether to load the tensorrt graph optimization from a disk path");
DEFINE_string(trt_static_dir, "",
"Path of a tensorrt graph optimization directory");
DEFINE_int32(collect_shape_info, 0,
"Whether to collect dynamic shape before using tensorrt");
DEFINE_string(dynamic_shape_file, "",
"Path of a dynamic shape file for tensorrt");
// Splits a whitespace-separated list of numbers into floats appended to *vec.
// Trailing non-numeric text is silently ignored (stream extraction stops).
void parse_string_to_vector(const std::string &str, std::vector<float> *vec) {
  std::stringstream tokens(str);
  for (float value; tokens >> value;) {
    vec->push_back(value);
  }
}
// Reads a binary lidar file into a malloc'd float buffer.
// On success: *buffer owns the raw bytes (caller must free()) and
// *num_points holds the point count. Returns false on any failure.
bool read_point(const std::string &file_path, const int num_point_dim,
                void **buffer, int *num_points) {
  std::ifstream file_in(file_path, std::ios::in | std::ios::binary);
  if (num_point_dim < 4) {
    LOG(ERROR) << "Point dimension must not be less than 4, but recieved "
               << "num_point_dim is " << num_point_dim << ".\n";
    // Bug fix: previously this branch only logged and fell through,
    // continuing to read with an invalid dimension.
    return false;
  }
  if (!file_in) {
    LOG(ERROR) << "Failed to read file: " << file_path << "\n";
    return false;
  }
  // Determine the file size, then rewind for the actual read.
  std::streampos file_size;
  file_in.seekg(0, std::ios::end);
  file_size = file_in.tellg();
  file_in.seekg(0, std::ios::beg);
  *buffer = malloc(file_size);
  if (*buffer == nullptr) {
    LOG(ERROR) << "Failed to malloc memory of size: " << file_size << "\n";
    return false;
  }
  file_in.read(reinterpret_cast<char *>(*buffer), file_size);
  file_in.close();
  // The file must contain a whole number of num_point_dim-float records.
  if (file_size / sizeof(float) % num_point_dim != 0) {
    LOG(ERROR) << "Loaded file size (" << file_size
               << ") is not evenly divisible by num_point_dim ("
               << num_point_dim << ")\n";
    return false;
  }
  *num_points = file_size / sizeof(float) / num_point_dim;
  return true;
}
// Copies every point whose x/y lies inside the configured bounds into
// *selected_points. `points` is a flat [num_points * num_point_dim] array;
// point_cloud_range is [xmin, ymin, zmin, xmax, ymax, zmax] (z unfiltered).
//
// Bug fix: the original loop advanced a flat-float offset `i` by
// num_point_dim but compared it against the POINT count, so it stopped after
// num_points / num_point_dim points. Iterate per point index instead.
void mask_points_outside_range(const float *points, const int num_points,
                               const std::vector<float> &point_cloud_range,
                               const int num_point_dim,
                               std::vector<float> *selected_points) {
  for (int p = 0; p < num_points; ++p) {
    const int base = p * num_point_dim;
    const float pt_x = points[base];
    const float pt_y = points[base + 1];
    // in [-x, x] and [-y, y] range
    if ((pt_x >= point_cloud_range[0]) && (pt_x <= point_cloud_range[3]) &&
        (pt_y >= point_cloud_range[1]) && (pt_y <= point_cloud_range[4])) {
      for (int d = 0; d < num_point_dim; ++d) {
        selected_points->push_back(points[base + d]);
      }
    }
  }
}
// Loads the raw lidar file and keeps only points inside the x/y range.
// Fills *points_data with the kept floats and *points_shape with
// {num_kept_points, num_point_dim}. Returns false if the file read fails.
bool preprocess(const std::string &file_path, const int num_point_dim,
                const std::vector<float> &point_cloud_range,
                std::vector<int> *points_shape,
                std::vector<float> *points_data) {
  void *raw_buffer = nullptr;
  int total_points = 0;
  if (!read_point(file_path, num_point_dim, &raw_buffer, &total_points)) {
    return false;
  }
  float *raw_points = static_cast<float *>(raw_buffer);
  std::vector<float> kept_points;
  mask_points_outside_range(raw_points, total_points, point_cloud_range,
                            num_point_dim, &kept_points);
  points_data->assign(kept_points.begin(), kept_points.end());
  points_shape->push_back(kept_points.size() / num_point_dim);
  points_shape->push_back(num_point_dim);
  free(raw_buffer);
  return true;
}
// Builds a Paddle inference predictor on GPU, optionally with the TensorRT
// subgraph engine. Returns nullptr on invalid TensorRT arguments.
std::shared_ptr<paddle_infer::Predictor> create_predictor(
    const std::string &model_path, const std::string &params_path,
    const int gpu_id, const int use_trt, const int trt_precision,
    const int trt_use_static, const std::string trt_static_dir,
    const int collect_shape_info, const std::string dynamic_shape_file) {
  paddle::AnalysisConfig config;
  config.EnableUseGpu(1000, gpu_id);  // 1000 MB initial GPU memory pool
  config.SetModel(model_path, params_path);
  if (use_trt) {
    // Map the integer flag onto a TensorRT precision type.
    paddle::AnalysisConfig::Precision precision;
    if (trt_precision == 0) {
      precision = paddle_infer::PrecisionType::kFloat32;
    } else if (trt_precision == 1) {
      precision = paddle_infer::PrecisionType::kHalf;
    } else {
      LOG(ERROR) << "Tensorrt type can only support 0 or 1, but recieved is"
                 << trt_precision << "\n";
      return nullptr;
    }
    // workspace 1 GiB, max batch 1, min subgraph size 40.
    config.EnableTensorRtEngine(1 << 30, 1, 40, precision, trt_use_static,
                                false);
    if (dynamic_shape_file == "") {
      LOG(ERROR) << "dynamic_shape_file should be set, but recieved is "
                 << dynamic_shape_file << "\n";
      return nullptr;
    }
    if (collect_shape_info) {
      // First run: record observed tensor shape ranges into the file.
      config.CollectShapeRangeInfo(dynamic_shape_file);
    } else {
      // Subsequent runs: consume the previously tuned dynamic shapes.
      config.EnableTunedTensorRtDynamicShape(dynamic_shape_file, true);
    }
    if (trt_use_static) {
      if (trt_static_dir == "") {
        LOG(ERROR) << "trt_static_dir should be set, but recieved is "
                   << trt_static_dir << "\n";
        return nullptr;
      }
      config.SetOptimCacheDir(trt_static_dir);
    }
  }
  config.SwitchIrOptim(true);
  return paddle_infer::CreatePredictor(config);
}
// Feeds the "data" input, runs the predictor, and copies the three outputs
// (positionally: 0 -> boxes, 1 -> scores, 2 -> labels) back to host vectors.
void run(Predictor *predictor, const std::vector<int> &points_shape,
         const std::vector<float> &points_data, std::vector<float> *box3d_lidar,
         std::vector<int64_t> *label_preds, std::vector<float> *scores) {
  for (const auto &input_name : predictor->GetInputNames()) {
    auto input = predictor->GetInputHandle(input_name);
    if (input_name == "data") {
      input->Reshape(points_shape);
      input->CopyFromCpu(points_data.data());
    }
  }
  CHECK(predictor->Run());
  const auto output_names = predictor->GetOutputNames();
  for (size_t idx = 0; idx != output_names.size(); ++idx) {
    auto output = predictor->GetOutputHandle(output_names[idx]);
    const std::vector<int> out_shape = output->shape();
    // Total element count = product of all dimensions.
    const int element_count = std::accumulate(
        out_shape.begin(), out_shape.end(), 1, std::multiplies<int>());
    if (idx == 0) {
      box3d_lidar->resize(element_count);
      output->CopyToCpu(box3d_lidar->data());
    } else if (idx == 1) {
      scores->resize(element_count);
      output->CopyToCpu(scores->data());
    } else if (idx == 2) {
      label_preds->resize(element_count);
      output->CopyToCpu(label_preds->data());
    }
  }
}
bool parse_result(const std::vector<float> &box3d_lidar,
const std::vector<int64_t> &label_preds,
const std::vector<float> &scores) {
int num_bbox3d = scores.size();
for (size_t box_idx = 0; box_idx != num_bbox3d; ++box_idx) {
// filter fake results: score = -1
if (scores[box_idx] < 0) {
continue;
}
LOG(INFO) << "Score: " << scores[box_idx]
<< " Label: " << label_preds[box_idx] << " ";
LOG(INFO) << "Box (x_c, y_c, z_c, w, l, h, -rot): "
<< box3d_lidar[box_idx * 7 + 0] << " "
<< box3d_lidar[box_idx * 7 + 1] << " "
<< box3d_lidar[box_idx * 7 + 2] << " "
<< box3d_lidar[box_idx * 7 + 3] << " "
<< box3d_lidar[box_idx * 7 + 4] << " "
<< box3d_lidar[box_idx * 7 + 5] << " "
<< box3d_lidar[box_idx * 7 + 6] << "\n";
}
return true;
}
// Entry point: validates required flags, builds the predictor, preprocesses
// the lidar file, runs inference, and logs the detections.
int main(int argc, char *argv[]) {
  google::ParseCommandLineFlags(&argc, &argv, true);
  if (FLAGS_model_file == "" || FLAGS_params_file == "" ||
      FLAGS_lidar_file == "" || FLAGS_point_cloud_range == "") {
    LOG(INFO) << "Missing required parameter"
              << "\n";
    LOG(INFO) << "Usage: " << std::string(argv[0])
              << " --model_file ${MODEL_FILE} "
              << "--params_file ${PARAMS_FILE} "
              << "--lidar_file ${LIDAR_FILE}"
              << "--point_cloud_range ${POINT_CLOUD_RANGE} "
              << "\n";
    return -1;
  }
  auto predictor = create_predictor(
      FLAGS_model_file, FLAGS_params_file, FLAGS_gpu_id, FLAGS_use_trt,
      FLAGS_trt_precision, FLAGS_trt_use_static, FLAGS_trt_static_dir,
      FLAGS_collect_shape_info, FLAGS_dynamic_shape_file);
  if (predictor == nullptr) {
    return 0;
  }
  // Parse the space-separated range string; downstream code reads indices
  // 0/1/3/4 as xmin/ymin/xmax/ymax.
  std::vector<float> point_cloud_range;
  parse_string_to_vector(FLAGS_point_cloud_range, &point_cloud_range);
  std::vector<int> points_shape;
  std::vector<float> points_data;
  if (!preprocess(FLAGS_lidar_file, FLAGS_num_point_dim, point_cloud_range,
                  &points_shape, &points_data)) {
    LOG(ERROR) << "Failed to preprocess!\n";
    return 0;
  }
  std::vector<float> box3d_lidar;
  std::vector<int64_t> label_preds;
  std::vector<float> scores;
  run(predictor.get(), points_shape, points_data, &box3d_lidar, &label_preds,
      &scores);
  parse_result(box3d_lidar, label_preds, scores);
  return 0;
}
| 0
|
apollo_public_repos/apollo-model-centerpoint/deploy/voxel_rcnn
|
apollo_public_repos/apollo-model-centerpoint/deploy/voxel_rcnn/cpp/CMakeLists.txt
|
# Build setup for the Paddle inference C++ demo with custom CUDA operators.
cmake_minimum_required(VERSION 3.0)
project(cpp_inference_demo CXX C)
option(WITH_MKL "Compile demo with MKL/OpenBlas support, default use MKL." ON)
option(WITH_GPU "Compile demo with GPU/CPU, default use CPU." ON)
option(USE_TENSORRT "Compile demo with TensorRT." ON)
# NOTE(review): option() is boolean-only but CUSTOM_OPERATOR_FILES holds a
# file list — set(... CACHE STRING ...) looks intended; confirm.
option(CUSTOM_OPERATOR_FILES "List of file names for custom operators" "")
# Detect the GCC major.minor version to pick compatible link flags below.
execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpfullversion -dumpversion
                OUTPUT_VARIABLE GCC_VERSION)
string(REGEX MATCHALL "[0-9]+" GCC_VERSION_COMPONENTS ${GCC_VERSION})
list(GET GCC_VERSION_COMPONENTS 0 GCC_MAJOR)
list(GET GCC_VERSION_COMPONENTS 1 GCC_MINOR)
set(GCC_VERSION "${GCC_MAJOR}.${GCC_MINOR}")
if (GCC_VERSION LESS "8.0")
    set(CMAKE_CXX_FLAGS "-Wl,--no-as-needed")
endif()
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
include(external/boost)
if(WITH_GPU)
    find_package(CUDA REQUIRED)
    add_definitions("-DPADDLE_WITH_CUDA")
endif()
if(NOT WITH_STATIC_LIB)
    add_definitions("-DPADDLE_WITH_SHARED_LIB")
else()
    # PD_INFER_DECL is mainly used to set the dllimport/dllexport attribute in dynamic library mode.
    # Set it to empty in static library mode to avoid compilation issues.
    add_definitions("/DPD_INFER_DECL=")
endif()
# Rewrites /MD -> /MT in every MSVC config so the demo links the static CRT.
macro(safe_set_static_flag)
    foreach(flag_var
        CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE
        CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO)
      if(${flag_var} MATCHES "/MD")
        string(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}")
      endif(${flag_var} MATCHES "/MD")
    endforeach(flag_var)
endmacro()
# PADDLE_LIB (inference library root) and DEMO_NAME are mandatory cache args.
if(NOT DEFINED PADDLE_LIB)
    message(FATAL_ERROR "please set PADDLE_LIB with -DPADDLE_LIB=/path/paddle/lib")
endif()
if(NOT DEFINED DEMO_NAME)
    message(FATAL_ERROR "please set DEMO_NAME with -DDEMO_NAME=demo_name")
endif()
# Header and library search paths for Paddle and its bundled third parties.
include_directories("${PADDLE_LIB}/")
set(PADDLE_LIB_THIRD_PARTY_PATH "${PADDLE_LIB}/third_party/install/")
include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}protobuf/include")
include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}glog/include")
include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}gflags/include")
include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}xxhash/include")
include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}onnxruntime/include")
include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}paddle2onnx/include")
link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}protobuf/lib")
link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}glog/lib")
link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}gflags/lib")
link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}xxhash/lib")
link_directories("${PADDLE_LIB}/paddle/lib")
link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}onnxruntime/lib")
link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}paddle2onnx/lib")
if (WIN32)
    add_definitions("/DGOOGLE_GLOG_DLL_DECL=")
    option(MSVC_STATIC_CRT "use static C Runtime library by default" ON)
    if (MSVC_STATIC_CRT)
        if (WITH_MKL)
            set(FLAG_OPENMP "/openmp")
        endif()
        set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /bigobj /MTd ${FLAG_OPENMP}")
        set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} /bigobj /MT ${FLAG_OPENMP}")
        set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /bigobj /MTd ${FLAG_OPENMP}")
        set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /bigobj /MT ${FLAG_OPENMP}")
        safe_set_static_flag()
        if (WITH_STATIC_LIB)
            add_definitions(-DSTATIC_LIB)
        endif()
    endif()
else()
    if(WITH_MKL)
        set(FLAG_OPENMP "-fopenmp")
    endif()
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14 ${FLAG_OPENMP}")
endif()
if(WITH_GPU)
    if(NOT WIN32)
        set(CUDA_LIB "/usr/local/cuda/lib64/" CACHE STRING "CUDA Library")
    else()
        if(CUDA_LIB STREQUAL "")
            set(CUDA_LIB "C:\\Program\ Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v8.0\\lib\\x64")
        endif()
    endif(NOT WIN32)
endif()
# TensorRT must be pointed at explicitly via -DTENSORRT_ROOT=...
if (USE_TENSORRT AND WITH_GPU)
    set(TENSORRT_ROOT "" CACHE STRING "The root directory of TensorRT library")
    if("${TENSORRT_ROOT}" STREQUAL "")
        message(FATAL_ERROR "The TENSORRT_ROOT is empty, you must assign it a value with CMake command. Such as: -DTENSORRT_ROOT=TENSORRT_ROOT_PATH ")
    endif()
    set(TENSORRT_INCLUDE_DIR ${TENSORRT_ROOT}/include)
    set(TENSORRT_LIB_DIR ${TENSORRT_ROOT}/lib)
endif()
if (NOT WIN32)
    if (USE_TENSORRT AND WITH_GPU)
        include_directories("${TENSORRT_INCLUDE_DIR}")
        link_directories("${TENSORRT_LIB_DIR}")
    endif()
endif(NOT WIN32)
# Math backend: MKL (+ optional MKL-DNN) or OpenBLAS.
if(WITH_MKL)
    set(MATH_LIB_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}mklml")
    include_directories("${MATH_LIB_PATH}/include")
    if(WIN32)
        set(MATH_LIB ${MATH_LIB_PATH}/lib/mklml${CMAKE_STATIC_LIBRARY_SUFFIX}
            ${MATH_LIB_PATH}/lib/libiomp5md${CMAKE_STATIC_LIBRARY_SUFFIX})
    else()
        set(MATH_LIB ${MATH_LIB_PATH}/lib/libmklml_intel${CMAKE_SHARED_LIBRARY_SUFFIX}
            ${MATH_LIB_PATH}/lib/libiomp5${CMAKE_SHARED_LIBRARY_SUFFIX})
    endif()
    set(MKLDNN_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}mkldnn")
    if(EXISTS ${MKLDNN_PATH})
        include_directories("${MKLDNN_PATH}/include")
        if(WIN32)
            set(MKLDNN_LIB ${MKLDNN_PATH}/lib/mkldnn.lib)
        else(WIN32)
            set(MKLDNN_LIB ${MKLDNN_PATH}/lib/libmkldnn.so.0)
        endif(WIN32)
    endif()
else()
    set(OPENBLAS_LIB_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}openblas")
    include_directories("${OPENBLAS_LIB_PATH}/include/openblas")
    if(WIN32)
        set(MATH_LIB ${OPENBLAS_LIB_PATH}/lib/openblas${CMAKE_STATIC_LIBRARY_SUFFIX})
    else()
        set(MATH_LIB ${OPENBLAS_LIB_PATH}/lib/libopenblas${CMAKE_STATIC_LIBRARY_SUFFIX})
    endif()
endif()
# Select the static or shared Paddle inference library.
if(WITH_STATIC_LIB)
    set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX})
else()
    if(WIN32)
        set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX})
    else()
        set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_SHARED_LIBRARY_SUFFIX})
    endif()
endif()
if (NOT WIN32)
if (GCC_VERSION LESS "8.0")
set(EXTERNAL_LIB ${EXTERNAL_LIB} "-lssl -lcrypto -lz -lleveldb -lsnappy")
endif()
set(EXTERNAL_LIB ${EXTERNAL_LIB} "-lrt -ldl -lpthread")
set(DEPS ${DEPS}
${MATH_LIB} ${MKLDNN_LIB}
glog gflags protobuf xxhash
${EXTERNAL_LIB})
else()
set(DEPS ${DEPS}
${MATH_LIB} ${MKLDNN_LIB}
glog gflags_static libprotobuf xxhash ${EXTERNAL_LIB})
set(DEPS ${DEPS} shlwapi.lib)
endif(NOT WIN32)
if(WITH_GPU)
if(NOT WIN32)
if (USE_TENSORRT)
set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/libnvinfer${CMAKE_SHARED_LIBRARY_SUFFIX})
set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/libnvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX})
endif()
set(DEPS ${DEPS} ${CUDA_LIB}/libcudart${CMAKE_SHARED_LIBRARY_SUFFIX})
else()
if(USE_TENSORRT)
set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/nvinfer${CMAKE_STATIC_LIBRARY_SUFFIX})
set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/nvinfer_plugin${CMAKE_STATIC_LIBRARY_SUFFIX})
endif()
set(DEPS ${DEPS} ${CUDA_LIB}/cudart${CMAKE_STATIC_LIBRARY_SUFFIX} )
set(DEPS ${DEPS} ${CUDA_LIB}/cublas${CMAKE_STATIC_LIBRARY_SUFFIX} )
set(DEPS ${DEPS} ${CUDA_LIB}/cudnn${CMAKE_STATIC_LIBRARY_SUFFIX} )
endif()
endif()
cuda_add_library(pd_infer_custom_op ${CUSTOM_OPERATOR_FILES} SHARED)
add_executable(${DEMO_NAME} ${DEMO_NAME}.cc)
if (GCC_VERSION GREATER_EQUAL "8.0")
set(DEPS ${DEPS} libssl.a libcrypto.a libz.a libleveldb.a libsnappy.a)
endif()
set(DEPS ${DEPS} boost pd_infer_custom_op)# libssl.a libcrypto.a libz.a libleveldb.a libsnappy.a)
if(WIN32)
if(USE_TENSORRT)
add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/nvinfer${CMAKE_SHARED_LIBRARY_SUFFIX}
${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/nvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX}
${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
)
endif()
if(WITH_MKL)
add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/mklml.dll ${CMAKE_BINARY_DIR}/Release
COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/libiomp5md.dll ${CMAKE_BINARY_DIR}/Release
COMMAND ${CMAKE_COMMAND} -E copy ${MKLDNN_PATH}/lib/mkldnn.dll ${CMAKE_BINARY_DIR}/Release
)
else()
add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy ${OPENBLAS_LIB_PATH}/lib/openblas.dll ${CMAKE_BINARY_DIR}/Release
)
endif()
if(NOT WITH_STATIC_LIB)
add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy "${PADDLE_LIB}/paddle/lib/paddle_fluid.dll" ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
)
endif()
endif()
target_link_libraries(${DEMO_NAME} ${DEPS})
| 0
|
apollo_public_repos/apollo-model-centerpoint/deploy/voxel_rcnn
|
apollo_public_repos/apollo-model-centerpoint/deploy/voxel_rcnn/cpp/compile.sh
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Build the voxel_rcnn C++ inference demo plus its custom Paddle operators.
# The LIB_DIR / CUDA / cuDNN / TensorRT paths below are machine-specific and
# should be adjusted before running.

# Fail fast on any error. Without this, a failed `cd build` would let the
# `rm -rf *` below wipe the *current* directory instead of the build dir.
set -e

mkdir -p build
cd build
rm -rf *

# Build options passed through to CMake.
DEMO_NAME=main
WITH_MKL=ON
WITH_GPU=ON
USE_TENSORRT=OFF

# Machine-specific dependency locations.
LIB_DIR=/centerpoint/kaihuo/Paddle/build_develop/paddle_inference_install_dir
CUDNN_LIB=/usr/lib/x86_64-linux-gnu
CUDA_LIB=/usr/local/cuda/lib64
TENSORRT_ROOT=/centerpoint/two_three/Paddle/TensorRT-8.2.5.1

# Semicolon-separated list of custom-operator sources compiled by nvcc.
CUSTOM_OPERATOR_FILES="custom_ops/voxel/voxelize_op.cc;custom_ops/voxel/voxelize_op.cu;custom_ops/iou3d_nms/iou3d_cpu.cpp;custom_ops/iou3d_nms/iou3d_nms_api.cpp;custom_ops/iou3d_nms/iou3d_nms.cpp;custom_ops/iou3d_nms/iou3d_nms_kernel.cu;custom_ops/pointnet2/sampling_gpu.cu;custom_ops/pointnet2/sampling.cc;custom_ops/pointnet2/voxel_query_gpu.cu;custom_ops/pointnet2/voxel_query.cc;custom_ops/pointnet2/group_points.cc;custom_ops/pointnet2/group_points_gpu.cu"

cmake .. -DPADDLE_LIB=${LIB_DIR} \
  -DWITH_MKL=${WITH_MKL} \
  -DDEMO_NAME=${DEMO_NAME} \
  -DWITH_GPU=${WITH_GPU} \
  -DWITH_STATIC_LIB=OFF \
  -DUSE_TENSORRT=${USE_TENSORRT} \
  -DCUDNN_LIB=${CUDNN_LIB} \
  -DCUDA_LIB=${CUDA_LIB} \
  -DTENSORRT_ROOT=${TENSORRT_ROOT} \
  -DCUSTOM_OPERATOR_FILES=${CUSTOM_OPERATOR_FILES}
make -j
| 0
|
apollo_public_repos/apollo-model-centerpoint/deploy/voxel_rcnn/cpp/custom_ops
|
apollo_public_repos/apollo-model-centerpoint/deploy/voxel_rcnn/cpp/custom_ops/pointnet2/group_points_gpu.cu
|
/*
Stacked-batch-data version of point grouping, modified from the original
implementation of official PointNet++ codes. Written by Shaoshuai Shi All Rights
Reserved 2019-2020.
*/
#include "paddle/include/experimental/ext_all.h"
#define THREADS_PER_BLOCK 256
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
__global__ void group_points_grad_kernel_stack(
    int B, int M, int C, int N, int nsample, const float *grad_out,
    const int *idx, const int *idx_batch_cnt, const int *features_batch_cnt,
    float *grad_features) {
  // Backward of stacked group_points: scatter-add each grouped gradient
  // element back onto the feature row it was gathered from.
  // :param grad_out: (M1 + M2 ..., C, nsample) tensor of the gradients of the
  // output from forward :param idx: (M1 + M2 ..., nsample) tensor containing
  // the indicies of features to group with :param idx_batch_cnt: (batch_size)
  // [M1 + M2 ...] tensor containing the indicies of features to group with
  // :param features_batch_cnt: (batch_size) [N1 + N2 ...] tensor containing the
  // indicies of features to group with :return:
  // grad_features: (N1 + N2 ..., C) gradient of the features
  // One thread handles one (point, channel, sample) triple.
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int sample_idx = index % nsample;
  int C_idx = (index / nsample) % C;
  int pt_idx = (index / nsample / C);
  if (pt_idx >= M || C_idx >= C || sample_idx >= nsample) return;
  // Walk the per-batch counts to find which batch sample pt_idx belongs to.
  int bs_idx = 0, pt_cnt = idx_batch_cnt[0];
  for (int k = 1; k < B; k++) {
    if (pt_idx < pt_cnt) break;
    pt_cnt += idx_batch_cnt[k];
    bs_idx = k;
  }
  // Offset of this batch sample's first row in the stacked feature tensor.
  int features_batch_start_idx = 0;
  for (int k = 0; k < bs_idx; k++)
    features_batch_start_idx += features_batch_cnt[k];
  grad_out += pt_idx * C * nsample + C_idx * nsample + sample_idx;
  idx += pt_idx * nsample + sample_idx;
  grad_features += (features_batch_start_idx + idx[0]) * C + C_idx;
  // Several samples may reference the same feature row, so accumulate
  // atomically.
  atomicAdd(grad_features, grad_out[0]);
}
void group_points_grad_kernel_launcher_stack(int B, int M, int C, int N,
                                             int nsample, const float *grad_out,
                                             const int *idx,
                                             const int *idx_batch_cnt,
                                             const int *features_batch_cnt,
                                             float *grad_features) {
  // Host-side launcher for group_points_grad_kernel_stack: launches one
  // thread per output element (M * C * nsample in total).
  // :param grad_out: (M1 + M2 ..., C, nsample) tensor of the gradients of the
  // output from forward :param idx: (M1 + M2 ..., nsample) tensor containing
  // the indicies of features to group with :param idx_batch_cnt: (batch_size)
  // [M1 + M2 ...] tensor containing the indicies of features to group with
  // :param features_batch_cnt: (batch_size) [N1 + N2 ...] tensor containing the
  // indicies of features to group with :return:
  // grad_features: (N1 + N2 ..., C) gradient of the features
  cudaError_t err;
  // dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c, b); //
  // blockIdx.x(col), blockIdx.y(row)
  dim3 blocks(DIVUP(M * C * nsample,
                    THREADS_PER_BLOCK));  // blockIdx.x(col), blockIdx.y(row)
  dim3 threads(THREADS_PER_BLOCK);
  group_points_grad_kernel_stack<<<blocks, threads>>>(
      B, M, C, N, nsample, grad_out, idx, idx_batch_cnt, features_batch_cnt,
      grad_features);
  // Launch errors are fatal: report and abort the process.
  err = cudaGetLastError();
  if (cudaSuccess != err) {
    fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
    exit(-1);
  }
}
__global__ void group_points_kernel_stack(int B, int M, int C, int nsample,
                                          const float *features,
                                          const int *features_batch_cnt,
                                          const int *idx,
                                          const int *idx_batch_cnt,
                                          float *out) {
  // Forward of stacked group_points: gather each grouped neighbor's feature
  // value into the (M, C, nsample) output.
  // :param features: (N1 + N2 ..., C) tensor of features to group
  // :param features_batch_cnt: (batch_size) [N1 + N2 ...] tensor containing the
  // indicies of features to group with :param idx: (M1 + M2 ..., nsample)
  // tensor containing the indicies of features to group with :param
  // idx_batch_cnt: (batch_size) [M1 + M2 ...] tensor containing the indicies of
  // features to group with :return:
  // output: (M1 + M2, C, nsample) tensor
  // One thread handles one (point, channel, sample) triple.
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int sample_idx = index % nsample;
  int C_idx = (index / nsample) % C;
  int pt_idx = (index / nsample / C);
  if (pt_idx >= M || C_idx >= C || sample_idx >= nsample) return;
  // Walk the per-batch counts to find which batch sample pt_idx belongs to.
  int bs_idx = 0, pt_cnt = idx_batch_cnt[0];
  for (int k = 1; k < B; k++) {
    if (pt_idx < pt_cnt) break;
    pt_cnt += idx_batch_cnt[k];
    bs_idx = k;
  }
  // Offset of this batch sample's first row in the stacked feature tensor.
  int features_batch_start_idx = 0;
  for (int k = 0; k < bs_idx; k++)
    features_batch_start_idx += features_batch_cnt[k];
  features += features_batch_start_idx * C;
  idx += pt_idx * nsample + sample_idx;
  int in_idx = idx[0] * C + C_idx;
  int out_idx = pt_idx * C * nsample + C_idx * nsample + sample_idx;
  out[out_idx] = features[in_idx];
}
void group_points_kernel_launcher_stack(const int B, const int M, const int C,
                                        const int nsample,
                                        const float *features,
                                        const int *features_batch_cnt,
                                        const int *idx,
                                        const int *idx_batch_cnt, float *out) {
  // Host-side launcher for group_points_kernel_stack: one thread per output
  // element (M * C * nsample in total).
  // :param features: (N1 + N2 ..., C) tensor of features to group
  // :param features_batch_cnt: (batch_size) [N1 + N2 ...] tensor containing the
  // indicies of features to group with :param idx: (M1 + M2 ..., nsample)
  // tensor containing the indicies of features to group with :param
  // idx_batch_cnt: (batch_size) [M1 + M2 ...] tensor containing the indicies of
  // features to group with :return:
  // output: (M1 + M2, C, nsample) tensor
  cudaError_t err;
  dim3 blocks(DIVUP(M * C * nsample,
                    THREADS_PER_BLOCK));  // blockIdx.x(col), blockIdx.y(row)
  dim3 threads(THREADS_PER_BLOCK);
  group_points_kernel_stack<<<blocks, threads>>>(
      B, M, C, nsample, features, features_batch_cnt, idx, idx_batch_cnt, out);
  // cudaDeviceSynchronize(); // for using printf in kernel function
  // Launch errors are fatal: report and abort the process.
  err = cudaGetLastError();
  if (cudaSuccess != err) {
    fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
    exit(-1);
  }
}
| 0
|
apollo_public_repos/apollo-model-centerpoint/deploy/voxel_rcnn/cpp/custom_ops
|
apollo_public_repos/apollo-model-centerpoint/deploy/voxel_rcnn/cpp/custom_ops/pointnet2/sampling.cc
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <vector>
#include "paddle/include/experimental/ext_all.h"
#define CHECK_INPUT(x) PD_CHECK(x.is_gpu(), #x " must be a GPU Tensor.")
// cuda launcher declaration
void farthest_point_sampling_kernel_launcher(int b, int n, int m,
const float *dataset, float *temp,
int *idxs);
// op forward wrapper
std::vector<paddle::Tensor> farthest_point_sampling_cuda_forward(
    const paddle::Tensor &points_tensor, const int &npoints) {
  // Farthest-point sampling entry point.
  // points_tensor: (B, N, 3) GPU point coordinates.
  // Returns idx_tensor: (B, npoints) int32 indices of the selected points.
  const int batch_size = points_tensor.shape()[0];
  const int num_points = points_tensor.shape()[1];
  // Scratch buffer holding each point's running distance to the chosen set,
  // seeded with a huge value so the first comparison always updates it.
  auto dist_tensor = paddle::full({batch_size, num_points}, 1e10,
                                  paddle::DataType::FLOAT32,
                                  paddle::GPUPlace());
  auto idx_tensor = paddle::empty({batch_size, npoints},
                                  paddle::DataType::INT32, paddle::GPUPlace());
  const float *points_ptr = points_tensor.data<float>();
  float *dist_ptr = dist_tensor.data<float>();
  int *idx_ptr = idx_tensor.data<int>();
  farthest_point_sampling_kernel_launcher(batch_size, num_points, npoints,
                                          points_ptr, dist_ptr, idx_ptr);
  return {idx_tensor};
}
// shape infer
// Output shape of farthest_point_sample: a single (B, npoints) index tensor,
// where B is taken from the (B, N, 3) input point shape.
std::vector<std::vector<int64_t>> FPSInferShape(
    std::vector<int64_t> points_shape, const int &npoints) {
  const int64_t batch = points_shape[0];
  std::vector<int64_t> idx_shape{batch, npoints};
  return {idx_shape};
}
// dtype infer
// dtype infer: the sampled indices are always int32, independent of the
// input point dtype.
std::vector<paddle::DataType> FPSInferDtype(paddle::DataType points_dtype) {
  return {paddle::DataType::INT32};
}
// build op forward
// Register the farthest_point_sample custom op with Paddle: one input point
// tensor, one output index tensor, and the sample count as an int attribute.
PD_BUILD_OP(farthest_point_sample)
    .Inputs({"points_tensor"})
    .Outputs({"idx_tensor"})
    .Attrs({"npoints: int"})
    .SetKernelFn(PD_KERNEL(farthest_point_sampling_cuda_forward))
    .SetInferShapeFn(PD_INFER_SHAPE(FPSInferShape))
    .SetInferDtypeFn(PD_INFER_DTYPE(FPSInferDtype));
| 0
|
apollo_public_repos/apollo-model-centerpoint/deploy/voxel_rcnn/cpp/custom_ops
|
apollo_public_repos/apollo-model-centerpoint/deploy/voxel_rcnn/cpp/custom_ops/pointnet2/sampling_gpu.cu
|
#include <cmath>
#include "paddle/include/experimental/ext_all.h"
#define TOTAL_THREADS 1024
#define THREADS_PER_BLOCK 256
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
// Choose a power-of-two thread count for the FPS reduction over `work_size`
// elements: floor(log2(work_size)) as an exponent, clamped to
// [1, TOTAL_THREADS]. NOTE(review): uses CUDA's unqualified max/min; the
// floating-point log could be off by one near exact powers of two — confirm
// if exact block sizes matter.
inline int opt_n_threads(int work_size) {
  const int pow_2 = std::log(static_cast<double>(work_size)) / std::log(2.0);
  return max(min(1 << pow_2, TOTAL_THREADS), 1);
}
// Merge two (distance, index) slots of the shared-memory reduction buffers,
// leaving the larger distance (and the index of its point) in slot idx1.
__device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i,
                         int idx1, int idx2) {
  const float v1 = dists[idx1], v2 = dists[idx2];
  const int i1 = dists_i[idx1], i2 = dists_i[idx2];
  dists[idx1] = max(v1, v2);
  dists_i[idx1] = v2 > v1 ? i2 : i1;
}
template <unsigned int block_size>
__global__ void farthest_point_sampling_kernel(
    int b, int n, int m, const float *__restrict__ dataset,
    float *__restrict__ temp, int *__restrict__ idxs) {
  // Iterative farthest point sampling. One block handles one batch sample;
  // each of the m iterations selects the point with the largest minimum
  // distance to the set of already-selected points.
  // dataset: (B, N, 3)
  // tmp: (B, N)   running min-distance of each point to the selected set
  // output:
  //      idx: (B, M)
  if (m <= 0) return;
  __shared__ float dists[block_size];
  __shared__ int dists_i[block_size];
  int batch_index = blockIdx.x;
  dataset += batch_index * n * 3;
  temp += batch_index * n;
  idxs += batch_index * m;
  int tid = threadIdx.x;
  const int stride = block_size;
  // Point 0 is always taken as the first sample.
  int old = 0;
  if (threadIdx.x == 0) idxs[0] = old;
  __syncthreads();
  for (int j = 1; j < m; j++) {
    // Each thread scans a strided slice of the points, updating temp[] with
    // the distance to the newest sample and tracking its local best.
    int besti = 0;
    float best = -1;
    float x1 = dataset[old * 3 + 0];
    float y1 = dataset[old * 3 + 1];
    float z1 = dataset[old * 3 + 2];
    for (int k = tid; k < n; k += stride) {
      float x2, y2, z2;
      x2 = dataset[k * 3 + 0];
      y2 = dataset[k * 3 + 1];
      z2 = dataset[k * 3 + 2];
      float d =
          (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1);
      float d2 = min(d, temp[k]);
      temp[k] = d2;
      besti = d2 > best ? k : besti;
      best = d2 > best ? d2 : best;
    }
    dists[tid] = best;
    dists_i[tid] = besti;
    __syncthreads();
    // Shared-memory tree reduction (argmax over distance), unrolled on the
    // compile-time block_size.
    if (block_size >= 1024) {
      if (tid < 512) {
        __update(dists, dists_i, tid, tid + 512);
      }
      __syncthreads();
    }
    if (block_size >= 512) {
      if (tid < 256) {
        __update(dists, dists_i, tid, tid + 256);
      }
      __syncthreads();
    }
    if (block_size >= 256) {
      if (tid < 128) {
        __update(dists, dists_i, tid, tid + 128);
      }
      __syncthreads();
    }
    if (block_size >= 128) {
      if (tid < 64) {
        __update(dists, dists_i, tid, tid + 64);
      }
      __syncthreads();
    }
    if (block_size >= 64) {
      if (tid < 32) {
        __update(dists, dists_i, tid, tid + 32);
      }
      __syncthreads();
    }
    if (block_size >= 32) {
      if (tid < 16) {
        __update(dists, dists_i, tid, tid + 16);
      }
      __syncthreads();
    }
    if (block_size >= 16) {
      if (tid < 8) {
        __update(dists, dists_i, tid, tid + 8);
      }
      __syncthreads();
    }
    if (block_size >= 8) {
      if (tid < 4) {
        __update(dists, dists_i, tid, tid + 4);
      }
      __syncthreads();
    }
    if (block_size >= 4) {
      if (tid < 2) {
        __update(dists, dists_i, tid, tid + 2);
      }
      __syncthreads();
    }
    if (block_size >= 2) {
      if (tid < 1) {
        __update(dists, dists_i, tid, tid + 1);
      }
      __syncthreads();
    }
    // NOTE(review): all threads read dists_i[0] without a trailing
    // __syncthreads(); a fast thread could start overwriting the shared
    // buffers in the next iteration before slower threads read. This mirrors
    // the upstream PointNet++ implementation — confirm before changing.
    old = dists_i[0];
    if (tid == 0) idxs[j] = old;
  }
}
void farthest_point_sampling_kernel_launcher(int b, int n, int m,
                                             const float *dataset, float *temp,
                                             int *idxs) {
  // Host-side launcher: one block per batch sample, with the block size
  // chosen from n and dispatched to the matching template instantiation
  // (the kernel's shared buffers are sized at compile time).
  // dataset: (B, N, 3)
  // tmp: (B, N)
  // output:
  //      idx: (B, M)
  cudaError_t err;
  unsigned int n_threads = opt_n_threads(n);
  switch (n_threads) {
    case 1024:
      farthest_point_sampling_kernel<1024>
          <<<b, n_threads>>>(b, n, m, dataset, temp, idxs);
      break;
    case 512:
      farthest_point_sampling_kernel<512>
          <<<b, n_threads>>>(b, n, m, dataset, temp, idxs);
      break;
    case 256:
      farthest_point_sampling_kernel<256>
          <<<b, n_threads>>>(b, n, m, dataset, temp, idxs);
      break;
    case 128:
      farthest_point_sampling_kernel<128>
          <<<b, n_threads>>>(b, n, m, dataset, temp, idxs);
      break;
    case 64:
      farthest_point_sampling_kernel<64>
          <<<b, n_threads>>>(b, n, m, dataset, temp, idxs);
      break;
    case 32:
      farthest_point_sampling_kernel<32>
          <<<b, n_threads>>>(b, n, m, dataset, temp, idxs);
      break;
    case 16:
      farthest_point_sampling_kernel<16>
          <<<b, n_threads>>>(b, n, m, dataset, temp, idxs);
      break;
    case 8:
      farthest_point_sampling_kernel<8>
          <<<b, n_threads>>>(b, n, m, dataset, temp, idxs);
      break;
    case 4:
      farthest_point_sampling_kernel<4>
          <<<b, n_threads>>>(b, n, m, dataset, temp, idxs);
      break;
    case 2:
      farthest_point_sampling_kernel<2>
          <<<b, n_threads>>>(b, n, m, dataset, temp, idxs);
      break;
    case 1:
      farthest_point_sampling_kernel<1>
          <<<b, n_threads>>>(b, n, m, dataset, temp, idxs);
      break;
    default:
      // opt_n_threads only returns powers of two <= 1024, so this branch is
      // not expected to be reached.
      farthest_point_sampling_kernel<512>
          <<<b, n_threads>>>(b, n, m, dataset, temp, idxs);
  }
  // Launch errors are fatal: report and abort the process.
  err = cudaGetLastError();
  if (cudaSuccess != err) {
    fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
    exit(-1);
  }
}
| 0
|
apollo_public_repos/apollo-model-centerpoint/deploy/voxel_rcnn/cpp/custom_ops
|
apollo_public_repos/apollo-model-centerpoint/deploy/voxel_rcnn/cpp/custom_ops/pointnet2/voxel_query.cc
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <vector>
#include "paddle/include/experimental/ext_all.h"
#define CHECK_INPUT(x) PD_CHECK(x.is_gpu(), #x " must be a GPU Tensor.")
void voxel_query_kernel_launcher_stack(int M, int R1, int R2, int R3,
int nsample, float radius, int z_range,
int y_range, int x_range,
const float *new_xyz, const float *xyz,
const int *new_coords,
const int *point_indices, int *idx);
std::vector<paddle::Tensor> voxel_query_wrapper_stack(
    const paddle::Tensor &new_xyz_tensor, const paddle::Tensor &xyz_tensor,
    const paddle::Tensor &new_coords_tensor,
    const paddle::Tensor &point_indices_tensor, const float radius,
    const int nsample, const int z_range, const int y_range,
    const int x_range) {
  // Host wrapper for voxel_query: validates the GPU inputs, allocates the
  // zero-filled (M, nsample) index output and launches the CUDA kernel.
  CHECK_INPUT(new_coords_tensor);
  CHECK_INPUT(point_indices_tensor);
  CHECK_INPUT(new_xyz_tensor);
  CHECK_INPUT(xyz_tensor);
  const float *new_xyz = new_xyz_tensor.data<float>();
  const float *xyz = xyz_tensor.data<float>();
  const int *new_coords = new_coords_tensor.data<int>();
  const int *point_indices = point_indices_tensor.data<int>();
  // M query centers; point_indices is a dense (B, Z, Y, X) voxel->point map.
  const int M = new_coords_tensor.shape()[0];
  const int B = point_indices_tensor.shape()[0];  // NOTE(review): unused below
  const int Z = point_indices_tensor.shape()[1];
  const int Y = point_indices_tensor.shape()[2];
  const int X = point_indices_tensor.shape()[3];
  auto idx_tensor = paddle::full({M, nsample}, 0, paddle::DataType::INT32,
                                 paddle::GPUPlace());
  int *idx = idx_tensor.data<int>();
  voxel_query_kernel_launcher_stack(M, Z, Y, X, nsample, radius, z_range,
                                    y_range, x_range, new_xyz, xyz, new_coords,
                                    point_indices, idx);
  return {idx_tensor};
}
// shape infer
// shape infer
// voxel_query produces one (M, nsample) index tensor, where M is the number
// of query centers in new_coords.
std::vector<std::vector<int64_t>> VoxelQueryInferShape(
    std::vector<int64_t> new_xyz_shape, std::vector<int64_t> xyz_shape,
    std::vector<int64_t> new_coords_shape,
    std::vector<int64_t> point_indices_shape, const float radius,
    const int nsample, const int z_range, const int y_range,
    const int x_range) {
  const int64_t num_queries = new_coords_shape[0];
  std::vector<std::vector<int64_t>> out_shapes;
  out_shapes.push_back({num_queries, nsample});
  return out_shapes;
}
// data type infer
// data type infer: the neighbor indices are always int32, independent of the
// coordinate dtypes.
std::vector<paddle::DataType> VoxelQueryInferDtype(
    paddle::DataType new_xyz_type, paddle::DataType xyz_type,
    paddle::DataType new_coords_type, paddle::DataType point_indices_type) {
  return {paddle::DataType::INT32};
}
// build forward op
// Register the voxel_query_wrapper custom op: four GPU tensor inputs, one
// index tensor output, and the query geometry as attributes.
PD_BUILD_OP(voxel_query_wrapper)
    .Inputs({"new_xyz_tensor", "xyz_tensor", "new_coords_tensor",
             "point_indices_tensor"})
    .Outputs({"idx_tensor"})
    .Attrs({"radius: float", "nsample: int", "z_range: int", "y_range: int",
            "x_range: int"})
    .SetKernelFn(PD_KERNEL(voxel_query_wrapper_stack))
    .SetInferShapeFn(PD_INFER_SHAPE(VoxelQueryInferShape))
    .SetInferDtypeFn(PD_INFER_DTYPE(VoxelQueryInferDtype));
| 0
|
apollo_public_repos/apollo-model-centerpoint/deploy/voxel_rcnn/cpp/custom_ops
|
apollo_public_repos/apollo-model-centerpoint/deploy/voxel_rcnn/cpp/custom_ops/pointnet2/voxel_query_gpu.cu
|
#include <curand_kernel.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "paddle/include/experimental/ext_all.h"
#define THREADS_PER_BLOCK 256
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
__global__ void voxel_query_kernel_stack(int M, int R1, int R2, int R3,
                                         int nsample, float radius, int z_range,
                                         int y_range, int x_range,
                                         const float *new_xyz, const float *xyz,
                                         const int *new_coords,
                                         const int *point_indices, int *idx) {
  // For each query center, scan the surrounding voxel neighborhood
  // (+/- z/y/x_range cells) and collect up to nsample point indices whose
  // coordinates lie within `radius` of the center.
  // :param new_coords: (M1 + M2 ..., 4) centers of the ball query
  // :param point_indices: (B, Z, Y, X)
  // output:
  //      idx: (M1 + M2, nsample)
  // One thread per query center.
  int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (pt_idx >= M) return;
  new_xyz += pt_idx * 3;
  new_coords += pt_idx * 4;
  idx += pt_idx * nsample;
  // NOTE(review): the curand state is only needed by the commented-out
  // reservoir-sampling branch below; today it is initialized but unused.
  curandState state;
  curand_init(pt_idx, 0, 0, &state);
  float radius2 = radius * radius;
  float new_x = new_xyz[0];
  float new_y = new_xyz[1];
  float new_z = new_xyz[2];
  // new_coords rows are (batch, z, y, x).
  int batch_idx = new_coords[0];
  int new_coords_z = new_coords[1];
  int new_coords_y = new_coords[2];
  int new_coords_x = new_coords[3];
  int cnt = 0;
  int cnt2 = 0;  // total in-radius hits; only used by the disabled branch
  // for (int dz = -1*z_range; dz <= z_range; ++dz) {
  for (int dz = -1 * z_range; dz <= z_range; ++dz) {
    int z_coord = new_coords_z + dz;
    if (z_coord < 0 || z_coord >= R1) continue;
    for (int dy = -1 * y_range; dy <= y_range; ++dy) {
      int y_coord = new_coords_y + dy;
      if (y_coord < 0 || y_coord >= R2) continue;
      for (int dx = -1 * x_range; dx <= x_range; ++dx) {
        int x_coord = new_coords_x + dx;
        if (x_coord < 0 || x_coord >= R3) continue;
        int index = batch_idx * R1 * R2 * R3 + z_coord * R2 * R3 +
                    y_coord * R3 + x_coord;
        int neighbor_idx = point_indices[index];
        // Negative entries mean the voxel holds no point.
        if (neighbor_idx < 0) continue;
        float x_per = xyz[neighbor_idx * 3 + 0];
        float y_per = xyz[neighbor_idx * 3 + 1];
        float z_per = xyz[neighbor_idx * 3 + 2];
        float dist2 = (x_per - new_x) * (x_per - new_x) +
                      (y_per - new_y) * (y_per - new_y) +
                      (z_per - new_z) * (z_per - new_z);
        if (dist2 > radius2) continue;
        ++cnt2;
        if (cnt < nsample) {
          // Pre-fill the whole row with the first hit so that unused slots
          // still contain a valid point index.
          if (cnt == 0) {
            for (int l = 0; l < nsample; ++l) {
              idx[l] = neighbor_idx;
            }
          }
          idx[cnt] = neighbor_idx;
          ++cnt;
        }
        // else {
        //  float rnd = curand_uniform(&state);
        //  if (rnd < (float(nsample) / cnt2)) {
        //    int insertidx = ceilf(curand_uniform(&state) * nsample) - 1;
        //    idx[insertidx] = neighbor_idx;
        //  }
        // }
      }
    }
  }
  // Sentinel when no neighbor fell within the radius.
  if (cnt == 0) idx[0] = -1;
}
void voxel_query_kernel_launcher_stack(int M, int R1, int R2, int R3,
                                       int nsample, float radius, int z_range,
                                       int y_range, int x_range,
                                       const float *new_xyz, const float *xyz,
                                       const int *new_coords,
                                       const int *point_indices, int *idx) {
  // Host-side launcher for voxel_query_kernel_stack: one thread per query
  // center (M threads in total).
  // :param new_coords: (M1 + M2 ..., 4) centers of the voxel query
  // :param point_indices: (B, Z, Y, X)
  // output:
  //      idx: (M1 + M2, nsample)
  cudaError_t err;
  dim3 blocks(DIVUP(M, THREADS_PER_BLOCK));  // blockIdx.x(col), blockIdx.y(row)
  dim3 threads(THREADS_PER_BLOCK);
  voxel_query_kernel_stack<<<blocks, threads>>>(
      M, R1, R2, R3, nsample, radius, z_range, y_range, x_range, new_xyz, xyz,
      new_coords, point_indices, idx);
  // cudaDeviceSynchronize(); // for using printf in kernel function
  // Launch errors are fatal: report and abort the process.
  err = cudaGetLastError();
  if (cudaSuccess != err) {
    fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
    exit(-1);
  }
}
| 0
|
apollo_public_repos/apollo-model-centerpoint/deploy/voxel_rcnn/cpp/custom_ops
|
apollo_public_repos/apollo-model-centerpoint/deploy/voxel_rcnn/cpp/custom_ops/pointnet2/group_points.cc
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <vector>
#include "paddle/include/experimental/ext_all.h"
#define CHECK_INPUT(x) PD_CHECK(x.is_gpu(), #x " must be a GPU Tensor.")
// cuda launcher declaration
void group_points_kernel_launcher_stack(const int B, const int M, const int C,
const int nsample,
const float *features,
const int *features_batch_cnt,
const int *idx,
const int *idx_batch_cnt, float *out);
void group_points_grad_kernel_launcher_stack(
const int B, const int M, const int C, const int N, const int nsample,
const float *grad_out, const int *idx, const int *idx_batch_cnt,
const int *features_batch_cnt, float *grad_features);
// op forward wrapper
// Forward wrapper for stacked group_points: gathers the feature rows listed
// in idx_tensor into a freshly allocated (M, C, nsample) GPU tensor.
std::vector<paddle::Tensor> group_points_cuda_forward(
    const paddle::Tensor &features_tensor,
    const paddle::Tensor &features_batch_cnt_tensor,
    const paddle::Tensor &idx_tensor,
    const paddle::Tensor &idx_batch_cnt_tensor) {
  CHECK_INPUT(features_tensor);
  CHECK_INPUT(features_batch_cnt_tensor);
  CHECK_INPUT(idx_tensor);
  CHECK_INPUT(idx_batch_cnt_tensor);
  // Sizes: idx is (M, nsample), features is (N, C), idx_batch_cnt is (B,).
  const int num_groups = idx_tensor.shape()[0];
  const int group_size = idx_tensor.shape()[1];
  const int num_features = features_tensor.shape()[0];
  const int num_channels = features_tensor.shape()[1];
  const int batch_size = idx_batch_cnt_tensor.shape()[0];
  (void)num_features;  // shape bookkeeping only; the forward kernel needs no N
  auto out_tensor =
      paddle::empty({num_groups, num_channels, group_size},
                    paddle::DataType::FLOAT32, paddle::GPUPlace());
  group_points_kernel_launcher_stack(
      batch_size, num_groups, num_channels, group_size,
      features_tensor.data<float>(), features_batch_cnt_tensor.data<int>(),
      idx_tensor.data<int>(), idx_batch_cnt_tensor.data<int>(),
      out_tensor.data<float>());
  return {out_tensor};
}
// op backward wrapper
// Backward wrapper for stacked group_points: scatter-adds grad_out
// (M, C, nsample) back into a zero-initialized (N, C) gradient tensor.
std::vector<paddle::Tensor> group_points_cuda_backward(
    const paddle::Tensor &grad_out_tensor,
    const paddle::Tensor &features_tensor,
    const paddle::Tensor &features_batch_cnt_tensor,
    const paddle::Tensor &idx_tensor,
    const paddle::Tensor &idx_batch_cnt_tensor) {
  CHECK_INPUT(grad_out_tensor);
  CHECK_INPUT(features_tensor);
  CHECK_INPUT(features_batch_cnt_tensor);
  CHECK_INPUT(idx_tensor);
  CHECK_INPUT(idx_batch_cnt_tensor);
  // Sizes: idx is (M, nsample), features is (N, C), idx_batch_cnt is (B,).
  const int num_groups = idx_tensor.shape()[0];
  const int group_size = idx_tensor.shape()[1];
  const int num_features = features_tensor.shape()[0];
  const int num_channels = features_tensor.shape()[1];
  const int batch_size = idx_batch_cnt_tensor.shape()[0];
  // The kernel accumulates with atomicAdd, so the buffer must start at zero.
  auto grad_features_tensor =
      paddle::full({num_features, num_channels}, 0., paddle::DataType::FLOAT32,
                   paddle::GPUPlace());
  group_points_grad_kernel_launcher_stack(
      batch_size, num_groups, num_channels, num_features, group_size,
      grad_out_tensor.data<float>(), idx_tensor.data<int>(),
      idx_batch_cnt_tensor.data<int>(), features_batch_cnt_tensor.data<int>(),
      grad_features_tensor.data<float>());
  return {grad_features_tensor};
}
// shape infer
// shape infer
// Output shape of grouping_operation: (M, C, nsample), with M and nsample
// taken from idx_shape and C from features_shape.
std::vector<std::vector<int64_t>> GroupInferShape(
    std::vector<int64_t> features_shape,
    std::vector<int64_t> features_batch_cnt_shape,  // fixed "_shapeshape" typo
    std::vector<int64_t> idx_shape, std::vector<int64_t> idx_batch_cnt_shape) {
  // Keep int64_t throughout: shape entries are 64-bit and narrowing to int
  // could silently overflow for very large tensors.
  const int64_t m = idx_shape[0];
  const int64_t nsample = idx_shape[1];
  const int64_t c = features_shape[1];
  return {{m, c, nsample}};
}
// data type infer
// data type infer: the grouped output keeps the dtype of the input features.
std::vector<paddle::DataType> GroupInferDtype(
    paddle::DataType features_dtype, paddle::DataType features_batch_cnt_dtype,
    paddle::DataType idx_dtype, paddle::DataType idx_batch_cnt_dtype) {
  return {features_dtype};
}
// build forward op
// Register the grouping_operation forward op: four GPU tensor inputs and a
// single grouped-feature output.
PD_BUILD_OP(grouping_operation)
    .Inputs({"features_tensor", "features_batch_cnt_tensor", "idx_tensor",
             "idx_batch_cnt_tensor"})
    .Outputs({"out_tensor"})
    .SetKernelFn(PD_KERNEL(group_points_cuda_forward))
    .SetInferShapeFn(PD_INFER_SHAPE(GroupInferShape))
    .SetInferDtypeFn(PD_INFER_DTYPE(GroupInferDtype));
// build backward op
// Register the matching gradient op: takes the upstream gradient of
// out_tensor plus the forward inputs, and produces grad(features_tensor).
PD_BUILD_GRAD_OP(grouping_operation)
    .Inputs({paddle::Grad("out_tensor"), "features_tensor",
             "features_batch_cnt_tensor", "idx_tensor", "idx_batch_cnt_tensor"})
    .Outputs({paddle::Grad("features_tensor")})
    .SetKernelFn(PD_KERNEL(group_points_cuda_backward));
| 0
|
apollo_public_repos/apollo-model-centerpoint/deploy/voxel_rcnn/cpp/custom_ops
|
apollo_public_repos/apollo-model-centerpoint/deploy/voxel_rcnn/cpp/custom_ops/voxel/voxelize_op.cc
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <vector>
#include "paddle/include/experimental/ext_all.h"
// Scatter points into fixed-capacity voxels on the CPU.
//
// points:                (num_points, num_point_dim), first three columns are
//                        x/y/z coordinates.
// voxels:                (max_voxels, max_num_points_in_voxel, num_point_dim),
//                        zero-filled here before use.
// coords:                (max_voxels, 3) voxel coordinates stored as (z, y, x).
// num_points_per_voxel:  (max_voxels,), expected zero-initialized by caller.
// grid_idx_to_voxel_idx: dense (grid_z * grid_y * grid_x) lookup table,
//                        expected filled with -1 by the caller.
// num_voxels:            single-element output, number of voxels produced.
// Returns true unconditionally (kept for API symmetry with the GPU path).
template <typename T, typename T_int>
bool hard_voxelize_cpu_kernel(
    const T *points, const float point_cloud_range_x_min,
    const float point_cloud_range_y_min, const float point_cloud_range_z_min,
    const float voxel_size_x, const float voxel_size_y,
    const float voxel_size_z, const int grid_size_x, const int grid_size_y,
    const int grid_size_z, const int64_t num_points, const int num_point_dim,
    const int max_num_points_in_voxel, const int max_voxels, T *voxels,
    T_int *coords, T_int *num_points_per_voxel, T_int *grid_idx_to_voxel_idx,
    T_int *num_voxels) {
  std::fill(voxels,
            voxels + max_voxels * max_num_points_in_voxel * num_point_dim,
            static_cast<T>(0));
  num_voxels[0] = 0;
  for (int point_idx = 0; point_idx < num_points; ++point_idx) {
    const T *point = points + point_idx * num_point_dim;
    // Map the point onto integer grid coordinates.
    const int cx = floor((point[0] - point_cloud_range_x_min) / voxel_size_x);
    const int cy = floor((point[1] - point_cloud_range_y_min) / voxel_size_y);
    const int cz = floor((point[2] - point_cloud_range_z_min) / voxel_size_z);
    // Discard points that fall outside the grid on any axis.
    if (cx < 0 || cx >= grid_size_x || cy < 0 || cy >= grid_size_y ||
        cz < 0 || cz >= grid_size_z) {
      continue;
    }
    const int grid_idx =
        cz * grid_size_y * grid_size_x + cy * grid_size_x + cx;
    int voxel_idx = grid_idx_to_voxel_idx[grid_idx];
    if (voxel_idx == -1) {
      // First point in this cell: allocate a new voxel unless we are full.
      if (num_voxels[0] >= max_voxels) {
        continue;
      }
      voxel_idx = num_voxels[0]++;
      grid_idx_to_voxel_idx[grid_idx] = voxel_idx;
      coords[voxel_idx * 3 + 0] = cz;
      coords[voxel_idx * 3 + 1] = cy;
      coords[voxel_idx * 3 + 2] = cx;
    }
    // Append the point if the voxel still has a free slot; extras are dropped.
    const int slot = num_points_per_voxel[voxel_idx];
    if (slot < max_num_points_in_voxel) {
      T *dst = voxels + voxel_idx * max_num_points_in_voxel * num_point_dim +
               slot * num_point_dim;
      for (int j = 0; j < num_point_dim; ++j) {
        dst[j] = point[j];
      }
      num_points_per_voxel[voxel_idx] = slot + 1;
    }
  }
  return true;
}
std::vector<paddle::Tensor> hard_voxelize_cpu(
    const paddle::Tensor &points, const std::vector<float> &voxel_size,
    const std::vector<float> &point_cloud_range,
    const int max_num_points_in_voxel, const int max_voxels) {
  // CPU entry point for hard voxelization.
  // points: (num_points, num_point_dim) with x/y/z in the first three columns.
  // Returns {voxels, coords, num_points_per_voxel, num_voxels}.
  auto num_points = points.shape()[0];
  auto num_point_dim = points.shape()[1];
  const float voxel_size_x = voxel_size[0];
  const float voxel_size_y = voxel_size[1];
  const float voxel_size_z = voxel_size[2];
  const float point_cloud_range_x_min = point_cloud_range[0];
  const float point_cloud_range_y_min = point_cloud_range[1];
  const float point_cloud_range_z_min = point_cloud_range[2];
  // Grid extent per axis, derived from the cloud range and voxel size.
  int grid_size_x = static_cast<int>(
      round((point_cloud_range[3] - point_cloud_range[0]) / voxel_size_x));
  int grid_size_y = static_cast<int>(
      round((point_cloud_range[4] - point_cloud_range[1]) / voxel_size_y));
  int grid_size_z = static_cast<int>(
      round((point_cloud_range[5] - point_cloud_range[2]) / voxel_size_z));
  // `voxels` is zero-filled inside the kernel; the counters are zeroed here.
  auto voxels =
      paddle::empty({max_voxels, max_num_points_in_voxel, num_point_dim},
                    paddle::DataType::FLOAT32, paddle::CPUPlace());
  auto coords = paddle::full({max_voxels, 3}, 0, paddle::DataType::INT32,
                             paddle::CPUPlace());
  auto *coords_data = coords.data<int>();
  auto num_points_per_voxel = paddle::full(
      {max_voxels}, 0, paddle::DataType::INT32, paddle::CPUPlace());
  auto *num_points_per_voxel_data = num_points_per_voxel.data<int>();
  std::fill(num_points_per_voxel_data,
            num_points_per_voxel_data + num_points_per_voxel.size(),
            static_cast<int>(0));
  auto num_voxels =
      paddle::full({1}, 0, paddle::DataType::INT32, paddle::CPUPlace());
  auto *num_voxels_data = num_voxels.data<int>();
  // Dense grid->voxel lookup table; -1 marks "no voxel allocated yet".
  auto grid_idx_to_voxel_idx =
      paddle::full({grid_size_z, grid_size_y, grid_size_x}, -1,
                   paddle::DataType::INT32, paddle::CPUPlace());
  auto *grid_idx_to_voxel_idx_data = grid_idx_to_voxel_idx.data<int>();
  // Dispatch on the point dtype (float/double) to the templated CPU kernel.
  PD_DISPATCH_FLOATING_TYPES(
      points.type(), "hard_voxelize_cpu_kernel", ([&] {
        hard_voxelize_cpu_kernel<data_t, int>(
            points.data<data_t>(), point_cloud_range_x_min,
            point_cloud_range_y_min, point_cloud_range_z_min, voxel_size_x,
            voxel_size_y, voxel_size_z, grid_size_x, grid_size_y, grid_size_z,
            num_points, num_point_dim, max_num_points_in_voxel, max_voxels,
            voxels.data<data_t>(), coords_data, num_points_per_voxel_data,
            grid_idx_to_voxel_idx_data, num_voxels_data);
      }));
  return {voxels, coords, num_points_per_voxel, num_voxels};
}
#ifdef PADDLE_WITH_CUDA
std::vector<paddle::Tensor> hard_voxelize_cuda(
const paddle::Tensor &points, const std::vector<float> &voxel_size,
const std::vector<float> &point_cloud_range, int max_num_points_in_voxel,
int max_voxels);
#endif
// Device dispatcher for the hard_voxelize op: routes to the CPU or (when
// compiled with PADDLE_WITH_CUDA) the CUDA implementation based on where
// `points` lives; any other placement raises.
std::vector<paddle::Tensor> hard_voxelize(
    const paddle::Tensor &points, const std::vector<float> &voxel_size,
    const std::vector<float> &point_cloud_range,
    const int max_num_points_in_voxel, const int max_voxels) {
  if (points.is_cpu()) {
    return hard_voxelize_cpu(points, voxel_size, point_cloud_range,
                             max_num_points_in_voxel, max_voxels);
#ifdef PADDLE_WITH_CUDA
  } else if (points.is_gpu() || points.is_gpu_pinned()) {
    return hard_voxelize_cuda(points, voxel_size, point_cloud_range,
                              max_num_points_in_voxel, max_voxels);
#endif
  } else {
    // Reached for GPU tensors in a CPU-only build as well as exotic places.
    PD_THROW(
        "Unsupported device type for hard_voxelize "
        "operator.");
  }
}
// Static shape inference for hard_voxelize: reports the shapes of the four
// outputs (VOXELS, COORS, NUM_POINTS_PER_VOXEL, num_voxels) given the points
// shape and the op attributes. voxel_size / point_cloud_range do not affect
// the static shapes but are part of the registered attribute list.
std::vector<std::vector<int64_t>> HardInferShape(
    std::vector<int64_t> points_shape, const std::vector<float> &voxel_size,
    const std::vector<float> &point_cloud_range,
    const int &max_num_points_in_voxel, const int &max_voxels) {
  const int64_t point_dim = points_shape[1];
  std::vector<std::vector<int64_t>> output_shapes;
  output_shapes.push_back({max_voxels, max_num_points_in_voxel, point_dim});
  output_shapes.push_back({max_voxels, 3});
  output_shapes.push_back({max_voxels});
  output_shapes.push_back({1});
  return output_shapes;
}
// Dtype inference for hard_voxelize: VOXELS inherits the points dtype, the
// three index/count outputs are int32.
std::vector<paddle::DataType> HardInferDtype(paddle::DataType points_dtype) {
  std::vector<paddle::DataType> dtypes;
  dtypes.reserve(4);
  dtypes.push_back(points_dtype);             // VOXELS
  dtypes.push_back(paddle::DataType::INT32);  // COORS
  dtypes.push_back(paddle::DataType::INT32);  // NUM_POINTS_PER_VOXEL
  dtypes.push_back(paddle::DataType::INT32);  // num_voxels
  return dtypes;
}
// Registers the custom op `hard_voxelize` with its kernel, attributes, and
// shape/dtype inference functions.
// NOTE(review): the first three output names are upper-case while the last is
// lower-case "num_voxels"; renaming would break existing consumers, so the
// inconsistency is kept as-is.
PD_BUILD_OP(hard_voxelize)
    .Inputs({"POINTS"})
    .Outputs({"VOXELS", "COORS", "NUM_POINTS_PER_VOXEL", "num_voxels"})
    .SetKernelFn(PD_KERNEL(hard_voxelize))
    .Attrs({"voxel_size: std::vector<float>",
            "point_cloud_range: std::vector<float>",
            "max_num_points_in_voxel: int", "max_voxels: int"})
    .SetInferShapeFn(PD_INFER_SHAPE(HardInferShape))
    .SetInferDtypeFn(PD_INFER_DTYPE(HardInferDtype));
| 0
|
apollo_public_repos/apollo-model-centerpoint/deploy/voxel_rcnn/cpp/custom_ops
|
apollo_public_repos/apollo-model-centerpoint/deploy/voxel_rcnn/cpp/custom_ops/voxel/voxelize_op.cu
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/include/experimental/ext_all.h"
#define CHECK_INPUT_CUDA(x) \
PD_CHECK(x.is_gpu() || x.is_gpu_pinned(), #x " must be a GPU Tensor.")
#define CUDA_KERNEL_LOOP(i, n) \
for (auto i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
// CUDA kernel, one thread per point: computes the voxel-grid cell each point
// falls into, counts points per cell atomically, and records for every kept
// point its grid index and its slot inside the voxel. `points_valid` keeps,
// per cell, the smallest index of any point mapped to it so a unique
// representative point per non-empty cell can be identified later.
// Fix: the awkward `(x > n || x == n)` comparisons are simplified to the
// equivalent idiomatic `x >= n`.
template <typename T, typename T_int>
__global__ void map_point_to_grid_kernel(
    const T *points, const float point_cloud_range_x_min,
    const float point_cloud_range_y_min, const float point_cloud_range_z_min,
    const float voxel_size_x, const float voxel_size_y,
    const float voxel_size_z, const int grid_size_x, const int grid_size_y,
    const int grid_size_z, const int64_t num_points, const int num_point_dim,
    const int max_num_points_in_voxel, T_int *points_to_grid_idx,
    T_int *points_to_num_idx, T_int *num_points_in_grid, int *points_valid) {
  int64_t point_idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (point_idx >= num_points) {
    return;
  }
  int coord_x =
      floor((points[point_idx * num_point_dim + 0] - point_cloud_range_x_min) /
            voxel_size_x);
  int coord_y =
      floor((points[point_idx * num_point_dim + 1] - point_cloud_range_y_min) /
            voxel_size_y);
  int coord_z =
      floor((points[point_idx * num_point_dim + 2] - point_cloud_range_z_min) /
            voxel_size_z);
  // Discard points that fall outside the configured point cloud range.
  if (coord_x < 0 || coord_x >= grid_size_x) {
    return;
  }
  if (coord_y < 0 || coord_y >= grid_size_y) {
    return;
  }
  if (coord_z < 0 || coord_z >= grid_size_z) {
    return;
  }
  int grid_idx =
      coord_z * grid_size_y * grid_size_x + coord_y * grid_size_x + coord_x;
  // Reserve a slot in this cell; drop the point if the voxel is already full.
  T_int num = atomicAdd(num_points_in_grid + grid_idx, 1);
  if (num < max_num_points_in_voxel) {
    points_to_num_idx[point_idx] = num;
    points_to_grid_idx[point_idx] = grid_idx;
    atomicMin(points_valid + grid_idx, static_cast<int>(point_idx));
  }
}
// CUDA kernel (grid-stride loop over points): marks exactly one point per
// non-empty grid cell. points_valid[grid] holds the smallest point index
// mapped to that cell, with num_points acting as the "no point" sentinel;
// only the matching point gets its flag set to 1.
template <typename T_int>
__global__ void update_points_flag(const int *points_valid,
                                   const T_int *points_to_grid_idx,
                                   const int num_points, int *points_flag) {
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  for (int i = tid; i < num_points; i += gridDim.x * blockDim.x) {
    T_int grid_idx = points_to_grid_idx[i];
    if (grid_idx >= 0) {
      int id = points_valid[grid_idx];
      // Flag only the representative (minimum-index) point of the cell.
      if (id != num_points && id == i) {
        points_flag[i] = 1;
      }
    }
  }
}
// CUDA kernel (grid-stride loop over points): assigns consecutive voxel
// indices to non-empty grid cells using the prefix sum of the per-point flags
// (the sum is exclusive: the last element's flag is added back to obtain the
// total). Cells whose index would exceed max_voxels keep their -1 (dropped)
// mapping, and num_voxels is capped at max_voxels.
template <typename T_int>
__global__ void get_voxel_idx_kernel(const int *points_flag,
                                     const T_int *points_to_grid_idx,
                                     const int *points_flag_prefix_sum,
                                     const int num_points, const int max_voxels,
                                     T_int *num_voxels,
                                     T_int *grid_idx_to_voxel_idx) {
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  for (int i = tid; i < num_points; i += gridDim.x * blockDim.x) {
    if (points_flag[i] == 1) {
      T_int grid_idx = points_to_grid_idx[i];
      int num = points_flag_prefix_sum[i];
      if (num < max_voxels) {
        grid_idx_to_voxel_idx[grid_idx] = num;
      }
    }
    // The thread handling the last point publishes the total voxel count.
    if (i == num_points - 1) {
      int num = points_flag_prefix_sum[i] + points_flag[i];
      if (num < max_voxels) {
        num_voxels[0] = num;
      } else {
        num_voxels[0] = max_voxels;
      }
    }
  }
}
// CUDA kernel: zero-fills the flattened voxel buffer, one element per thread.
// Fix: the awkward `(idx > num || idx == num)` bound check is simplified to
// the equivalent idiomatic `idx >= num`.
template <typename T>
__global__ void init_voxels_kernel(const int64_t num, T *voxels) {
  int64_t idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= num) {
    return;
  }
  voxels[idx] = static_cast<T>(0);
}
// CUDA kernel, one thread per point: copies each kept point's features into
// its reserved slot of the voxel buffer. Points whose cell or slot was
// dropped (grid/num index -1) or whose cell exceeded the voxel budget
// (voxel index -1) are skipped.
// Fix: the awkward `(x > n || x == n)` bound check is simplified to the
// equivalent idiomatic `x >= n`.
template <typename T, typename T_int>
__global__ void assign_voxels_kernel(
    const T *points, const T_int *points_to_grid_idx,
    const T_int *points_to_num_idx, const T_int *grid_idx_to_voxel_idx,
    const int64_t num_points, const int num_point_dim,
    const int max_num_points_in_voxel, T *voxels) {
  int64_t point_idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (point_idx >= num_points) {
    return;
  }
  T_int grid_idx = points_to_grid_idx[point_idx];
  T_int num_idx = points_to_num_idx[point_idx];
  if (grid_idx > -1 && num_idx > -1) {
    T_int voxel_idx = grid_idx_to_voxel_idx[grid_idx];
    if (voxel_idx > -1) {
      for (int64_t i = 0; i < num_point_dim; ++i) {
        voxels[voxel_idx * max_num_points_in_voxel * num_point_dim +
               num_idx * num_point_dim + i] =
            points[point_idx * num_point_dim + i];
      }
    }
  }
}
// CUDA kernel, one thread per grid cell: for every cell that was assigned a
// voxel index, decodes the flat grid index back into (z, y, x) coordinates and
// stores the point count clamped to max_num_points_in_voxel.
// Fix: the awkward `(x > n || x == n)` bound check is simplified to the
// equivalent idiomatic `x >= n`.
template <typename T, typename T_int>
__global__ void assign_coords_kernel(const T_int *grid_idx_to_voxel_idx,
                                     const T_int *num_points_in_grid,
                                     const int num_grids, const int grid_size_x,
                                     const int grid_size_y,
                                     const int grid_size_z,
                                     const int max_num_points_in_voxel,
                                     T *coords, T *num_points_per_voxel) {
  int64_t grid_idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (grid_idx >= num_grids) {
    return;
  }
  T_int voxel_idx = grid_idx_to_voxel_idx[grid_idx];
  if (voxel_idx > -1) {
    // Inverse of grid_idx = z * gy * gx + y * gx + x.
    T_int coord_z = grid_idx / grid_size_x / grid_size_y;
    T_int coord_y =
        (grid_idx - coord_z * grid_size_x * grid_size_y) / grid_size_x;
    T_int coord_x =
        grid_idx - coord_z * grid_size_x * grid_size_y - coord_y * grid_size_x;
    coords[voxel_idx * 3 + 0] = coord_z;
    coords[voxel_idx * 3 + 1] = coord_y;
    coords[voxel_idx * 3 + 2] = coord_x;
    num_points_per_voxel[voxel_idx] =
        min(num_points_in_grid[grid_idx], max_num_points_in_voxel);
  }
}
// GPU implementation of hard voxelization. Pipeline:
//   1. map each point to its grid cell and count points per cell;
//   2. flag one representative point per non-empty cell, prefix-sum the flags
//      to enumerate voxels, and build the grid -> voxel index mapping;
//   3. zero the voxel buffer and scatter point features into their slots;
//   4. emit per-voxel (z, y, x) coords and clamped point counts.
// Returns {voxels, coords, num_points_per_voxel, num_voxels}. Cells beyond
// max_voxels and points beyond max_num_points_in_voxel are dropped.
std::vector<paddle::Tensor> hard_voxelize_cuda(
    const paddle::Tensor &points, const std::vector<float> &voxel_size,
    const std::vector<float> &point_cloud_range, int max_num_points_in_voxel,
    int max_voxels) {
  // check device
  CHECK_INPUT_CUDA(points);
  int64_t num_points = points.shape()[0];
  int64_t num_point_dim = points.shape()[1];
  const float voxel_size_x = voxel_size[0];
  const float voxel_size_y = voxel_size[1];
  const float voxel_size_z = voxel_size[2];
  const float point_cloud_range_x_min = point_cloud_range[0];
  const float point_cloud_range_y_min = point_cloud_range[1];
  const float point_cloud_range_z_min = point_cloud_range[2];
  int grid_size_x = static_cast<int>(
      round((point_cloud_range[3] - point_cloud_range[0]) / voxel_size_x));
  int grid_size_y = static_cast<int>(
      round((point_cloud_range[4] - point_cloud_range[1]) / voxel_size_y));
  int grid_size_z = static_cast<int>(
      round((point_cloud_range[5] - point_cloud_range[2]) / voxel_size_z));
  int num_grids = grid_size_x * grid_size_y * grid_size_z;
  auto voxels =
      paddle::empty({max_voxels, max_num_points_in_voxel, num_point_dim},
                    paddle::DataType::FLOAT32, paddle::GPUPlace());
  auto coords = paddle::full({max_voxels, 3}, 0, paddle::DataType::INT32,
                             paddle::GPUPlace());
  auto *coords_data = coords.data<int>();
  auto num_points_per_voxel = paddle::full(
      {max_voxels}, 0, paddle::DataType::INT32, paddle::GPUPlace());
  auto *num_points_per_voxel_data = num_points_per_voxel.data<int>();
  // Per-point scratch: cell each point landed in and its slot in that cell
  // (-1 means the point was dropped).
  auto points_to_grid_idx = paddle::full(
      {num_points}, -1, paddle::DataType::INT32, paddle::GPUPlace());
  auto *points_to_grid_idx_data = points_to_grid_idx.data<int>();
  auto points_to_num_idx = paddle::full(
      {num_points}, -1, paddle::DataType::INT32, paddle::GPUPlace());
  auto *points_to_num_idx_data = points_to_num_idx.data<int>();
  auto num_points_in_grid =
      paddle::full({grid_size_z, grid_size_y, grid_size_x}, 0,
                   paddle::DataType::INT32, paddle::GPUPlace());
  auto *num_points_in_grid_data = num_points_in_grid.data<int>();
  auto grid_idx_to_voxel_idx =
      paddle::full({grid_size_z, grid_size_y, grid_size_x}, -1,
                   paddle::DataType::INT32, paddle::GPUPlace());
  auto *grid_idx_to_voxel_idx_data = grid_idx_to_voxel_idx.data<int>();
  auto num_voxels =
      paddle::full({1}, 0, paddle::DataType::INT32, paddle::GPUPlace());
  auto *num_voxels_data = num_voxels.data<int>();
  // Per-cell minimum point index; num_points doubles as the "empty" sentinel.
  auto points_valid = paddle::full({grid_size_z * grid_size_y * grid_size_x},
                                   static_cast<int>(num_points),
                                   paddle::DataType::INT32, paddle::GPUPlace());
  int *points_valid_data = points_valid.data<int>();
  auto points_flag = paddle::full({num_points}, 0, paddle::DataType::INT32,
                                  paddle::GPUPlace());
  // 1. Find the grid index for each point, compute the
  // number of points in each grid
  int64_t threads = 512;
  int64_t blocks = (num_points + threads - 1) / threads;
  PD_DISPATCH_FLOATING_TYPES(
      points.type(), "map_point_to_grid_kernel", ([&] {
        map_point_to_grid_kernel<data_t, int>
            <<<blocks, threads, 0, points.stream()>>>(
                points.data<data_t>(), point_cloud_range_x_min,
                point_cloud_range_y_min, point_cloud_range_z_min, voxel_size_x,
                voxel_size_y, voxel_size_z, grid_size_x, grid_size_y,
                grid_size_z, num_points, num_point_dim, max_num_points_in_voxel,
                points_to_grid_idx_data, points_to_num_idx_data,
                num_points_in_grid_data, points_valid_data);
      }));
  // 2. Find the number of non-zero voxels
  int *points_flag_data = points_flag.data<int>();
  threads = 512;
  blocks = (num_points + threads - 1) / threads;
  update_points_flag<int><<<blocks, threads, 0, points.stream()>>>(
      points_valid_data, points_to_grid_idx_data, num_points, points_flag_data);
  // Exclusive cumulative sum of the flags enumerates the non-empty cells.
  auto points_flag_prefix_sum =
      paddle::experimental::cumsum(points_flag, 0, false, true, false);
  int *points_flag_prefix_sum_data = points_flag_prefix_sum.data<int>();
  get_voxel_idx_kernel<int><<<blocks, threads, 0, points.stream()>>>(
      points_flag_data, points_to_grid_idx_data, points_flag_prefix_sum_data,
      num_points, max_voxels, num_voxels_data, grid_idx_to_voxel_idx_data);
  // 3. Store points to voxels coords and num_points_per_voxel
  int64_t num = max_voxels * max_num_points_in_voxel * num_point_dim;
  threads = 512;
  blocks = (num + threads - 1) / threads;
  PD_DISPATCH_FLOATING_TYPES(points.type(), "init_voxels_kernel", ([&] {
                               init_voxels_kernel<data_t>
                                   <<<blocks, threads, 0, points.stream()>>>(
                                       num, voxels.data<data_t>());
                             }));
  threads = 512;
  blocks = (num_points + threads - 1) / threads;
  PD_DISPATCH_FLOATING_TYPES(
      points.type(), "assign_voxels_kernel", ([&] {
        assign_voxels_kernel<data_t, int>
            <<<blocks, threads, 0, points.stream()>>>(
                points.data<data_t>(), points_to_grid_idx_data,
                points_to_num_idx_data, grid_idx_to_voxel_idx_data, num_points,
                num_point_dim, max_num_points_in_voxel, voxels.data<data_t>());
      }));
  // 4. Store coords, num_points_per_voxel
  blocks = (num_grids + threads - 1) / threads;
  assign_coords_kernel<int><<<blocks, threads, 0, points.stream()>>>(
      grid_idx_to_voxel_idx_data, num_points_in_grid_data, num_grids,
      grid_size_x, grid_size_y, grid_size_z, max_num_points_in_voxel,
      coords_data, num_points_per_voxel_data);
  return {voxels, coords, num_points_per_voxel, num_voxels};
}
| 0
|
apollo_public_repos/apollo-model-centerpoint/deploy/voxel_rcnn/cpp/custom_ops
|
apollo_public_repos/apollo-model-centerpoint/deploy/voxel_rcnn/cpp/custom_ops/iou3d_nms/iou3d_cpu.cpp
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
3D Rotated IoU Calculation (CPU)
Written by Shaoshuai Shi
All Rights Reserved 2020.
*/
#include "iou3d_cpu.h"
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <math.h>
#include <stdio.h>
#include <vector>
#include "paddle/include/experimental/ext_all.h"
// Smaller of two floats; when the comparison is false (tie, or NaN input) the
// first argument is returned, exactly as the original ternary did.
inline float min(float a, float b) {
  if (a > b) {
    return b;
  }
  return a;
}
// Larger of two floats; when the comparison is false (tie, or NaN input) the
// second argument is returned, exactly as the original ternary did.
inline float max(float a, float b) {
  if (a > b) {
    return a;
  }
  return b;
}
const float EPS = 1e-8;
// Minimal 2D point used by the BEV overlap routines below.
// NOTE(review): the __device__ qualifiers in this host-compiled .cpp rely on
// the CUDA headers included above neutralizing them for the host compiler —
// confirm against the build setup.
struct Point {
  float x, y;
  __device__ Point() {}
  // Narrowing double -> float is intentional; callers pass float-typed math.
  __device__ Point(double _x, double _y) { x = _x, y = _y; }
  __device__ void set(float _x, float _y) {
    x = _x;
    y = _y;
  }
  // Component-wise addition/subtraction (used for centroids and edge vectors).
  __device__ Point operator+(const Point &b) const {
    return Point(x + b.x, y + b.y);
  }
  __device__ Point operator-(const Point &b) const {
    return Point(x - b.x, y - b.y);
  }
};
// 2D cross product (z-component) of vectors a and b.
inline float cross(const Point &a, const Point &b) {
  const float term1 = a.x * b.y;
  const float term2 = a.y * b.x;
  return term1 - term2;
}
// 2D cross product of (p1 - p0) and (p2 - p0); positive when p0->p1->p2 turns
// counter-clockwise.
inline float cross(const Point &p1, const Point &p2, const Point &p0) {
  const float ux = p1.x - p0.x;
  const float uy = p1.y - p0.y;
  const float vx = p2.x - p0.x;
  const float vy = p2.y - p0.y;
  return ux * vy - vx * uy;
}
// Fast bounding-box rejection test: non-zero iff the axis-aligned bounding
// boxes of segments (p1, p2) and (q1, q2) overlap.
inline int check_rect_cross(const Point &p1, const Point &p2, const Point &q1,
                            const Point &q2) {
  const bool overlap_x = min(p1.x, p2.x) <= max(q1.x, q2.x) &&
                         min(q1.x, q2.x) <= max(p1.x, p2.x);
  const bool overlap_y = min(p1.y, p2.y) <= max(q1.y, q2.y) &&
                         min(q1.y, q2.y) <= max(p1.y, p2.y);
  return (overlap_x && overlap_y) ? 1 : 0;
}
// Tests whether point p lies inside (with MARGIN tolerance) the BEV rectangle
// of box = [x, y, z, dx, dy, dz, heading].
inline int check_in_box2d(const float *box, const Point &p) {
  const float MARGIN = 1e-2;
  float center_x = box[0], center_y = box[1];
  // Rotate p by -heading so the box becomes axis-aligned.
  float angle_cos = cos(-box[6]);
  float angle_sin = sin(-box[6]);
  float dx = p.x - center_x;
  float dy = p.y - center_y;
  float rot_x = dx * angle_cos + dy * (-angle_sin);
  float rot_y = dx * angle_sin + dy * angle_cos;
  int inside_x = fabs(rot_x) < box[3] / 2 + MARGIN;
  int inside_y = fabs(rot_y) < box[4] / 2 + MARGIN;
  return inside_x && inside_y;
}
// Intersects segment (p0, p1) with segment (q0, q1). Returns 1 and writes the
// crossing point to `ans` when the segments properly cross; otherwise 0.
inline int intersection(const Point &p1, const Point &p0, const Point &q1,
                        const Point &q0, Point &ans) {
  // fast exclusion: the segments' bounding boxes must overlap
  if (check_rect_cross(p0, p1, q0, q1) == 0) return 0;
  // check cross standing: each segment's endpoints must lie on opposite
  // sides of the other segment (signed areas with opposite signs)
  float s1 = cross(q0, p1, p0);
  float s2 = cross(p1, q1, p0);
  float s3 = cross(p0, q1, q0);
  float s4 = cross(q1, p1, q0);
  if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0;
  // calculate intersection of two lines
  float s5 = cross(q1, p1, p0);
  if (fabs(s5 - s1) > EPS) {
    // Interpolate along (q0, q1) using the signed areas s1 and s5 as weights.
    ans.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1);
    ans.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1);
  } else {
    // Near-degenerate case: solve the two implicit line equations
    // a*x + b*y + c = 0 directly via Cramer's rule.
    float a0 = p0.y - p1.y, b0 = p1.x - p0.x, c0 = p0.x * p1.y - p1.x * p0.y;
    float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y;
    float D = a0 * b1 - a1 * b0;
    ans.x = (b0 * c1 - b1 * c0) / D;
    ans.y = (a1 * c0 - a0 * c1) / D;
  }
  return 1;
}
inline void rotate_around_center(const Point ¢er, const float angle_cos,
const float angle_sin, Point &p) {
float new_x =
(p.x - center.x) * angle_cos + (p.y - center.y) * (-angle_sin) + center.x;
float new_y =
(p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos + center.y;
p.set(new_x, new_y);
}
inline int point_cmp(const Point &a, const Point &b, const Point ¢er) {
return atan2(a.y - center.y, a.x - center.x) >
atan2(b.y - center.y, b.x - center.x);
}
// Overlap area of the BEV footprints of two rotated boxes.
// params: box_a / box_b (7) [x, y, z, dx, dy, dz, heading]
// Builds the intersection polygon from (a) crossings of the two boxes' edges
// and (b) corners of each box contained in the other, sorts its vertices by
// angle around their centroid, and integrates the area with the shoelace
// formula.
// Fix: an early return for cnt == 0 (disjoint boxes) avoids the float
// division by zero that previously produced an unused NaN centroid; the
// returned area (0) is unchanged.
inline float box_overlap(const float *box_a, const float *box_b) {
  float a_angle = box_a[6], b_angle = box_b[6];
  float a_dx_half = box_a[3] / 2, b_dx_half = box_b[3] / 2,
        a_dy_half = box_a[4] / 2, b_dy_half = box_b[4] / 2;
  float a_x1 = box_a[0] - a_dx_half, a_y1 = box_a[1] - a_dy_half;
  float a_x2 = box_a[0] + a_dx_half, a_y2 = box_a[1] + a_dy_half;
  float b_x1 = box_b[0] - b_dx_half, b_y1 = box_b[1] - b_dy_half;
  float b_x2 = box_b[0] + b_dx_half, b_y2 = box_b[1] + b_dy_half;
  Point center_a(box_a[0], box_a[1]);
  Point center_b(box_b[0], box_b[1]);
  // Axis-aligned corners first ...
  Point box_a_corners[5];
  box_a_corners[0].set(a_x1, a_y1);
  box_a_corners[1].set(a_x2, a_y1);
  box_a_corners[2].set(a_x2, a_y2);
  box_a_corners[3].set(a_x1, a_y2);
  Point box_b_corners[5];
  box_b_corners[0].set(b_x1, b_y1);
  box_b_corners[1].set(b_x2, b_y1);
  box_b_corners[2].set(b_x2, b_y2);
  box_b_corners[3].set(b_x1, b_y2);
  // ... then rotate them by each box's heading around its own center.
  float a_angle_cos = cos(a_angle), a_angle_sin = sin(a_angle);
  float b_angle_cos = cos(b_angle), b_angle_sin = sin(b_angle);
  for (int k = 0; k < 4; k++) {
    rotate_around_center(center_a, a_angle_cos, a_angle_sin, box_a_corners[k]);
    rotate_around_center(center_b, b_angle_cos, b_angle_sin, box_b_corners[k]);
  }
  // Close the rings so edge k runs from corner[k] to corner[k + 1].
  box_a_corners[4] = box_a_corners[0];
  box_b_corners[4] = box_b_corners[0];
  // Collect the edge-edge intersection points.
  Point cross_points[16];
  Point poly_center;
  int cnt = 0, flag = 0;
  poly_center.set(0, 0);
  for (int i = 0; i < 4; i++) {
    for (int j = 0; j < 4; j++) {
      flag = intersection(box_a_corners[i + 1], box_a_corners[i],
                          box_b_corners[j + 1], box_b_corners[j],
                          cross_points[cnt]);
      if (flag) {
        poly_center = poly_center + cross_points[cnt];
        cnt++;
      }
    }
  }
  // Add corners of one box that lie inside the other.
  for (int k = 0; k < 4; k++) {
    if (check_in_box2d(box_a, box_b_corners[k])) {
      poly_center = poly_center + box_b_corners[k];
      cross_points[cnt] = box_b_corners[k];
      cnt++;
    }
    if (check_in_box2d(box_b, box_a_corners[k])) {
      poly_center = poly_center + box_a_corners[k];
      cross_points[cnt] = box_a_corners[k];
      cnt++;
    }
  }
  // No intersection polygon: the overlap area is zero.
  if (cnt == 0) {
    return 0;
  }
  poly_center.x /= cnt;
  poly_center.y /= cnt;
  // Sort the polygon vertices by angle around the centroid
  // (bubble sort; at most 16 vertices).
  Point temp;
  for (int j = 0; j < cnt - 1; j++) {
    for (int i = 0; i < cnt - j - 1; i++) {
      if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)) {
        temp = cross_points[i];
        cross_points[i] = cross_points[i + 1];
        cross_points[i + 1] = temp;
      }
    }
  }
  // Shoelace formula over the triangle fan anchored at vertex 0.
  float area = 0;
  for (int k = 0; k < cnt - 1; k++) {
    area += cross(cross_points[k] - cross_points[0],
                  cross_points[k + 1] - cross_points[0]);
  }
  return fabs(area) / 2.0;
}
inline float iou_bev(const float *box_a, const float *box_b) {
// params: box_a (7) [x, y, z, dx, dy, dz, heading]
// params: box_b (7) [x, y, z, dx, dy, dz, heading]
float sa = box_a[3] * box_a[4];
float sb = box_b[3] * box_b[4];
float s_overlap = box_overlap(box_a, box_b);
return s_overlap / fmaxf(sa + sb - s_overlap, EPS);
}
// Computes the dense pairwise BEV IoU matrix between two CPU box tensors.
// params boxes_a_tensor: (N, 7) [x, y, z, dx, dy, dz, heading]
// params boxes_b_tensor: (M, 7) [x, y, z, dx, dy, dz, heading]
// returns: (N, M) float32 IoU matrix on CPU
std::vector<paddle::Tensor> boxes_iou_bev_cpu(
    const paddle::Tensor &boxes_a_tensor,
    const paddle::Tensor &boxes_b_tensor) {
  int num_boxes_a = boxes_a_tensor.shape()[0];
  int num_boxes_b = boxes_b_tensor.shape()[0];
  const float *boxes_a = boxes_a_tensor.data<float>();
  const float *boxes_b = boxes_b_tensor.data<float>();
  auto ans_iou_tensor =
      paddle::empty({num_boxes_a, num_boxes_b}, paddle::DataType::FLOAT32,
                    paddle::CPUPlace());
  float *ans_iou = ans_iou_tensor.data<float>();
  // O(N * M) nested loop; each pair steps 7 floats into the raw buffers.
  for (int i = 0; i < num_boxes_a; i++) {
    for (int j = 0; j < num_boxes_b; j++) {
      ans_iou[i * num_boxes_b + j] = iou_bev(boxes_a + i * 7, boxes_b + j * 7);
    }
  }
  return {ans_iou_tensor};
}
| 0
|
apollo_public_repos/apollo-model-centerpoint/deploy/voxel_rcnn/cpp/custom_ops
|
apollo_public_repos/apollo-model-centerpoint/deploy/voxel_rcnn/cpp/custom_ops/iou3d_nms/iou3d_nms.cpp
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
3D IoU Calculation and Rotated NMS(modified from 2D NMS written by others)
Written by Shaoshuai Shi
All Rights Reserved 2019-2020.
*/
#include "iou3d_nms.h"
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <vector>
#include "paddle/include/experimental/ext_all.h"
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
const int THREADS_PER_BLOCK_NMS = sizeof(int64_t) * 8;
void BoxesOverlapLauncher(const cudaStream_t &stream, const int num_a,
const float *boxes_a, const int num_b,
const float *boxes_b, float *ans_overlap);
void BoxesIouBevLauncher(const cudaStream_t &stream, const int num_a,
const float *boxes_a, const int num_b,
const float *boxes_b, float *ans_iou);
void NmsLauncher(const cudaStream_t &stream, const float *boxes, int64_t *mask,
int boxes_num, float nms_overlap_thresh);
void NmsNormalLauncher(const cudaStream_t &stream, const float *boxes,
int64_t *mask, int boxes_num, float nms_overlap_thresh);
// GPU pairwise BEV overlap areas (not IoU) between two box sets.
// params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
// params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading]
// returns: (N, M) float32 overlap-area matrix on GPU
std::vector<paddle::Tensor> boxes_overlap_bev_gpu(
    const paddle::Tensor &boxes_a, const paddle::Tensor &boxes_b) {
  int num_a = boxes_a.shape()[0];
  int num_b = boxes_b.shape()[0];
  const float *boxes_a_data = boxes_a.data<float>();
  const float *boxes_b_data = boxes_b.data<float>();
  auto ans_overlap = paddle::empty({num_a, num_b}, paddle::DataType::FLOAT32,
                                   paddle::GPUPlace());
  float *ans_overlap_data = ans_overlap.data<float>();
  // Kernel launch on the input tensor's stream; defined in the .cu file.
  BoxesOverlapLauncher(boxes_a.stream(), num_a, boxes_a_data, num_b,
                       boxes_b_data, ans_overlap_data);
  return {ans_overlap};
}
// GPU pairwise BEV IoU matrix between two box sets.
// params boxes_a_tensor: (N, 7) [x, y, z, dx, dy, dz, heading]
// params boxes_b_tensor: (M, 7) [x, y, z, dx, dy, dz, heading]
// returns: (N, M) float32 IoU matrix on GPU
std::vector<paddle::Tensor> boxes_iou_bev_gpu(
    const paddle::Tensor &boxes_a_tensor,
    const paddle::Tensor &boxes_b_tensor) {
  int num_a = boxes_a_tensor.shape()[0];
  int num_b = boxes_b_tensor.shape()[0];
  const float *boxes_a_data = boxes_a_tensor.data<float>();
  const float *boxes_b_data = boxes_b_tensor.data<float>();
  auto ans_iou_tensor = paddle::empty({num_a, num_b}, paddle::DataType::FLOAT32,
                                      paddle::GPUPlace());
  float *ans_iou_data = ans_iou_tensor.data<float>();
  // Kernel launch on the input tensor's stream; defined in the .cu file.
  BoxesIouBevLauncher(boxes_a_tensor.stream(), num_a, boxes_a_data, num_b,
                      boxes_b_data, ans_iou_data);
  return {ans_iou_tensor};
}
// Rotated (BEV) NMS. `boxes` is (N, 7) [x, y, z, dx, dy, dz, heading],
// assumed pre-sorted by score in descending order (confirm at call sites).
// Returns:
//   keep        (N) int32 CPU tensor; the first num_to_keep entries are the
//               indices of the boxes that survive suppression
//   num_to_keep (1) int32 CPU tensor
// Fix: the host-side suppression bitset used a runtime-sized stack array
// (`int64_t remv_cpu[col_blocks]`), a VLA compiler extension that is not
// standard C++ and can overflow the stack for large N; replaced with
// std::vector.
std::vector<paddle::Tensor> nms_gpu(const paddle::Tensor &boxes,
                                    float nms_overlap_thresh) {
  auto keep = paddle::empty({boxes.shape()[0]}, paddle::DataType::INT32,
                            paddle::CPUPlace());
  auto num_to_keep_tensor =
      paddle::empty({1}, paddle::DataType::INT32, paddle::CPUPlace());
  int *num_to_keep_data = num_to_keep_tensor.data<int>();
  int boxes_num = boxes.shape()[0];
  const float *boxes_data = boxes.data<float>();
  int *keep_data = keep.data<int>();
  // One 64-bit suppression word per block of 64 boxes.
  const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
  auto mask = paddle::empty({boxes_num * col_blocks}, paddle::DataType::INT64,
                            paddle::GPUPlace());
  int64_t *mask_data = mask.data<int64_t>();
  NmsLauncher(boxes.stream(), boxes_data, mask_data, boxes_num,
              nms_overlap_thresh);
  // Blocking device-to-host copy of the pairwise-overlap bitmask.
  const paddle::Tensor mask_cpu_tensor = mask.copy_to(paddle::CPUPlace(), true);
  const int64_t *mask_cpu = mask_cpu_tensor.data<int64_t>();
  std::vector<int64_t> remv_cpu(col_blocks, 0);
  int num_to_keep = 0;
  for (int i = 0; i < boxes_num; i++) {
    int nblock = i / THREADS_PER_BLOCK_NMS;
    int inblock = i % THREADS_PER_BLOCK_NMS;
    // Keep box i only if no previously kept box has suppressed it.
    if (!(remv_cpu[nblock] & (1ULL << inblock))) {
      keep_data[num_to_keep++] = i;
      const int64_t *p = &mask_cpu[0] + i * col_blocks;
      for (int j = nblock; j < col_blocks; j++) {
        remv_cpu[j] |= p[j];
      }
    }
  }
  num_to_keep_data[0] = num_to_keep;
  if (cudaSuccess != cudaGetLastError()) printf("Error!\n");
  return {keep, num_to_keep_tensor};
}
// Same host-side greedy suppression loop as nms_gpu, but the pairwise overlap
// mask comes from NmsNormalLauncher. `boxes` is (N, 7) [x, y, z, dx, dy, dz,
// heading], assumed pre-sorted by score (confirm at call sites).
// params keep: (N)
// Fix: the runtime-sized stack array (`int64_t remv_cpu[col_blocks]`, a VLA
// compiler extension that is not standard C++) is replaced with std::vector;
// stale commented-out cudaMalloc/cudaMemcpy code is removed.
std::vector<paddle::Tensor> nms_normal_gpu(const paddle::Tensor &boxes,
                                           float nms_overlap_thresh) {
  auto keep = paddle::empty({boxes.shape()[0]}, paddle::DataType::INT32,
                            paddle::CPUPlace());
  auto num_to_keep_tensor =
      paddle::empty({1}, paddle::DataType::INT32, paddle::CPUPlace());
  int *num_to_keep_data = num_to_keep_tensor.data<int>();
  int boxes_num = boxes.shape()[0];
  const float *boxes_data = boxes.data<float>();
  int *keep_data = keep.data<int>();
  // One 64-bit suppression word per block of 64 boxes.
  const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
  auto mask = paddle::empty({boxes_num * col_blocks}, paddle::DataType::INT64,
                            paddle::GPUPlace());
  int64_t *mask_data = mask.data<int64_t>();
  NmsNormalLauncher(boxes.stream(), boxes_data, mask_data, boxes_num,
                    nms_overlap_thresh);
  // Blocking device-to-host copy of the pairwise-overlap bitmask.
  const paddle::Tensor mask_cpu_tensor = mask.copy_to(paddle::CPUPlace(), true);
  const int64_t *mask_cpu = mask_cpu_tensor.data<int64_t>();
  std::vector<int64_t> remv_cpu(col_blocks, 0);
  int num_to_keep = 0;
  for (int i = 0; i < boxes_num; i++) {
    int nblock = i / THREADS_PER_BLOCK_NMS;
    int inblock = i % THREADS_PER_BLOCK_NMS;
    // Keep box i only if no previously kept box has suppressed it.
    if (!(remv_cpu[nblock] & (1ULL << inblock))) {
      keep_data[num_to_keep++] = i;
      const int64_t *p = &mask_cpu[0] + i * col_blocks;
      for (int j = nblock; j < col_blocks; j++) {
        remv_cpu[j] |= p[j];
      }
    }
  }
  num_to_keep_data[0] = num_to_keep;
  if (cudaSuccess != cudaGetLastError()) {
    printf("Error!\n");
  }
  return {keep, num_to_keep_tensor};
}
| 0
|
apollo_public_repos/apollo-model-centerpoint/deploy/voxel_rcnn/cpp/custom_ops
|
apollo_public_repos/apollo-model-centerpoint/deploy/voxel_rcnn/cpp/custom_ops/iou3d_nms/iou3d_nms_kernel.cu
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
3D IoU Calculation and Rotated NMS(modified from 2D NMS written by others)
Written by Shaoshuai Shi
All Rights Reserved 2019-2020.
*/
#include <stdio.h>
#define THREADS_PER_BLOCK 16
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
// #define DEBUG
const int THREADS_PER_BLOCK_NMS = sizeof(int64_t) * 8;
const float EPS = 1e-8;
// Minimal 2D point used by the device-side BEV overlap routines below.
struct Point {
  float x, y;
  __device__ Point() {}
  // Narrowing double -> float is intentional; callers pass float-typed math.
  __device__ Point(double _x, double _y) { x = _x, y = _y; }
  __device__ void set(float _x, float _y) {
    x = _x;
    y = _y;
  }
  // Component-wise addition/subtraction (used for centroids and edge vectors).
  __device__ Point operator+(const Point &b) const {
    return Point(x + b.x, y + b.y);
  }
  __device__ Point operator-(const Point &b) const {
    return Point(x - b.x, y - b.y);
  }
};
// 2D cross product (z-component) of vectors a and b.
__device__ inline float cross(const Point &a, const Point &b) {
  const float term1 = a.x * b.y;
  const float term2 = a.y * b.x;
  return term1 - term2;
}
// 2D cross product of (p1 - p0) and (p2 - p0); positive when p0->p1->p2 turns
// counter-clockwise.
__device__ inline float cross(const Point &p1, const Point &p2,
                              const Point &p0) {
  const float ux = p1.x - p0.x;
  const float uy = p1.y - p0.y;
  const float vx = p2.x - p0.x;
  const float vy = p2.y - p0.y;
  return ux * vy - vx * uy;
}
// Fast bounding-box rejection test: non-zero iff the axis-aligned bounding
// boxes of segments (p1, p2) and (q1, q2) overlap.
__device__ int check_rect_cross(const Point &p1, const Point &p2,
                                const Point &q1, const Point &q2) {
  const bool overlap_x = min(p1.x, p2.x) <= max(q1.x, q2.x) &&
                         min(q1.x, q2.x) <= max(p1.x, p2.x);
  const bool overlap_y = min(p1.y, p2.y) <= max(q1.y, q2.y) &&
                         min(q1.y, q2.y) <= max(p1.y, p2.y);
  return (overlap_x && overlap_y) ? 1 : 0;
}
// Tests whether point p lies inside (with MARGIN tolerance) the BEV rectangle
// of box = [x, y, z, dx, dy, dz, heading].
__device__ inline int check_in_box2d(const float *box, const Point &p) {
  const float MARGIN = 1e-2;
  float center_x = box[0], center_y = box[1];
  // Rotate p by -heading so the box becomes axis-aligned.
  float angle_cos = cos(-box[6]);
  float angle_sin = sin(-box[6]);
  float dx = p.x - center_x;
  float dy = p.y - center_y;
  float rot_x = dx * angle_cos + dy * (-angle_sin);
  float rot_y = dx * angle_sin + dy * angle_cos;
  int inside_x = fabs(rot_x) < box[3] / 2 + MARGIN;
  int inside_y = fabs(rot_y) < box[4] / 2 + MARGIN;
  return inside_x && inside_y;
}
// Intersects segment (p0, p1) with segment (q0, q1). Returns 1 and writes the
// crossing point to `ans` when the segments properly cross; otherwise 0.
__device__ inline int intersection(const Point &p1, const Point &p0,
                                   const Point &q1, const Point &q0,
                                   Point &ans) {
  // fast exclusion: the segments' bounding boxes must overlap
  if (check_rect_cross(p0, p1, q0, q1) == 0) return 0;
  // check cross standing: each segment's endpoints must lie on opposite
  // sides of the other segment (signed areas with opposite signs)
  float s1 = cross(q0, p1, p0);
  float s2 = cross(p1, q1, p0);
  float s3 = cross(p0, q1, q0);
  float s4 = cross(q1, p1, q0);
  if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0;
  // calculate intersection of two lines
  float s5 = cross(q1, p1, p0);
  if (fabs(s5 - s1) > EPS) {
    // Interpolate along (q0, q1) using the signed areas s1 and s5 as weights.
    ans.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1);
    ans.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1);
  } else {
    // Near-degenerate case: solve the two implicit line equations
    // a*x + b*y + c = 0 directly via Cramer's rule.
    float a0 = p0.y - p1.y, b0 = p1.x - p0.x, c0 = p0.x * p1.y - p1.x * p0.y;
    float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y;
    float D = a0 * b1 - a1 * b0;
    ans.x = (b0 * c1 - b1 * c0) / D;
    ans.y = (a1 * c0 - a0 * c1) / D;
  }
  return 1;
}
// Rotates p in place around `center` by the angle whose cosine/sine are
// given (the caller precomputes them once per box).
__device__ inline void rotate_around_center(const Point &center,
                                            const float angle_cos,
                                            const float angle_sin, Point &p) {
  float new_x =
      (p.x - center.x) * angle_cos + (p.y - center.y) * (-angle_sin) + center.x;
  float new_y =
      (p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos + center.y;
  p.set(new_x, new_y);
}
// Orders two points by their polar angle around `center`; used to sort the
// intersection-polygon vertices into a consistent winding order.
__device__ inline int point_cmp(const Point &a, const Point &b,
                                const Point &center) {
  return atan2(a.y - center.y, a.x - center.x) >
         atan2(b.y - center.y, b.x - center.x);
}
// Computes the overlap (intersection) area of the rotated BEV footprints of
// two boxes.
// params box_a: [x, y, z, dx, dy, dz, heading]
// params box_b: [x, y, z, dx, dy, dz, heading]
//
// Method: build the 4 rotated corners of each box, collect the vertices of
// the intersection polygon (edge-edge crossings plus corners of one box that
// lie inside the other), sort them by polar angle around their centroid, and
// accumulate the polygon area with the shoelace formula.
//
// Fix: the DEBUG printf inside the edge-intersection loop printed the b-box
// corners with the a-loop index `i` instead of the b-loop index `j`.
__device__ inline float box_overlap(const float *box_a, const float *box_b) {
  float a_angle = box_a[6], b_angle = box_b[6];
  float a_dx_half = box_a[3] / 2, b_dx_half = box_b[3] / 2,
        a_dy_half = box_a[4] / 2, b_dy_half = box_b[4] / 2;
  float a_x1 = box_a[0] - a_dx_half, a_y1 = box_a[1] - a_dy_half;
  float a_x2 = box_a[0] + a_dx_half, a_y2 = box_a[1] + a_dy_half;
  float b_x1 = box_b[0] - b_dx_half, b_y1 = box_b[1] - b_dy_half;
  float b_x2 = box_b[0] + b_dx_half, b_y2 = box_b[1] + b_dy_half;
  Point center_a(box_a[0], box_a[1]);
  Point center_b(box_b[0], box_b[1]);
#ifdef DEBUG
  printf(
      "a: (%.3f, %.3f, %.3f, %.3f, %.3f), b: (%.3f, %.3f, %.3f, %.3f, %.3f)\n",
      a_x1, a_y1, a_x2, a_y2, a_angle, b_x1, b_y1, b_x2, b_y2, b_angle);
  printf("center a: (%.3f, %.3f), b: (%.3f, %.3f)\n", center_a.x, center_a.y,
         center_b.x, center_b.y);
#endif
  // Axis-aligned corners first; element [4] is filled after rotation to close
  // the polygon (corner 4 == corner 0) so edge i is (corner[i], corner[i+1]).
  Point box_a_corners[5];
  box_a_corners[0].set(a_x1, a_y1);
  box_a_corners[1].set(a_x2, a_y1);
  box_a_corners[2].set(a_x2, a_y2);
  box_a_corners[3].set(a_x1, a_y2);
  Point box_b_corners[5];
  box_b_corners[0].set(b_x1, b_y1);
  box_b_corners[1].set(b_x2, b_y1);
  box_b_corners[2].set(b_x2, b_y2);
  box_b_corners[3].set(b_x1, b_y2);
  // get oriented corners
  float a_angle_cos = cos(a_angle), a_angle_sin = sin(a_angle);
  float b_angle_cos = cos(b_angle), b_angle_sin = sin(b_angle);
  for (int k = 0; k < 4; k++) {
#ifdef DEBUG
    printf("before corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k,
           box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x,
           box_b_corners[k].y);
#endif
    rotate_around_center(center_a, a_angle_cos, a_angle_sin, box_a_corners[k]);
    rotate_around_center(center_b, b_angle_cos, b_angle_sin, box_b_corners[k]);
#ifdef DEBUG
    printf("corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x,
           box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y);
#endif
  }
  box_a_corners[4] = box_a_corners[0];
  box_b_corners[4] = box_b_corners[0];
  // get intersection of lines: every a-edge against every b-edge
  Point cross_points[16];
  Point poly_center;
  int cnt = 0, flag = 0;
  poly_center.set(0, 0);
  for (int i = 0; i < 4; i++) {
    for (int j = 0; j < 4; j++) {
      flag = intersection(box_a_corners[i + 1], box_a_corners[i],
                          box_b_corners[j + 1], box_b_corners[j],
                          cross_points[cnt]);
      if (flag) {
        poly_center = poly_center + cross_points[cnt];
        cnt++;
#ifdef DEBUG
        printf(
            "Cross points (%.3f, %.3f): a(%.3f, %.3f)->(%.3f, %.3f), b(%.3f, "
            "%.3f)->(%.3f, %.3f) \n",
            cross_points[cnt - 1].x, cross_points[cnt - 1].y,
            box_a_corners[i].x, box_a_corners[i].y, box_a_corners[i + 1].x,
            box_a_corners[i + 1].y, box_b_corners[j].x, box_b_corners[j].y,
            box_b_corners[j + 1].x, box_b_corners[j + 1].y);
#endif
      }
    }
  }
  // check corners: corners of one box contained in the other are also
  // vertices of the intersection polygon
  for (int k = 0; k < 4; k++) {
    if (check_in_box2d(box_a, box_b_corners[k])) {
      poly_center = poly_center + box_b_corners[k];
      cross_points[cnt] = box_b_corners[k];
      cnt++;
#ifdef DEBUG
      printf("b corners in a: corner_b(%.3f, %.3f)", cross_points[cnt - 1].x,
             cross_points[cnt - 1].y);
#endif
    }
    if (check_in_box2d(box_b, box_a_corners[k])) {
      poly_center = poly_center + box_a_corners[k];
      cross_points[cnt] = box_a_corners[k];
      cnt++;
#ifdef DEBUG
      printf("a corners in b: corner_a(%.3f, %.3f)", cross_points[cnt - 1].x,
             cross_points[cnt - 1].y);
#endif
    }
  }
  // Centroid of the collected vertices. If the boxes don't intersect,
  // cnt == 0 and this is a 0/0 -> NaN, but the value is then never used:
  // the area loop below runs zero times and the function returns 0.
  poly_center.x /= cnt;
  poly_center.y /= cnt;
  // sort the points of polygon by polar angle around the centroid
  // (bubble sort; cnt is small)
  Point temp;
  for (int j = 0; j < cnt - 1; j++) {
    for (int i = 0; i < cnt - j - 1; i++) {
      if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)) {
        temp = cross_points[i];
        cross_points[i] = cross_points[i + 1];
        cross_points[i + 1] = temp;
      }
    }
  }
#ifdef DEBUG
  printf("cnt=%d\n", cnt);
  for (int i = 0; i < cnt; i++) {
    printf("All cross point %d: (%.3f, %.3f)\n", i, cross_points[i].x,
           cross_points[i].y);
  }
#endif
  // get the overlap areas via the shoelace formula, fanning out from vertex 0
  float area = 0;
  for (int k = 0; k < cnt - 1; k++) {
    area += cross(cross_points[k] - cross_points[0],
                  cross_points[k + 1] - cross_points[0]);
  }
  return fabs(area) / 2.0;
}
// BEV IoU of two rotated boxes: intersection area of the rotated footprints
// divided by the union of the two footprint areas.
// params box_a / box_b: [x, y, z, dx, dy, dz, heading]
__device__ inline float iou_bev(const float *box_a, const float *box_b) {
  const float area_a = box_a[3] * box_a[4];
  const float area_b = box_b[3] * box_b[4];
  const float inter = box_overlap(box_a, box_b);
  // EPS guards against a zero-area union.
  const float uni = fmaxf(area_a + area_b - inter, EPS);
  return inter / uni;
}
// One thread computes the rotated-footprint overlap area of one
// (box_a, box_b) pair and writes it to ans_overlap[a_idx * num_b + b_idx].
__global__ void boxes_overlap_kernel(const int num_a, const float *boxes_a,
                                     const int num_b, const float *boxes_b,
                                     float *ans_overlap) {
  // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
  // params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading]
  const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y;
  const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
  // Guard threads that fall outside the (num_a, num_b) pair grid.
  if (a_idx >= num_a || b_idx >= num_b) {
    return;
  }
  const float *cur_box_a = boxes_a + a_idx * 7;
  const float *cur_box_b = boxes_b + b_idx * 7;
  float s_overlap = box_overlap(cur_box_a, cur_box_b);
  ans_overlap[a_idx * num_b + b_idx] = s_overlap;
}
// One thread computes the rotated BEV IoU of one (box_a, box_b) pair and
// writes it to ans_iou[a_idx * num_b + b_idx].
__global__ void boxes_iou_bev_kernel(const int num_a, const float *boxes_a,
                                     const int num_b, const float *boxes_b,
                                     float *ans_iou) {
  // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
  // params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading]
  const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y;
  const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
  // Guard threads that fall outside the (num_a, num_b) pair grid.
  if (a_idx >= num_a || b_idx >= num_b) {
    return;
  }
  const float *cur_box_a = boxes_a + a_idx * 7;
  const float *cur_box_b = boxes_b + b_idx * 7;
  float cur_iou_bev = iou_bev(cur_box_a, cur_box_b);
  ans_iou[a_idx * num_b + b_idx] = cur_iou_bev;
}
// Bitmask phase of rotated NMS. Each block compares a 64-box row tile against
// a 64-box column tile; bit i of mask[box][col_block] is set when `box`
// overlaps box (col_block * 64 + i) with IoU above nms_overlap_thresh.
// On the diagonal tile each thread only compares against later boxes
// (start = threadIdx.x + 1), so each pair is tested once.
// NOTE(review): suppression from this mask appears to happen on the host;
// callers presumably pass score-sorted boxes — confirm against the caller.
__global__ void nms_kernel(const int boxes_num, const float nms_overlap_thresh,
                           const float *boxes, int64_t *mask) {
  // params: boxes (N, 7) [x, y, z, dx, dy, dz, heading]
  // params: mask (N, N/THREADS_PER_BLOCK_NMS)
  const int row_start = blockIdx.y;
  const int col_start = blockIdx.x;
  // if (row_start > col_start) return;
  // Effective tile sizes (the last tile may be partial).
  const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS,
                             THREADS_PER_BLOCK_NMS);
  const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS,
                             THREADS_PER_BLOCK_NMS);
  // Stage the column tile's boxes in shared memory; each thread loads one box.
  __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 7];
  if (threadIdx.x < col_size) {
    block_boxes[threadIdx.x * 7 + 0] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 0];
    block_boxes[threadIdx.x * 7 + 1] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 1];
    block_boxes[threadIdx.x * 7 + 2] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 2];
    block_boxes[threadIdx.x * 7 + 3] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 3];
    block_boxes[threadIdx.x * 7 + 4] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 4];
    block_boxes[threadIdx.x * 7 + 5] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 5];
    block_boxes[threadIdx.x * 7 + 6] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 6];
  }
  __syncthreads();
  if (threadIdx.x < row_size) {
    const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x;
    const float *cur_box = boxes + cur_box_idx * 7;
    int i = 0;
    int64_t t = 0;
    int start = 0;
    if (row_start == col_start) {
      start = threadIdx.x + 1;
    }
    for (i = start; i < col_size; i++) {
      if (iou_bev(cur_box, block_boxes + i * 7) > nms_overlap_thresh) {
        t |= 1ULL << i;
      }
    }
    const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
    mask[cur_box_idx * col_blocks + col_start] = t;
  }
}
// Axis-aligned BEV IoU of two boxes; the heading component (index 6) is
// ignored, so the footprints are treated as upright rectangles.
// params a / b: [x, y, z, dx, dy, dz, heading]
__device__ inline float iou_normal(float const *const a, float const *const b) {
  // Intersection rectangle bounds along x and y.
  const float x_lo = fmaxf(a[0] - a[3] / 2, b[0] - b[3] / 2);
  const float x_hi = fminf(a[0] + a[3] / 2, b[0] + b[3] / 2);
  const float y_lo = fmaxf(a[1] - a[4] / 2, b[1] - b[4] / 2);
  const float y_hi = fminf(a[1] + a[4] / 2, b[1] + b[4] / 2);
  // Clamp negative extents to zero (disjoint boxes).
  const float inter = fmaxf(x_hi - x_lo, 0.f) * fmaxf(y_hi - y_lo, 0.f);
  const float area_a = a[3] * a[4];
  const float area_b = b[3] * b[4];
  return inter / fmaxf(area_a + area_b - inter, EPS);
}
// Same bitmask NMS scheme as nms_kernel, but using the axis-aligned
// (heading-ignoring) IoU iou_normal instead of the rotated iou_bev.
__global__ void nms_normal_kernel(const int boxes_num,
                                  const float nms_overlap_thresh,
                                  const float *boxes, int64_t *mask) {
  // params: boxes (N, 7) [x, y, z, dx, dy, dz, heading]
  // params: mask (N, N/THREADS_PER_BLOCK_NMS)
  const int row_start = blockIdx.y;
  const int col_start = blockIdx.x;
  // if (row_start > col_start) return;
  // Effective tile sizes (the last tile may be partial).
  const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS,
                             THREADS_PER_BLOCK_NMS);
  const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS,
                             THREADS_PER_BLOCK_NMS);
  // Stage the column tile's boxes in shared memory; each thread loads one box.
  __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 7];
  if (threadIdx.x < col_size) {
    block_boxes[threadIdx.x * 7 + 0] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 0];
    block_boxes[threadIdx.x * 7 + 1] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 1];
    block_boxes[threadIdx.x * 7 + 2] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 2];
    block_boxes[threadIdx.x * 7 + 3] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 3];
    block_boxes[threadIdx.x * 7 + 4] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 4];
    block_boxes[threadIdx.x * 7 + 5] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 5];
    block_boxes[threadIdx.x * 7 + 6] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 6];
  }
  __syncthreads();
  if (threadIdx.x < row_size) {
    const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x;
    const float *cur_box = boxes + cur_box_idx * 7;
    int i = 0;
    int64_t t = 0;
    int start = 0;
    if (row_start == col_start) {
      start = threadIdx.x + 1;
    }
    for (i = start; i < col_size; i++) {
      if (iou_normal(cur_box, block_boxes + i * 7) > nms_overlap_thresh) {
        t |= 1ULL << i;
      }
    }
    const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
    mask[cur_box_idx * col_blocks + col_start] = t;
  }
}
// Host-side wrapper: launches boxes_overlap_kernel on `stream` with one
// thread per (a, b) box pair. `ans_overlap` must hold num_a * num_b floats.
void BoxesOverlapLauncher(const cudaStream_t &stream, const int num_a,
                          const float *boxes_a, const int num_b,
                          const float *boxes_b, float *ans_overlap) {
  // blockIdx.x walks boxes_b (columns), blockIdx.y walks boxes_a (rows).
  dim3 grid(DIVUP(num_b, THREADS_PER_BLOCK), DIVUP(num_a, THREADS_PER_BLOCK));
  dim3 block(THREADS_PER_BLOCK, THREADS_PER_BLOCK);
  boxes_overlap_kernel<<<grid, block, 0, stream>>>(num_a, boxes_a, num_b,
                                                   boxes_b, ans_overlap);
#ifdef DEBUG
  cudaDeviceSynchronize();  // for using printf in kernel function
#endif
}
// Host-side wrapper: launches boxes_iou_bev_kernel on `stream` with one
// thread per (a, b) box pair. `ans_iou` must hold num_a * num_b floats.
void BoxesIouBevLauncher(const cudaStream_t &stream, const int num_a,
                         const float *boxes_a, const int num_b,
                         const float *boxes_b, float *ans_iou) {
  // blockIdx.x walks boxes_b (columns), blockIdx.y walks boxes_a (rows).
  dim3 grid(DIVUP(num_b, THREADS_PER_BLOCK), DIVUP(num_a, THREADS_PER_BLOCK));
  dim3 block(THREADS_PER_BLOCK, THREADS_PER_BLOCK);
  boxes_iou_bev_kernel<<<grid, block, 0, stream>>>(num_a, boxes_a, num_b,
                                                   boxes_b, ans_iou);
#ifdef DEBUG
  cudaDeviceSynchronize();  // for using printf in kernel function
#endif
}
// Host-side wrapper for the rotated-IoU NMS bitmask kernel. `mask` must hold
// boxes_num * DIVUP(boxes_num, 64) int64_t entries (see nms_kernel).
void NmsLauncher(const cudaStream_t &stream, const float *boxes, int64_t *mask,
                 int boxes_num, float nms_overlap_thresh) {
  const int num_tiles = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
  dim3 grid(num_tiles, num_tiles);
  dim3 block(THREADS_PER_BLOCK_NMS);
  nms_kernel<<<grid, block, 0, stream>>>(boxes_num, nms_overlap_thresh, boxes,
                                         mask);
}
// Host-side wrapper for the axis-aligned-IoU NMS bitmask kernel. `mask` must
// hold boxes_num * DIVUP(boxes_num, 64) int64_t entries (see
// nms_normal_kernel).
void NmsNormalLauncher(const cudaStream_t &stream, const float *boxes,
                       int64_t *mask, int boxes_num, float nms_overlap_thresh) {
  const int num_tiles = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
  dim3 grid(num_tiles, num_tiles);
  dim3 block(THREADS_PER_BLOCK_NMS);
  nms_normal_kernel<<<grid, block, 0, stream>>>(boxes_num, nms_overlap_thresh,
                                                boxes, mask);
}
| 0
|
apollo_public_repos/apollo-model-centerpoint/deploy/voxel_rcnn/cpp/custom_ops
|
apollo_public_repos/apollo-model-centerpoint/deploy/voxel_rcnn/cpp/custom_ops/iou3d_nms/iou3d_nms.h
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef IOU3D_NMS_H
#define IOU3D_NMS_H
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <vector>
#include "paddle/include/experimental/ext_all.h"
// Pairwise rotated-box overlap areas in bird's-eye view (GPU).
std::vector<paddle::Tensor> boxes_overlap_bev_gpu(
    const paddle::Tensor &boxes_a, const paddle::Tensor &boxes_b);
// Pairwise rotated-box IoU in bird's-eye view (GPU).
std::vector<paddle::Tensor> boxes_iou_bev_gpu(
    const paddle::Tensor &boxes_a_tensor, const paddle::Tensor &boxes_b_tensor);
// Rotated-box NMS (GPU); outputs kept indices and the number kept.
std::vector<paddle::Tensor> nms_gpu(const paddle::Tensor &boxes,
                                    float nms_overlap_thresh);
// Axis-aligned (heading-ignoring) NMS variant (GPU).
std::vector<paddle::Tensor> nms_normal_gpu(const paddle::Tensor &boxes,
                                           float nms_overlap_thresh);
#endif
| 0
|
apollo_public_repos/apollo-model-centerpoint/deploy/voxel_rcnn/cpp/custom_ops
|
apollo_public_repos/apollo-model-centerpoint/deploy/voxel_rcnn/cpp/custom_ops/iou3d_nms/iou3d_cpu.h
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef IOU3D_CPU_H
#define IOU3D_CPU_H
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <vector>
#include "paddle/include/experimental/ext_all.h"
// Pairwise rotated-box BEV IoU computed on the CPU.
std::vector<paddle::Tensor> boxes_iou_bev_cpu(
    const paddle::Tensor& boxes_a_tensor, const paddle::Tensor& boxes_b_tensor);
#endif
| 0
|
apollo_public_repos/apollo-model-centerpoint/deploy/voxel_rcnn/cpp/custom_ops
|
apollo_public_repos/apollo-model-centerpoint/deploy/voxel_rcnn/cpp/custom_ops/iou3d_nms/iou3d_nms_api.cpp
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <vector>
#include "iou3d_cpu.h"
#include "iou3d_nms.h"
#include "paddle/include/experimental/ext_all.h"
// The CPU BEV IoU output has the same dtype as the first input boxes tensor.
std::vector<paddle::DataType> BoxesIouBevCpuInferDtype(
    paddle::DataType boxes_a_dtype, paddle::DataType boxes_b_dtype) {
  std::vector<paddle::DataType> out_dtypes;
  out_dtypes.push_back(boxes_a_dtype);
  return out_dtypes;
}
std::vector<std::vector<int64_t>> BoxesIouBevCpuInferShape(
std::vector<int64_t> boxes_a_shape, std::vector<int64_t> boxes_b_shape) {
return {{boxes_a_shape[0], boxes_b_shape[0]}};
}
// NMS outputs are index tensors: kept indices and the kept count, both INT64
// regardless of the boxes dtype.
std::vector<paddle::DataType> NmsInferDtype(paddle::DataType boxes_dtype) {
  std::vector<paddle::DataType> out_dtypes;
  out_dtypes.push_back(paddle::DataType::INT64);
  out_dtypes.push_back(paddle::DataType::INT64);
  return out_dtypes;
}
std::vector<std::vector<int64_t>> NmsInferShape(
std::vector<int64_t> boxes_shape) {
return {{boxes_shape[0]}, {1}};
}
// Same contract as NmsInferDtype: both outputs (kept indices, kept count)
// are INT64 regardless of the boxes dtype.
std::vector<paddle::DataType> NmsNormalInferDtype(
    paddle::DataType boxes_dtype) {
  std::vector<paddle::DataType> out_dtypes;
  out_dtypes.push_back(paddle::DataType::INT64);
  out_dtypes.push_back(paddle::DataType::INT64);
  return out_dtypes;
}
std::vector<std::vector<int64_t>> NmsNormalInferShape(
std::vector<int64_t> boxes_shape) {
return {{boxes_shape[0]}, {1}};
}
// The GPU BEV IoU output has the same dtype as the first input boxes tensor.
std::vector<paddle::DataType> BoxesIouBevGpuInferDtype(
    paddle::DataType boxes_a_dtype, paddle::DataType boxes_b_dtype) {
  std::vector<paddle::DataType> out_dtypes;
  out_dtypes.push_back(boxes_a_dtype);
  return out_dtypes;
}
std::vector<std::vector<int64_t>> BoxesIouBevGpuInferShape(
std::vector<int64_t> boxes_a_shape, std::vector<int64_t> boxes_b_shape) {
return {{boxes_a_shape[0], boxes_b_shape[0]}};
}
// The GPU BEV overlap output has the same dtype as the first input tensor.
std::vector<paddle::DataType> BoxesOverlapBevGpuInferDtype(
    paddle::DataType boxes_a_dtype, paddle::DataType boxes_b_dtype) {
  std::vector<paddle::DataType> out_dtypes;
  out_dtypes.push_back(boxes_a_dtype);
  return out_dtypes;
}
std::vector<std::vector<int64_t>> BoxesOverlapBevGpuInferShape(
std::vector<int64_t> boxes_a_shape, std::vector<int64_t> boxes_b_shape) {
return {{boxes_a_shape[0], boxes_b_shape[0]}};
}
// Register the custom 3D IoU / NMS operators with Paddle.
// NOTE(review): several input names below carry a leading space
// (" boxes_b_tensor", " boxes_b"). They are kept byte-identical here because
// callers bind inputs by these exact names — confirm the space is intentional.
PD_BUILD_OP(boxes_iou_bev_cpu)
    .Inputs({"boxes_a_tensor", " boxes_b_tensor"})
    .Outputs({"ans_iou_tensor"})
    .SetKernelFn(PD_KERNEL(boxes_iou_bev_cpu))
    .SetInferDtypeFn(PD_INFER_DTYPE(BoxesIouBevCpuInferDtype))
    .SetInferShapeFn(PD_INFER_SHAPE(BoxesIouBevCpuInferShape));
PD_BUILD_OP(boxes_iou_bev_gpu)
    .Inputs({"boxes_a_tensor", " boxes_b_tensor"})
    .Outputs({"ans_iou_tensor"})
    .SetKernelFn(PD_KERNEL(boxes_iou_bev_gpu))
    .SetInferDtypeFn(PD_INFER_DTYPE(BoxesIouBevGpuInferDtype))
    .SetInferShapeFn(PD_INFER_SHAPE(BoxesIouBevGpuInferShape));
PD_BUILD_OP(boxes_overlap_bev_gpu)
    .Inputs({"boxes_a", " boxes_b"})
    .Outputs({"ans_overlap"})
    .SetKernelFn(PD_KERNEL(boxes_overlap_bev_gpu))
    .SetInferDtypeFn(PD_INFER_DTYPE(BoxesOverlapBevGpuInferDtype))
    .SetInferShapeFn(PD_INFER_SHAPE(BoxesOverlapBevGpuInferShape));
// Rotated NMS: outputs the kept box indices and how many were kept.
PD_BUILD_OP(nms_gpu)
    .Inputs({"boxes"})
    .Outputs({"keep", "num_to_keep"})
    .Attrs({"nms_overlap_thresh: float"})
    .SetKernelFn(PD_KERNEL(nms_gpu))
    .SetInferDtypeFn(PD_INFER_DTYPE(NmsInferDtype))
    .SetInferShapeFn(PD_INFER_SHAPE(NmsInferShape));
// Axis-aligned NMS variant with the same input/output contract as nms_gpu.
PD_BUILD_OP(nms_normal_gpu)
    .Inputs({"boxes"})
    .Outputs({"keep", "num_to_keep"})
    .Attrs({"nms_overlap_thresh: float"})
    .SetInferShapeFn(PD_INFER_SHAPE(NmsNormalInferShape))
    .SetKernelFn(PD_KERNEL(nms_normal_gpu))
    .SetInferDtypeFn(PD_INFER_DTYPE(NmsNormalInferDtype));
| 0
|
apollo_public_repos/apollo-model-centerpoint/deploy/voxel_rcnn/cpp/cmake
|
apollo_public_repos/apollo-model-centerpoint/deploy/voxel_rcnn/cpp/cmake/external/boost.cmake
|
# Downloads a header-only Boost distribution as an ExternalProject and exposes
# it to the build via a `boost` target and BOOST_INCLUDE_DIR / Boost_INCLUDE_DIR.
include(ExternalProject)
set(BOOST_PROJECT       "extern_boost")
# To release PaddlePaddle as a pip package, we have to follow the
# manylinux1 standard, which features as old Linux kernels and
# compilers as possible and recommends CentOS 5. Indeed, the earliest
# CentOS version that works with NVIDIA CUDA is CentOS 6. And a new
# version of boost, say, 1.66.0, doesn't build on CentOS 6. We
# checked that the devtools package of CentOS 6 installs boost 1.41.0.
# So we use 1.41.0 here.
set(BOOST_VER "1.41.0")
set(BOOST_TAR "boost_1_41_0" CACHE STRING "" FORCE)
set(BOOST_URL "http://paddlepaddledeps.bj.bcebos.com/${BOOST_TAR}.tar.gz" CACHE STRING "" FORCE)
MESSAGE(STATUS "BOOST_TAR: ${BOOST_TAR}, BOOST_URL: ${BOOST_URL}")
set(BOOST_SOURCES_DIR ${THIRD_PARTY_PATH}/boost)
set(BOOST_DOWNLOAD_DIR  "${BOOST_SOURCES_DIR}/src/${BOOST_PROJECT}")
set(BOOST_INCLUDE_DIR "${BOOST_DOWNLOAD_DIR}" CACHE PATH "boost include directory." FORCE)
set_directory_properties(PROPERTIES CLEAN_NO_CUSTOM 1)
include_directories(${BOOST_INCLUDE_DIR})
# Header-only usage: download and extract, but skip configure/build/install.
ExternalProject_Add(
    ${BOOST_PROJECT}
    ${EXTERNAL_PROJECT_LOG_ARGS}
    DOWNLOAD_DIR          ${BOOST_DOWNLOAD_DIR}
    URL      ${BOOST_URL}
    DOWNLOAD_NO_PROGRESS  1
    PREFIX                ${BOOST_SOURCES_DIR}
    CONFIGURE_COMMAND     ""
    BUILD_COMMAND         ""
    INSTALL_COMMAND       ""
    UPDATE_COMMAND        ""
)
# Older CMake / non-Windows builds can't attach dependencies to an INTERFACE
# library, so link a dummy static library instead.
if (${CMAKE_VERSION} VERSION_LESS "3.3.0" OR NOT WIN32)
    set(dummyfile ${CMAKE_CURRENT_BINARY_DIR}/boost_dummy.c)
    file(WRITE ${dummyfile} "const char *dummy = \"${dummyfile}\";")
    add_library(boost STATIC ${dummyfile})
else()
    add_library(boost INTERFACE)
endif()
add_dependencies(boost ${BOOST_PROJECT})
set(Boost_INCLUDE_DIR ${BOOST_INCLUDE_DIR})
| 0
|
apollo_public_repos/apollo-model-centerpoint/tests
|
apollo_public_repos/apollo-model-centerpoint/tests/apis/test_scheduler.py
|
import unittest
import paddle3d
class SchedulerTestCase(unittest.TestCase):
    """Checks that Scheduler.step() raises do_log / save_checkpoint / do_eval
    at the configured intervals over 20 steps.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Log every 5 steps; save a checkpoint (and evaluate) every 10 steps.
        self.scheduler = paddle3d.apis.Scheduler(
            save_interval=10, log_interval=5, do_eval=True)

    def test_status(self):
        # The scheduler is stateful: each step() call advances its internal
        # counter, so the flags are checked against the 1-based step index i.
        for i in range(1, 21):
            status = self.scheduler.step()
            if i % 5 == 0:
                self.assertEqual(status.do_log, True)
            else:
                self.assertEqual(status.do_log, False)
            if i % 10 == 0:
                self.assertEqual(status.save_checkpoint, True)
                self.assertEqual(status.do_eval, True)
            else:
                self.assertEqual(status.save_checkpoint, False)
                self.assertEqual(status.do_eval, False)


if __name__ == "__main__":
    unittest.main()
| 0
|
apollo_public_repos/apollo-model-centerpoint/tests
|
apollo_public_repos/apollo-model-centerpoint/tests/datasets/test_kitti_dataset.py
|
import unittest
import numpy as np
import paddle
import paddle3d
class KittiMonoDatasetTestCase(unittest.TestCase):
    """Smoke tests for KittiMonoDataset: split sizes, metric evaluation on
    ground-truth boxes, and dataloader batching.

    Requires the KITTI dataset to be prepared under ../../datasets/KITTI/.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # One dataset object per split; default mode is the training split.
        self.kitti_train = paddle3d.datasets.KittiMonoDataset(
            dataset_root='../../datasets/KITTI/')
        self.kitti_trainval = paddle3d.datasets.KittiMonoDataset(
            dataset_root='../../datasets/KITTI/', mode='trainval')
        self.kitti_val = paddle3d.datasets.KittiMonoDataset(
            dataset_root='../../datasets/KITTI/', mode='val')
        self.kitti_test = paddle3d.datasets.KittiMonoDataset(
            dataset_root='../../datasets/KITTI/', mode='test')

    def test_size(self):
        """Each split reports the standard KITTI sample count."""
        self.assertEqual(len(self.kitti_train), 3712)
        self.assertEqual(len(self.kitti_trainval), 7480)
        self.assertEqual(len(self.kitti_val), 3769)
        self.assertEqual(len(self.kitti_test), 7517)

    def test_evaluation(self):
        """The metric pipeline runs end-to-end on ground-truth 2D boxes."""
        samples = [s for s in self.kitti_train]
        # add confidences — the metric expects one score per predicted box
        for s in samples:
            num_boxes = s.bboxes_2d.shape[0]
            s.confidences = np.ones([num_boxes])
        metric_obj = self.kitti_train.metric
        metric_obj.update(samples)
        print(metric_obj.compute())

    def test_batching(self):
        # The dataset's own collate_fn must produce batchable samples.
        loader = paddle.io.DataLoader(
            self.kitti_train,
            batch_size=4,
            collate_fn=self.kitti_train.collate_fn)
        for _ in loader:
            ...
class KittiPCDatasetTestCase(unittest.TestCase):
    """Smoke tests for KittiPCDataset (point-cloud variant): split sizes,
    metric evaluation on ground-truth 3D boxes, and dataloader batching.

    Requires the KITTI dataset to be prepared under ../../datasets/KITTI/.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # One dataset object per split; default mode is the training split.
        self.kitti_train = paddle3d.datasets.KittiPCDataset(
            dataset_root='../../datasets/KITTI/')
        self.kitti_trainval = paddle3d.datasets.KittiPCDataset(
            dataset_root='../../datasets/KITTI/', mode='trainval')
        self.kitti_val = paddle3d.datasets.KittiPCDataset(
            dataset_root='../../datasets/KITTI/', mode='val')
        self.kitti_test = paddle3d.datasets.KittiPCDataset(
            dataset_root='../../datasets/KITTI/', mode='test')

    def test_size(self):
        """Each split reports the standard KITTI sample count."""
        self.assertEqual(len(self.kitti_train), 3712)
        self.assertEqual(len(self.kitti_trainval), 7480)
        self.assertEqual(len(self.kitti_val), 3769)
        self.assertEqual(len(self.kitti_test), 7517)

    def test_evaluation(self):
        """The metric pipeline runs end-to-end on ground-truth 3D boxes."""
        samples = [s for s in self.kitti_train]
        # add confidences — the metric expects one score per predicted box
        for s in samples:
            num_boxes = s.bboxes_3d.shape[0]
            s.confidences = np.ones([num_boxes])
        metric_obj = self.kitti_train.metric
        metric_obj.update(samples)
        print(metric_obj.compute())

    def test_batching(self):
        # The dataset's own collate_fn must produce batchable samples.
        loader = paddle.io.DataLoader(
            self.kitti_train,
            batch_size=4,
            collate_fn=self.kitti_train.collate_fn)
        for _ in loader:
            ...


if __name__ == "__main__":
    unittest.main()
| 0
|
apollo_public_repos/apollo-model-centerpoint/tests
|
apollo_public_repos/apollo-model-centerpoint/tests/datasets/test_nuscenes_dataset.py
|
import unittest
import numpy as np
import paddle
import paddle3d
class NuscenesPCDatasetTestCase(unittest.TestCase):
    """Smoke tests for NuscenesPCDataset on the nuScenes mini splits:
    split sizes, metric evaluation, and dataloader batching.

    Requires the nuScenes dataset to be prepared under ../../datasets/nuscenes/.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Only the mini splits are exercised to keep the test lightweight.
        self.nuscenes_minitrain = paddle3d.datasets.NuscenesPCDataset(
            dataset_root='../../datasets/nuscenes/', mode='mini_train')
        self.nuscenes_minival = paddle3d.datasets.NuscenesPCDataset(
            dataset_root='../../datasets/nuscenes/', mode='mini_val')

    def test_size(self):
        """Each mini split reports its standard nuScenes sample count."""
        self.assertEqual(len(self.nuscenes_minitrain), 323)
        self.assertEqual(len(self.nuscenes_minival), 81)

    def test_evaluation(self):
        """The metric pipeline runs end-to-end on ground-truth 3D boxes."""
        samples = [s for s in self.nuscenes_minitrain]
        # add confidences — the metric expects one score per predicted box
        for s in samples:
            num_boxes = s.bboxes_3d.shape[0]
            s.confidences = np.ones([num_boxes])
        metric_obj = self.nuscenes_minitrain.metric
        metric_obj.update(samples)
        print(metric_obj.compute())

    def test_batching(self):
        # The dataset's own collate_fn must produce batchable samples.
        loader = paddle.io.DataLoader(
            self.nuscenes_minitrain,
            batch_size=4,
            collate_fn=self.nuscenes_minitrain.collate_fn)
        for _ in loader:
            ...


if __name__ == "__main__":
    unittest.main()
| 0
|
apollo_public_repos/apollo-model-centerpoint
|
apollo_public_repos/apollo-model-centerpoint/docs/release_note.md
|
# Release Notes
## v1.0
2022.12.27
### New Features
* The new version 1.0 of Paddle3D is released, which provides the following features
* We support multiple types of 3D perception models, including the monocular 3D models SMOKE/CaDDN/DD3D, the pointcloud detection models PointPillars/CenterPoint/IA-SSD/PV-RCNN/Voxel R-CNN, the BEV visual detection models PETR/PETRv2/BEVFormer, and the pointcloud segmentation model SqueezeSegv3
* We added support for the Waymo dataset, so Paddle3D now fully supports the three major open-source datasets for autonomous driving
* Support for automatic mixed-precision training and quantized deployment, providing better model acceleration capabilities
* Support for sparse convolution, with integrated, easy-to-deploy SOTA models based on it
* We continue to cooperate with the Apollo team to provide one-click deployment of multiple models and integrate them into the perception algorithm part of Apollo, making it easier for developers to debug models
### 新特性
* 正式发布Paddle3D 1.0版本,提供了以下特性:
* 支持多种3D感知模型,包括单目3D模型SMOKE/CaDDN/DD3D,点云检测模型 PointPillars/CenterPoint/IA-SSD/PV-RCNN/Voxel R-CNN,BEV视觉检测模型 PETR/PETRv2/BEVFormer,点云分割模型SqueezeSegv3
* 新增Waymo数据集支持,完成了对自动驾驶三大开源数据集的全面支持
* 支持自动混合精度训练以及量化部署能力,提供更好的模型加速能力
* 新增了对稀疏卷积能力的支持,并集成了稀疏卷积方向的SOTA模型,模型训推一体,便于部署
* 持续与Apollo进行合作开发,提供多个模型一键部署集成到Apollo的感知算法部分,便于开发者更好地进行模型调试
## v0.5
2022.08.09
### New Features
* Release the first version of Paddle3D (that is, v0.5 version), the code of this project is still in beta stage, but under rapid development
* Release the monocular 3D model SMOKE/CaDDN, the point cloud detection model PointPillars/CenterPoint, and the point cloud segmentation model SqueezeSegv3, all with deployment tutorials
* Support for KITTI dataset and nuScenes dataset
* Tutorials for quick access to the Apollo platform are provided for all detection models
### 新特性
* 发布Paddle3D 的第一个版本 v0.5,本项目的代码仍在beta阶段,但处于快速迭代中
* 发布单目3D模型SMOKE/CaDDN,点云检测模型 PointPillars/CenterPoint,以及点云分割模型SqueezeSegv3,均带有部署教程
* 支持KITTI数据集和nuScenes数据集
* 为所有的检测模型提供了快速接入Apollo平台的教程
| 0
|
apollo_public_repos/apollo-model-centerpoint
|
apollo_public_repos/apollo-model-centerpoint/docs/api.md
|
# 训练
* [paddle3d.apis.Checkpoint](apis/checkpoint.md)
* [paddle3d.apis.Config](apis/config.md)
* [paddle3d.apis.Scheduler](apis/scheduler.md)
* [paddle3d.apis.Trainer](apis/trainer.md)
# 模型
* [paddle3d.models.SMOKE](apis/models/smoke.md)
# 数据集
* [paddle3d.datasets.KittiMonoDataset](apis/datasets/kitti_mono_dataset.md)
* [paddle3d.datasets.KittiPCDataset](apis/datasets/kitti_pointcloud_dataset.md)
* [paddle3d.datasets.NuscenesPCDataset](apis/datasets/nuscenes_pointcloud_dataset.md)
* [paddle3d.datasets.SemanticKITTIDataset](apis/datasets/semantickitti_seg_dataset.md)
| 0
|
apollo_public_repos/apollo-model-centerpoint
|
apollo_public_repos/apollo-model-centerpoint/docs/quickstart.md
|
# 快速开始
本文以SMOKE模型和KITTI数据集为例,介绍如何基于Paddle3D进行模型训练、评估、可视化的全流程操作。其他模型的全流程操作与此一致,各模型详细的使用教程和benchmark可参考[模型文档](./models)。
## 准备工作
在开始本教程之前,请确保已经按照 [安装文档](./installation.md) 完成了相关的准备工作
<br>
## 模型训练
**单卡训练**
使用如下命令启动单卡训练,由于一次完整的训练流程耗时较久,我们只训练100个iter进行快速体验,下面的命令在Tesla V100上大约耗时2分钟
```shell
python tools/train.py --config configs/smoke/smoke_dla34_no_dcn_kitti.yml --iters 100 --log_interval 10 --save_interval 20
```
**多卡训练**
很多3D感知模型需要使用多卡并行进行训练,Paddle3D同样支持快捷地启动多卡训练,使用如下命令可以启动四卡并行训练
```shell
export CUDA_VISIBLE_DEVICES=0,1,2,3
fleetrun tools/train.py --config configs/smoke/smoke_dla34_no_dcn_kitti.yml --iters 100 --log_interval 10 --save_interval 20
```
**混合精度训练**
如果想要启动混合精度训练,请参考[配置文件](../configs/smoke/smoke_dla34_no_dcn_kitti_amp.yml#L6-#L11)中添加amp的参数项,可用的参数可以参考 API **[paddle.amp.auto_cast](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/amp/auto_cast_cn.html#paddle.amp.auto_cast)**
**训练脚本参数介绍**
| 参数名 | 用途 | 是否必选项 | 默认值 |
| :------------------ | :----------------------------------------------------------- | :--------- | :--------------- |
| iters | 训练迭代步数 | 否 | 配置文件中指定值 |
| epochs | 训练迭代次数 | 否 | 配置文件中指定值 |
| batch_size | 单卡batch size | 否 | 配置文件中指定值 |
| learning_rate | 初始学习率 | 否 | 配置文件中指定值 |
| config | 配置文件路径 | 是 | - |
| save_dir | 检查点(模型和visualdl日志文件)的保存根路径 | 否 | output |
| num_workers | 用于异步读取数据的进程数量, 大于等于1时开启子进程读取数据 | 否 | 2 |
| save_interval | 模型保存的间隔步数 | 否 | 1000 |
| do_eval | 是否在保存模型时启动评估 | 否 | 否 |
| log_interval | 打印日志的间隔步数 | 否 | 10 |
| resume | 是否从检查点中恢复训练状态 | 否 | None |
| keep_checkpoint_max | 最多保存模型的数量 | 否 | 5 |
| quant_config | 量化配置文件,一般放在[configs/quant](../configs/quant)目录下 | 否 | None |
| seed | Paddle/numpy/random的全局随机种子值 | 否 | None |
*注意:使用一个 batch 数据对模型进行一次参数更新的过程称之为一步,iters 即为训练过程中的训练步数。完整遍历一次数据对模型进行训练的过程称之为一次迭代,epochs 即为训练过程中的训练迭代次数。一个epoch包含多个iter。*
<br>
## 训练过程可视化
Paddle3D使用VisualDL来记录训练过程中的指标和数据,我们可以在训练过程中,在命令行使用VisualDL启动一个server,并在浏览器查看相应的数据
```shell
# logdir需要和训练脚本中指定的save_dir保持一致
# 指定实际IP和端口
visualdl --logdir output --host ${HOST_IP} --port ${PORT}
```

<br>
## 模型量化(可选)
为了导出量化的模型,我们可以对模型进行量化训练,量化后的模型可以使用TensorRT + int8进行推理,从而提升推理速度,使用如下命令启动量化训练。
以多卡训练为例子,使用如下命令启动多卡量化训练,同样只训练100个iter进行快速体验
```shell
export CUDA_VISIBLE_DEVICES=0,1,2,3
# 注意这是一次新的训练,需要指定加载已经训练好的模型参数进行微调
# 并且指定新的模型保存路径
fleetrun tools/train.py \
--config configs/smoke/smoke_dla34_no_dcn_kitti.yml \
--iters 100 \
--log_interval 10 \
--save_interval 20 \
--quant_config configs/quant/smoke_kitti.yml \
--model output/iter_100/model.pdparams \
--save_dir output_smoke_quant
```
*注意,不同的模型需要探索不同的量化训练配置(如重新训练的次数,学习率衰减等),我们提供了 **SMOKE** 和 **CenterPoint** 的配置文件供参考*
<br>
## 模型评估
**单卡评估**
当模型训练完成后,需要对训练完成的模型进行指标评估,确保模型的指标满足诉求。目前Paddle3D的模型只支持单卡评估,使用以下命令启动评估脚本
```shell
wget https://paddle3d.bj.bcebos.com/models/smoke/smoke_dla34_no_dcn_kitti/model.pdparams
python tools/evaluate.py --config configs/smoke/smoke_dla34_no_dcn_kitti.yml --model model.pdparams
```
**评估脚本参数介绍**
| 参数名 | 用途 | 是否必选项 | 默认值 |
| :------------------ | :----------------------------------------------------------- | :--------- | :--------------- |
| batch_size | 评估时的batch size | 否 | 配置文件中指定值 |
| config | 配置文件路径 | 是 | - |
| model | 模型参数路径 | 否 | - |
| num_workers | 用于异步读取数据的进程数量, 大于等于1时开启子进程读取数据 | 否 | 2 |
| quant_config | 量化配置文件,一般放在[configs/quant](../configs/quant)目录下,如果模型使用量化训练,则在评估时同样需要指定量化配置文件 | 否 |
<br>
## 模型导出
当完成模型训练后,我们需要将模型导出成推理格式进行部署,我们加载Paddle3D已经训练完成的SMOKE模型参数进行模型导出
```shell
wget https://paddle3d.bj.bcebos.com/models/smoke/smoke_dla34_no_dcn_kitti/model.pdparams
python tools/export.py --config configs/smoke/smoke_dla34_no_dcn_kitti.yml --model model.pdparams
```
**导出脚本参数介绍**
| 参数名 | 用途 | 是否必选项 | 默认值 |
| :------------------ | :------------------------------------------------------------------ | :--------- | :--------------------- |
| config | 配置文件路径 | 是 | - |
| model | 模型参数路径 | 否 | - |
| export_for_apollo | 是否用于Apollo部署,当打开该开关时,会同步生成用于Apollo部署的meta文件 | 否 | False |
| save_dir | 推理模型文件的保存路径 | 否 | exported_model |
| save_name | 推理模型文件的保存名字 | 否 | None(由各模型自定决定) |
| quant_config | 量化配置文件,一般放在[configs/quant](../configs/quant)目录下,如果模型使用量化训练,则在模型导出时同样需要指定量化配置文件 | 否 |
<br>
## 模型部署
请根据实际模型选择对应的[部署文档](./models)进行参照
| 0
|
apollo_public_repos/apollo-model-centerpoint
|
apollo_public_repos/apollo-model-centerpoint/docs/configuration.md
|
# 配置文件详解
Paddle3D支持通过配置文件来描述相关的任务,从而实现配置化驱动的训练、评估、模型导出等流程,Paddle3D的配置化文件具备以下特点:
* 以yaml格式进行编写
* 支持用户配置模型、数据集、训练超参等配置项
* 通过特定的关键字 `type` 指定组件类型,并将其他参数作为实参来初始化组件
* 支持加载PaddleSeg和PaddleDetection中的组件:
* 在指定类型 `type` 时,加上 `$paddledet.` 前缀即可加载PaddleDetection的组件。
* 在指定类型 `type` 时,加上 `$paddleseg.` 前缀即可加载PaddleSeg的组件。
## 支持的配置项
| 配置项 | 含义 | 类型 |
| ----- | ---- | :-----: |
|train_dataset |训练数据集 | dict |
|val_dataset |验证数据集 | dict |
|batch_size|单张卡上,每步迭代训练时的数据量。一般来说,单步训练时的batch_size越大,则样本整体梯度更加稳定,有利于模型的收敛,调大batch_size时往往需要适当调大learning_rate | int |
|iters| 使用一个 batch 数据对模型进行一次参数更新的过程称之为一步,iters 即为训练过程中的训练步数。 | int|
|epochs| 完整遍历一次数据对模型进行训练的过程称之为一次迭代,epochs 即为训练过程中的训练迭代次数。一个epoch包含多个iter | int|
|optimizer|优化器类型,支持飞桨全部的[优化器类型](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/optimizer/Overview_cn.html#paddle-optimizer) | dict|
|lr_scheduler|调度器类型,支持飞桨全部的[LRScheduler](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/optimizer/lr/LRScheduler_cn.html) |dict|
|model| 模型类型,所支持值请参考[模型库](./apis/models/)|dict|
|\_\_base\_\_| 基础配置文件,可以不指定,该配置指向另外一个配置文件作为继承的基础配置|str|
## 完整示例
```yaml
# 从另外一个配置文件中继承配置
_base_: '../_base_/kitti_mono.yml'
# 设置batch size为8
batch_size: 8
# 设置训练轮次为70000
iters: 70000
# 指定训练集参数,由于训练集类别在 kitti_mono.yml 中已经指定,此处不需要特殊指定,直接继承
train_dataset:
# 设置三个Transform对加载的数据进行处理
transforms:
- type: LoadImage
reader: pillow
to_chw: False
- type: Gt2SmokeTarget
mode: train
num_classes: 3
- type: Normalize
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
val_dataset:
transforms:
- type: LoadImage
reader: pillow
to_chw: False
- type: Gt2SmokeTarget
mode: val
num_classes: 3
- type: Normalize
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
# 使用Adam优化器,优化器参数使用默认参数
optimizer:
type: Adam
# 设置学习率按 指定轮数 进行衰减
lr_scheduler:
type: MultiStepDecay
# 衰减轮次分别为 36000 和 55000
milestones: [36000, 55000]
# 初始学习率
learning_rate: 1.25e-4
# 选择SMOKE模型
model:
type: SMOKE
backbone:
# 骨干网络选择DLA34,并从paddle3d的云端存储中下载预训练模型进行加载
type: DLA34
pretrained: "https://bj.bcebos.com/paddle3d/pretrained/imagenet/dla34.pdparams"
head:
type: SMOKEPredictor
num_classes: 3
reg_channels: [1, 2, 3, 2, 2]
num_chanels: 256
norm_type: "gn"
in_channels: 64
depth_ref: [28.01, 16.32]
dim_ref: [[3.88, 1.63, 1.53], [1.78, 1.70, 0.58], [0.88, 1.73, 0.67]]
max_detection: 50
pred_2d: True
```
| 0
|
apollo_public_repos/apollo-model-centerpoint
|
apollo_public_repos/apollo-model-centerpoint/docs/installation.md
|
# 1. 安装教程
- [1. 安装教程](#1-安装教程)
- [1.1. 环境要求](#11-环境要求)
- [1.2. 安装说明](#12-安装说明)
- [1.2.1. 安装MiniConda](#121-安装miniconda)
- [1.2.2. 安装PaddlePaddle](#122-安装paddlepaddle)
- [1.2.2.1. 创建虚拟环境](#1221-创建虚拟环境)
- [1.2.2.2. 进入 conda 虚拟环境](#1222-进入-conda-虚拟环境)
- [1.2.2.3. 添加清华源(可选)](#1223-添加清华源可选)
- [1.2.2.4. 安装GPU版的PaddlePaddle(首选)](#1224-安装gpu版的paddlepaddle首选)
- [1.2.2.5. 安装CPU版的PaddlePaddle(备选)](#1225-安装cpu版的paddlepaddle备选)
- [1.2.2.6. 验证安装](#1226-验证安装)
- [1.2.2.7. 更多PaddlePaddle安装方式](#1227-更多paddlepaddle安装方式)
- [1.2.3. 安装apollo-model-centerpoint](#123-安装apollo-model-centerpoint)
- [1.2.3.1. 下载apollo-model-centerpoint源码](#1231-下载apollo-model-centerpoint源码)
- [1.2.3.2. 安装apollo-model-centerpoint依赖](#1232-安装apollo-model-centerpoint依赖)
- [1.2.3.3. 安装apollo-model-centerpoint](#1233-安装apollo-model-centerpoint)
- [1.3. 完整安装脚本](#13-完整安装脚本)
- [1.4. FAQ](#14-faq)
## 1.1. 环境要求
- PaddlePaddle >= 2.4.0
- Python >= 3.6
- CUDA 10.2、11.2、11.6、11.7
`说明:`
- 更多CUDA版本可参考[从源码编译](https://www.paddlepaddle.org.cn/documentation/docs/zh/install/compile/fromsource.html)。
- Jetson系列芯片可参考预编译好的[Python、C++推理库](https://www.paddlepaddle.org.cn/inference/v2.4/guides/install/download_lib.html#:~:text=paddle_inference_c.tgz-,Python%20%E6%8E%A8%E7%90%86%E5%BA%93,-%C2%B6)。
## 1.2. 安装说明
### 1.2.1. 安装MiniConda
```bash
说明:如果已安装Anaconda则无需再安装Miniconda。
```
Miniconda是一款小巧的Python环境管理工具,其安装程序中包含conda软件包管理器和Python。MiniConda的包使用软件包管理系统Conda进行管理。Conda是一个开源包管理系统和环境管理系统,可在Windows、macOS和Linux上运行。
`传送门:`[MiniConda安装教程](https://docs.conda.io/en/latest/miniconda.html#linux-installers)
### 1.2.2. 安装PaddlePaddle
#### 1.2.2.1. 创建虚拟环境
```bash
conda create -n paddle_env python=3.8
```
#### 1.2.2.2. 进入 conda 虚拟环境
```bash
conda activate paddle_env
```
#### 1.2.2.3. 添加清华源(可选)
```bash
# 对于国内用户无法连接到 conda 官方源的可以按照以下命令添加清华源:
conda config --add channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free/
conda config --add channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main/
conda config --set show_channel_urls yes
```
#### 1.2.2.4. 安装GPU版的PaddlePaddle(首选)
```bash
说明:如果您的计算机有 NVIDIA® GPU,建议安装 GPU 版的 PaddlePaddle
```
```bash
# 对于 CUDA 10.2,需要搭配 cuDNN 7.6.5(多卡环境下 NCCL>=2.7),安装命令为:
conda install paddlepaddle-gpu==2.4.1 cudatoolkit=10.2 --channel https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/Paddle/
# 对于 CUDA 11.2,需要搭配 cuDNN 8.2.1(多卡环境下 NCCL>=2.7),安装命令为:
conda install paddlepaddle-gpu==2.4.1 cudatoolkit=11.2 -c https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/Paddle/ -c conda-forge
# 对于 CUDA 11.6,需要搭配 cuDNN 8.4.0(多卡环境下 NCCL>=2.7),安装命令为:
conda install paddlepaddle-gpu==2.4.1 cudatoolkit=11.6 -c https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/Paddle/ -c conda-forge
# 对于 CUDA 11.7,需要搭配 cuDNN 8.4.1(多卡环境下 NCCL>=2.7),安装命令为:
conda install paddlepaddle-gpu==2.4.1 cudatoolkit=11.7 -c https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/Paddle/ -c conda-forge
```
您可参考 NVIDIA 官方文档了解 CUDA、CUDNN、TensorRT的安装流程和配置方法。
`传送门:`
- [CUDA安装说明](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/)
- [cuDNN安装说明](https://docs.nvidia.com/deeplearning/cudnn/install-guide/)
- [TensorRT安装说明](https://docs.nvidia.com/deeplearning/tensorrt/index.html)
- 更多CUDA版本可参考[从源码编译](https://www.paddlepaddle.org.cn/documentation/docs/zh/install/compile/fromsource.html)。
- Jetson系列芯片可参考预编译好的[Python、C++推理库](https://www.paddlepaddle.org.cn/inference/v2.4/guides/install/download_lib.html#:~:text=paddle_inference_c.tgz-,Python%20%E6%8E%A8%E7%90%86%E5%BA%93,-%C2%B6)。
#### 1.2.2.5. 安装CPU版的PaddlePaddle(备选)
```bash
说明:如果您的计算机没有 NVIDIA® GPU,请安装 CPU 版的 PaddlePaddle
```
```bash
conda install paddlepaddle==2.4.1 --channel https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/Paddle/
```
#### 1.2.2.6. 验证安装
```bash
# 输入python进入python解释器
python
```
```python
# 在python解释器中输入
import paddle
# 再输入
paddle.utils.run_check()
```
```bash
如果出现PaddlePaddle is installed successfully!,说明您已成功安装。
```
```python
# 输入quit()退出python解释器
quit()
```
#### 1.2.2.7. 更多PaddlePaddle安装方式
`传送门:`[PaddlePaddle其他安装指南](https://www.paddlepaddle.org.cn/documentation/docs/zh/install/index_cn.html)
### 1.2.3. 安装apollo-model-centerpoint
#### 1.2.3.1. 下载apollo-model-centerpoint源码
```bash
说明:如已下载apollo-model-centerpoint源码可忽略这一步。
```
```shell
git clone https://github.com/ApolloAuto/apollo-model-centerpoint.git
```
#### 1.2.3.2. 安装apollo-model-centerpoint依赖
```bash
cd apollo-model-centerpoint
pip install -r requirements.txt
```
#### 1.2.3.3. 安装apollo-model-centerpoint
```shell
pip install -e . # install in edit mode
```
## 1.3. 完整安装脚本
以下是完整的基于conda安装apollo-model-centerpoint的脚本,假设已经成功安装MiniConda或Anaconda,已安装CUDA 11.6。
```bash
# 创建虚拟环境
conda create -n paddle_env python=3.8
# 进入 conda 虚拟环境
conda activate paddle_env
# 添加清华源(可选)
# 对于国内用户无法连接到 conda 官方源的可以按照以下命令添加清华源:
conda config --add channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free/
conda config --add channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main/
conda config --set show_channel_urls yes
# 安装GPU版的PaddlePaddle
# 对于 CUDA 11.6,需要搭配 cuDNN 8.4.0(多卡环境下 NCCL>=2.7),安装命令为:
conda install paddlepaddle-gpu==2.4.1 cudatoolkit=11.6 -c https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/Paddle/ -c conda-forge
# 下载apollo-model-centerpoint代码
# 说明:如已下载apollo-model-centerpoint源码可忽略这一步。
git clone https://github.com/ApolloAuto/apollo-model-centerpoint.git
# 安装apollo-model-centerpoint依赖
cd apollo-model-centerpoint
pip install -r requirements.txt
# 安装apollo-model-centerpoint
pip install -e . # install in edit mode
```
## 1.4. FAQ
如果在安装过程中遇到问题,可以先参考[FAQ](docs/faq.md)页面。如果没有找到对应的解决方案,你也可以通过[issue](https://github.com/PaddlePaddle/Paddle3D/issues)向我们反馈。
| 0
|
apollo_public_repos/apollo-model-centerpoint/docs
|
apollo_public_repos/apollo-model-centerpoint/docs/apis/trainer.md
|
# paddle3d.apis.Trainer
训练器对象,支持在指定的数据集上训练和评估模型
## \_\_init\_\_
* **参数**
* model: 待训练或者评估的模型
* iters: 更新的训练步数,可以不指定,与epochs互斥,当指定iters时,epochs不生效
* epochs: 更新的训练轮次,可以不指定
* optimizer: 训练所用的优化器
* train_dataset: 训练数据集
* val_dataset: 评估数据集,可以不指定
* resume: 是否从检查点中恢复到上一次训练状态
* checkpoint: 检查点参数,用于保存训练过程中的模型参数和训练状态,该参数可以是:
* `dict` 类型,指定构建默认 [Checkpoint](./checkpoint.md) 类对象的参数。
* 继承了 [paddle3d.apis.CheckpointABC](./checkpoint.md) 的类对象
* scheduler: 调度器参数,用于决定训练过程中的调度行为,该参数可以是:
* `dict` 类型,指定构建默认 [Scheduler](./scheduler.md) 类对象的参数。
* 继承了 [paddle3d.apis.SchedulerABC](./scheduler.md) 的类对象
* dataloader_fn: 数据加载器参数,用于构建数据加载器,该参数可以是:
* `dict` 类型,指定构建默认 Dataloader 类对象的参数,如 `batch_size` / `drop_last` / `shuffle` 。
* 继承了 `paddle3d.apis.CheckpointABC` 的类对象
*注意:使用一个 batch 数据对模型进行一次参数更新的过程称之为一步,iters 即为训练过程中的训练步数。完整遍历一次数据对模型进行训练的过程称之为一次迭代,epochs 即为训练过程中的训练迭代次数。一个epoch包含多个iter。*
* **异常值**
* RuntimeError: 当指定的Checkpoint存在数据且未设置 `resume` 时,此时数据存在被覆写的隐患,因此将抛出该异常
## train
执行训练流程的接口
## evaluate
执行评估流程的接口
* **异常值**
* RuntimeError: 初始化时如果未指定评估数据集,则抛出该异常
| 0
|
apollo_public_repos/apollo-model-centerpoint/docs
|
apollo_public_repos/apollo-model-centerpoint/docs/apis/scheduler.md
|
# paddle3d.apis.SchedulerABC
调度器抽象基类,定义调度器应该实现的方法
## step
通知调度器对象步进一次,并返回当前步的调度状态 `SchedulerStatus`
<br>
# paddle3d.apis.Scheduler
调度器类,继承自SchedulerABC,用于决定Trainer训练过程中的调度行为,包括:
* 是否打印日志
* 是否保存检查点
* 是否执行评估操作
## \_\_init\_\_
* **参数**
* save_interval: 保存检查点的间隔步数
* log_interval: 打印日志的间隔步数
* do_eval: 是否在保存检查点时启动评估
<br>
# paddle3d.apis.SchedulerStatus
namedtuple对象,包含了调度状态
## do_eval
是否执行评估操作
## do_log
是否打印日志
## save_checkpoint
是否保存检查点
| 0
|
apollo_public_repos/apollo-model-centerpoint/docs
|
apollo_public_repos/apollo-model-centerpoint/docs/apis/checkpoint.md
|
# paddle3d.apis.CheckpointABC
检查点抽象基类,定义检查点应该实现的方法
## have
检查点中是否保存了指定tag的信息
* **参数**
* tag: 数据tag
## get
获取检查点中的指定信息
* **参数**
* tag: 数据tag
## push
保存一组模型参数和优化器参数到检查点中
* **参数**
* params_dict: 待保存的模型参数
* opt_dict: 待保存的优化器参数
* kwargs: 其余参数,和各个继承类实现有关
## pop
删除检查点队列中最先保存的数据
* **参数**
* kwargs: 其余参数,和各个继承类实现有关
## empty
检查点是否为空
## record
记录一组训练信息到检查点中
* **参数**
* key: 训练信息标签
* value: 训练信息内容
## meta
检查点的元数据
## metafile
检查点保存元数据的文件路径
## rootdir
检查点的根路径
<br>
# paddle3d.apis.Checkpoint
检查点类方法,支持保存模型和优化器参数,以及训练过程中的状态信息,继承自抽象基类CheckpointABC
## push
保存一组模型参数和优化器参数到检查点中
* **参数**
* params_dict: 待保存的模型参数
* opt_dict: 待保存的优化器参数
* tag: 参数的标签,可以不填写
* enqueue: 保存的参数是否放入队列中,队列中的参数在超过限制时会被自动删除,默认为True
* verbose: 是否打印详细日志
## pop
删除检查点队列中最先保存的数据
* **参数**
* verbose: 是否打印详细日志
## rwlock
读写锁,用于保证多进程场景下读写检查点时不会造成数据冲突
| 0
|
apollo_public_repos/apollo-model-centerpoint/docs
|
apollo_public_repos/apollo-model-centerpoint/docs/apis/config.md
|
# paddle3d.apis.Config
配置类方法,用于解析配置文件(yaml格式),提取文件中指定的组件并实例化成对应的Paddle3D对象
## \_\_init\_\_
* **参数**
* path: 配置文件路径
* learning_rate: 更新的学习率参数,可以不指定
* batch_size: 更新的batch_size,可以不指定
* iters: 更新的训练步数,可以不指定
* epochs: 更新的训练轮次,可以不指定
*注意:使用一个 batch 数据对模型进行一次参数更新的过程称之为一步,iters 即为训练过程中的训练步数。完整遍历一次数据对模型进行训练的过程称之为一次迭代,epochs 即为训练过程中的训练迭代次数。一个epoch包含多个iter。*
* **异常值**
* ValueError: 未指定配置文件路径时抛出该异常
* FileNotFoundError: 指定文件不存在时抛出该异常
* RuntimeError: 指定文件不是 yaml 格式时抛出该异常
## update
更新配置类的特定超参
* **参数**
* learning_rate: 更新的学习率参数,可以不指定
* batch_size: 更新的batch_size,可以不指定
* iters: 更新的训练步数,可以不指定
* epochs: 更新的训练轮次,可以不指定
## to_dict
将配置类中的组件信息转成字典形式并返回
## batch_size
单卡batch_size大小
## iters
训练步数,与epochs互斥,当指定iters时,epochs不生效
## epochs
训练轮次
## lr_scheduler
调度器对象
## optimizer
优化器对象
## model
模型对象
## train_dataset
训练数据集
## val_dataset
评估数据集
| 0
|
apollo_public_repos/apollo-model-centerpoint/docs/apis
|
apollo_public_repos/apollo-model-centerpoint/docs/apis/datasets/kitti_pointcloud_dataset.md
|
# paddle3d.datasets.KittiPCDataset
KITTI点云检测数据集,数据集信息请参考[KITTI官网](http://www.cvlibs.net/datasets/kitti/)
*注意:KITTI官网只区分了训练集和测试集,我们遵循业界的普遍做法,将7481个训练集样本,进一步划分为3712个训练集样本和3769个验证集样本*
## \_\_init\_\_
* **参数**
* dataset_root: 数据集的根目录
* mode: 数据集模式,支持 `train` / `val` / `trainval` / `test` 等格式
* transforms: 数据增强方法
| 0
|
apollo_public_repos/apollo-model-centerpoint/docs/apis
|
apollo_public_repos/apollo-model-centerpoint/docs/apis/datasets/kitti_mono_dataset.md
|
# paddle3d.datasets.KittiMonoDataset
KITTI单目3D检测数据集,数据集信息请参考[KITTI官网](http://www.cvlibs.net/datasets/kitti/)
*注意:KITTI官网只区分了训练集和测试集,我们遵循业界的普遍做法,将7481个训练集样本,进一步划分为3712个训练集样本和3769个验证集样本*
## \_\_init\_\_
* **参数**
* dataset_root: 数据集的根目录
* mode: 数据集模式,支持 `train` / `val` / `trainval` / `test` 等格式
* transforms: 数据增强方法
| 0
|
apollo_public_repos/apollo-model-centerpoint/docs/apis
|
apollo_public_repos/apollo-model-centerpoint/docs/apis/datasets/nuscenes_pointcloud_dataset.md
|
# paddle3d.datasets.NuscenesPCDataset
Nuscenes点云检测数据集,数据集信息请参考[NuScenes官网](https://www.nuscenes.org/)
## \_\_init\_\_
* **参数**
* dataset_root: 数据集的根目录
* mode: 数据集模式,支持 `train` / `val` / `trainval` / `test` / `mini_train` / `mini_val` 等格式
*注意:当使用NuScenes官方提供的mini数据集时,请指定mode为 mini_train 或者 mini_val*
* transforms: 数据增强方法
* max_sweeps: 用于增强每一帧点云的sweeps数量,默认为10
* class_balanced_sampling: 是否做类别均衡采样,默认为False
* class_names: 类别名,可以不指定
| 0
|
apollo_public_repos/apollo-model-centerpoint/docs/apis
|
apollo_public_repos/apollo-model-centerpoint/docs/apis/datasets/semantickitti_seg_dataset.md
|
# paddle3d.datasets.SemanticKITTIDataset
SemanticKITTI点云分割数据集,数据集信息请参考[SemanticKITTI官网](http://www.semantic-kitti.org/)
## \_\_init\_\_
* **参数**
* dataset_root: 数据集的根目录
* mode: 数据集模式,支持 `train` / `val` / `trainval` / `test` 等格式
* sequences: 数据划分序列,可以不指定,默认使用官网推荐的划分方式
* transforms: 数据增强方法
| 0
|
apollo_public_repos/apollo-model-centerpoint/docs/apis
|
apollo_public_repos/apollo-model-centerpoint/docs/apis/models/smoke.md
|
# paddle3d.models.SMOKE
单目3D检测模型 《Single-Stage Monocular 3D Object Detection via Keypoint Estimation》
## \_\_init\_\_
* **参数**
* backbone: 所用的骨干网络
* head: 预测头,目前只支持 `SMOKEPredictor`
* depth_ref: 深度参考值
* dim_ref: 每个类别的维度参考值
* max_detection: 最大检测目标数量,默认为50
* pred_2d: 是否同时预测2D检测框结果,默认为True
<br>
# paddle3d.models.SMOKEPredictor
SMOKE模型的预测头
## \_\_init\_\_
* **参数**
* num_classes:检测类别数
* norm_type: NormLayer的类型,默认为gn
* in_channels: 输入channel数量
| 0
|
apollo_public_repos/apollo-model-centerpoint/docs
|
apollo_public_repos/apollo-model-centerpoint/docs/datasets/custom.md
|
# 自定义数据集格式说明
Paddle3D支持按照[KITTI数据集](http://www.cvlibs.net/datasets/kitti/eval_object.php?obj_benchmark=3d)格式构建自己的数据集,目录结构示意如下:
```
custom_dataset
|—— training
| |—— image_2
| | |—— 000001.png
| | |—— ...
| |—— label_2
| | |—— 000001.txt
| | |—— ...
| |—— calib
| | |—— 000001.txt
| | |—— ...
| |—— velodyne
| | |—— 000001.bin
| | |—— ...
|—— ImageSets
| |—— train.txt
| |—— val.txt
```
`image_2`、`velodyne`、`label_2`和`calib`存放图像文件、点云文件、真值标注文件、坐标系转换参数文件,4个文件夹下对应同一帧的文件名前缀需相同。`ImageSets`目录存放划分至训练集和验证集的文件名前缀列表。
**注意**:单目三维物体检测任务可以不准备`velodyne`,点云三维物体检测任务可以不准备`image_2`。
- `label_2`
真值标注文件`000001.txt`示意如下:
```
Truck 0.00 0 -1.57 599.41 156.40 629.75 189.25 2.85 2.63 12.34 0.47 1.49 69.44 -1.56
Car 0.00 0 1.85 387.63 181.54 423.81 203.12 1.67 1.87 3.69 -16.53 2.39 58.49 1.57
Cyclist 0.00 3 -1.65 676.60 163.95 688.98 193.93 1.86 0.60 2.02 4.59 1.32 45.84 -1.55
DontCare -1 -1 -10 503.89 169.71 590.61 190.13 -1 -1 -1 -1000 -1000 -1000 -10
DontCare -1 -1 -10 511.35 174.96 527.81 187.45 -1 -1 -1 -1000 -1000 -1000 -10
DontCare -1 -1 -10 532.37 176.35 542.68 185.27 -1 -1 -1 -1000 -1000 -1000 -10
DontCare -1 -1 -10 559.62 175.83 575.40 183.15 -1 -1 -1 -1000 -1000 -1000 -10
```
标注格式说明如下,具体可见[KITTI官方真值说明工具](https://s3.eu-central-1.amazonaws.com/avg-kitti/devkit_object.zip)。**类别(type)可根据实际情况取名。**
```
#Values Name Description
----------------------------------------------------------------------------
1 type Describes the type of object: 'Car', 'Van', 'Truck',
'Pedestrian', 'Person_sitting', 'Cyclist', 'Tram',
'Misc' or 'DontCare'
1 truncated Float from 0 (non-truncated) to 1 (truncated), where
truncated refers to the object leaving image boundaries
1 occluded Integer (0,1,2,3) indicating occlusion state:
0 = fully visible, 1 = partly occluded
2 = largely occluded, 3 = unknown
1 alpha Observation angle of object, ranging [-pi..pi]
4 bbox 2D bounding box of object in the image (0-based index):
contains left, top, right, bottom pixel coordinates
3 dimensions 3D object dimensions: height, width, length (in meters)
3 location 3D object location x,y,z in camera coordinates (in meters)
1 rotation_y Rotation ry around Y-axis in camera coordinates [-pi..pi]
1 score Only for results: Float, indicating confidence in
detection, needed for p/r curves, higher is better.
```
**注意:** 真值标注给的是摄像头坐标系下的,而不是激光坐标系下的。KITTI各坐标系说明可参考[KITTI Coordinate Transformations](https://towardsdatascience.com/kitti-coordinate-transformations-125094cd42fb)。
- `calib`
坐标系转换参数文件`000001.txt`示例如下:
```
P0: 7.215377000000e+02 0.000000000000e+00 6.095593000000e+02 0.000000000000e+00 0.000000000000e+00 7.215377000000e+02 1.728540000000e+02 0.000000000000e+00 0.000000000000e+00 0.000000000000e+00 1.000000000000e+00 0.000000000000e+00
P1: 7.215377000000e+02 0.000000000000e+00 6.095593000000e+02 -3.875744000000e+02 0.000000000000e+00 7.215377000000e+02 1.728540000000e+02 0.000000000000e+00 0.000000000000e+00 0.000000000000e+00 1.000000000000e+00 0.000000000000e+00
P2: 7.215377000000e+02 0.000000000000e+00 6.095593000000e+02 4.485728000000e+01 0.000000000000e+00 7.215377000000e+02 1.728540000000e+02 2.163791000000e-01 0.000000000000e+00 0.000000000000e+00 1.000000000000e+00 2.745884000000e-03
P3: 7.215377000000e+02 0.000000000000e+00 6.095593000000e+02 -3.395242000000e+02 0.000000000000e+00 7.215377000000e+02 1.728540000000e+02 2.199936000000e+00 0.000000000000e+00 0.000000000000e+00 1.000000000000e+00 2.729905000000e-03
R0_rect: 9.999239000000e-01 9.837760000000e-03 -7.445048000000e-03 -9.869795000000e-03 9.999421000000e-01 -4.278459000000e-03 7.402527000000e-03 4.351614000000e-03 9.999631000000e-01
Tr_velo_to_cam: 7.533745000000e-03 -9.999714000000e-01 -6.166020000000e-04 -4.069766000000e-03 1.480249000000e-02 7.280733000000e-04 -9.998902000000e-01 -7.631618000000e-02 9.998621000000e-01 7.523790000000e-03 1.480755000000e-02 -2.717806000000e-01
Tr_imu_to_velo: 9.999976000000e-01 7.553071000000e-04 -2.035826000000e-03 -8.086759000000e-01 -7.854027000000e-04 9.998898000000e-01 -1.482298000000e-02 3.195559000000e-01 2.024406000000e-03 1.482454000000e-02 9.998881000000e-01 -7.997231000000e-01
```
`P2`是摄像头的内参,`R0_rect`是摄像头的外参,该摄像头产生的图片位于`image_2`目录下。如果没有多个摄像头,则`P0`、`P1`、`P3`皆可重复填写成`P2`。`Tr_velo_to_cam`是激光传感器到相机坐标系的转换矩阵。`Tr_imu_to_velo`是IMU到激光传感器的坐标转换矩阵。当前`P0`、`P1`、`P3`和`Tr_imu_to_velo`在Paddle3D中暂未参与计算,但为了适配数据集读取,需提供这四个参数。
- `ImageSets`
训练集列表`train.txt`示意如下:
```
000000
000003
000007
000009
000010
... ...
```
验证集列表`val.txt`示意如下:
```
000001
000002
000004
000005
000006
... ...
```
| 0
|
apollo_public_repos/apollo-model-centerpoint/docs/models
|
apollo_public_repos/apollo-model-centerpoint/docs/models/smoke/README.md
|
# SMOKE:Single-Stage Monocular 3D Object Detection via Keypoint Estimation
## 目录
* [引用](#引用)
* [简介](#简介)
* [训练配置](#训练配置)
* [使用教程](#使用教程)
* [数据准备](#数据准备)
* [训练](#训练)
* [评估](#评估)
* [导出部署](#导出部署)
* [自定义数据集](#自定义数据集)
<br>
## 引用
> Liu, Zechen, Zizhang Wu, and Roland Tóth. "Smoke: Single-stage monocular 3d object detection via keypoint estimation." In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pp. 996-997. 2020.
<br>
## 简介
SMOKE是一个单阶段的单目3D检测模型,该论文创新性地提出了预测物体中心点投影来间接预测物体3D检测框的方法。我们参照了Apollo项目对于该模型的[修改](https://github.com/ApolloAuto/apollo/tree/master/modules/perception/camera#architecture):
* 使用普通卷积替代了原论文中使用的可形变卷积
* 添加了一个头部来预测 2D 中心点和 3D 中心点之间的偏移
* 添加了另一个头部来预测 2D 边界框的宽度和高度。可以通过预测的二维中心、宽度和高度直接获得二维边界框
<br>
## 模型库
| 模型 | 骨干网络 | 3DmAP Mod. | Car<br>Easy Mod. Hard | Pedestrian<br>Easy Mod. Hard | Cyclist<br>Easy Mod. Hard | 模型下载 | 配置文件 | 日志 |
| :--: | :-------: | :--------: | :-------------------: | :--------------------------: | :-----------------------: | :------: | :-----: | :--: |
|SMOKE | DLA34 | 2.94 | 6.26 5.16 4.54 | 3.04 2.73 2.23 | 1.69 0.95 0.94 | [model](https://bj.bcebos.com/paddle3d/models/smoke/smoke_dla34_no_dcn_kitti/model.pdparams) | [config](../../../configs/smoke/smoke_dla34_no_dcn_kitti.yml) | [log](https://bj.bcebos.com/paddle3d/models/smoke/smoke_dla34_no_dcn_kitti/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=1650ec346b4426486bd079b506fc1f86) |
|SMOKE | HRNet18 | 4.05 | 8.48 6.44 5.74 | 5.02 4.23 3.06 | 2.59 1.49 1.37 | [model](https://bj.bcebos.com/paddle3d/models/smoke/smoke_hrnet18_no_dcn_kitti/model.pdparams) | [config](../../../configs/smoke/smoke_hrnet18_no_dcn_kitti.yml) | [log](https://bj.bcebos.com/paddle3d/models/smoke/smoke_hrnet18_no_dcn_kitti/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=4e31655b33d0f44b0c19399df8fb7b00) |
**注意:** KITTI benchmark使用4张V100 GPU训练得出。
<br>
## 使用教程
下面的教程将从数据准备开始,说明如何训练SMOKE模型
### 数据准备
目前Paddle3D中提供的SMOKE模型支持在KITTI数据集上训练,因此需要先准备KITTI数据集,请在[官网](http://www.cvlibs.net/datasets/kitti/eval_object.php?obj_benchmark=3d)进行下载:
1. left color images of object data set (12 GB)
2. training labels of object data set (5 MB)
3. camera calibration matrices of object data set (16 MB)
并下载数据集的划分文件列表:
```shell
wget https://bj.bcebos.com/paddle3d/datasets/KITTI/ImageSets.tar.gz
```
将数据解压后按照下方的目录结构进行组织
```shell
$ tree KITTI
KITTI
├── ImageSets
│ ├── test.txt
│ ├── train.txt
│ ├── trainval.txt
│ └── val.txt
└── training
├── calib
├── image_2
└── label_2
```
在Paddle3D的目录下创建软链接 `datasets/KITTI`,指向到上面的数据集目录
### 训练
使用如下命令启动4卡训练
```shell
export CUDA_VISIBLE_DEVICES=0,1,2,3
# 每隔50步打印一次训练进度
# 每隔5000步保存一次模型,模型参数将被保存在output目录下
fleetrun tools/train.py --config configs/smoke/smoke_dla34_no_dcn_kitti.yml --num_workers 2 --log_interval 50 --save_interval 5000
```
### 评估
使用如下命令启动评估
```shell
export CUDA_VISIBLE_DEVICES=0
# 使用Paddle3D提供的预训练模型进行评估
python tools/evaluate.py --config configs/smoke/smoke_dla34_no_dcn_kitti.yml --num_workers 2 --model output/iter_70000/model.pdparams
```
<br>
## 导出部署
使用如下命令导出训练完成的模型
```shell
# 导出Paddle3D提供的预训练模型
python tools/export.py --config configs/smoke/smoke_dla34_no_dcn_kitti.yml --model output/iter_70000/model.pdparams
```
### 执行预测
命令参数说明如下:
| 参数 | 说明 |
| -- | -- |
| model_file | 导出模型的结构文件`smoke.pdmodel`所在路径 |
| params_file | 导出模型的参数文件`smoke.pdiparams`所在路径 |
| image | 待预测的图片路径 |
| use_gpu | 是否使用GPU进行预测,默认为False|
| use_trt | 是否使用TensorRT进行加速,默认为False|
| trt_precision | 当use_trt设置为1时,模型精度可设置0/1/2,0表示fp32,1表示int8,2表示fp16。默认0 |
| collect_dynamic_shape_info | 是否收集模型动态shape信息。默认为False。**只需首次运行,下次运行时直接加载生成的shape信息文件即可进行TensorRT加速推理** |
| dynamic_shape_file | 保存收集到的模型动态shape信息的文件路径。默认为dynamic_shape_info.txt |
### Python部署
进入代码目录 `deploy/smoke/python`,运行以下命令,执行预测:
* 执行CPU预测
```shell
python infer.py --model_file /path/to/smoke.pdmodel --params_file /path/to/smoke.pdiparams --image /path/to/image
```
* 执行GPU预测
```shell
python infer.py --model_file /path/to/smoke.pdmodel --params_file /path/to/smoke.pdiparams --image /path/to/image --use_gpu
```
* 执行CPU预测并显示3d框
```shell
python vis.py --model_file /path/to/smoke.pdmodel --params_file /path/to/smoke.pdiparams --image /path/to/image
```
* 执行GPU预测并显示3d框
```shell
python vis.py --model_file /path/to/smoke.pdmodel --params_file /path/to/smoke.pdiparams --image /path/to/image --use_gpu
```
* 执行TRT预测
**注意:需要下载支持TRT版本的paddlepaddle以及nvidia对应版本的TensorRT库**
* **首次运行TensorRT**,收集模型动态shape信息,并保存至`--dynamic_shape_file`指定的文件中
```shell
python infer.py --model_file /path/to/smoke.pdmodel --params_file /path/to/smoke.pdiparams --image /path/to/image --collect_shape_info --dynamic_shape_file /path/to/shape_info.txt
```
* 加载`--dynamic_shape_file`指定的模型动态shape信息,使用FP32精度进行预测
```shell
python infer.py --model_file /path/to/smoke.pdmodel --params_file /path/to/smoke.pdiparams --image /path/to/image --use_trt --dynamic_shape_file /path/to/shape_info.txt
```
### C++部署
#### 编译步骤
- step 1: 进入部署代码所在路径
```shell
cd deploy/smoke/cpp
```
- step 2: 下载Paddle Inference C++预编译库
Paddle Inference针对**是否使用GPU**、**是否支持TensorRT**、以及**不同的CUDA/cuDNN/GCC版本**均提供已经编译好的库文件,请至[Paddle Inference C++预编译库下载列表](https://www.paddlepaddle.org.cn/inference/user_guides/download_lib.html#c)选择符合的版本。
- step 3: 下载OpenCV
- step 4: 修改`compile.sh`中的编译参数
主要修改编译脚本`compile.sh`中的以下参数:
| 参数 | 说明 |
| -- | -- |
| WITH_GPU | 是否使用gpu。ON或OFF, OFF表示使用CPU,默认ON|
| USE_TENSORRT | 是否使用TensorRT加速。ON或OFF,默认OFF|
| LIB_DIR | Paddle Inference C++预编译包所在路径,该路径下的内容应有:`CMakeCache.txt`、`paddle`、`third_party`和`version.txt` |
| CUDNN_LIB | cuDNN`libcudnn.so`所在路径 |
| CUDA_LIB | CUDA`libcudart.so `所在路径 |
| TENSORRT_ROOT | TensorRT所在路径。**非必须**,如果`USE_TENSORRT`设置为`ON`时,需要填写该路径,该路径下的内容应有`bin`、`lib`和`include`等|
- step 5: 开始编译
```shell
sh compile.sh
```
- step 6: 执行预测
```shell
./build/infer --model_file /path/to/smoke.pdmodel --params_file /path/to/smoke.pdiparams --image /path/to/image
```
**注意:如果要使用TRT预测,请根据编译步骤的step 4,修改`compile.sh`中TensorRT相关的编译参数,并重新编译。**
<br>
## 自定义数据集
如果您想在自定义数据集上进行训练,请参考[自定义数据准备教程](../datasets/custom.md)将数据组织成KITTI数据格式即可
| 0
|
apollo_public_repos/apollo-model-centerpoint/docs/models
|
apollo_public_repos/apollo-model-centerpoint/docs/models/petr/README.md
|
# PETR
## 目录
* [引用](#1)
* [简介](#2)
* [训练配置](#3)
* [使用教程](#4)
* [数据准备](#5)
* [训练](#6)
* [评估](#7)
* [导出 & 部署](#8)
## <h2 id="1">引用</h2>
> Liu, Yingfei and Wang, Tiancai and Zhang, Xiangyu and Sun, Jian. "Petr: Position embedding transformation for multi-view 3d object detection." arXiv preprint arXiv:2203.05625, 2022.
## <h2 id="2">简介</h2>
PETRv1是一个位置嵌入信息感知的多视角3D视觉检测算法。PETRv1将3D坐标信息与图像特征相融合,借助transformer的结构实现端到端的3D目标检测,PETRv1在比较简洁的架构上达到了精度SOTA(50.4 NDS, 44.1mAP),并且在一段时间内在NuScenes数据集上排名第一。
PETRv2在v1的基础上加入了时序信息,致力于构建一个统一的多视角感知框架。 PETRv2扩展了3D位置信息嵌入模块(3D PE),实现不同时刻帧之间的对齐。并在这个基础上加入了特征指导编码器来提高3D位置信息嵌入模块的数据自适应能力。PETRv2以一个简洁而有效的框架在3D目标检测,BEV分割和3D车道线检测等任务上都取得了SOTA的效果。
## <h2 id="3">训练配置</h2>
我们提供了在开源数据集上的训练配置与结果,详见[PETR训练配置](../../../configs/petr)
## <h2 id="4">模型库</h2>
| 模型 | 骨干网络 | 3DmAP | NDS | 模型下载 | 配置文件 | 日志 |
| :--: | :-------: | :--------: | :-------------------: | :------: | :-----: | :--: |
|PETR v1 | v99 | 38.35 | 43.52 | [model](https://paddle3d.bj.bcebos.com/models/petr/petr_vovnet_gridmask_p4_800x320_amp/model.pdparams) | [config](../../../configs/petr/petr_vovnet_gridmask_p4_800x320_amp.yml) | [log](https://paddle3d.bj.bcebos.com/models/petr/petr_vovnet_gridmask_p4_800x320_amp/train.log) \| [vdl](https://www.paddlepaddle.org.cn/paddle/visualdl/service/app/scalar?id=334e6a6ba257c953fe67bac17a1434a6) |
|PETR v2 | v99 | 41.05 | 49.86 | [model](https://paddle3d.bj.bcebos.com/models/petr/petrv2_vovnet_gridmask_p4_800x320/model.pdparams) | [config](../../../configs/petr/petrv2_vovnet_gridmask_p4_800x320.yml) | [log](https://paddle3d.bj.bcebos.com/models/petr/petrv2_vovnet_gridmask_p4_800x320/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=8888769be49447d6bbabebe78a5fa3ed) |
|PETR v2 + denoise | v99 | 41.35 | 50.64 | [model](https://paddle3d.bj.bcebos.com/models/petr/petrv2_vovnet_gridmask_p4_800x320_dn_amp/model.pdparams) | [config](../../../configs/petr/petrv2_vovnet_gridmask_p4_800x320_dn_amp.yml) | [log](https://paddle3d.bj.bcebos.com/models/petr/petrv2_vovnet_gridmask_p4_800x320_dn_amp/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=3b3951b08caf6367f469edf7f3863e2b) |
|PETR v2 + denoise + centerview | v99 | 43.45 | 52.24 | [model](https://paddle3d.bj.bcebos.com/models/petr/petrv2_vovnet_gridmask_p4_800x320_dn_centerview_amp/model.pdparams) | [config](../../../configs/petr/petrv2_vovnet_gridmask_p4_800x320_dn_centerview_amp.yml) | [log](https://paddle3d.bj.bcebos.com/models/petr/petrv2_vovnet_gridmask_p4_800x320_dn_centerview_amp/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=41e8354ab1faef44ab05850e1b4d5383) |
|PETR v2 + denoise + multiview | v99 | 44.91 | 53.34 | [model](https://paddle3d.bj.bcebos.com/models/petr/petrv2_vovnet_gridmask_p4_1600x640_dn_multiscale_amp/model.pdparams) | [config](../../../configs/petr/petrv2_vovnet_gridmask_p4_1600x640_dn_multiscale_amp.yml) | [log](https://paddle3d.bj.bcebos.com/models/petr/petrv2_vovnet_gridmask_p4_1600x640_dn_multiscale_amp/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=ed5c16888449914ddde4f9554c6edeac) |
## <h2 id="5">使用教程</h2>
## <h2 id="6">数据准备</h2>
请下载Nuscenes数据集,并下载作者提供的annotation文件。
下载好后的数据集目录结构
```
nuscenes
├── maps
├── samples
├── sweeps
├── v1.0-trainval
├── v1.0-test
...
```
将nuscenes数据软链至data/nuscenes,或更改配置文件数据集路径。
运行如下命令生成petr模型所需的annotation文件。
```
python tools/create_petr_nus_infos.py
```
生成完后的数据集目录
```
nuscenes
├── maps
├── samples
├── sweeps
├── v1.0-trainval
├── v1.0-test
├── petr_nuscenes_annotation_train.pkl
├── petr_nuscenes_annotation_val.pkl
```
为了方便,我们提供了生成好的annotation文件
| 文件名称 | 下载链接 |
| -- | -- |
| petr_nuscenes_annotation_train.pkl | [下载](https://paddle3d.bj.bcebos.com/datasets/nuScenes/petr_nuscenes_annotation_train.pkl) |
| petr_nuscenes_annotation_val.pkl | [下载](https://paddle3d.bj.bcebos.com/datasets/nuScenes/petr_nuscenes_annotation_val.pkl) |
## <h2 id="7">训练</h2>
需要预先下载预训练权重:
```
wget https://paddle3d.bj.bcebos.com/pretrained/fcos3d_vovnet_imgbackbone-remapped.pdparams
```
运行以下命令,进行单卡训练
```
python tools/train.py --config configs/petr/petr_vovnet_gridmask_p4_800x320.yml --model fcos3d_vovnet_imgbackbone-remapped.pdparams
```
运行以下命令,进行多卡训练
```
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
python -m paddle.distributed.launch tools/train.py --config configs/petr/petr_vovnet_gridmask_p4_800x320.yml --num_workers 2 --log_interval 50 --save_interval 1 --keep_checkpoint_max 100 --save_dir out_petr --model fcos3d_vovnet_imgbackbone-remapped.pdparams
```
若训练中断,可以通过`--resume`参数恢复训练。
## <h2 id="8">评估</h2>
运行以下命令,进行评估
```
python tools/evaluate.py --config configs/petr/petr_vovnet_gridmask_p4_800x320.yml --model out_petr/epoch_24/model.pdparams
```
## <h2 id="9">导出 & 部署</h2>
### <h3 id="91">模型导出</h3>
运行以下命令,将训练时保存的动态图模型文件导出成推理引擎能够加载的静态图模型文件。
```
python tools/export.py --config configs/petr/petr_vovnet_gridmask_p4_800x320.yml --model /path/to/model.pdparams --save_dir /path/to/output
```
| 参数 | 说明 |
| -- | -- |
| config | **[必填]** 训练配置文件所在路径 |
| model | **[必填]** 训练时保存的模型文件`model.pdparams`所在路径 |
| save_dir | **[必填]** 保存导出模型的路径,`save_dir`下将会生成三个文件:`petr.pdiparams `、`petr.pdiparams.info`和`petr.pdmodel` |
提供训练好的导出模型
| 配置文件 | 下载 |
| -- | -- |
| PETR v1 | [下载](https://paddle3d.bj.bcebos.com/models/petr/petr_exported_model.tar) |
| PETR v2 | [下载](https://paddle3d.bj.bcebos.com/models/petr/petrv2_exported_model.tar) |
### C++部署
#### Linux系统
#### 环境依赖
- GCC >= 5.4.0
- Cmake >= 3.5.1
- Ubuntu 16.04/18.04
> 说明:本文档的部署环节在以下环境中进行过测试并通过:
测试环境:
- GCC==8.2.0
- Cmake==3.16.0
- Ubuntu 18.04
- CUDA 11.2
- cuDNN==8.2
- Paddle Inference==2.4.0rc0
- TensorRT-8.2.5.1.Linux.x86_64-gnu.cuda-11.4.cudnn8.2
#### 编译步骤
**注意:目前PETR仅支持使用GPU进行推理。**
- step 1: 进入部署代码所在路径
```
cd deploy/petr/cpp
```
- step 2: 下载Paddle Inference C++预编译库
Paddle Inference针对**是否使用GPU**、**是否支持TensorRT**、以及**不同的CUDA/cuDNN/GCC版本**均提供已经编译好的库文件,请至[Paddle Inference C++预编译库下载列表](https://www.paddlepaddle.org.cn/inference/user_guides/download_lib.html#c)选择符合的版本。
- step 3: 修改`compile.sh`中的编译参数
主要修改编译脚本`compile.sh`中的以下参数:
| 参数 | 说明 |
| -- | -- |
| WITH_GPU | 是否使用gpu。ON或OFF, OFF表示使用CPU,默认ON|
| USE_TENSORRT | 是否使用TensorRT加速。ON或OFF,默认OFF|
| LIB_DIR | Paddle Inference C++预编译包所在路径,该路径下的内容应有:`CMakeCache.txt`、`paddle`、`third_party`和`version.txt` |
| CUDNN_LIB | cuDNN`libcudnn.so`所在路径 |
| CUDA_LIB | CUDA`libcudart.so `所在路径 |
| TENSORRT_ROOT | TensorRT所在路径。**非必须**,如果`USE_TENSORRT`设置为`ON`时,需要填写该路径,该路径下的内容应有`bin`、`lib`和`include`等|
- step 4: 开始编译
```
sh compile.sh
```
### 执行预测
**注意:目前PETR仅支持使用GPU进行推理。**
执行命令参数说明
| 参数 | 说明 |
| -- | -- |
| model_file | 导出模型的结构文件`petr.pdmodel`所在路径 |
| params_file | 导出模型的参数文件`petr.pdiparams`所在路径 |
| image_files | 待预测的图像文件路径列表,每个文件用逗号分开 |
| with_timestamp | 是否需要时间戳,为True时表示运行petrv2的模型 |
执行命令:
```
# petrv1
./build/main --model_file /path/to/petr.pdmodel --params_file /path/to/petr.pdiparams --image_files /path/to/img0.png,/path/to/img1.png,/path/to/img2.png,/path/to/img3.png,/path/to/img4.png,/path/to/img5.png
# petrv2
./build/main --model_file /path/to/petr.pdmodel --params_file /path/to/petr.pdiparams --image_files /path/to/img0.png,/path/to/img1.png,/path/to/img2.png,/path/to/img3.png,/path/to/img4.png,/path/to/img5.png,/path/to/img0_pre.png,/path/to/img1_pre.png,/path/to/img2_pre.png,/path/to/img3_pre.png,/path/to/img4_pre.png,/path/to/img5_pre.png --with_timestamp
```
### 开启TensorRT加速预测【可选】
**注意:请根据编译步骤的step 3,修改`compile.sh`中TensorRT相关的编译参数,并重新编译。**
运行命令参数说明如下:
| 参数 | 说明 |
| -- | -- |
| model_file | 导出模型的结构文件`petr.pdmodel`所在路径 |
| params_file | 导出模型的参数文件`petr.pdiparams`所在路径 |
| image_files | 待预测的图像文件路径列表,每个文件用逗号分开 |
| use_trt | 是否使用TensorRT进行加速,默认false|
| trt_precision | 当use_trt设置为1时,模型精度可设置0或1,0表示fp32, 1表示fp16。默认0 |
| trt_use_static | 当trt_use_static设置为true时,**在首次运行程序的时候会将TensorRT的优化信息进行序列化到磁盘上,下次运行时直接加载优化的序列化信息而不需要重新生成**。默认false |
| trt_static_dir | 当trt_use_static设置为true时,保存优化信息的路径 |
| collect_shape_info | 是否收集模型动态shape信息。默认false。**只需首次运行,下次运行时直接加载生成的shape信息文件即可进行TensorRT加速推理** |
| dynamic_shape_file | 保存模型动态shape信息的文件路径。 默认:petr_shape_info.txt|
| with_timestamp | 是否需要时间戳,为True时表示运行petrv2的模型 |
* **首次运行TensorRT**,收集模型动态shape信息,并保存至`--dynamic_shape_file`指定的文件中
```
# petrv1
./build/main --model_file /path/to/petr.pdmodel --params_file /path/to/petr.pdiparams --image_files /path/to/img0.png,/path/to/img1.png,/path/to/img2.png,/path/to/img3.png,/path/to/img4.png,/path/to/img5.png --use_trt --collect_shape_info --dynamic_shape_file /path/to/shape_info.txt
# petrv2
./build/main --model_file /path/to/petr.pdmodel --params_file /path/to/petr.pdiparams --image_files /path/to/img0.png,/path/to/img1.png,/path/to/img2.png,/path/to/img3.png,/path/to/img4.png,/path/to/img5.png,/path/to/img0_pre.png,/path/to/img1_pre.png,/path/to/img2_pre.png,/path/to/img3_pre.png,/path/to/img4_pre.png,/path/to/img5_pre.png --use_trt --collect_shape_info --dynamic_shape_file /path/to/shape_info.txt --with_timestamp
```
* 加载`--dynamic_shape_file`指定的模型动态shape信息,使用FP32精度进行预测
```
# petrv1
./build/main --model_file /path/to/petr.pdmodel --params_file /path/to/petr.pdiparams --image_files /path/to/img0.png,/path/to/img1.png,/path/to/img2.png,/path/to/img3.png,/path/to/img4.png,/path/to/img5.png --use_trt --dynamic_shape_file /path/to/shape_info.txt
# petrv2
./build/main --model_file /path/to/petr.pdmodel --params_file /path/to/petr.pdiparams --image_files /path/to/img0.png,/path/to/img1.png,/path/to/img2.png,/path/to/img3.png,/path/to/img4.png,/path/to/img5.png,/path/to/img0_pre.png,/path/to/img1_pre.png,/path/to/img2_pre.png,/path/to/img3_pre.png,/path/to/img4_pre.png,/path/to/img5_pre.png --use_trt --dynamic_shape_file /path/to/shape_info.txt --with_timestamp
```
* 加载`--dynamic_shape_file`指定的模型动态shape信息,使用FP16精度进行预测
```
# petrv1
./build/main --model_file /path/to/petr.pdmodel --params_file /path/to/petr.pdiparams --image_files /path/to/img0.png,/path/to/img1.png,/path/to/img2.png,/path/to/img3.png,/path/to/img4.png,/path/to/img5.png --use_trt --dynamic_shape_file /path/to/shape_info.txt --trt_precision 1
# petrv2
./build/main --model_file /path/to/petr.pdmodel --params_file /path/to/petr.pdiparams --image_files /path/to/img0.png,/path/to/img1.png,/path/to/img2.png,/path/to/img3.png,/path/to/img4.png,/path/to/img5.png,/path/to/img0_pre.png,/path/to/img1_pre.png,/path/to/img2_pre.png,/path/to/img3_pre.png,/path/to/img4_pre.png,/path/to/img5_pre.png --use_trt --dynamic_shape_file /path/to/shape_info.txt --trt_precision 1 --with_timestamp
```
* 如果觉得每次运行时模型加载的时间过长,可以设置`trt_use_static`和`trt_static_dir`,首次运行时将TensorRT的优化信息保存在硬盘中,后续直接反序列化优化信息即可
```
# petrv1
./build/main --model_file /path/to/petr.pdmodel --params_file /path/to/petr.pdiparams --image_files /path/to/img0.png,/path/to/img1.png,/path/to/img2.png,/path/to/img3.png,/path/to/img4.png,/path/to/img5.png --use_trt --collect_shape_info --dynamic_shape_file /path/to/shape_info.txt --trt_precision 1 --trt_use_static --trt_static_dir /path/to/OptimCacheDir
# petrv2
./build/main --model_file /path/to/petr.pdmodel --params_file /path/to/petr.pdiparams --image_files /path/to/img0.png,/path/to/img1.png,/path/to/img2.png,/path/to/img3.png,/path/to/img4.png,/path/to/img5.png,/path/to/img0_pre.png,/path/to/img1_pre.png,/path/to/img2_pre.png,/path/to/img3_pre.png,/path/to/img4_pre.png,/path/to/img5_pre.png --use_trt --dynamic_shape_file /path/to/shape_info.txt --trt_precision 1 --with_timestamp --trt_use_static --trt_static_dir /path/to/OptimCacheDir
```
### Python部署
进入部署代码所在路径
```
cd deploy/petr/python
```
**注意:目前PETR仅支持使用GPU进行推理。**
命令参数说明如下:
| 参数 | 说明 |
| -- | -- |
| model_file | 导出模型的结构文件`petr.pdmodel`所在路径 |
| params_file | 导出模型的参数文件`petr.pdiparams`所在路径 |
| img_paths | 待预测的图像文件路径列表,每个文件用逗号分开 |
| use_trt | 是否使用TensorRT进行加速,默认False|
| trt_precision | 当use_trt设置为1时,模型精度可设置0或1,0表示fp32, 1表示fp16。默认0 |
| trt_use_static | 当trt_use_static设置为True时,**在首次运行程序的时候会将TensorRT的优化信息进行序列化到磁盘上,下次运行时直接加载优化的序列化信息而不需要重新生成**。默认0 |
| trt_static_dir | 当trt_use_static设置为1时,保存优化信息的路径 |
| collect_shape_info | 是否收集模型动态shape信息。默认False。**只需首次运行,后续直接加载生成的shape信息文件即可进行TensorRT加速推理** |
| dynamic_shape_file | 保存模型动态shape信息的文件路径。 |
| with_timestamp | 是否需要时间戳,为True时表示运行petrv2的模型 |
运行以下命令,执行预测:
```
# petrv1
python infer.py --model_file /path/to/petr.pdmodel --params_file /path/to/petr.pdiparams --img_paths /path/to/img0.png /path/to/img1.png /path/to/img2.png /path/to/img3.png /path/to/img4.png /path/to/img5.png
# petrv2
python infer.py --model_file /path/to/petr.pdmodel --params_file /path/to/petr.pdiparams --img_paths /path/to/img0.png /path/to/img1.png /path/to/img2.png /path/to/img3.png /path/to/img4.png /path/to/img5.png /path/to/img0_pre.png /path/to/img1_pre.png /path/to/img2_pre.png /path/to/img3_pre.png /path/to/img4_pre.png /path/to/img5_pre.png
```
| 0
|
apollo_public_repos/apollo-model-centerpoint/docs/models
|
apollo_public_repos/apollo-model-centerpoint/docs/models/centerpoint/README.md
|
# CenterPoint:Center-based 3D Object Detection and Tracking
## 目录
* [引用](#1)
* [简介](#2)
* [模型库](#3)
* [训练 & 评估](#4)
* [nuScenes数据集](#41)
* [KITTI数据集](#42)
* [导出 & 部署](#8)
* [Apollo模型](#9)
* [训练自定义数据集](#10)
## <h2 id="1">引用</h2>
> Yin, Tianwei and Zhou, Xingyi and Krahenbuhl, Philipp. "Center-Based 3D Object Detection and Tracking." In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 11784-11793. 2021.
## <h2 id="2">简介</h2>
CenterPoint是Anchor-Free的三维物体检测器,以点云作为输入,将三维物体在Bird-View下的中心点作为关键点,基于关键点检测的方式回归物体的尺寸、方向和速度。相比于Anchor-Based的三维物体检测器,CenterPoint不需要人为设定Anchor尺寸,面向物体尺寸多样不一的场景时其精度表现更高,且简易的模型设计使其在性能上也表现更加高效。
Paddle3D实现的CenterPoint做了以下优化:
- 对模型的前后处理做了性能优化。CenterPoint-Pillars在[nuScenes](https://www.nuscenes.org/nuscenes) val set上精度有50.97mAP,速度在Tesla V100上达到了50.28FPS。
- 提供[KITTI数据集](http://www.cvlibs.net/datasets/kitti/eval_object.php?obj_benchmark=3d)上的训练配置和Baseline。CenterPoint-Pillars在KITTI val set上精度达到64.75 mAP,速度在Tesla V100上达到了43.96FPS。
跟原论文相比,Paddle3D实现的CenterPoint有以下差异:
- 未提供第二个阶段的实现。在原论文中,作者还设计了第二个阶段来进一步精炼物体的位置、尺寸和方向,并在[Waymo数据集](https://waymo.com/open/)上做了验证。Paddle3D目前还未适配Waymo数据集,所以第二个阶段暂未实现。
- 未提供在nuScenes数据集上将预测速度用于多目标跟踪的实现。
## <h2 id="3">模型库</h2>
- CenterPoint在nuScenes Val set数据集上的表现
| 模型 | 体素格式 | mAP | NDS | V100 TensorRT FP32(FPS) | V100 TensorRT FP16(FPS) | 模型下载 | 配置文件 | 日志 |
| ---- | ---------------- | --- | --- | ----------------------- | ----------------------- | -------- | -------- | ---- |
| CenterPoint | 2D-Pillars | 50.97 | 61.30 | 50.28 | 63.43 | [model](https://bj.bcebos.com/paddle3d/models/centerpoint//centerpoint_pillars_02voxel_nuscenes_10sweep/model.pdparams) | [config](../../../configs/centerpoint/centerpoint_pillars_02voxel_nuscenes_10sweep.yml) | [log](https://bj.bcebos.com/paddle3d/models/centerpoint//centerpoint_pillars_02voxel_nuscenes_10sweep/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=f150eb3b4db30c7bd4ff2dfac5ca4166) |
| CenterPoint | 3D-Voxels | 59.25 | 66.74 | 21.90 | 26.93 | [model]( https://bj.bcebos.com/paddle3d/models/centerpoint/centerpoint_voxels_0075voxel_nuscenes_10sweep/model.pdparams) | [config](../../../configs/centerpoint/centerpoint_voxels_0075voxel_nuscenes_10sweep.yml) | [log]( https://bj.bcebos.com/paddle3d/models/centerpoint/centerpoint_voxels_0075voxel_nuscenes_10sweep/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=2cf9f2123ea8393cf873e8f8ae907fdc) |
**注意:nuScenes benchmark使用4张V100 GPU训练得出。3D Sparse Conv功能需要安装Paddle 2.4版。**
- CenterPoint在KITTI Val set数据集上的表现
| 模型 | 体素格式 | 3DmAP Mod. | Car<br>Easy Mod. Hard | Pedestrian<br>Easy Mod. Hard | Cyclist<br>Easy Mod. Hard | V100 TensorRT FP32(FPS) | V100 TensorRT FP16(FPS) | 模型下载 | 配置文件 | 日志 |
| ---- | ---------------- | ---------- | ------------------ | ------------------------- | -----------------------| ----------------------- | ----------------------- | -------- | -------- | ---- |
| CenterPoint | 2D-Pillars | 64.75 | 85.99 76.69 73.62 | 57.66 54.03 49.75 | 84.30 63.52 59.47 | 43.96 | 74.21 | [model]( https://bj.bcebos.com/paddle3d/models/centerpoint//centerpoint_pillars_016voxel_kitti/model.pdparams) | [config](../../../configs/centerpoint/centerpoint_pillars_016voxel_kitti.yml)| [log]( https://bj.bcebos.com/paddle3d/models/centerpoint//centerpoint_pillars_016voxel_kitti/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=7f2b637cfce7995a55b915216b8b1171) |
| 模型 | 体素格式 | BEVmAP Mod. | Car<br>Easy Mod. Hard | Pedestrian<br>Easy Mod. Hard | Cyclist<br>Easy Mod. Hard | V100 TensorRT FP32(FPS) | V100 TensorRT FP16(FPS) | 模型下载 | 配置文件 | 日志 |
| ---- | ---------------- | ----------- | ------------------ | ------------------------- | ---------------------- | ----------------------- | ----------------------- | -------- | -------- | ---- |
| CenterPoint | 2D-Pillars | 71.87 | 93.03 87.33 86.21 | 66.46 62.66 58.54 | 86.59 65.62 61.58 | 43.96 | 74.21 | [model]( https://bj.bcebos.com/paddle3d/models/centerpoint//centerpoint_pillars_016voxel_kitti/model.pdparams) | [config](../../../configs/centerpoint/centerpoint_pillars_016voxel_kitti.yml)| [log]( https://bj.bcebos.com/paddle3d/models/centerpoint//centerpoint_pillars_016voxel_kitti/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=7f2b637cfce7995a55b915216b8b1171) |
**注意:** KITTI benchmark使用8张V100 GPU训练得出。
## <h2 id="4">训练 & 评估</h2>
### <h3 id="41">nuScenes数据集</h3>
#### 数据准备
- 目前Paddle3D中提供的CenterPoint模型支持在nuScenes数据集上训练,因此需要先准备nuScenes数据集,请在[官网](https://www.nuscenes.org/nuscenes)进行下载,并将数据集目录准备如下:
```
nuscenes_dataset_root
|—— samples
|—— sweeps
|—— maps
|—— v1.0-trainval
```
在Paddle3D的目录下创建软链接 `datasets/nuscenes`,指向到上面的数据集目录:
```
mkdir datasets
ln -s /path/to/nuscenes_dataset_root ./datasets
mv ./datasets/nuscenes_dataset_root ./datasets/nuscenes
```
- 生成训练时数据增强所需的真值库:
```
python tools/create_det_gt_database.py --dataset_name nuscenes --dataset_root ./datasets/nuscenes --save_dir ./datasets/nuscenes
```
`--dataset_root`指定nuScenes数据集所在路径,`--save_dir`指定用于保存所生成的真值库的路径。该命令执行后,`save_dir`生成的目录如下:
```
gt_database_train_nsweeps10_withvelo
|—— anno_info_train_nsweeps10_withvelo.pkl
|—— bicycle
| |—— 20646_bicycle_4.bin
| |—— ...
|—— car
|—— ...
```
#### 训练
nuScenes数据集上的训练使用4张GPU:
```
python -m paddle.distributed.launch --gpus 0,1,2,3 tools/train.py --config configs/centerpoint/centerpoint_pillars_02voxel_nuscenes_10sweep.yml --save_dir ./output_nuscenes --num_workers 3 --save_interval 5
```
训练启动参数介绍可参考文档[全流程速览](../../quickstart.md#模型训练)。
#### 评估
```
python tools/evaluate.py --config configs/centerpoint/centerpoint_pillars_02voxel_nuscenes_10sweep.yml --model ./output_nuscenes/epoch_20/model.pdparams --batch_size 1 --num_workers 3
```
**注意**:CenterPoint的评估目前只支持batch_size为1。
评估启动参数介绍可参考文档[全流程速览](../../quickstart.md#模型评估)。
### <h3 id="42">KITTI数据集</h3>
- 目前Paddle3D中提供的CenterPoint模型支持在KITTI数据集上训练,因此需要先准备KITTI数据集,请在[官网](http://www.cvlibs.net/datasets/kitti/eval_object.php?obj_benchmark=3d)进行下载:
1. Download Velodyne point clouds, if you want to use laser information (29 GB)
2. training labels of object data set (5 MB)
3. camera calibration matrices of object data set (16 MB)
并下载数据集的划分文件列表:
```
wget https://bj.bcebos.com/paddle3d/datasets/KITTI/ImageSets.tar.gz
```
将数据解压后按照下方的目录结构进行组织:
```
kitti_dataset_root
|—— training
| |—— label_2
| | |—— 000001.txt
| | |—— ...
| |—— calib
| | |—— 000001.txt
| | |—— ...
| |—— velodyne
| | |—— 000001.bin
| | |—— ...
|—— ImageSets
│ |—— test.txt
│ |—— train.txt
│ |—— trainval.txt
│ |—— val.txt
```
在Paddle3D的目录下创建软链接 `datasets/KITTI`,指向到上面的数据集目录:
```
mkdir datasets
ln -s /path/to/kitti_dataset_root ./datasets
mv ./datasets/kitti_dataset_root ./datasets/KITTI
```
- 生成训练时数据增强所需的真值库:
```
python tools/create_det_gt_database.py --dataset_name kitti --dataset_root ./datasets/KITTI --save_dir ./datasets/KITTI
```
`--dataset_root`指定KITTI数据集所在路径,`--save_dir`指定用于保存所生成的真值库的路径。该命令执行后,`save_dir`生成的目录如下:
```
kitti_train_gt_database
|—— anno_info_train.pkl
|—— Car
| |—— 4371_Car_7.bin
| |—— ...
|—— Cyclist
```
#### 训练
KITTI数据集上的训练使用8张GPU:
```
python -m paddle.distributed.launch --gpus 0,1,2,3,4,5,6,7 tools/train.py --config configs/centerpoint/centerpoint_pillars_016voxel_kitti.yml --save_dir ./output_kitti --num_workers 4 --save_interval 5
```
训练启动参数介绍可参考文档[全流程速览](../../quickstart.md#模型训练)。
#### 评估
```
python tools/evaluate.py --config configs/centerpoint/centerpoint_pillars_016voxel_kitti.yml --model ./output_kitti/epoch_160/model.pdparams --batch_size 1 --num_workers 4
```
**注意**:CenterPoint的评估目前只支持batch_size为1。
评估启动参数介绍可参考文档[全流程速览](../../quickstart.md#模型评估)。
## <h2 id="8">导出 & 部署</h2>
### <h3 id="81">模型导出</h3>
运行以下命令,将训练时保存的动态图模型文件导出成推理引擎能够加载的静态图模型文件。
```
python tools/export.py --config configs/centerpoint/centerpoint_pillars_02voxel_nuscenes_10sweep.yml --model /path/to/model.pdparams --save_dir /path/to/output
```
| 参数 | 说明 |
| -- | -- |
| config | **[必填]** 训练配置文件所在路径 |
| model | **[必填]** 训练时保存的模型文件`model.pdparams`所在路径 |
| save_dir | **[必填]** 保存导出模型的路径,`save_dir`下将会生成三个文件:`centerpoint.pdiparams `、`centerpoint.pdiparams.info`和`centerpoint.pdmodel` |
### C++部署
#### Linux系统
#### 环境依赖
- GCC >= 5.4.0
- Cmake >= 3.5.1
- Ubuntu 16.04/18.04
> 说明:本文档的部署环节在以下环境中进行过测试并通过:
测试环境一:
- GCC==8.2.0
- Cmake==3.16.0
- Ubuntu 18.04
- CUDA 11.2
- cuDNN==8.1.1
- Paddle Inference==2.3.1
- TensorRT-8.2.5.1.Linux.x86_64-gnu.cuda-11.4.cudnn8.2
测试环境二:
- GCC==7.5.0
- Cmake==3.19.6
- Ubuntu 18.04
- CUDA==11.1
- cuDNN==8.0.4
- Paddle Inference==2.3.1
- TensorRT-8.2.5.1.Linux.x86_64-gnu.cuda-11.4.cudnn8.2
#### 编译步骤
**注意:目前CenterPoint仅支持使用GPU进行推理。**
- step 1: 进入部署代码所在路径
```
cd deploy/centerpoint/cpp
```
- step 2: 下载Paddle Inference C++预编译库
Paddle Inference针对**是否使用GPU**、**是否支持TensorRT**、以及**不同的CUDA/cuDNN/GCC版本**均提供已经编译好的库文件,请至[Paddle Inference C++预编译库下载列表](https://www.paddlepaddle.org.cn/inference/user_guides/download_lib.html#c)选择符合的版本。
- step 3: 修改`compile.sh`中的编译参数
主要修改编译脚本`compile.sh`中的以下参数:
| 参数 | 说明 |
| -- | -- |
| WITH_GPU | 是否使用gpu。ON或OFF, OFF表示使用CPU,默认ON|
| USE_TENSORRT | 是否使用TensorRT加速。ON或OFF,默认OFF|
| LIB_DIR | Paddle Inference C++预编译包所在路径,该路径下的内容应有:`CMakeCache.txt`、`paddle`、`third_party`和`version.txt` |
| CUDNN_LIB | cuDNN`libcudnn.so`所在路径 |
| CUDA_LIB | CUDA`libcudart.so `所在路径 |
| TENSORRT_ROOT | TensorRT所在路径。**非必须**,如果`USE_TENSORRT`设置为`ON`时,需要填写该路径,该路径下的内容应有`bin`、`lib`和`include`等|
- step 4: 开始编译
```
sh compile.sh
```
### 执行预测
**注意:目前CenterPoint仅支持使用GPU进行推理。**
执行命令参数说明
| 参数 | 说明 |
| -- | -- |
| model_file | 导出模型的结构文件`centerpoint.pdmodel`所在路径 |
| params_file | 导出模型的参数文件`centerpoint.pdiparams`所在路径 |
| lidar_file | 待预测的点云文件所在路径 |
| num_point_dim | 点云文件中每个点的维度大小。例如,若每个点的信息是`x, y, z, intensity`,则`num_point_dim`填写为4 |
| with_timelag | 该参数仅针对由多帧融合而成的点云文件,融合后的点云文件通常每个点都会包含时间差(timelag)。若点云维度大于等于5且第5维信息是timelag,需设置为1,默认0 |
```
./build/main --model_file /path/to/centerpoint.pdmodel --params_file /path/to/centerpoint.pdiparams --lidar_file /path/to/lidar.pcd.bin --num_point_dim 5
```
**注意:** 请预先确认实际待测试点云文件的维度是否是5,如果不是5,`--num_point_dim`请修改为实际值。如果待测试的点云文件是由多帧融合而成且点云维度大于等于5且第5维信息是timelag,可将`--with_timelag`设置为1。
### 开启TensorRT加速预测【可选】
**注意:请根据编译步骤的step 3,修改`compile.sh`中TensorRT相关的编译参数,并重新编译。**
运行命令参数说明如下:
| 参数 | 说明 |
| -- | -- |
| model_file | 导出模型的结构文件`centerpoint.pdmodel`所在路径 |
| params_file | 导出模型的参数文件`centerpoint.pdiparams`所在路径 |
| lidar_file | 待预测的点云文件所在路径 |
| num_point_dim | 点云文件中每个点的维度大小。例如,若每个点的信息是`x, y, z, intensity`,则`num_point_dim`填写为4 |
| with_timelag | 仅针对`nuscenes`数据集,若使用`nuscenes`数据集训练的模型,需设置为1,默认0 |
| use_trt | 是否使用TensorRT进行加速,默认0|
| trt_precision | 当use_trt设置为1时,模型精度可设置0或1,0表示fp32, 1表示fp16。默认0 |
| trt_use_static | 当trt_use_static设置为1时,**在首次运行程序的时候会将TensorRT的优化信息进行序列化到磁盘上,下次运行时直接加载优化的序列化信息而不需要重新生成**。默认0 |
| trt_static_dir | 当trt_use_static设置为1时,保存优化信息的路径 |
| collect_shape_info | 是否收集模型动态shape信息。默认0。**只需首次运行,下次运行时直接加载生成的shape信息文件即可进行TensorRT加速推理** |
| dynamic_shape_file | 保存模型动态shape信息的文件路径。 |
* **首次运行TensorRT**,收集模型动态shape信息,并保存至`--dynamic_shape_file`指定的文件中
```
./build/main --model_file /path/to/centerpoint.pdmodel --params_file /path/to/centerpoint.pdiparams --lidar_file /path/to/lidar.pcd.bin --num_point_dim 5 --use_trt 1 --collect_shape_info 1 --dynamic_shape_file /path/to/shape_info.txt
```
* 加载`--dynamic_shape_file`指定的模型动态shape信息,使用FP32精度进行预测
```
./build/main --model_file /path/to/centerpoint.pdmodel --params_file /path/to/centerpoint.pdiparams --lidar_file /path/to/lidar.pcd.bin --num_point_dim 5 --use_trt 1 --dynamic_shape_file /path/to/shape_info.txt
```
* 加载`--dynamic_shape_file`指定的模型动态shape信息,使用FP16精度进行预测
```
./build/main --model_file /path/to/centerpoint.pdmodel --params_file /path/to/centerpoint.pdiparams --lidar_file /path/to/lidar.pcd.bin --num_point_dim 5 --use_trt 1 --dynamic_shape_file /path/to/shape_info.txt --trt_precision 1
```
* 如果觉得每次运行时模型加载的时间过长,可以设置`trt_use_static`和`trt_static_dir`,首次运行时将TensorRT的优化信息保存在硬盘中,后续直接反序列化优化信息即可
```
./build/main --model_file /path/to/centerpoint.pdmodel --params_file /path/to/centerpoint.pdiparams --lidar_file /path/to/lidar.pcd.bin --num_point_dim 4 --use_trt 1 --dynamic_shape_file /path/to/shape_info.txt --trt_precision 1 --trt_use_static 1 --trt_static_dir /path/to/OptimCacheDir
```
### Python部署
**注意:目前CenterPoint仅支持使用GPU进行推理。**
命令参数说明如下:
| 参数 | 说明 |
| -- | -- |
| model_file | 导出模型的结构文件`centerpoint.pdmodel`所在路径 |
| params_file | 导出模型的参数文件`centerpoint.pdiparams`所在路径 |
| lidar_file | 待预测的点云文件所在路径 |
| num_point_dim | 点云文件中每个点的维度大小。例如,若每个点的信息是`x, y, z, intensity`,则`num_point_dim`填写为4 |
| with_timelag | 该参数仅针对由多帧融合而成的点云文件,融合后的点云文件通常每个点都会包含时间差(timelag)。若点云维度大于等于5且第5维信息是timelag,需设置为1,默认0 |
| use_trt | 是否使用TensorRT进行加速,默认0|
| trt_precision | 当use_trt设置为1时,模型精度可设置0或1,0表示fp32, 1表示fp16。默认0 |
| trt_use_static | 当trt_use_static设置为1时,**在首次运行程序的时候会将TensorRT的优化信息进行序列化到磁盘上,下次运行时直接加载优化的序列化信息而不需要重新生成**。默认0 |
| trt_static_dir | 当trt_use_static设置为1时,保存优化信息的路径 |
| collect_shape_info | 是否收集模型动态shape信息。默认0。**只需首次运行,后续直接加载生成的shape信息文件即可进行TensorRT加速推理** |
| dynamic_shape_file | 保存模型动态shape信息的文件路径。 |
运行以下命令,执行预测:
```
python infer.py --model_file /path/to/centerpoint.pdmodel --params_file /path/to/centerpoint.pdiparams --lidar_file /path/to/lidar.pcd.bin --num_point_dim 5
```
Python开启TensorRT的推理步骤与C++开启TensorRT加速推理一致,请参考文档前面介绍【开启TensorRT加速预测】并将C++命令参数替换成Python的命令参数。推荐使用PaddlePaddle 的官方镜像,镜像内已经预安装TensorRT。官方镜像请至[Paddle官网](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/install/docker/linux-docker.html)进行下载。
## <h2 id="9">Apollo模型</h2>
Apollo使用百度自动驾驶数据对CenterPoint进行了训练和优化,检测效果和泛化能力都获得大幅提升,可以提供复杂城市道路场景下实时、准确、稳定的3D目标检测效果。
模型文件下载地址:
| 模型文件 | 下载地址 |
| -- | -- |
| Apollo CenterPoint训练权重文件 | [Link](https://apollo-pkg-beta.bj.bcebos.com/perception_model/centerpoint_core_pretrained_model.zip) |
| Apollo CenterPoint可部署文件 | [Link](https://apollo-pkg-beta.bj.bcebos.com/perception_model/center_point_paddle.zip) |
检测效果:
<div align=center>
<img src="../../../images/centerpoint_result2.png" width="1200"/>
</div>
<div align=center>
<img src="../../../images/centerpoint_result3.png" width="1200"/>
</div>
<div align=center>
<img src="../../../images/centerpoint_result1.png" width="1200"/>
</div>
## <h2 id="10">训练自定义数据集</h2>
下面将以apolloscape数据集为例,介绍用户训练自己数据集的完整流程
### 转化为KITTI标准格式
推荐用户将处理为标准的KITTI格式,数据组织形式如下所示:
```
|-- ImageSets
|-- train.txt
|-- val.txt
|-- training
|-- label_2
|-- 000000.txt
|-- 000001.txt
|-- ....
|-- velodyne
|-- 000000.bin
|-- 000001.bin
|-- ....
```
train.txt和val.txt分别保存着训练数据和评测数据的索引,下面是train.txt的内容示例:
```
000000
000001
000003
000004
000006
000008
000010
000011
000014
....
```
velodyne文件夹中存放着每一帧的点云数据,以.bin形式存储
label_2文件夹中存放着每一帧的标签信息,下面是标签文件示例:
```
pedestrian 0 0 0 0 0 0 0 1.018863 0.648392 0.348114 -32.12419891357422 40.14154434204102 -0.9670228362083435 -1.637705
pedestrian 0 0 0 0 0 0 0 0.661574 0.297775 0.735925 -18.38454437255859 -4.152974128723145 -1.521841764450073 1.564056
pedestrian 0 0 0 0 0 0 0 0.772804 0.287372 0.35712 -12.922926902771 25.13016510009766 -0.3287706673145294 0.02878607
pedestrian 0 0 0 0 0 0 0 0.620953 0.373367 0.447131 -12.88798904418945 25.85581016540527 -0.4463132917881012 0.07662772
cyclist 0 0 0 0 0 0 0 1.716547 0.619485 1.912308 7.602930545806885 -3.483364820480347 -0.9519524574279785 -0.03732504
```
上述标签文件仿照KITTI格式,但稍有不同,按顺序15个元素的含义如下所示:
| KITTI数据集 | 类别 |被截断程度 |被遮挡程度 | 观测角 |2d box 左、上、右、下边界坐标 |3d box 高度、宽度、长度 | 3d box在相机坐标系下的中心坐标 |3d box在相机坐标系下的旋转角 |
| -- | -- | -- | -- | -- | -- | -- | -- | -- |
| 用户自主数据集(例如apolloscape数据集) | 类别 | 0 |0 | 0 |0 |3d box 高度、宽度、长度 | 3d box在雷达坐标系下的中心坐标 |3d box在雷达坐标系下的旋转角 |
### 类别映射
当前centerpoint模型输出5种类别,如下所示
| 类别 | 包括类别 |
| -- | -- |
| smallMot | 小型车 |
| bigMot | 大型车 |
| nonMot | 三轮车 二轮车 骑摩托车的人 骑三轮车的人 骑自行车的人 |
| pedestrian | 行人 |
| TrafficCone | 交通锥筒 水马 施工牌 防撞筒 |
推荐用户将自主数据集的类别映射为上述5种类别
修改paddle3d/datasets/apollo/apollo_utils.py中的class_information进行类别映射,以apolloscape数据集为例
* apolloscape中小型车的类别为smallvehicle,需要映射为smallMot,也即'map_class': 'smallMot'
* difficulty_threshold表示根据点云数量定义目标困难程度,[20, 40]表示小于20个点时为hard,大于20小于40个点时为moderate,大于40个点为easy。用户可自定义设置
```
class_information = {
# smallMot
'smallmot': {'map_class': 'smallMot', 'difficulty_threshold': [20, 40]},
'midmot': {'map_class': 'smallMot', 'difficulty_threshold': [20, 40]},
'smallcar': {'map_class': 'smallMot', 'difficulty_threshold': [20, 40]},
'smallvehicle': {'map_class': 'smallMot', 'difficulty_threshold': [20, 40]},
.....
```
在Paddle3D的目录下创建软链接 `datasets/apolloscape`,指向到上面的数据集目录:
```
mkdir datasets
ln -s /path/to/apolloscape ./datasets/apolloscape
```
### 生成gt base
使用object sample数据增强方法可以显著增强检测效果,需要使用下面的脚本生成gt base
```
python tools/create_det_gt_database.py --config configs/centerpoint/centerpoint_pillars_02voxel_apolloscape.yml --dataset_name apollo
```
### 训练&评测&导出
训练apolloscape数据集
```
python -m paddle.distributed.launch --gpus 0,1,2,3,4,5,6,7 tools/train.py --config configs/centerpoint/centerpoint_pillars_02voxel_apolloscape.yml --save_dir ./output_apolloscape --num_workers 4 --save_interval 5
```
用户可使用Apollo官方提供的[预训练模型](https://apollo-pkg-beta.bj.bcebos.com/perception_model/centerpoint_core_pretrained_model.zip)进行训练,以获得更好的检测效果,预训练模型的模型配置可参考centerpoint_pillars_02voxel_apolloscape.yml,训练方式如下:
```
python -m paddle.distributed.launch --gpus 0,1,2,3,4,5,6,7 tools/train.py --config configs/centerpoint/centerpoint_pillars_02voxel_apolloscape.yml --model ./centerpoint_core_pretrained_model/model.pdparams --save_dir ./output_apolloscape --num_workers 4 --save_interval 5
```
评测apolloscape数据集
```
python tools/evaluate.py --config configs/centerpoint/centerpoint_pillars_02voxel_apolloscape.yml --model ./output_apolloscape/epoch_160/model.pdparams --batch_size 1 --num_workers 4
```
导出模型
```
python tools/export.py --config configs/centerpoint/centerpoint_pillars_02voxel_apolloscape.yml --model /path/to/model.pdparams --save_dir /path/to/output --export_for_apollo
```
| 0
|
apollo_public_repos/apollo-model-centerpoint/docs/models
|
apollo_public_repos/apollo-model-centerpoint/docs/models/iassd/README.md
|
# Not All Points Are Equal: Learning Highly Efficient Point-based Detectors for 3D LiDAR Point Clouds

## 目录
* [引用](#1)
* [简介](#2)
* [模型库](#3)
* [训练 & 评估](#4)
* [KITTI数据集](#41)
* [Waymo数据集](#42)
* [导出 & 部署](#8)
## <h2 id="1">一:引用</h2>
> Zhang, Yifan, et al. "Not all points are equal: Learning highly efficient point-based detectors for 3d lidar point clouds." Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition(CVPR). 2022.
## <h2 id="2">二:简介</h2>
IA-SSD是一个single-stage & point-based的3D点云目标检测器。由于点云数据存在较大的冗余,论文提出了面向检测任务的instance-aware sampling方法来有效的采样出那些具有代表性的点,并引入contextual centroid perception来进一步预测更为精确的物体中心,以此来获得更准确的检测结果。IA-SSD以较小的显存占用和较快的速度在kitti和waymo数据集上取得了具有竞争力的结果。
## <h2 id="3">三:模型库</h2>
- IA-SSD在KITTI Val set数据集上的表现
| 模型 | Car Mod <br> (IoU=0.7) | Ped. Mod<br>(IoU=0.5) | Cyc. Mod<br>(IoU=0.5) | 模型下载 | 配置文件 | 日志 | VDL |
| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
| IA-SSD | 79.13 | 58.51 | 71.32 | [model](https://paddle3d.bj.bcebos.com/models/iassd/iassd_kitti/model.pdparams) | [config](../../../configs/iassd/iassd_kitti.yaml) | [log](https://paddle3d.bj.bcebos.com/models/iassd/iassd_kitti/train.log) | [visualdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=7010b963202fe43cdf67cf714678e411) |
**注意:** KITTI benchmark使用4张V100 GPU训练得出。
- IA-SSD在Waymo Val set数据集上的表现
| 模型 | Vec_L1<br>AP / APH | Vec_L2<br>AP / APH | Ped_L1<br>AP / APH | Ped_L2<br>AP / APH | Cyc_L1<br>AP / APH | Cyc_L2<br>AP / APH | 模型下载 | 配置文件 | 日志 | VDL |
| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
| IA-SSD | 73.90 / 73.27| 64.84 / 64.28 | 70.36 / 60.75 | 62.93 / 54.13 | 68.21 / 66.25 | 66.06 / 64.16 | - | [config](../../../configs/iassd/iassd_waymo.yaml) | [log](https://paddle3d.bj.bcebos.com/models/iassd/iassd_waymo/train.log) | [visualdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=12256b19993b8876cf36124e2e0f2bd5) |
**注意:** Waymo benchmark使用4张V100 GPU训练得出。另外,由于Waymo数据集[License](https://waymo.com/open/faq/)许可问题,我们无法提供在Waymo数据上训练出的模型权重,用户可以采用提供的模型配置训练出该精度。
## <h2 id="4">四:训练 & 评估</h2>
### <h3 id="41">KITTI数据集</h3>
#### 1)数据准备
- 目前Paddle3D中提供的IA-SSD模型支持在KITTI数据集上训练,因此需要先准备KITTI数据集,请在[官网](http://www.cvlibs.net/datasets/kitti/eval_object.php?obj_benchmark=3d)进行下载:
1. Download Velodyne point clouds, if you want to use laser information (29 GB)
2. training labels of object data set (5 MB)
3. camera calibration matrices of object data set (16 MB)
4. Download road plane infos from [here](https://drive.google.com/file/d/1d5mq0RXRnvHPVeKx6Q612z0YRO1t2wAp/view?usp=sharing) (optional)
- 下载数据集的划分文件列表:
```
wget https://bj.bcebos.com/paddle3d/datasets/KITTI/ImageSets.tar.gz
```
- 将数据解压后按照下方的目录结构进行组织:
```
KITTI
|—— training
| |—— label_2
| | |—— 000001.txt
| | |—— ...
| |—— calib
| | |—— 000001.txt
| | |—— ...
| |—— velodyne
| | |—— 000001.bin
| | |—— ...
| |—— planes(optional)
| | |—— 000001.txt
| | |—— ...
|—— ImageSets
│ |—— test.txt
│ |—— train.txt
│ |—— trainval.txt
│ |—— val.txt
```
- 在Paddle3D的目录下创建软链接 `datasets/KITTI`,指向到上面的数据集目录:
```
mkdir datasets
ln -s /path/to/kitti_dataset_root ./datasets
mv ./datasets/kitti_dataset_root ./datasets/KITTI
```
- 生成训练时数据增强所需的真值库:
```
python tools/create_det_gt_database.py --dataset_name kitti --dataset_root ./datasets/KITTI --save_dir ./datasets/KITTI
```
`--dataset_root`指定KITTI数据集所在路径,`--save_dir`指定用于保存所生成的真值库的路径。该命令执行后,`save_dir`生成的目录如下:
```
KITTI
|—— ImageSets
│ |—— test.txt
│ |—— train.txt
│ |—— trainval.txt
│ |—— val.txt
|—— training
| |—— label_2
| | |—— 000001.txt
| | |—— ...
| |—— calib
| | |—— 000001.txt
| | |—— ...
| |—— velodyne
| | |—— 000001.bin
| | |—— ...
| |—— planes(optional)
| | |—— 000001.txt
| | |—— ...
|—— kitti_train_gt_database
| |—— anno_info_train.pkl
| |—— Car
| | |—— 4371_Car_7.bin
| | |—— ...
| |—— Cyclist
```
#### 2)训练
```shell
# 单卡训练
python tools/train.py --config configs/iassd/iassd_kitti.yaml --save_interval 1 --num_workers 4 --save_dir outputs/iassd_kitti
# 多卡训练,每隔1个epoch保存模型至save_dir
export CUDA_VISIBLE_DEVICES=0,1,2,3
fleetrun tools/train.py --config configs/iassd/iassd_kitti.yaml --save_interval 1 --num_workers 4 --save_dir outputs/iassd_kitti
```
训练启动参数介绍可参考文档[全流程速览](../../quickstart.md#模型训练)。
#### 3)评估
```shell
python tools/evaluate.py --config configs/iassd/iassd_kitti.yaml --batch_size 16 --num_workers 4 --model outputs/iassd_kitti/epoch_80/model.pdparams
```
评估启动参数介绍可参考文档[全流程速览](../../quickstart.md#模型评估)。
### <h3 id="42">Waymo数据集</h3>
#### 1)数据准备
- 目前Paddle3D中提供的IA-SSD模型支持在Waymo数据集上训练,因此需要先准备Waymo数据集,请在[官网](https://waymo.com/open)进行下载,建议下载***v1.3.2***及之后的版本
- 下载数据集的划分文件列表:
```shell
wget https://bj.bcebos.com/paddle3d/datasets/Waymo/1.3.2/ImageSets.tar.gz
```
- 将数据解压后按照下方的目录结构进行组织:
```
waymo
|—— ImageSets
│ |—— train.txt
│ |—— val.txt
|—— raw_data(解压后的所有tfrecord文件)
| |—— segment-xxxx.tfrecord
| |—— ...
```
- 在Paddle3D的目录下创建软链接 `datasets/waymo`,指向到上面的数据集目录:
```shell
mkdir datasets
ln -s /path/to/waymo_dataset_root ./datasets
mv ./datasets/waymo_dataset_root ./datasets/waymo
```
- 解析`raw_data`中tfrecord序列中的每个frame数据,并生成训练时数据增强所需的真值库:
```shell
python tools/create_waymo_infos.py
```
该命令执行后,生成的目录信息如下:
```
waymo_dataset_root
|—— ImageSets
│ |—— train.txt
│ |—— val.txt
|—— raw_data(解压后的所有tfrecord文件)
| |—— segment-xxxx.tfrecord
| |—— ...
|—— waymo_processed_data_v1_3_2
| |—— segment-xxxx
| | |—— 0000.npy
| | |—— ...
| | |—— segment-xxx.pkl
| |—— segment-xxxx
| |—— ...
|—— waymo_train_gt_database
| |—— Cyclist
| | |—— xxxx.bin
| | |—— ...
| |—— Pedestrian
| |—— Vehicle
| |—— waymo_train_gt_database_infos.pkl
```
`train.txt`和`val.txt`存放train和val的tfrecord文件列表,`waymo_processed_data_v1_3_2`是解析后的数据,每一个frame的点云数据以.npy的形式存放,每一个frame的标注信息存放在同级目录的.pkl文件中。`waymo_train_gt_database`是采样出的真值物体点云和box坐标信息。
#### 2)训练
> waymo数据的一个segment是由连续的frames组成,我们遵循业界的普遍做法,对每个segment以间隔5进行采样,取到整个训练数据的***20%约32k个frame***用于训练
```shell
# 单卡训练
python tools/train.py --config configs/iassd/iassd_waymo.yaml --save_interval 1 --num_workers 4 --save_dir outputs/iassd_waymo
# 多卡训练,每隔1个epoch保存模型至save_dir
export CUDA_VISIBLE_DEVICES=0,1,2,3
fleetrun tools/train.py --config configs/iassd/iassd_waymo.yaml --save_interval 1 --num_workers 4 --save_dir outputs/iassd_waymo
```
训练启动参数介绍可参考文档[全流程速览](../../quickstart.md#模型训练)。
#### 3)评估
```
python tools/evaluate.py --config configs/iassd/iassd_waymo.yaml --batch_size 32 --num_workers 4 --model outputs/iassd_waymo/epoch_30/model.pdparams
```
评估启动参数介绍可参考文档[全流程速览](../../quickstart.md#模型评估)。
## <h2 id="8">导出 & 部署</h2>
### <h3 id="81">模型导出</h3>
运行以下命令,将训练时保存的动态图模型文件导出成推理引擎能够加载的静态图模型文件。
```shell
python tools/export.py --config configs/iassd/iassd_kitti.yaml --model /path/to/model.pdparams --save_dir /path/to/output
```
| 参数 | <center>说明</center> |
| -- | -- |
| config | **[必填]** 训练配置文件所在路径 |
| model | **[必填]** 训练时保存的模型文件`model.pdparams`所在路径 |
| save_dir | **[必填]** 保存导出模型的路径,`save_dir`下将会生成三个文件:`iassd.pdiparams `、`iassd.pdiparams.info`和`iassd.pdmodel`, `iassd.pdmodel`和`iassd.pdiparams`用于后续模型推理|
### Python部署
目前IA-SSD模型的Python部署只支持GPU推理和TensorRT加速。
#### 1)环境依赖
> - Ubuntu 18.04
> - Python==3.7
> - CUDA==11.2
> - cuDNN==8.2.0
> - Paddle Inference==2.3.2
> - TensorRT_8.2.5.1
#### 2)执行预测
进入`deploy/iassd/python`,运行以下命令,执行不同配置的推理(如果需要开启TensorRT加速,请下载带有TRT的PaddlePaddle版本):
- 执行原生`GPU`预测:
```shell
python3.7 deploy/iassd/python/infer.py --model_file /path/to/iassd.pdmodel --params_file /path/to/iassd.pdiparams --lidar_file /.../000001.bin --gpu_id 0
```
- 执行`TRT_FP32`预测:
```shell
python3.7 deploy/iassd/python/infer.py --model_file /path/to/iassd.pdmodel --params_file /path/to/iassd.pdiparams --lidar_file /.../000001.bin --gpu_id 0 --run_mode trt_fp32
```
- 执行`TRT_FP16`预测:
```shell
python3.7 deploy/iassd/python/infer.py --model_file /path/to/iassd.pdmodel --params_file /path/to/iassd.pdiparams --lidar_file /.../000001.bin --gpu_id 0 --run_mode trt_fp16
```
执行上述命令后可以得到点云3D检测结果:
```
{'boxes': array([[ 5.9013954e+01, 1.6384674e+01, -8.3350408e-01, 4.2825608e+00,
1.6158756e+00, 1.5127110e+00, 2.9756324e+00],
[ 1.7047174e+00, -5.7812780e-01, -1.0745335e+00, 3.9012828e+00,
1.5632588e+00, 1.4671783e+00, -2.4538052e+00],
[ 4.6258511e+01, -4.5674486e+00, -6.3848123e-03, 1.6965854e+00,
4.1198540e-01, 1.7212551e+00, 1.1975995e-01],
[ 4.0068634e+01, 1.9198923e+01, -9.9642396e-01, 4.1801486e+00,
1.6866810e+00, 1.6624287e+00, -1.9688037e-01]], dtype=float32),
'labels': array([0, 0, 2, 0]),
'scores': array([0.35698408, 0.2946543 , 0.21058026, 0.16246885], dtype=float32)}
```
| 参数 | <center>说明</center> |
|:--| :-- |
| --model_file | 导出模型的结构文件`iassd.pdmodel`路径 |
| --params_file | 导出模型的参数文件`iassd.pdiparams`路径 |
| --lidar_file | 待预测的点云文件`.bin`格式 |
| --gpu_id | 用于预测的GPU ID |
| --run_mode | 推理模式,支持`fp32`,`trt_fp32`,`trt_fp16` |
### C++部署
目前IA-SSD模型的C++部署只支持GPU推理和TensorRT加速。
#### 1)环境依赖
> - Ubuntu 18.04
> - Python==3.7
> - CUDA==11.2
> - cuDNN==8.2.0
> - Paddle Inference==2.3.2
> - TensorRT_8.2.5.1
> - GCC==8.2.0
> - CMake==3.16.0
#### 2)编译
- step 1:进入部署代码所在路径
```shell
cd deploy/iassd/cpp
```
- step 2:下载Paddle Inference C++预测库
Paddle Inference针对是否使用GPU、是否支持TensorRT、以及不同的CUDA/cuDNN/GCC版本均提供已经编译好的库文件,请至
[Paddle Inference C++预测库](https://www.paddlepaddle.org.cn/inference/v2.4/guides/introduction/index_intro.html#c)中下载符合的版本。
- step 3:修改compile.sh中的编译参数
主要修改以下参数:
| 参数 | <center>说明</center> |
|:--| :-- |
| WITH_GPU | 是否使用gpu。ON或OFF, OFF表示使用CPU,默认ON |
| USE_TENSORRT | 是否使用TensorRT加速。ON或OFF,默认OFF |
| LIB_DIR | Paddle Inference C++预测库所在路径 |
| CUDNN_LIB | cuDNN `libcudnn.so`所在路径 |
| CUDA_LIB | CUDA `libcudart.so`所在路径 |
| TENSORRT_ROOT | TensorRT安装路径。如果开启`USE_TENSORRT`加速,则需要填写该路径 |
- step 4:编译
```shell
sh compile.sh
```
#### 3)执行预测
执行命令参数说明
| 参数 | <center>说明</center> |
| :-- | :-- |
| model_file | 导出模型的结构文件`iassd.pdmodel`所在路径 |
| params_file | 导出模型的参数文件`iassd.pdiparams`所在路径 |
| lidar_file | 待预测的点云文件所在路径 |
| run_mode | 预测配置,支持`trt_fp32`, `trt_fp16`,默认采用`gpu_fp32`执行预测 |
| gpu_id | 用于预测的GPU_ID |
- 执行原生`GPU`预测:
```shell
./build/main --model_file /path/to/iassd.pdmodel --params_file /path/to/iassd.pdiparams --lidar_file /.../000001.bin --gpu_id 0
```
- 执行`trt_fp32`预测:
```shell
./build/main --model_file /path/to/iassd.pdmodel --params_file /path/to/iassd.pdiparams --lidar_file /.../000001.bin --gpu_id 0 --run_mode trt_fp32
```
- 执行`trt_fp16`预测:
```shell
./build/main --model_file /path/to/iassd.pdmodel --params_file /path/to/iassd.pdiparams --lidar_file /.../000001.bin --gpu_id 0 --run_mode trt_fp16
```
| 0
|
apollo_public_repos/apollo-model-centerpoint/docs/models
|
apollo_public_repos/apollo-model-centerpoint/docs/models/caddn/README.md
|
# CADDN:Categorical Depth Distribution Network for Monocular 3D Object Detection
## 目录
* [引用](#1)
* [简介](#2)
* [训练配置](#3)
* [使用教程](#4)
* [数据准备](#5)
* [训练](#6)
* [评估](#7)
* [导出 & 部署](#8)
* [自定义数据集](#9)
* [Apollo使用教程](#10)
## <h2 id="1">引用</h2>
> Cody Reading, Ali Harakeh, Julia Chae, Steven L. Waslander. "Categorical Depth Distribution Network for Monocular 3D Object Detection." Computer Vision and Pattern Recognition (CVPR), 2021.
## <h2 id="2">简介</h2>
单目3D物体检测是自动驾驶汽车的关键问题,与典型的多传感器系统相比,单目3D检测提供了一种配置简单的解决方案。单目3D检测的主要挑战在于准确预测物体深度,由于缺乏直接的距离测量,必须从物体和场景线索中推断出物体深度。目前一些方法试图通过直接估计深度来辅助3D检测,但由于深度不准确,性能有限。而CaDDN模型提出了解决方案,它使用每个像素的预测分类深度分布,将丰富的上下文特征信息投射到3D空间中适当的深度区间。然后,CaDDN模型使用计算效率高的鸟瞰投影和单级检测器来生成最终的输出包围框。同时CaDDN模型被设计为一种完全可微的端到端联合深度估计和目标检测方法。在模型发布时,CaDDN在Kitti 3D对象检测基准上获得了已发表的单目方法中的第一名,到目前,CaDDN模型的指标仍具有竞争力。
## <h2 id="3">训练配置</h2>
我们提供了在开源数据集上的训练配置与结果,详见[CADDN训练配置](../../../configs/caddn)
## <h2 id="4">模型库</h2>
| 模型 | 骨干网络 | 3DmAP Mod. | Car<br>Easy Mod. Hard | Pedestrian<br>Easy Mod. Hard | Cyclist<br>Easy Mod. Hard | 模型下载 | 配置文件 | 日志 |
| :--: | :-------: | :--------: | :-------------------: | :--------------------------: | :-----------------------: | :------: | :-----: | :--: |
|CADDN | ocrnet_hrnet_w18 | 7.86 | 22.50 15.78 13.95 | 10.09 7.12 5.57 | 1.27 0.69 0.69 | [model](https://paddle3d.bj.bcebos.com/models/caddn/caddn_ocrnet_hrnet_w18_kitti/model.pdparams) | [config](../../../configs/caddn/caddn_ocrnet_hrnet_w18_kitti.yml) | [log](https://paddle3d.bj.bcebos.com/models/caddn/caddn_ocrnet_hrnet_w18_kitti/train.log) \| [vdl](https://www.paddlepaddle.org.cn/paddle/visualdl/service/app/scalar?id=36ff3161e13f37bb318fc2d78e679983) |
|CADDN | deeplabv3p_resnet101_os8 | 7.21 | 21.45 14.36 12.57 | 9.15 6.53 5.12 | 1.82 0.74 0.75 | [model](https://paddle3d.bj.bcebos.com/models/caddn/caddn_deeplabv3p_resnet101_os8_kitti/model.pdparams) | [config](../../../configs/caddn/caddn_deeplabv3p_resnet101_os8_kitti.yml) | [log](https://paddle3d.bj.bcebos.com/models/caddn/caddn_deeplabv3p_resnet101_os8_kitti/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=a56f45325b80ce7f7e29f185efaed28c) |
## <h2 id="5">使用教程</h2>
## <h2 id="6">数据准备</h2>
请下载KITTI单目3D检测数据集,数据集信息请参考[KITTI官网](http://www.cvlibs.net/datasets/kitti/)
*注意:KITTI官网只区分了训练集和测试集,我们遵循业界的普遍做法,将7481个训练集样本,进一步划分为3712个训练集样本和3769个验证集样本*
下载好后的数据集目录结构
```
kitti
├── gt_database
├── ImageSets
| ├── test.txt
| ├── train.txt
| └── val.txt
├── testing
| ├── calib
| └── image_2
├── training
| ├── calib
| ├── depth_2
| ├── image_2
| └── label_2
├── kitti_infos_test.pkl
├── kitti_infos_train.pkl
├── kitti_infos_val.pkl
...
```
将kitti数据软链至data/kitti,或更改配置文件数据集路径。
备注:准备好kitti数据集后,上述的.pkl是通过下列命令生成
```
python tools/creat_caddn_kitti_infos.py
```
| 参数 | 说明 |
| -- | -- |
| dataset_root | **[选填]** kitti数据集路径,默认data/kitti |
| save_dir | **[选填]** 生成的.pkl文件保存路径,默认data/kitti |
## <h2 id="7">训练</h2>
运行以下命令,进行单卡训练
```
python -u tools/train.py --config configs/caddn/caddn_deeplabv3p_resnet101_os8_kitti.yml
```
运行以下命令,进行多卡训练
```
export CUDA_VISIBLE_DEVICES=0,1,2,3
fleetrun tools/train.py --config configs/caddn/caddn_deeplabv3p_resnet101_os8_kitti.yml
```
训练中断,可以通过`--resume`进行继续训练。
## <h2 id="8">评估</h2>
运行以下命令,进行评估
```
python tools/evaluate.py --config configs/caddn/caddn_deeplabv3p_resnet101_os8_kitti.yml --model pretrained_model_path
```
## <h2 id="9">导出 & 部署</h2>
### <h3 id="91">模型导出</h3>
运行以下命令,将训练时保存的动态图模型文件导出成推理引擎能够加载的静态图模型文件。
```
python tools/export.py --config configs/caddn/caddn_deeplabv3p_resnet101_os8_kitti.yml --model /path/to/model.pdparams --save_dir /path/to/output
```
| 参数 | 说明 |
| -- | -- |
| config | **[必填]** 训练配置文件所在路径 |
| model | **[必填]** 训练时保存的模型文件`model.pdparams`所在路径 |
| save_dir | **[必填]** 保存导出模型的路径,`save_dir`下将会生成三个文件:`caddn.pdiparams `、`caddn.pdiparams.info`和`caddn.pdmodel` |
提供训练好的导出模型
| 配置文件 | 下载 |
| -- | -- |
| caddn_ocrnet_hrnet_w18_kitti | [下载](https://paddle3d.bj.bcebos.com/models/caddn/caddn_ocrnet_hrnet_w18_kitti/model.zip) |
| caddn_deeplabv3p_resnet101_os8_kitti | [下载](https://paddle3d.bj.bcebos.com/models/caddn/caddn_deeplabv3p_resnet101_os8_kitti/model.zip) |
### C++部署
#### Linux系统
#### 环境依赖
- GCC >= 5.4.0
- Cmake >= 3.5.1
- Ubuntu 16.04/18.04
> 说明:本文档的部署环节在以下环境中进行过测试并通过:
测试环境:
- GCC==8.2.0
- Cmake==3.16.0
- Ubuntu 18.04
- CUDA 11.2
- cuDNN==8.1.1
- Paddle Inference==2.3.1
- TensorRT-8.2.5.1.Linux.x86_64-gnu.cuda-11.4.cudnn8.2
#### 编译步骤
**注意:目前CADDN仅支持使用GPU进行推理。**
- step 1: 进入部署代码所在路径
```
cd deploy/caddn/cpp
```
- step 2: 下载Paddle Inference C++预编译库
Paddle Inference针对**是否使用GPU**、**是否支持TensorRT**、以及**不同的CUDA/cuDNN/GCC版本**均提供已经编译好的库文件,请至[Paddle Inference C++预编译库下载列表](https://www.paddlepaddle.org.cn/inference/user_guides/download_lib.html#c)选择符合的版本。
- step 3: 修改`compile.sh`中的编译参数
主要修改编译脚本`compile.sh`中的以下参数:
| 参数 | 说明 |
| -- | -- |
| WITH_GPU | 是否使用gpu。ON或OFF, OFF表示使用CPU,默认ON|
| USE_TENSORRT | 是否使用TensorRT加速。ON或OFF,默认OFF|
| LIB_DIR | Paddle Inference C++预编译包所在路径,该路径下的内容应有:`CMakeCache.txt`、`paddle`、`third_party`和`version.txt` |
| CUDNN_LIB | cuDNN`libcudnn.so`所在路径 |
| CUDA_LIB | CUDA`libcudart.so `所在路径 |
| TENSORRT_ROOT | TensorRT所在路径。**非必须**,如果`USE_TENSORRT`设置为`ON`时,需要填写该路径,该路径下的内容应有`bin`、`lib`和`include`等|
- step 4: 开始编译
```
sh compile.sh
```
### 执行预测
**注意:目前CADDN仅支持使用GPU进行推理。**
执行命令参数说明
| 参数 | 说明 |
| -- | -- |
| model_file | 导出模型的结构文件`caddn.pdmodel`所在路径 |
| params_file | 导出模型的参数文件`caddn.pdiparams`所在路径 |
| image_file | 待预测的图像文件所在路径 |
执行命令:
```
./build/main --model_file /path/to/caddn.pdmodel --params_file /path/to/caddn.pdiparams --image_file /path/to/image.png
```
### 开启TensorRT加速预测【可选】
**注意:请根据编译步骤的step 3,修改`compile.sh`中TensorRT相关的编译参数,并重新编译。**
运行命令参数说明如下:
| 参数 | 说明 |
| -- | -- |
| model_file | 导出模型的结构文件`caddn.pdmodel`所在路径 |
| params_file | 导出模型的参数文件`caddn.pdiparams`所在路径 |
| image_file | 待预测的图像文件所在路径 |
| use_trt | 是否使用TensorRT进行加速,默认0|
| trt_precision | 当use_trt设置为1时,模型精度可设置0或1,0表示fp32, 1表示fp16。默认0 |
| trt_use_static | 当trt_use_static设置为1时,**在首次运行程序的时候会将TensorRT的优化信息进行序列化到磁盘上,下次运行时直接加载优化的序列化信息而不需要重新生成**。默认0 |
| trt_static_dir | 当trt_use_static设置为1时,保存优化信息的路径 |
| collect_shape_info | 是否收集模型动态shape信息。默认0。**只需首次运行,下次运行时直接加载生成的shape信息文件即可进行TensorRT加速推理** |
| dynamic_shape_file | 保存模型动态shape信息的文件路径。 |
* **首次运行TensorRT**,收集模型动态shape信息,并保存至`--dynamic_shape_file`指定的文件中
```
./build/main --model_file /path/to/caddn.pdmodel --params_file /path/to/caddn.pdiparams --image_file /path/to/image.png --use_trt 1 --collect_shape_info 1 --dynamic_shape_file /path/to/shape_info.txt
```
* 加载`--dynamic_shape_file`指定的模型动态shape信息,使用FP32精度进行预测
```
./build/main --model_file /path/to/caddn.pdmodel --params_file /path/to/caddn.pdiparams --image_file /path/to/image.png --use_trt 1 --dynamic_shape_file /path/to/shape_info.txt
```
* 加载`--dynamic_shape_file`指定的模型动态shape信息,使用FP16精度进行预测
```
./build/main --model_file /path/to/caddn.pdmodel --params_file /path/to/caddn.pdiparams --image_file /path/to/image.png --use_trt 1 --dynamic_shape_file /path/to/shape_info.txt --trt_precision 1
```
* 如果觉得每次运行时模型加载的时间过长,可以设置`trt_use_static`和`trt_static_dir`,首次运行时将TensorRT的优化信息保存在硬盘中,后续直接反序列化优化信息即可
```
./build/main --model_file /path/to/caddn.pdmodel --params_file /path/to/caddn.pdiparams --image_file /path/to/image.png --use_trt 1 --collect_shape_info 1 --dynamic_shape_file /path/to/shape_info.txt --trt_precision 1 --trt_use_static 1 --trt_static_dir /path/to/OptimCacheDir
```
### Python部署
进入部署代码所在路径
```
cd deploy/caddn/python
```
**注意:目前CADDN仅支持使用GPU进行推理。**
命令参数说明如下:
| 参数 | 说明 |
| -- | -- |
| model_file | 导出模型的结构文件`caddn.pdmodel`所在路径 |
| params_file | 导出模型的参数文件`caddn.pdiparams`所在路径 |
| img_path | 待预测的图像文件所在路径 |
| use_trt | 是否使用TensorRT进行加速,默认0|
| trt_precision | 当use_trt设置为1时,模型精度可设置0或1,0表示fp32, 1表示fp16。默认0 |
| trt_use_static | 当trt_use_static设置为1时,**在首次运行程序的时候会将TensorRT的优化信息进行序列化到磁盘上,下次运行时直接加载优化的序列化信息而不需要重新生成**。默认0 |
| trt_static_dir | 当trt_use_static设置为1时,保存优化信息的路径 |
| collect_shape_info | 是否收集模型动态shape信息。默认0。**只需首次运行,后续直接加载生成的shape信息文件即可进行TensorRT加速推理** |
| dynamic_shape_file | 保存模型动态shape信息的文件路径。 |
运行以下命令,执行预测:
```
python infer.py --model_file /path/to/caddn.pdmodel --params_file /path/to/caddn.pdiparams --img_path /path/to/image.png
```
## <h2 id="9">自定义数据集</h2>
## <h2 id="10">Apollo使用教程</h2>
基于Paddle3D训练完成的CADDN模型可以直接部署到Apollo架构中使用,请参考[教程](https://github.com/ApolloAuto/apollo/blob/master/modules/perception/README_paddle3D_CN.md)
| 0
|
apollo_public_repos/apollo-model-centerpoint/docs/models
|
apollo_public_repos/apollo-model-centerpoint/docs/models/squeezesegv3/README.md
|
# SqueezeSegV3: Spatially-Adaptive Convolution for Efficient Point-Cloud Segmentation
## 目录
* [引用](#h2-id1h2)
* [简介](#h2-id2h2)
* [模型库](#h2-id3h2)
* [训练配置](#h2-id4h2)
* [使用教程](#h2-id5h2)
* [数据准备](#h3-id51h3)
* [训练](#h3-id52h3)
* [评估](#h3-id53h3)
* [模型导出](#h3-id54h3)
* [模型部署](#h3-id55h3)
## <h2 id="1">引用</h2>
> Xu, Chenfeng, Bichen Wu, Zining Wang, Wei Zhan, Peter Vajda, Kurt Keutzer, και Masayoshi Tomizuka. ‘SqueezeSegV3: Spatially-Adaptive Convolution for Efficient Point-Cloud Segmentation’. CoRR abs/2004.01803 (2020).
## <h2 id="2">简介</h2>
SqueezeSegV3是一个点云语义分割模型。该论文延续了SqueezeSeg系列将三维空间点云投影至二维空间进行特征提取的思想,并在RangeNet++模型结构的
基础上创新性地提出并应用了空间自适应卷积(Spatially-Adaptive Convolution)。
## <h2 id="3">模型库</h2>
- SqueezeSegV3在SemanticKITTI Val set数据集上的表现
| 模型 | mIoU (Point Cloud / Range View) | mAcc (Point Cloud / Range View) | 模型下载 | 配置文件 |
|:---------------:|:-------------------------------:|:-------------------------------:|:----------------------------------------------------------------------------------------------------------------:|:---------------------------------------------------------------------------------:|
| SqueezeSegV3-21 | 46.3 / 51.2 | 87.3 / 90.0 | [model](https://bj.bcebos.com/paddle3d/models/squeezesegv3/squeezesegv3_rangenet21_semantickitti/model.pdparams) | [config](../../../configs/squeezesegv3/squeezesegv3_rangenet21_semantickitti.yml) |
| SqueezeSegV3-53 | 48.8 / 54.2 | 88.4 / 91.2 | [model](https://bj.bcebos.com/paddle3d/models/squeezesegv3/squeezesegv3_rangenet53_semantickitti/model.pdparams) | [config](../../../configs/squeezesegv3/squeezesegv3_rangenet53_semantickitti.yml) |
## <h2 id="4">训练配置</h2>
我们提供了在开源数据集上的训练配置与结果,详见[SqueezeSegV3训练配置](../../../configs/squeezesegv3)。
## <h2 id="5">使用教程</h2>
### <h3 id="51">数据准备</h3>
1. 数据格式
SqueezeSegV3模型目前仅适配[SemanticKITTI](http://semantic-kitti.org/dataset.html)格式的数据集。需将数据集放置于
`Paddle3D/datasets/SemanticKITTI`目录下,或在[配置文件](../../../configs/_base_/semantickitti.yml)中指定数据集路径。数据集文件结构如下:
```
└── Paddle3D/datasets/SemanticKITTI
    ├── sequences
├── 00
├── velodyne
├── 000000.bin
├── ...
├── labels
├── 000000.label
├── ...
├── poses.txt
```
2. 数据划分
SemanticKITTI数据集共包含`00`至`21`共22个序列,其中官方默认的数据集划分为:
- 训练集:00, 01, 02, 03, 04, 05, 06, 07, 09, 10
- 验证集:08
- 测试集:11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21
如需使用自己的划分,可在[配置文件](../../../configs/_base_/semantickitti.yml)中指定。
### <h3 id="52">训练</h3>
位于`Paddle3D/`目录下,执行:
```shell
python -m paddle.distributed.launch --gpus 0,1,2,3,4,5,6,7 \
tools/train.py \
--config configs/squeezesegv3/squeezesegv3_rangenet53_semantickitti.yml \
--save_interval 1195 \
--keep_checkpoint_max 50 \
--save_dir outputs/squeezesegv3/rangenet21_semantickitti \
--do_eval
```
训练脚本支持设置如下参数:
| 参数名 | 用途 | 是否必选项 | 默认值 |
|:--------------------|:-------------------------------|:------|:---------|
| gpus | 使用的GPU编号 | 是 | - |
| config | 配置文件 | 是 | - |
| save_dir | 模型和visualdl日志文件的保存根路径 | 否 | output |
| num_workers | 用于异步读取数据的进程数量, 大于等于1时开启子进程读取数据 | 否 | 0 |
| save_interval | 模型保存的间隔步数 | 否 | 1000 |
| do_eval | 是否在保存模型时进行评估 | 否 | 否 |
| log_interval | 打印日志的间隔步数 | 否 | 10 |
| keep_checkpoint_max | 最新模型保存个数 | 否 | 5 |
| resume | 是否从断点恢复训练 | 否 | 否 |
| batch_size | mini-batch大小(每张GPU) | 否 | 在配置文件中指定 |
| iters | 训练轮数 | 否 | 在配置文件中指定 |
| learning_rate | 学习率 | 否 | 在配置文件中指定 |
| seed | 固定随机种子 | 否 | None |
### <h3 id="53">评估</h3>
位于`Paddle3D/`目录下,执行:
```shell
python tools/evaluate.py \
--config configs/squeezesegv3/squeezesegv3_rangenet53_semantickitti.yml \
--model /path/to/model.pdparams
```
评估脚本支持设置如下参数:
| 参数名 | 用途 | 是否必选项 | 默认值 |
|:--------------------|:-------------------------------|:------|:---------|
| config | 配置文件 | 是 | - |
| model | 待评估模型路径 | 是 | - |
| num_workers | 用于异步读取数据的进程数量, 大于等于1时开启子进程读取数据 | 否 | 0 |
| batch_size | mini-batch大小 | 否 | 在配置文件中指定 |
### <h3 id="54">模型导出</h3>
运行以下命令,将训练时保存的动态图模型文件导出成推理引擎能够加载的静态图模型文件。
```shell
python tools/export.py \
--config configs/squeezesegv3/squeezesegv3_rangenet53_semantickitti.yml \
--model /path/to/model.pdparams \
--input_shape 64 1024 \
--save_dir /path/to/output
```
模型导出脚本支持设置如下参数:
| 参数名 | 用途 | 是否必选项 | 默认值 |
|:------------|:-------------------------------------------------------------------------------------------------------------|:------|:---------|
| config | 配置文件 | 是 | - |
| model | 待导出模型参数`model.pdparams`路径 | 是 | - |
| input_shape | 指定模型的输入尺寸,支持`N, C, H, W`或`H, W`格式 | 是 | - |
| save_dir | 保存导出模型的路径,`save_dir`下将会生成三个文件:`squeezesegv3.pdiparams `、`squeezesegv3.pdiparams.info`和`squeezesegv3.pdmodel` | 否 | `deploy` |
### <h3 id="55">模型部署</h3>
#### C++部署
Coming soon...
#### Python部署
命令参数说明如下:
| 参数 | 说明 |
| -- | -- |
| model_file | 导出模型的结构文件`squeezesegv3.pdmodel`所在路径 |
| params_file | 导出模型的参数文件`squeezesegv3.pdiparams`所在路径 |
| lidar_file | 待预测的点云文件所在路径 |
| img_mean | 点云投影到range-view后所成图像的均值,例如为`12.12,10.88,0.23,-1.04,0.21` |
| img_std | 点云投影到range-view后所成图像的方差,例如为`12.32,11.47,6.91,0.86,0.16` |
| use_trt | 是否使用TensorRT进行加速,默认0|
| trt_precision | 当use_trt设置为1时,模型精度可设置0或1,0表示fp32, 1表示fp16。默认0 |
| trt_use_static | 当trt_use_static设置为1时,**在首次运行程序的时候会将TensorRT的优化信息进行序列化到磁盘上,下次运行时直接加载优化的序列化信息而不需要重新生成**。默认0 |
| trt_static_dir | 当trt_use_static设置为1时,保存优化信息的路径 |
运行以下命令,执行预测:
```
python infer.py --model_file /path/to/squeezesegv3.pdmodel --params_file /path/to/squeezesegv3.pdiparams --lidar_file /path/to/lidar.pcd.bin --img_mean 12.12,10.88,0.23,-1.04,0.21 --img_std 12.32,11.47,6.91,0.86,0.16
```
如果要开启TensorRT的话,请卸载掉原有的`paddlepaddle_gpu`,至[Paddle官网](https://paddleinference.paddlepaddle.org.cn/user_guides/download_lib.html#python)下载与TensorRT连编的预编译Paddle Inference安装包,选择符合本地环境CUDA/cuDNN/TensorRT版本的安装包完成安装即可。
运行以下命令,开启TensorRT加速模型预测:
```
python infer.py --model_file /path/to/squeezesegv3.pdmodel --params_file /path/to/squeezesegv3.pdiparams --lidar_file /path/to/lidar.pcd.bin --img_mean 12.12,10.88,0.23,-1.04,0.21 --img_std 12.32,11.47,6.91,0.86,0.16 --use_trt 1
```
| 0
|
apollo_public_repos/apollo-model-centerpoint/docs/models
|
apollo_public_repos/apollo-model-centerpoint/docs/models/pv_rcnn/README.md
|
# PV-RCNN: Point-Voxel Feature Set Abstraction for 3D Object Detection
## 目录
* [引用](#1)
* [简介](#2)
* [模型库](#3)
* [训练 & 评估](#4)
* [KITTI数据集](#41)
* [导出 & 部署](#5)
* [自定义数据集](#6)
## <h2 id="1">引用</h2>
> Shi, Shaoshuai, et al. "Pv-rcnn: Point-voxel feature set abstraction for 3d object detection." Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2020.
## <h2 id="2">简介</h2>
PV-RCNN是Voxel-Based和Point-based相结合的Anchor-Based二阶段点云三维物体检测方法。在精度提优方面,PV-RCNN在Voxel-Branch中使用3D Sparse Conv学习有序体素特征,在Point-Branch中基于PointNet++提取无序点云的特征,将两种表示方法的优点充分结合,有效提升模型的精度。在性能提速方面,针对提取每个ROI内所有点云导致耗时太长的问题,PV-RCNN仅提取ROI内数量有限且能代表周围点云的关键点的特征,有效提升模型的性能。
## <h2 id="3">模型库</h2>
- PV-RCNN在KITTI Val set数据集上的表现:
| 模型 | Car Mod@0.7 AP_R11 / AP_R40 | Pedestrian Mod@0.5 AP_R11 / AP_R40| Cyclist Mod@0.5 AP_R11 / AP_R40 | V100 TensorRT FP32(FPS) | V100 TensorRT FP16(FPS) | 模型下载 | 配置文件 | 日志 |
| ---- | ---------- |----------- |------------------ | -------------- | ----------------| ------ | ------- | ----- |
| PV-RCNN | 83.78 / 84.72 | 58.91 / 58.30 | 73.29 / 73.27 | 10.14 | 10.74 | [model](https://paddle3d.bj.bcebos.com/models/pv_rcnn/pv_rcnn_005voxel_kitti/model.pdparams) | [config](../../../configs/pv_rcnn/pv_rcnn_005voxel_kitti.yml)| [log](https://paddle3d.bj.bcebos.com/models/pv_rcnn/pv_rcnn_005voxel_kitti/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=2166361cceed2624cee057ea583ee257) |
**注意:** KITTI benchmark使用8张V100 GPU训练得出。
## <h2 id="4">训练 & 评估</h2>
### <h3 id="41">KITTI数据集</h3>
- 目前Paddle3D中提供的PV-RCNN模型支持在KITTI数据集上训练,因此需要先准备KITTI数据集,请在[官网](http://www.cvlibs.net/datasets/kitti/eval_object.php?obj_benchmark=3d)进行下载:
1. Download Velodyne point clouds, if you want to use laser information (29 GB)
2. training labels of object data set (5 MB)
3. camera calibration matrices of object data set (16 MB)
并下载数据集的划分文件列表:
```
wget https://bj.bcebos.com/paddle3d/datasets/KITTI/ImageSets.tar.gz
```
将数据解压后按照下方的目录结构进行组织:
```
kitti_dataset_root
|—— training
| |—— label_2
| | |—— 000001.txt
| | |—— ...
| |—— calib
| | |—— 000001.txt
| | |—— ...
| |—— velodyne
| | |—— 000001.bin
| | |—— ...
|—— ImageSets
│ |—— test.txt
│ |—— train.txt
│ |—— trainval.txt
│ |—— val.txt
```
在Paddle3D的目录下创建软链接 `datasets/KITTI`,指向到上面的数据集目录:
```
mkdir datasets
ln -s /path/to/kitti_dataset_root ./datasets
mv ./datasets/kitti_dataset_root ./datasets/KITTI
```
- 生成训练时数据增强所需的真值库:
```
python tools/create_det_gt_database.py --dataset_name kitti --dataset_root ./datasets/KITTI --save_dir ./datasets/KITTI
```
`--dataset_root`指定KITTI数据集所在路径,`--save_dir`指定用于保存所生成的真值库的路径。该命令执行后,`save_dir`生成的目录如下:
```
kitti_train_gt_database
|—— anno_info_train.pkl
|—— Car
| |—— 4371_Car_7.bin
| |—— ...
|—— Cyclist
```
#### 训练
KITTI数据集上的训练使用8张GPU:
```
python -m paddle.distributed.launch --gpus 0,1,2,3,4,5,6,7 tools/train.py --config configs/pv_rcnn/pv_rcnn.yml --save_dir ./output_pv_rcnn --num_workers 4 --save_interval 1
```
训练启动参数介绍可参考文档[全流程速览](../../quickstart.md#模型训练)。
#### 评估
```
python tools/evaluate.py --config configs/pv_rcnn/pv_rcnn.yml --model ./output_pv_rcnn/epoch_80/model.pdparams --batch_size 1 --num_workers 4
```
**注意**:PV-RCNN的评估目前只支持batch_size为1。
评估启动参数介绍可参考文档[全流程速览](../../quickstart.md#模型评估)。
## <h2 id="5">导出 & 部署</h2>
### 模型导出
运行以下命令,将训练时保存的动态图模型文件导出成推理引擎能够加载的静态图模型文件。
```
python tools/export.py --config configs/pv_rcnn/pv_rcnn.yml --model /path/to/model.pdparams --save_dir /path/to/output
```
| 参数 | 说明 |
| -- | -- |
| config | **[必填]** 训练配置文件所在路径 |
| model | **[必填]** 训练时保存的模型文件`model.pdparams`所在路径 |
| save_dir | **[必填]** 保存导出模型的路径,`save_dir`下将会生成三个文件:`pv_rcnn.pdiparams `、`pv_rcnn.pdiparams.info`和`pv_rcnn.pdmodel` |
### C++部署
#### Linux系统
#### 环境依赖
- GCC >= 5.4.0
- Cmake >= 3.5.1
- Ubuntu 16.04/18.04
> 说明:本文档的部署环节在以下环境中进行过测试并通过:
测试环境:
- GCC==8.2.0
- Cmake==3.16.0
- Ubuntu 18.04
- CUDA 11.2
- cuDNN==8.1.1
- Paddle Inference==2.3.1
- TensorRT-8.2.5.1.Linux.x86_64-gnu.cuda-11.4.cudnn8.2
#### 编译步骤
**注意:目前PV-RCNN仅支持使用GPU进行推理。**
- step 1: 进入部署代码所在路径
```
cd deploy/pv_rcnn/cpp
```
- step 2: 下载Paddle Inference C++预编译库
Paddle Inference针对**是否使用GPU**、**是否支持TensorRT**、以及**不同的CUDA/cuDNN/GCC版本**均提供已经编译好的库文件,请至[Paddle Inference C++预编译库下载列表](https://www.paddlepaddle.org.cn/inference/user_guides/download_lib.html#c)选择符合的版本。
- step 3: 修改`compile.sh`中的编译参数
主要修改编译脚本`compile.sh`中的以下参数:
| 参数 | 说明 |
| -- | -- |
| WITH_GPU | 是否使用gpu。ON或OFF, OFF表示使用CPU,默认ON|
| USE_TENSORRT | 是否使用TensorRT加速。ON或OFF,默认OFF|
| LIB_DIR | Paddle Inference C++预编译包所在路径,该路径下的内容应有:`CMakeCache.txt`、`paddle`、`third_party`和`version.txt` |
| CUDNN_LIB | cuDNN`libcudnn.so`所在路径 |
| CUDA_LIB | CUDA`libcudart.so `所在路径 |
| TENSORRT_ROOT | TensorRT所在路径。**非必须**,如果`USE_TENSORRT`设置为`ON`时,需要填写该路径,该路径下的内容应有`bin`、`lib`和`include`等|
- step 4: 开始编译
```
sh compile.sh
```
### 执行预测
**注意:目前PV-RCNN仅支持使用GPU进行推理。**
执行命令参数说明
| 参数 | 说明 |
| -- | -- |
| model_file | 导出模型的结构文件`pv_rcnn.pdmodel`所在路径 |
| params_file | 导出模型的参数文件`pv_rcnn.pdiparams`所在路径 |
| lidar_file | 待预测的点云文件所在路径 |
| num_point_dim | 点云文件中每个点的维度大小。例如,若每个点的信息是`x, y, z, intensity`,则`num_point_dim`填写为4 |
| point_cloud_range | 输入模型的点云所处的空间范围,超出此范围内的点将被滤除。格式为`"X_min Y_min Z_min X_max Y_Max Z_max"`|
```
./build/main --model_file /path/to/pv_rcnn.pdmodel --params_file /path/to/pv_rcnn.pdiparams --lidar_file /path/to/lidar.pcd.bin --num_point_dim 4 --point_cloud_range "0 -40 -3 70.4 40 1"
```
**注意:** 请预先确认实际待测试点云文件的维度是否是4,如果不是4,`--num_point_dim`请修改为实际值。
### 开启TensorRT加速预测【可选】
**注意:请根据编译步骤的step 3,修改`compile.sh`中TensorRT相关的编译参数,并重新编译。**
运行命令参数说明如下:
| 参数 | 说明 |
| -- | -- |
| model_file | 导出模型的结构文件`pv_rcnn.pdmodel`所在路径 |
| params_file | 导出模型的参数文件`pv_rcnn.pdiparams`所在路径 |
| lidar_file | 待预测的点云文件所在路径 |
| num_point_dim | 点云文件中每个点的维度大小。例如,若每个点的信息是`x, y, z, intensity`,则`num_point_dim`填写为4 |
| point_cloud_range | 输入模型的点云所处的空间范围,超出此范围内的点将被滤除。格式为`"X_min Y_min Z_min X_max Y_Max Z_max"`|
| use_trt | 是否使用TensorRT进行加速,默认0|
| trt_precision | 当use_trt设置为1时,模型精度可设置0或1,0表示fp32, 1表示fp16。默认0 |
| trt_use_static | 当trt_use_static设置为1时,**在首次运行程序的时候会将TensorRT的优化信息进行序列化到磁盘上,下次运行时直接加载优化的序列化信息而不需要重新生成**。默认0 |
| trt_static_dir | 当trt_use_static设置为1时,保存优化信息的路径 |
| collect_shape_info | 是否收集模型动态shape信息。默认0。**只需首次运行,下次运行时直接加载生成的shape信息文件即可进行TensorRT加速推理** |
| dynamic_shape_file | 保存模型动态shape信息的文件路径。 |
* **首次运行TensorRT**,收集模型动态shape信息,并保存至`--dynamic_shape_file`指定的文件中
```
./build/main --model_file /path/to/pv_rcnn.pdmodel --params_file /path/to/pv_rcnn.pdiparams --lidar_file /path/to/lidar.pcd.bin --num_point_dim 4 --point_cloud_range "0 -40 -3 70.4 40 1" --use_trt 1 --collect_shape_info 1 --dynamic_shape_file /path/to/shape_info.txt
```
* 加载`--dynamic_shape_file`指定的模型动态shape信息,使用FP32精度进行预测
```
./build/main --model_file /path/to/pv_rcnn.pdmodel --params_file /path/to/pv_rcnn.pdiparams --lidar_file /path/to/lidar.pcd.bin --num_point_dim 4 --point_cloud_range "0 -40 -3 70.4 40 1" --use_trt 1 --dynamic_shape_file /path/to/shape_info.txt
```
* 加载`--dynamic_shape_file`指定的模型动态shape信息,使用FP16精度进行预测
```
./build/main --model_file /path/to/pv_rcnn.pdmodel --params_file /path/to/pv_rcnn.pdiparams --lidar_file /path/to/lidar.pcd.bin --num_point_dim 4 --point_cloud_range "0 -40 -3 70.4 40 1" --use_trt 1 --dynamic_shape_file /path/to/shape_info.txt --trt_precision 1
```
* 如果觉得每次运行时模型加载的时间过长,可以设置`trt_use_static`和`trt_static_dir`,首次运行时将TensorRT的优化信息保存在硬盘中,后续直接反序列化优化信息即可
```
./build/main --model_file /path/to/pv_rcnn.pdmodel --params_file /path/to/pv_rcnn.pdiparams --lidar_file /path/to/lidar.pcd.bin --num_point_dim 4 --point_cloud_range "0 -40 -3 70.4 40 1" --use_trt 1 --dynamic_shape_file /path/to/shape_info.txt --trt_precision 1 --trt_use_static 1 --trt_static_dir /path/to/OptimCacheDir
```
### Python部署
**注意:目前PV-RCNN仅支持使用GPU进行推理。**
命令参数说明如下:
| 参数 | 说明 |
| -- | -- |
| model_file | 导出模型的结构文件`pv_rcnn.pdmodel`所在路径 |
| params_file | 导出模型的参数文件`pv_rcnn.pdiparams`所在路径 |
| lidar_file | 待预测的点云文件所在路径 |
| num_point_dim | 点云文件中每个点的维度大小。例如,若每个点的信息是`x, y, z, intensity`,则`num_point_dim`填写为4 |
| point_cloud_range | 输入模型的点云所处的空间范围,超出此范围内的点将被滤除。格式为`X_min Y_min Z_min X_max Y_Max Z_max`|
| use_trt | 是否使用TensorRT进行加速,默认0|
| trt_precision | 当use_trt设置为1时,模型精度可设置0或1,0表示fp32, 1表示fp16。默认0 |
| trt_use_static | 当trt_use_static设置为1时,**在首次运行程序的时候会将TensorRT的优化信息进行序列化到磁盘上,下次运行时直接加载优化的序列化信息而不需要重新生成**。默认0 |
| trt_static_dir | 当trt_use_static设置为1时,保存优化信息的路径 |
| collect_shape_info | 是否收集模型动态shape信息。默认0。**只需首次运行,后续直接加载生成的shape信息文件即可进行TensorRT加速推理** |
| dynamic_shape_file | 保存模型动态shape信息的文件路径。 |
运行以下命令,执行预测:
```
python infer.py --model_file /path/to/pv_rcnn.pdmodel --params_file /path/to/pv_rcnn.pdiparams --lidar_file /path/to/lidar.pcd.bin --num_point_dim 4 --point_cloud_range 0 -40 -3 70.4 40 1
```
## <h2 id="6">自定义数据集</h2>
请参考文档[自定义数据集格式说明](../../../datasets/custom.md)准备自定义数据集。
| 0
|
apollo_public_repos/apollo-model-centerpoint/docs/models
|
apollo_public_repos/apollo-model-centerpoint/docs/models/pointpillars/README.md
|
# PointPillars: Fast Encoders for Object Detection from Point Clouds
## 目录
* [引用](#h2-id1h2)
* [简介](#h2-id2h2)
* [模型库](#h2-id3h2)
* [训练配置](#h2-id4h2)
* [使用教程](#h2-id5h2)
* [数据准备](#h3-id51h3)
* [训练](#h3-id52h3)
* [评估](#h3-id53h3)
* [模型导出](#h3-id54h3)
* [模型部署](#h3-id55h3)
## <h2 id="1">引用</h2>
> Lang, Alex H., Sourabh, Vora, Holger, Caesar, Lubing, Zhou, Jiong, Yang, and Oscar, Beijbom. "PointPillars: Fast Encoders for Object Detection From Point Clouds." . In 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (pp. 12689-12697).2019.
## <h2 id="2">简介</h2>
PointPillars是目前工业界应用广泛的点云检测模型,其最主要的特点是检测速度和精度的平衡。PointPillars 在 [VoxelNet](https://arxiv.org/abs/1711.06396) 和 [SECOND](https://pdfs.semanticscholar.org/5125/a16039cabc6320c908a4764f32596e018ad3.pdf)
的基础上针对性能进行了优化,将点云转化为柱体(Pillars)表示,从而使得编码后的点云特征可以使用2D卷积神经网络进行检测任务。
## <h2 id="3">模型库</h2>
- PointPillars在KITTI Val set数据集上Car类别的表现
| 模型 | Car<br>Easy Mod. Hard | V100 TensorRT FP32(FPS) | V100 TensorRT FP16(FPS) | 模型下载 | 配置文件 |
|:------------:|:---------------------:|:-----------------------:|:-----------------------:|:--------------------------------------------------------------------------------------------------------:|:--------------------------------------------------------------------------:|
| PointPillars | 86.90 75.21 71.57 | 37.3 | 40.5 | [model](https://bj.bcebos.com/paddle3d/models/pointpillars/pointpillars_xyres16_kitti_car/model.pdparams) | [config](../../../configs/pointpillars/pointpillars_xyres16_kitti_car.yml) |
- PointPillars在KITTI Val set数据集上Cyclist及Pedestrian类别的表现
| 模型 | Cyclist<br>Easy Mod. Hard | Pedestrian<br>Easy Mod. Hard | V100 TensorRT FP32(FPS) | V100 TensorRT FP16(FPS) | 模型下载 | 配置文件 |
|:------------:|:-------------------------:|:----------------------------:|:-----------------------:|:-----------------------:|:-----------------------------------------------------------------------------------------------------------------------:|:-----------------------------------------------------------------------------------------:|
| PointPillars | 84.36 64.66 60.53 | 66.13 60.36 54.40 | 30.0 | 30.2 | [model](https://bj.bcebos.com/paddle3d/models/pointpillars/pointpillars_xyres16_kitti_cyclist_pedestrian/model.pdparams) | [config](../../../configs/pointpillars/pointpillars_xyres16_kitti_cyclist_pedestrian.yml) |
## <h2 id="4">训练配置</h2>
我们提供了在开源数据集上的训练配置与结果,详见[PointPillars 训练配置](../../../configs/pointpillars)。
## <h2 id="5">使用教程</h2>
### <h3 id="51">数据准备</h3>
- 目前Paddle3D中提供的PointPillars模型支持在KITTI数据集上训练,因此需要先准备KITTI数据集,请在[官网](http://www.cvlibs.net/datasets/kitti/eval_object.php?obj_benchmark=3d)进行下载:
1. Download Velodyne point clouds, if you want to use laser information (29 GB)
2. training labels of object data set (5 MB)
3. camera calibration matrices of object data set (16 MB)
并下载数据集的划分文件列表:
```shell
wget https://bj.bcebos.com/paddle3d/datasets/KITTI/ImageSets.tar.gz
```
将数据解压后按照下方的目录结构进行组织:
```
└── kitti_dataset_root
|—— training
|—— label_2
|—— 000001.txt
|—— ...
|—— calib
|—— 000001.txt
|—— ...
|—— velodyne
|—— 000001.bin
|—— ...
|—— ImageSets
|—— test.txt
|—— train.txt
|—— trainval.txt
|—— val.txt
```
在Paddle3D的目录下创建软链接 `datasets/KITTI`,指向到上面的数据集目录:
```
mkdir datasets
ln -s /path/to/kitti_dataset_root ./datasets
mv ./datasets/kitti_dataset_root ./datasets/KITTI
```
- 生成训练时数据增强所需的真值库:
```
python tools/create_det_gt_database.py --dataset_name kitti --dataset_root ./datasets/KITTI --save_dir ./datasets/KITTI
```
`--dataset_root`指定KITTI数据集所在路径,`--save_dir`指定用于保存所生成的真值库的路径。该命令执行后,`save_dir`生成的目录如下:
```
└── kitti_train_gt_database
|—— anno_info_train.pkl
|—— Car
|—— 4371_Car_7.bin
|—— ...
|—— Cyclist
```
### <h3 id="52">训练</h3>
位于`Paddle3D/`目录下,执行:
```shell
python -m paddle.distributed.launch --gpus 0 \
tools/train.py \
--config configs/pointpillars/pointpillars_xyres16_kitti_car.yml \
--save_interval 1856 \
--keep_checkpoint_max 100 \
--save_dir outputs/pointpillars \
--do_eval \
--num_workers 8
```
训练脚本支持设置如下参数:
| 参数名 | 用途 | 是否必选项 | 默认值 |
|:--------------------|:-------------------------------|:------:|:---------:|
| gpus | 使用的GPU编号 | 是 | - |
| config | 配置文件 | 是 | - |
| save_dir | 模型和visualdl日志文件的保存根路径 | 否 | output |
| num_workers | 用于异步读取数据的进程数量, 大于等于1时开启子进程读取数据 | 否 | 2 |
| save_interval | 模型保存的间隔步数 | 否 | 1000 |
| do_eval | 是否在保存模型时进行评估 | 否 | 否 |
| log_interval | 打印日志的间隔步数 | 否 | 10 |
| keep_checkpoint_max | 最新模型保存个数 | 否 | 5 |
| resume | 是否从断点恢复训练 | 否 | 否 |
| batch_size | mini-batch大小(每张GPU) | 否 | 在配置文件中指定 |
| iters | 训练轮数 | 否 | 在配置文件中指定 |
| learning_rate | 学习率 | 否 | 在配置文件中指定 |
| seed | Paddle的全局随机种子值 | 否 | None |
### <h3 id="53">评估</h3>
位于`Paddle3D/`目录下,执行:
```shell
python tools/evaluate.py \
--config configs/pointpillars/pointpillars_xyres16_kitti_car.yml \
--model /path/to/model.pdparams \
--num_workers 8
```
评估脚本支持设置如下参数:
| 参数名 | 用途 | 是否必选项 | 默认值 |
|:--------------------|:-------------------------------|:------:|:---------:|
| config | 配置文件 | 是 | - |
| model | 待评估模型路径 | 是 | - |
| num_workers | 用于异步读取数据的进程数量, 大于等于1时开启子进程读取数据 | 否 | 2 |
| batch_size | mini-batch大小 | 否 | 在配置文件中指定 |
### <h3 id="54">模型导出</h3>
运行以下命令,将训练时保存的动态图模型文件导出成推理引擎能够加载的静态图模型文件。
```shell
python tools/export.py \
--config configs/pointpillars/pointpillars_xyres16_kitti_car.yml \
--model /path/to/model.pdparams \
--save_dir /path/to/output
```
模型导出脚本支持设置如下参数:
| 参数名 | 用途 | 是否必选项 | 默认值 |
|:------------|:-------------------------------------------------------------------------------------------------------------|:------:|:---------:|
| config | 配置文件 | 是 | - |
| model | 待导出模型参数`model.pdparams`路径 | 是 | - |
| save_dir | 保存导出模型的路径,`save_dir`下将会生成三个文件:`pointpillars.pdiparams `、`pointpillars.pdiparams.info`和`pointpillars.pdmodel` | 否 | `deploy` |
### <h3 id="55">模型部署</h3>
#### C++部署(Linux系统)
#### 环境依赖:
- GCC >= 5.4.0
- Cmake >= 3.5.1
- Ubuntu 16.04/18.04
> 说明:本文档的部署环节在以下环境中进行过测试并通过:
- GCC==8.2.0
- Cmake==3.16.0
- Ubuntu 18.04
- CUDA 11.2
- cuDNN==8.1.1
- PaddleInference==2.3.1
- TensorRT-8.2.5.1.Linux.x86_64-gnu.cuda-11.4.cudnn8.2
#### 编译步骤:
**注意:目前PointPillars仅支持使用GPU进行推理。**
- step 1: 进入部署代码所在路径
```commandline
cd deploy/pointpillars/cpp
```
- step 2: 下载Paddle Inference C++预编译库
Paddle Inference针对**是否使用GPU**、**是否支持TensorRT**、以及**不同的CUDA/cuDNN/GCC版本**
均提供已经编译好的库文件,请至[Paddle Inference C++预编译库下载列表](https://www.paddlepaddle.org.cn/inference/user_guides/download_lib.html#c)
选择符合的版本。
- step 3: 修改`compile.sh`中的编译参数
主要修改编译脚本`compile.sh`中的以下参数:
| 参数名 | 说明 | 是否必选项 | 默认值 |
|:--------------|:---------------------------------------------------------------------------------------------|:------:|:-----------------------------------------------------------------:|
| WITH_GPU | 是否使用GPU | 否 | ON |
| USE_TENSORRT | 是否使用TensorRT加速 | 否 | ON |
| LIB_DIR | Paddle Inference C++预编译包所在路径,该路径下的内容应有:`CMakeCache.txt`、`paddle`、`third_party`和`version.txt` | 是 | - |
| CUDNN_LIB | cuDNN`libcudnn.so`所在路径 | 否 | `/usr/lib/x86_64-linux-gnu` |
| CUDA_LIB | CUDA`libcuda.so`所在路径 | 否 | `/usr/local/cuda/lib64` |
| TENSORRT_ROOT | TensorRT所在路径 | 否 | 如果`USE_TENSORRT`设置为`ON`时,需要填写该路径,该路径下的内容应有`bin`、`lib`和`include`等 |
- step 4: 开始编译
```commandline
sh compile.sh
```
#### 执行预测:
**注意:目前PointPillars仅支持使用GPU进行推理。**
执行命令参数说明
| 参数名 | 说明 | 是否必选项 | 默认值 |
|:--------------------|:------------------------------------------------------------------------|:------:|:----:|
| model_file | 导出模型的结构文件`pointpillars.pdmodel`所在路径 | 是 | - |
| params_file | 导出模型的参数文件`pointpillars.pdiparams`所在路径 | 是 | - |
| lidar_file | 待预测的点云文件所在路径 | 是 | - |
| point_cloud_range | 模型中将点云划分为柱体(pillars)时选取的点云范围,格式为`"X_min Y_min Z_min X_max Y_Max Z_max"` | 是 | - |
| voxel_size | 模型中将点云划分为柱体(pillars)时每个柱体的尺寸,格式为`"X_size Y_size Z_size"` | 是 | - |
| max_points_in_voxel | 模型中将点云划分为柱体(pillars)时每个柱体包含点数量上限 | 是 | - |
| max_voxel_num | 模型中将点云划分为柱体(pillars)时保留的柱体数量上限 | 是 | - |
| num_point_dim | 点云文件中每个点的维度大小。例如,若每个点的信息是`x, y, z, intensity`,则`num_point_dim`填写为4 | 否 | 4 |
执行命令:
```shell
./build/main \
--model_file /path/to/pointpillars.pdmodel \
--params_file /path/to/pointpillars.pdiparams \
--lidar_file /path/to/lidar.pcd.bin \
--point_cloud_range "0 -39.68 -3 69.12 39.68 1" \
--voxel_size ".16 .16 4" \
--max_points_in_voxel 32 \
--max_voxel_num 40000
```
#### 开启TensorRT加速预测【可选】:
**注意:请根据编译步骤的step 3,修改`compile.sh`中TensorRT相关的编译参数,并重新编译。**
运行命令参数说明如下:
| 参数名 | 说明 | 是否必选项 | 默认值 |
|:--------------------|:----------------------------------------------------------------------------------------|:------:|:----:|
| model_file | 导出模型的结构文件`pointpillars.pdmodel`所在路径 | 是 | - |
| params_file | 导出模型的参数文件`pointpillars.pdiparams`所在路径 | 是 | - |
| lidar_file | 待预测的点云文件所在路径 | 是 | - |
| point_cloud_range | 模型中将点云划分为柱体(pillars)时选取的点云范围,格式为`"X_min Y_min Z_min X_max Y_Max Z_max"` | 是 | - |
| voxel_size | 模型中将点云划分为柱体(pillars)时每个柱体的尺寸,格式为`"X_size Y_size Z_size"` | 是 | - |
| max_points_in_voxel | 模型中将点云划分为柱体(pillars)时每个柱体包含点数量上限 | 是 | - |
| max_voxel_num | 模型中将点云划分为柱体(pillars)时保留的柱体数量上限 | 是 | - |
| num_point_dim | 点云文件中每个点的维度大小。例如,若每个点的信息是`x, y, z, intensity`,则`num_point_dim`填写为4 | 否 | 4 |
| use_trt | 是否开启TensorRT加速预测 | 否 | 0 |
| trt_precision | 当`use_trt`设置为1时,模型精度可设置0或1,0表示fp32, 1表示fp16 | 否 | 0 |
| trt_use_static | 当`trt_use_static`设置为1时,**在首次运行程序的时候会将TensorRT的优化信息进行序列化到磁盘上,下次运行时直接加载优化的序列化信息而不需要重新生成** | 否 | 0 |
| trt_static_dir | 当`trt_use_static`设置为1时,保存优化信息的路径 | 否 | - |
| collect_shape_info | 当`use_trt`设置为1时,是否收集模型动态shape信息。默认0。**只需首次运行,下次运行时直接加载生成的shape信息文件即可进行TensorRT加速推理** | 否 | 0 |
| dynamic_shape_file | 当`collect_shape_info`设置为1时,保存模型动态shape信息的文件路径 | 否 | - |
* **首次运行TensorRT**,收集模型动态shape信息,并保存至`--dynamic_shape_file`指定的文件中
```shell
./build/main \
--model_file /path/to/pointpillars.pdmodel \
--params_file /path/to/pointpillars.pdiparams \
--lidar_file /path/to/lidar.bin \
--num_point_dim 4 \
--point_cloud_range "0 -39.68 -3 69.12 39.68 1" \
--voxel_size ".16 .16 4" \
--max_points_in_voxel 32 \
--max_voxel_num 40000 \
--use_trt 1 \
--collect_shape_info 1 \
--dynamic_shape_file /path/to/shape_info.txt
```
* 加载`--dynamic_shape_file`指定的模型动态shape信息,使用FP32精度进行预测
```shell
./build/main \
--model_file /path/to/pointpillars.pdmodel \
--params_file /path/to/pointpillars.pdiparams \
--lidar_file /path/to/lidar.bin \
--num_point_dim 4 \
--point_cloud_range "0 -39.68 -3 69.12 39.68 1" \
--voxel_size ".16 .16 4" \
--max_points_in_voxel 32 \
--max_voxel_num 40000 \
--use_trt 1 \
--dynamic_shape_file /path/to/shape_info.txt
```
* 加载`--dynamic_shape_file`指定的模型动态shape信息,使用FP16精度进行预测
```shell
./build/main \
--model_file /path/to/pointpillars.pdmodel \
--params_file /path/to/pointpillars.pdiparams \
--lidar_file /path/to/lidar.bin \
--num_point_dim 4 \
--point_cloud_range "0 -39.68 -3 69.12 39.68 1" \
--voxel_size ".16 .16 4" \
--max_points_in_voxel 32 \
--max_voxel_num 40000 \
--use_trt 1 \
--dynamic_shape_file /path/to/shape_info.txt \
--trt_precision 1
```
* 如果觉得每次运行时模型加载的时间过长,可以设置`trt_use_static`和`trt_static_dir`,首次运行时将TensorRT的优化信息保存在硬盘中,后续直接反序列化优化信息即可
```shell
./build/main \
--model_file /path/to/pointpillars.pdmodel \
--params_file /path/to/pointpillars.pdiparams \
--lidar_file /path/to/lidar.bin \
--num_point_dim 4 \
--point_cloud_range "0 -39.68 -3 69.12 39.68 1" \
--voxel_size ".16 .16 4" \
--max_points_in_voxel 32 \
--max_voxel_num 40000 \
--use_trt 1 \
--dynamic_shape_file /path/to/shape_info.txt \
--trt_use_static 1 \
--trt_static_dir /path/to/OptimCacheDir
```
#### Python 部署
**注意:目前PointPillars仅支持使用GPU进行推理。**
运行命令参数说明如下:
| 参数名 | 用途 | 是否必选项 | 默认值 |
|:--------------------|:--------------------------------------------------------------------------------------|:------|:----|
| model_file | 导出模型的结构文件`pointpillars.pdmodel`所在路径 | 是 | - |
| params_file | 导出模型的参数文件`pointpillars.pdiparams`所在路径 | 是 | - |
| lidar_file | 待预测的点云所在路径 | 是 | - |
| point_cloud_range | 模型中将点云划分为柱体(pillars)时选取的点云范围,格式为`X_min Y_min Z_min X_max Y_Max Z_max` | 是 | - |
| voxel_size | 模型中将点云划分为柱体(pillars)时每个柱体的尺寸,格式为`X_size Y_size Z_size` | 是 | - |
| max_points_in_voxel | 模型中将点云划分为柱体(pillars)时每个柱体包含点数量上限 | 是 | - |
| max_voxel_num | 模型中将点云划分为柱体(pillars)时保留的柱体数量上限 | 是 | - |
| num_point_dim | 点云文件中每个点的维度大小。例如,若每个点的信息是`x, y, z, intensity`,则`num_point_dim`填写为4 | 否 | 4 |
| use_trt | 是否使用TensorRT进行加速 | 否 | 0 |
| trt_precision | 当use_trt设置为1时,模型精度可设置0或1,0表示fp32, 1表示fp16 | 否 | 0 |
| trt_use_static | 当trt_use_static设置为1时,**在首次运行程序的时候会将TensorRT的优化信息进行序列化到磁盘上,下次运行时直接加载优化的序列化信息而不需要重新生成** | 否 | 0 |
| trt_static_dir | 当trt_use_static设置为1时,保存优化信息的路径 | 否 | - |
| collect_shape_info | 是否收集模型动态shape信息。默认0。**只需首次运行,后续直接加载生成的shape信息文件即可进行TensorRT加速推理** | 否 | 0 |
| dynamic_shape_file | 保存模型动态shape信息的文件路径 | 否 | - |
运行以下命令,执行预测:
```shell
python infer.py \
--model_file /path/to/pointpillars.pdmodel \
--params_file /path/to/pointpillars.pdiparams \
--lidar_file /path/to/lidar.bin \
--point_cloud_range 0 -39.68 -3 69.12 39.68 1 \
--voxel_size .16 .16 4 \
--max_points_in_voxel 32 \
--max_voxel_num 40000
```
| 0
|
apollo_public_repos/apollo-model-centerpoint/docs/models
|
apollo_public_repos/apollo-model-centerpoint/docs/models/bevformer/README.md
|
# BEVFormer: Learning Bird's-Eye-View Representation from Multi-Camera Images via Spatiotemporal Transformers
## 目录
* [引用](#1)
* [简介](#2)
* [模型库](#3)
* [训练 & 评估](#4)
* [nuScenes数据集](#41)
* [导出 & 部署](#8)
## <h2 id="1">引用</h2>
```
@article{li2022bevformer,
  title={BEVFormer: Learning Bird’s-Eye-View Representation from Multi-Camera Images via Spatiotemporal Transformers},
  author={Li, Zhiqi and Wang, Wenhai and Li, Hongyang and Xie, Enze and Sima, Chonghao and Lu, Tong and Qiao, Yu and Dai, Jifeng},
  journal={arXiv preprint arXiv:2203.17270},
year={2022}
}
```
## <h2 id="2">简介</h2>
BEVFormer以多目图像作为输入,输出三维空间里目标物体的位置、大小、方向角以及类别。整体基于DETR3D的架构设计,分为Encoder和Decoder两个部分。Encoder部分以BEV query map、当前帧的多目图像、历史帧的BEV feature map作为输入,输出当前帧的BEV feature map。其中,设计的spatial-cross-attention使用BEV query map去聚合BEV每个3D位置投影到2D多目图像上的特征,设计的temporal-cross-attention使用BEV query map去聚合BEV每个3D位置在历史帧BEV feature map上的特征,使得当前帧的BEV feature map具备时空融合的特征。在Decoder部分,以object queries作为输入,输出其对应的3D bounding box和label。其中,object queries会聚合self-attention特征以及其在Encoder输出的BEV feature map上的特征。目前BEVFormer在nuScenes数据集上的精度依然处于领先水平。
## <h2 id="3">模型库</h2>
- BEVFormer在nuScenes Val set数据集上的表现
| 模型 | 骨干网络 | mAP | NDS | 模型下载 | 配置文件 | 日志 |
| ---- | ------ | --- | ----| ------- |------- | ---- |
| ResNet50-FPN | BEVFormer-tiny | 26.22 | 36.53 | [model](https://paddle3d.bj.bcebos.com/models/bevformer/bevformer_tiny_r50_fpn_nuscenes/model.pdparams) | [config](../../../configs/bevformer/bevformer_tiny_r50_fpn_nuscenes.yml) | [log](https://paddle3d.bj.bcebos.com/models/bevformer/bevformer_tiny_r50_fpn_nuscenes/train.log)\|[vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=062bfe8678693d3f5a63c43eab7a65aa) |
**注意:nuScenes benchmark使用8张V100 GPU训练得出。**
## <h2 id="4">训练 & 评估</h2>
### <h3 id="41">nuScenes数据集</h3>
#### 数据准备
- 目前Paddle3D中提供的BEVFormer模型支持在nuScenes数据集上训练,因此需要先准备nuScenes数据集,请在[官网](https://www.nuscenes.org/nuscenes)进行下载,并且需要下载CAN bus expansion数据,将数据集目录准备如下:
```
nuscenes_dataset_root
|-- can_bus
|—— samples
|—— sweeps
|—— maps
|—— v1.0-trainval
```
在Paddle3D的目录下创建软链接 `datasets/nuscenes`,指向到上面的数据集目录:
```
mkdir datasets
ln -s /path/to/nuscenes_dataset_root ./datasets
mv ./datasets/nuscenes_dataset_root ./datasets/nuscenes
```
为加速训练过程中Nuscenes数据集的加载和解析,需要事先将Nuscenes数据集里的标注信息存储在`pkl`后缀文件中。执行以下命令会生成`bevformer_nuscenes_annotation_train.pkl`和`bevformer_nuscenes_annotation_val.pkl`:
```
python tools/create_bevformer_nus_infos.py --dataset_root ./datasets/nuscenes --can_bus_root ./datasets/nuscenes --save_dir ./datasets/nuscenes
```
生成完后的数据集目录:
```
nuscenes_dataset_root
|-- can_bus
|—— samples
|—— sweeps
|—— maps
|—— v1.0-trainval
|—— bevformer_nuscenes_annotation_train.pkl
|—— bevformer_nuscenes_annotation_val.pkl
```
#### 训练
nuScenes数据集上的训练使用8张GPU:
下载骨干网络的预训练模型参数:
```
wget https://paddledet.bj.bcebos.com/models/pretrained/ResNet50_cos_pretrained.pdparams
```
```
python -m paddle.distributed.launch --gpus 0,1,2,3,4,5,6,7 tools/train.py --config configs/bevformer/bevformer_tiny_r50_fpn_nuscenes.yml --save_dir ./output_bevformer_tiny --num_workers 4 --save_interval 1 --model ./ResNet50_cos_pretrained.pdparams
```
训练启动参数介绍可参考文档[全流程速览](../../quickstart.md#模型训练)。
#### 评估
```
python tools/evaluate.py --config configs/bevformer/bevformer_tiny_r50_fpn_nuscenes.yml --model ./output_bevformer_tiny/epoch_24/model.pdparams --num_workers 4
```
评估启动参数介绍可参考文档[全流程速览](../../quickstart.md#模型评估)。
## <h2 id="8">导出 & 部署</h2>
### <h3 id="81">模型导出</h3>
运行以下命令,将训练时保存的动态图模型文件导出成推理引擎能够加载的静态图模型文件。
```
python tools/export.py --config configs/bevformer/bevformer_tiny.yml --model ./output_bevformer_tiny/epoch_24/model.pdparams --save_dir ./output_bevformer_tiny_inference
```
| 参数 | 说明 |
| -- | -- |
| config | **[必填]** 训练配置文件所在路径 |
| model | **[必填]** 训练时保存的模型文件`model.pdparams`所在路径 |
| save_dir | **[必填]** 保存导出模型的路径,`save_dir`下将会生成三个文件:`bevformer_inference.pdiparams `、`bevformer_inference.pdiparams.info`和`bevformer_inference.pdmodel` |
### <h3 id="82">模型部署</h3>
部署代码开发进行中。
| 0
|
apollo_public_repos/apollo-model-centerpoint/docs/models
|
apollo_public_repos/apollo-model-centerpoint/docs/models/paconv/README.md
|
# PAConv:Position Adaptive Convolution with Dynamic Kernel Assembling on Point Clouds
## 目录
* [引用](#1)
* [简介](#2)
* [模型库](#3)
* [使用教程](#4)
* [数据准备](#41)
* [训练](#42)
* [评估](#43)
* [导出部署](#5)
* [执行预测](#51)
* [python部署](#52)
* [自定义数据集](#6)
<br>
## <h2 id="1">引用</h2>
> Xu, Mutian and Ding, Runyu and Zhao, Hengshuang and Qi, Xiaojuan. "PAConv: Position Adaptive Convolution with Dynamic Kernel Assembling on Point Clouds." In Proceedings of CVPR 2021.
<br>
## <h2 id="2">简介</h2>
该论文介绍了位置自适应卷积(PAConv),一种用于三维点云处理的通用卷积运算。PAConv的关键是通过动态组合存储在权重库中的基本权重矩阵来构造卷积矩阵,其中这些权重矩阵的系数通过核心网从点位置自适应学习。通过这种方式,内核构建在数据驱动管理器中,使PAConv比二维卷积具有更大的灵活性,可以更好地处理不规则和无序的点云数据。此外,通过组合权重矩阵而不是从点位置预测核,降低了学习过程的复杂性。
此外,与现有的点云卷积运算不同,它们的网络架构通常是经过精心设计的,该论文中的PAConv可以集成到基于经典MLP的点云处理网络中,而不需要改变网络配置。即使建立在简单的网络上,该论文中的的方法仍然接近甚至超过最先进的模型,并显著提高了分类和分割任务的基线性能并且效率相当高。
<br>
## <h2 id="3">模型库</h2>
| 模型 | Accuracy | Vote Accuracy | 模型下载 | 配置文件 | 日志 |
| :--: | :--------: | :-------------------:| :------: | :-----: | :--: |
|PAConv | 93.4 | 93.47 | [model]() | [config]() | [log]() \| [vdl]() |
<br>
## <h2 id="4">使用教程</h2>
下面的教程将从数据准备开始,说明如何训练PAConv模型
### <h3 id="41">数据准备</h3>
目前Paddle3D中提供的模型支持在ModelNet40数据集上训练,因此需要先准备ModelNet40数据集,请在[官网](https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip)进行下载。
将数据解压后按照下方的目录结构进行组织
```shell
$ tree modelnet40_ply_hdf5_2048
modelnet40_ply_hdf5_2048
├── ply_data_test0.h5
├── ply_data_test1.h5
├── ply_data_test_0_id2file.json
├── ply_data_test_1_id2file.json
├── ply_data_train0.h5
├── ply_data_train1.h5
├── ply_data_train2.h5
├── ply_data_train3.h5
├── ply_data_train4.h5
├── ply_data_train_0_id2file.json
├── ply_data_train_1_id2file.json
├── ply_data_train_2_id2file.json
├── ply_data_train_3_id2file.json
├── ply_data_train_4_id2file.json
├── shape_names.txt
├── test_files.txt
└── train_files.txt
```
在Paddle3D的目录下创建软链接 `datasets/modelnet40_ply_hdf5_2048`,指向到上面的数据集目录
```shell
mkdir datasets
ln -s path/to/modelnet40_ply_hdf5_2048 datasets/modelnet40_ply_hdf5_2048
```
### <h3 id="42">训练</h3>
使用如下命令启动训练
```shell
# 每隔10步打印一次训练进度
# 每隔300步保存一次模型,模型参数将被保存在output目录下
python tools/train.py --config configs/paconv/paconv_modelnet40.yml --num_workers 2 --log_interval 10 --save_interval 300 --do_eval
```
### <h3 id="43">评估</h3>
使用如下命令启动评估
```shell
export CUDA_VISIBLE_DEVICES=0
# 使用Paddle3D提供的预训练模型进行评估
python tools/evaluate.py --config configs/paconv/paconv_modelnet40.yml --num_workers 2 --batch_size 16 --model output/iter_3000/model.pdparams
```
<br>
## <h2 id="5">导出部署</h2>
使用如下命令导出训练完成的模型
```shell
# 导出Paddle3D提供的预训练模型
python tools/export.py --config configs/paconv/paconv_modelnet40.yml --model output/iter_70000/model.pdparams --input_shape 1 1024 3
```
### <h3 id="51">执行预测</h3>
命令参数说明如下:
| 参数 | 说明 |
| -- | -- |
| model_file | 导出模型的结构文件`paconv.pdmodel`所在路径 |
| params_file | 导出模型的参数文件`paconv.pdiparams`所在路径 |
| input_file | 待预测的点云文件路径 |
| use_gpu | 是否使用GPU进行预测,默认为False|
| precision | 模型精度可设置fp32或fp16。默认fp32 |
| enable_benchmark | 是否开启benchmark |
| batch_size | 批次大小 |
| cpu_threads | cpu线程数 |
| enable_mkldnn | 是否使用mkldnn |
### <h3 id="52">Python部署</h3>
进入代码目录 `deploy/paconv/python`,运行以下命令,执行预测:
* 执行CPU预测
```shell
python infer.py --model_file /path/to/paconv.pdmodel --params_file /path/to/paconv.pdiparams --input_file /path/to/pointcloud --use_gpu=False --batch_size=1
```
* 执行GPU预测
```shell
python infer.py --model_file /path/to/paconv.pdmodel --params_file /path/to/paconv.pdiparams --input_file /path/to/pointcloud --use_gpu=True --batch_size=1
```
<br>
## <h2 id="6">自定义数据集</h2>
如果您想在自定义数据集上进行训练,请参考[自定义数据准备教程](../datasets/custom.md)将数据组织成ModelNet40数据格式即可
| 0
|
apollo_public_repos/apollo-model-centerpoint/docs/models
|
apollo_public_repos/apollo-model-centerpoint/docs/models/voxel_rcnn/README.md
|
# Voxel r-cnn: Towards high performance voxel-based 3d object detection
## 目录
* [引用](#1)
* [简介](#2)
* [模型库](#3)
* [训练 & 评估](#4)
* [KITTI数据集](#41)
* [导出 & 部署](#5)
* [自定义数据集](#6)
## <h2 id="1">引用</h2>
> Deng, Jiajun, et al. "Voxel r-cnn: Towards high performance voxel-based 3d object detection." Proceedings of the AAAI Conference on Artificial Intelligence. Vol. 35. No. 2. 2021.
## <h2 id="2">简介</h2>
Voxel RCNN在仅使用Voxel-Based的情况下,通过调整模型参数达到当时Point-Based和Voxel-Based相结合的SOTA方法的精度。并对RCNN模型结构做了改进,使得模型速度得到了大幅提升。
## <h2 id="3">模型库</h2>
- Voxel-RCNN在KITTI Val set数据集上的表现:
| 模型 | Car Mod@0.7 AP_R11 / AP_R40 | V100 Paddle Inference FP32(FPS) | 模型下载 | 配置文件 | 日志 |
| --- | --------------------------- | -------------------------------- | ------ | --------|--------|
| Voxel-RCNN | 84.64 / 85.49 | 22.39 | [model](https://paddle3d.bj.bcebos.com/models/voxel_rcnn/voxel_rcnn_005voxel_kitti_car/model.pdparams) | [config](../../../configs/voxel_rcnn/voxel_rcnn_005voxel_kitti_car.yml) | [log](https://paddle3d.bj.bcebos.com/models/voxel_rcnn/voxel_rcnn_005voxel_kitti_car/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=15cbecb8132e91dfa4fbd6d8f904c0a7) |
**注意:** KITTI benchmark使用8张V100 GPU训练得出。
## <h2 id="4">训练 & 评估</h2>
### <h3 id="41">KITTI数据集</h3>
- 目前Paddle3D中提供的Voxel-RCNN模型支持在KITTI数据集上训练,因此需要先准备KITTI数据集,请在[官网](http://www.cvlibs.net/datasets/kitti/eval_object.php?obj_benchmark=3d)进行下载:
1. Download Velodyne point clouds, if you want to use laser information (29 GB)
2. training labels of object data set (5 MB)
3. camera calibration matrices of object data set (16 MB)
并下载数据集的划分文件列表:
```
wget https://bj.bcebos.com/paddle3d/datasets/KITTI/ImageSets.tar.gz
```
将数据解压后按照下方的目录结构进行组织:
```
kitti_dataset_root
|—— training
| |—— label_2
| | |—— 000001.txt
| | |—— ...
| |—— calib
| | |—— 000001.txt
| | |—— ...
| |—— velodyne
| | |—— 000001.bin
| | |—— ...
|—— ImageSets
│ |—— test.txt
│ |—— train.txt
│ |—— trainval.txt
│ |—— val.txt
```
在Paddle3D的目录下创建软链接 `datasets/KITTI`,指向到上面的数据集目录:
```
mkdir datasets
ln -s /path/to/kitti_dataset_root ./datasets
mv ./datasets/kitti_dataset_root ./datasets/KITTI
```
- 生成训练时数据增强所需的真值库:
```
python tools/create_det_gt_database.py --dataset_name kitti --dataset_root ./datasets/KITTI --save_dir ./datasets/KITTI
```
`--dataset_root`指定KITTI数据集所在路径,`--save_dir`指定用于保存所生成的真值库的路径。该命令执行后,`save_dir`生成的目录如下:
```
kitti_train_gt_database
|—— anno_info_train.pkl
|—— Car
| |—— 4371_Car_7.bin
| |—— ...
|—— Cyclist
```
#### 训练
KITTI数据集上的训练使用8张GPU:
```
python -m paddle.distributed.launch --gpus 0,1,2,3,4,5,6,7 tools/train.py --config configs/voxel_rcnn/voxel_rcnn_car.yml --save_dir ./output_voxel_rcnn --num_workers 4 --save_interval 1
```
训练启动参数介绍可参考文档[全流程速览](../../quickstart.md#模型训练)。
#### 评估
```
python tools/evaluate.py --config configs/voxel_rcnn/voxel_rcnn_car.yml --model ./output_voxel_rcnn/epoch_80/model.pdparams --batch_size 1 --num_workers 4
```
**注意**:Voxel-RCNN的评估目前只支持batch_size为1。
评估启动参数介绍可参考文档[全流程速览](../../quickstart.md#模型评估)。
## <h2 id="5">导出 & 部署</h2>
### 模型导出
运行以下命令,将训练时保存的动态图模型文件导出成推理引擎能够加载的静态图模型文件。
```
python tools/export.py --config configs/voxel_rcnn/voxel_rcnn_car.yml --model /path/to/model.pdparams --save_dir /path/to/output
```
| 参数 | 说明 |
| -- | -- |
| config | **[必填]** 训练配置文件所在路径 |
| model | **[必填]** 训练时保存的模型文件`model.pdparams`所在路径 |
| save_dir | **[必填]** 保存导出模型的路径,`save_dir`下将会生成三个文件:`voxel_rcnn.pdiparams `、`voxel_rcnn.pdiparams.info`和`voxel_rcnn.pdmodel` |
### C++部署
#### Linux系统
#### 环境依赖
- GCC >= 5.4.0
- Cmake >= 3.5.1
- Ubuntu 16.04/18.04
> 说明:本文档的部署环节在以下环境中进行过测试并通过:
测试环境:
- GCC==8.2.0
- Cmake==3.16.0
- Ubuntu 18.04
- CUDA 11.2
- cuDNN==8.1.1
- Paddle Inference==2.3.1
- TensorRT-8.2.5.1.Linux.x86_64-gnu.cuda-11.4.cudnn8.2
#### 编译步骤
**注意:目前Voxel-RCNN仅支持使用GPU进行推理。**
- step 1: 进入部署代码所在路径
```
cd deploy/voxel_rcnn/cpp
```
- step 2: 下载Paddle Inference C++预编译库
Paddle Inference针对**是否使用GPU**、**是否支持TensorRT**、以及**不同的CUDA/cuDNN/GCC版本**均提供已经编译好的库文件,请至[Paddle Inference C++预编译库下载列表](https://www.paddlepaddle.org.cn/inference/user_guides/download_lib.html#c)选择符合的版本。
- step 3: 修改`compile.sh`中的编译参数
主要修改编译脚本`compile.sh`中的以下参数:
| 参数 | 说明 |
| -- | -- |
| WITH_GPU | 是否使用gpu。ON或OFF, OFF表示使用CPU,默认ON|
| USE_TENSORRT | 是否使用TensorRT加速。ON或OFF,默认OFF|
| LIB_DIR | Paddle Inference C++预编译包所在路径,该路径下的内容应有:`CMakeCache.txt`、`paddle`、`third_party`和`version.txt` |
| CUDNN_LIB | cuDNN`libcudnn.so`所在路径 |
| CUDA_LIB | CUDA`libcudart.so `所在路径 |
| TENSORRT_ROOT | TensorRT所在路径。**非必须**,如果`USE_TENSORRT`设置为`ON`时,需要填写该路径,该路径下的内容应有`bin`、`lib`和`include`等|
- step 4: 开始编译
```
sh compile.sh
```
### 执行预测
**注意:目前Voxel-RCNN仅支持使用GPU进行推理。**
执行命令参数说明
| 参数 | 说明 |
| -- | -- |
| model_file | 导出模型的结构文件`voxel_rcnn.pdmodel`所在路径 |
| params_file | 导出模型的参数文件`voxel_rcnn.pdiparams`所在路径 |
| lidar_file | 待预测的点云文件所在路径 |
| num_point_dim | 点云文件中每个点的维度大小。例如,若每个点的信息是`x, y, z, intensity`,则`num_point_dim`填写为4 |
| point_cloud_range | 输入模型的点云所处的空间范围,超出此范围内的点将被滤除。格式为`"X_min Y_min Z_min X_max Y_Max Z_max"`|
```
./build/main --model_file /path/to/voxel_rcnn.pdmodel --params_file /path/to/voxel_rcnn.pdiparams --lidar_file /path/to/lidar.pcd.bin --num_point_dim 4 --point_cloud_range "0 -40 -3 70.4 40 1"
```
**注意:** 请预先确认实际待测试点云文件的维度是否是4,如果不是4,`--num_point_dim`请修改为实际值。
### 开启TensorRT加速预测【可选】
**注意:请根据编译步骤的step 3,修改`compile.sh`中TensorRT相关的编译参数,并重新编译。**
运行命令参数说明如下:
| 参数 | 说明 |
| -- | -- |
| model_file | 导出模型的结构文件`voxel_rcnn.pdmodel`所在路径 |
| params_file | 导出模型的参数文件`voxel_rcnn.pdiparams`所在路径 |
| lidar_file | 待预测的点云文件所在路径 |
| num_point_dim | 点云文件中每个点的维度大小。例如,若每个点的信息是`x, y, z, intensity`,则`num_point_dim`填写为4 |
| point_cloud_range | 输入模型的点云所处的空间范围,超出此范围内的点将被滤除。格式为`"X_min Y_min Z_min X_max Y_Max Z_max"`|
| use_trt | 是否使用TensorRT进行加速,默认0|
| trt_precision | 当use_trt设置为1时,模型精度可设置0或1,0表示fp32, 1表示fp16。默认0 |
| trt_use_static | 当trt_use_static设置为1时,**在首次运行程序的时候会将TensorRT的优化信息进行序列化到磁盘上,下次运行时直接加载优化的序列化信息而不需要重新生成**。默认0 |
| trt_static_dir | 当trt_use_static设置为1时,保存优化信息的路径 |
| collect_shape_info | 是否收集模型动态shape信息。默认0。**只需首次运行,下次运行时直接加载生成的shape信息文件即可进行TensorRT加速推理** |
| dynamic_shape_file | 保存模型动态shape信息的文件路径。 |
* **首次运行TensorRT**,收集模型动态shape信息,并保存至`--dynamic_shape_file`指定的文件中
```
./build/main --model_file /path/to/voxel_rcnn.pdmodel --params_file /path/to/voxel_rcnn.pdiparams --lidar_file /path/to/lidar.pcd.bin --num_point_dim 4 --point_cloud_range "0 -40 -3 70.4 40 1" --use_trt 1 --collect_shape_info 1 --dynamic_shape_file /path/to/shape_info.txt
```
* 加载`--dynamic_shape_file`指定的模型动态shape信息,使用FP32精度进行预测
```
./build/main --model_file /path/to/voxel_rcnn.pdmodel --params_file /path/to/voxel_rcnn.pdiparams --lidar_file /path/to/lidar.pcd.bin --num_point_dim 4 --point_cloud_range "0 -40 -3 70.4 40 1" --use_trt 1 --dynamic_shape_file /path/to/shape_info.txt
```
* 加载`--dynamic_shape_file`指定的模型动态shape信息,使用FP16精度进行预测
```
./build/main --model_file /path/to/voxel_rcnn.pdmodel --params_file /path/to/voxel_rcnn.pdiparams --lidar_file /path/to/lidar.pcd.bin --num_point_dim 4 --point_cloud_range "0 -40 -3 70.4 40 1" --use_trt 1 --dynamic_shape_file /path/to/shape_info.txt --trt_precision 1
```
* 如果觉得每次运行时模型加载的时间过长,可以设置`trt_use_static`和`trt_static_dir`,首次运行时将TensorRT的优化信息保存在硬盘中,后续直接反序列化优化信息即可
```
./build/main --model_file /path/to/voxel_rcnn.pdmodel --params_file /path/to/voxel_rcnn.pdiparams --lidar_file /path/to/lidar.pcd.bin --num_point_dim 4 --point_cloud_range "0 -40 -3 70.4 40 1" --use_trt 1 --dynamic_shape_file /path/to/shape_info.txt --trt_precision 1 --trt_use_static 1 --trt_static_dir /path/to/OptimCacheDir
```
### Python部署
**注意:目前Voxel-RCNN仅支持使用GPU进行推理。**
命令参数说明如下:
| 参数 | 说明 |
| -- | -- |
| model_file | 导出模型的结构文件`voxel_rcnn.pdmodel`所在路径 |
| params_file | 导出模型的参数文件`voxel_rcnn.pdiparams`所在路径 |
| lidar_file | 待预测的点云文件所在路径 |
| num_point_dim | 点云文件中每个点的维度大小。例如,若每个点的信息是`x, y, z, intensity`,则`num_point_dim`填写为4 |
| point_cloud_range | 输入模型的点云所处的空间范围,超出此范围内的点将被滤除。格式为`X_min Y_min Z_min X_max Y_Max Z_max`|
| use_trt | 是否使用TensorRT进行加速,默认0|
| trt_precision | 当use_trt设置为1时,模型精度可设置0或1,0表示fp32, 1表示fp16。默认0 |
| trt_use_static | 当trt_use_static设置为1时,**在首次运行程序的时候会将TensorRT的优化信息进行序列化到磁盘上,下次运行时直接加载优化的序列化信息而不需要重新生成**。默认0 |
| trt_static_dir | 当trt_use_static设置为1时,保存优化信息的路径 |
| collect_shape_info | 是否收集模型动态shape信息。默认0。**只需首次运行,后续直接加载生成的shape信息文件即可进行TensorRT加速推理** |
| dynamic_shape_file | 保存模型动态shape信息的文件路径。 |
运行以下命令,执行预测:
```
python infer.py --model_file /path/to/voxel_rcnn.pdmodel --params_file /path/to/voxel_rcnn.pdiparams --lidar_file /path/to/lidar.pcd.bin --num_point_dim 4 --point_cloud_range 0 -40 -3 70.4 40 1
```
## <h2 id="6">自定义数据集</h2>
请参考文档[自定义数据集格式说明](../../../datasets/custom.md)准备自定义数据集。
| 0
|
apollo_public_repos/apollo-model-centerpoint/docs/models
|
apollo_public_repos/apollo-model-centerpoint/docs/models/dd3d/README.md
|
# DD3D: Is Pseudo-Lidar needed for Monocular 3D Object detection?
## 目录
* [引用](#1)
* [简介](#2)
* [训练配置](#3)
* [模型库](#4)
* [使用教程](#5)
* [数据准备](#6)
* [训练](#7)
* [评估](#8)
## <h2 id="1">引用</h2>
> Dennis Park and Rares Ambrus and Vitor Guizilini and Jie Li and Adrien Gaidon. "Is Pseudo-Lidar needed for Monocular 3D Object detection?" IEEE/CVF International Conference on Computer Vision (ICCV), 2021.
## <h2 id="2">简介</h2>
DD3D是一个端到端single-stage单目相机目标检测模型,融合了PL方法的优势(scaling with depth pre-training) 和end-to-end方法的优势(simplicity and generalization performance),训练过程简单,只包含depth pre-training和detection fine-tuning。在模型发布时,DD3D是NuScenes(nuscenes.org/object-det)单目3D检测排名第一的工作(截止2021.9.3)。
## <h2 id="3">训练配置</h2>
我们提供了在开源数据集KITTI上的训练配置与结果,详见[DD3D训练配置](../../../configs/dd3d)
## <h2 id="4">模型库</h2>
| 模型 | 骨干网络 | Car<br>Easy Mod. Hard | 模型下载 | 配置文件 | 日志 |
| :--: | :-------: | :-------------------: | :------: | :-----: | :--: |
|DD3D | dla_34 | 23.49 17.57 15.21 | [model](https://paddle3d.bj.bcebos.com/models/dd3d/dd3d_dla_34_kitti/model.pdparams) | [config](../../../configs/dd3d/dd3d_dla_34_kitti.yml) | [log](https://paddle3d.bj.bcebos.com/models/dd3d/dd3d_dla_34_kitti/train.log) \| [vdl](https://www.paddlepaddle.org.cn/paddle/visualdl/service/app/scalar?id=9862e660790ca627384dded9e1cd0a50) |
|DD3D | v2_99 | 29.17 23.42 20.73 | [model](https://paddle3d.bj.bcebos.com/models/dd3d/dd3d_v2_99_kitti/model.pdparams) | [config](../../../configs/dd3d/dd3d_v2_99_kitti.yml) | [log](https://paddle3d.bj.bcebos.com/models/dd3d/dd3d_v2_99_kitti/train.log) \| [vdl](https://www.paddlepaddle.org.cn/paddle/visualdl/service/app/scalar?id=52a9cd89f47b4c91f95bae558323f07c) |
## <h2 id="5">使用教程</h2>
## <h2 id="6">数据准备</h2>
请下载KITTI单目3D检测数据集,数据集信息请参考[KITTI官网](http://www.cvlibs.net/datasets/kitti/)
*注意:KITTI官网只区分了训练集和测试集,我们遵循业界的普遍做法,将7481个训练集样本,进一步划分为3712个训练集样本和3769个验证集样本*
下载好后的数据集目录结构
```
kitti
├── ImageSets
| ├── test.txt
| ├── train.txt
| └── val.txt
├── testing
| ├── calib
| └── image_2
├── training
├── calib
├── depth_2
├── image_2
└── label_2
...
```
将kitti数据软链至datasets/KITTI,或更改配置文件数据集路径。
## <h2 id="7">训练</h2>
单卡训练,先运行以下命令,进行warmup
```
python -u tools/train.py --config configs/dd3d/dd3d_dla_34_kitti_warmup.yml
```
然后进行训练
```
python -u tools/train.py --config configs/dd3d/dd3d_dla_34_kitti.yml --resume
```
多卡训练,先运行以下命令,进行warmup
```
export CUDA_VISIBLE_DEVICES=0,1,2,3
fleetrun tools/train.py --config configs/dd3d/dd3d_dla_34_kitti_warmup.yml
```
然后进行训练
```
export CUDA_VISIBLE_DEVICES=0,1,2,3
fleetrun tools/train.py --config configs/dd3d/dd3d_dla_34_kitti.yml --resume
```
训练中断,可以通过`--resume`进行继续训练。
## <h2 id="8">评估</h2>
运行以下命令,进行评估
```
python tools/evaluate.py --config configs/dd3d/dd3d_dla_34_kitti.yml --model pretrained_model_path
```
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/_base_/kitti_mono.yml
|
train_dataset:
type: KittiMonoDataset
dataset_root: datasets/KITTI
transforms:
- type: LoadImage
- type: Normalize
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
mode: train
val_dataset:
type: KittiMonoDataset
dataset_root: datasets/KITTI
transforms:
- type: LoadImage
- type: Normalize
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
mode: val
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/_base_/semantickitti.yml
|
train_dataset:
type: SemanticKITTISegDataset
dataset_root: datasets/SemanticKITTI
sequences: [ 0, 1, 2, 3, 4, 5, 6, 7, 9, 10 ]
transforms:
- type: LoadSemanticKITTIRange
project_label: true
- type: NormalizeRangeImage
mean: [ 12.12, 10.88, 0.23, -1.04, 0.21 ] # range, x, y, z, remission
std: [ 12.32, 11.47, 6.91, 0.86, 0.16 ] # range, x, y, z, remission
mode: train
val_dataset:
type: SemanticKITTISegDataset
dataset_root: datasets/SemanticKITTI
sequences: [ 8, ]
transforms:
- type: LoadSemanticKITTIRange
project_label: false
- type: NormalizeRangeImage
mean: [ 12.12, 10.88, 0.23, -1.04, 0.21 ] # range, x, y, z, remission
std: [ 12.32, 11.47, 6.91, 0.86, 0.16 ] # range, x, y, z, remission
mode: val
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/smoke/smoke_dla34_no_dcn_kitti.yml
|
_base_: '../_base_/kitti_mono.yml'
batch_size: 8
iters: 70000
train_dataset:
transforms:
- type: LoadImage
reader: pillow
to_chw: False
- type: Gt2SmokeTarget
mode: train
num_classes: 3
input_size: [1280, 384]
- type: Normalize
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
val_dataset:
transforms:
- type: LoadImage
reader: pillow
to_chw: False
- type: Gt2SmokeTarget
mode: val
num_classes: 3
input_size: [1280, 384]
- type: Normalize
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
optimizer:
type: Adam
lr_scheduler:
type: MultiStepDecay
milestones: [36000, 55000]
learning_rate: 1.25e-4
model:
type: SMOKE
backbone:
type: DLA34
# This will automatically save to ~/.paddle3d/pretrained/dla34/dla34.pdparams
pretrained: "https://bj.bcebos.com/paddle3d/pretrained/dla34.pdparams"
head:
type: SMOKEPredictor
num_classes: 3
reg_channels: [1, 2, 3, 2, 2]
num_chanels: 256
norm_type: "gn"
in_channels: 64
depth_ref: [28.01, 16.32]
# dim_ref is the reference size mentioned in the paper, the order here is [l, h, w]
dim_ref: [[3.88, 1.63, 1.53], [1.78, 1.70, 0.58], [0.88, 1.73, 0.67]]
max_detection: 50
pred_2d: True
export:
transforms:
- type: LoadImage
reader: pillow
to_chw: False
to_rgb: True
- type: Normalize
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/smoke/smoke_hrnet18_no_dcn_kitti_mini.yml
|
# This is a training configuration for a simplified version of KITTI. It is just for a quick start,
# all the hyperparameters are not strictly tuned, so the training result is not optimal
_base_: '../_base_/kitti_mono.yml'
batch_size: 8
iters: 10000
train_dataset:
transforms:
- type: LoadImage
reader: pillow
to_chw: False
- type: Gt2SmokeTarget
mode: train
num_classes: 3
input_size: [1280, 384]
- type: Normalize
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
val_dataset:
transforms:
- type: LoadImage
reader: pillow
to_chw: False
- type: Gt2SmokeTarget
mode: val
num_classes: 3
input_size: [1280, 384]
- type: Normalize
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
optimizer:
type: Adam
lr_scheduler:
type: MultiStepDecay
milestones: [5000, 8000]
learning_rate: 1.25e-4
model:
type: SMOKE
backbone:
type: $paddleseg.HRNet_W18
pretrained: https://bj.bcebos.com/paddleseg/dygraph/hrnet_w18_ssld.tar.gz
head:
type: SMOKEPredictor
num_classes: 3
reg_channels: [1, 2, 3, 2, 2]
num_chanels: 256
norm_type: "gn"
in_channels: 270
depth_ref: [28.01, 16.32]
# dim_ref is the reference size mentioned in the paper, the order here is [l, h, w]
dim_ref: [[3.88, 1.63, 1.53], [1.78, 1.70, 0.58], [0.88, 1.73, 0.67]]
max_detection: 50
pred_2d: True
export:
transforms:
- type: LoadImage
reader: pillow
to_chw: False
to_rgb: True
- type: Normalize
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/smoke/README.md
|
# SMOKE:Single-Stage Monocular 3D Object Detection via Keypoint Estimation
## 目录
* [引用](#引用)
* [简介](#简介)
* [训练配置](#训练配置)
* [使用教程](#使用教程)
* [数据准备](#数据准备)
* [训练](#训练)
* [评估](#评估)
* [导出部署](#导出部署)
* [自定义数据集](#自定义数据集)
<br>
## 引用
> Liu, Zechen, Zizhang Wu, and Roland Tóth. "Smoke: Single-stage monocular 3d object detection via keypoint estimation." In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pp. 996-997. 2020.
<br>
## 简介
SMOKE是一个单阶段的单目3D检测模型,该论文创新性地提出了预测物体中心点投影来间接预测物体3D检测框的方法。我们参照了Apollo项目对于该模型的[修改](https://github.com/ApolloAuto/apollo/tree/master/modules/perception/camera#architecture):
* 使用普通卷积替代了原论文中使用的可形变卷积
* 添加了一个头部来预测 2D 中心点和 3D 中心点之间的偏移
* 添加了另一个头部来预测 2D 边界框的宽度和高度。可以通过预测的二维中心、宽度和高度直接获得二维边界框
<br>
## 模型库
| 模型 | 骨干网络 | 3DmAP Mod. | Car<br>Easy Mod. Hard | Pedestrian<br>Easy Mod. Hard | Cyclist<br>Easy Mod. Hard | 模型下载 | 配置文件 | 日志 |
| :--: | :-------: | :--------: | :-------------------: | :--------------------------: | :-----------------------: | :------: | :-----: | :--: |
|SMOKE | DLA34 | 2.94 | 6.26 5.16 4.54 | 3.04 2.73 2.23 | 1.69 0.95 0.94 | [model](https://bj.bcebos.com/paddle3d/models/smoke/smoke_dla34_no_dcn_kitti/model.pdparams) | [config](../../../configs/smoke/smoke_dla34_no_dcn_kitti.yml) | [log](https://bj.bcebos.com/paddle3d/models/smoke/smoke_dla34_no_dcn_kitti/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=1650ec346b4426486bd079b506fc1f86) |
|SMOKE | HRNet18 | 4.05 | 8.48 6.44 5.74 | 5.02 4.23 3.06 | 2.59 1.49 1.37 | [model](https://bj.bcebos.com/paddle3d/models/smoke/smoke_hrnet18_no_dcn_kitti/model.pdparams) | [config](../../../configs/smoke/smoke_hrnet18_no_dcn_kitti.yml) | [log](https://bj.bcebos.com/paddle3d/models/smoke/smoke_hrnet18_no_dcn_kitti/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=4e31655b33d0f44b0c19399df8fb7b00) |
**注意:** KITTI benchmark使用4张V100 GPU训练得出。
<br>
## 使用教程
下面的教程将从数据准备开始,说明如何训练SMOKE模型
### 数据准备
目前Paddle3D中提供的SMOKE模型支持在KITTI数据集上训练,因此需要先准备KITTI数据集,请在[官网](http://www.cvlibs.net/datasets/kitti/eval_object.php?obj_benchmark=3d)进行下载:
1. left color images of object data set (12 GB)
2. training labels of object data set (5 MB)
3. camera calibration matrices of object data set (16 MB)
并下载数据集的划分文件列表:
```shell
wget https://bj.bcebos.com/paddle3d/datasets/KITTI/ImageSets.tar.gz
```
将数据解压后按照下方的目录结构进行组织
```shell
$ tree KITTI
KITTI
├── ImageSets
│ ├── test.txt
│ ├── train.txt
│ ├── trainval.txt
│ └── val.txt
└── training
├── calib
├── image_2
└── label_2
```
在Paddle3D的目录下创建软链接 `datasets/KITTI`,指向到上面的数据集目录
### 训练
使用如下命令启动4卡训练
```shell
export CUDA_VISIBLE_DEVICES=0,1,2,3
# 每隔50步打印一次训练进度
# 每隔5000步保存一次模型,模型参数将被保存在output目录下
fleetrun tools/train.py --config configs/smoke/smoke_dla34_no_dcn_kitti.yml --num_workers 2 --log_interval 50 --save_interval 5000
```
### 评估
使用如下命令启动评估
```shell
export CUDA_VISIBLE_DEVICES=0
# 使用Paddle3D提供的预训练模型进行评估
python tools/evaluate.py --config configs/smoke/smoke_dla34_no_dcn_kitti.yml --num_workers 2 --model output/iter_70000/model.pdparams
```
<br>
## 导出部署
使用如下命令导出训练完成的模型
```shell
# 导出Paddle3D提供的预训练模型
python tools/export.py --config configs/smoke/smoke_dla34_no_dcn_kitti.yml --model output/iter_70000/model.pdparams
```
### 执行预测
命令参数说明如下:
| 参数 | 说明 |
| -- | -- |
| model_file | 导出模型的结构文件`smoke.pdmodel`所在路径 |
| params_file | 导出模型的参数文件`smoke.pdiparams`所在路径 |
| image | 待预测的图片路径 |
| use_gpu | 是否使用GPU进行预测,默认为False|
| use_trt | 是否使用TensorRT进行加速,默认为False|
| trt_precision | 当use_trt设置为1时,模型精度可设置0/1/2,0表示fp32,1表示int8,2表示fp16。默认0 |
| collect_shape_info | 是否收集模型动态shape信息。默认为False。**只需首次运行,下次运行时直接加载生成的shape信息文件即可进行TensorRT加速推理** |
| dynamic_shape_file | 保存收集到的模型动态shape信息的文件路径。默认为dynamic_shape_info.txt |
### Python部署
进入代码目录 `deploy/smoke/python`,运行以下命令,执行预测:
* 执行CPU预测
```shell
python infer.py --model_file /path/to/smoke.pdmodel --params_file /path/to/smoke.pdiparams --image /path/to/image
```
* 执行GPU预测
```shell
python infer.py --model_file /path/to/smoke.pdmodel --params_file /path/to/smoke.pdiparams --image /path/to/image --use_gpu
```
* 执行CPU预测并显示3d框
```shell
python vis.py --model_file /path/to/smoke.pdmodel --params_file /path/to/smoke.pdiparams --image /path/to/image
```
* 执行GPU预测并显示3d框
```shell
python vis.py --model_file /path/to/smoke.pdmodel --params_file /path/to/smoke.pdiparams --image /path/to/image --use_gpu
```
* 执行TRT预测
**注意:需要下载支持TRT版本的paddlepaddle以及nvidia对应版本的TensorRT库**
* **首次运行TensorRT**,收集模型动态shape信息,并保存至`--dynamic_shape_file`指定的文件中
```shell
python infer.py --model_file /path/to/smoke.pdmodel --params_file /path/to/smoke.pdiparams --image /path/to/image --collect_shape_info --dynamic_shape_file /path/to/shape_info.txt
```
* 加载`--dynamic_shape_file`指定的模型动态shape信息,使用FP32精度进行预测
```shell
python infer.py --model_file /path/to/smoke.pdmodel --params_file /path/to/smoke.pdiparams --image /path/to/image --use_trt --dynamic_shape_file /path/to/shape_info.txt
```
### C++部署
#### 编译步骤
- step 1: 进入部署代码所在路径
```shell
cd deploy/smoke/cpp
```
- step 2: 下载Paddle Inference C++预编译库
Paddle Inference针对**是否使用GPU**、**是否支持TensorRT**、以及**不同的CUDA/cuDNN/GCC版本**均提供已经编译好的库文件,请至[Paddle Inference C++预编译库下载列表](https://www.paddlepaddle.org.cn/inference/user_guides/download_lib.html#c)选择符合的版本。
- step 3: 下载OpenCV
- step 4: 修改`compile.sh`中的编译参数
主要修改编译脚本`compile.sh`中的以下参数:
| 参数 | 说明 |
| -- | -- |
| WITH_GPU | 是否使用gpu。ON或OFF, OFF表示使用CPU,默认ON|
| USE_TENSORRT | 是否使用TensorRT加速。ON或OFF,默认OFF|
| LIB_DIR | Paddle Inference C++预编译包所在路径,该路径下的内容应有:`CMakeCache.txt`、`paddle`、`third_party`和`version.txt` |
| CUDNN_LIB | cuDNN`libcudnn.so`所在路径 |
| CUDA_LIB | CUDA`libcudart.so `所在路径 |
| TENSORRT_ROOT | TensorRT所在路径。**非必须**,如果`USE_TENSORRT`设置为`ON`时,需要填写该路径,该路径下的内容应有`bin`、`lib`和`include`等|
- step 5: 开始编译
```shell
sh compile.sh
```
- step 6: 执行预测
```shell
./build/infer --model_file /path/to/smoke.pdmodel --params_file /path/to/smoke.pdiparams --image /path/to/image
```
**注意:如果要使用TRT预测,请根据编译步骤的step 4,修改`compile.sh`中TensorRT相关的编译参数,并重新编译。**
<br>
## 自定义数据集
如果您想在自定义数据集上进行训练,请参考[自定义数据准备教程](../datasets/custom.md)将数据组织成KITTI数据格式即可
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/smoke/smoke_hrnet18_no_dcn_kitti.yml
|
_base_: '../_base_/kitti_mono.yml'
batch_size: 8
iters: 70000
train_dataset:
transforms:
- type: LoadImage
reader: pillow
to_chw: False
- type: Gt2SmokeTarget
mode: train
num_classes: 3
input_size: [1280, 384]
- type: Normalize
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
val_dataset:
transforms:
- type: LoadImage
reader: pillow
to_chw: False
- type: Gt2SmokeTarget
mode: val
num_classes: 3
input_size: [1280, 384]
- type: Normalize
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
optimizer:
type: Adam
lr_scheduler:
type: MultiStepDecay
milestones: [36000, 55000]
learning_rate: 1.25e-4
model:
type: SMOKE
backbone:
type: $paddleseg.HRNet_W18
pretrained: https://bj.bcebos.com/paddleseg/dygraph/hrnet_w18_ssld.tar.gz
head:
type: SMOKEPredictor
num_classes: 3
reg_channels: [1, 2, 3, 2, 2]
num_chanels: 256
norm_type: "gn"
in_channels: 270
depth_ref: [28.01, 16.32]
# dim_ref is the reference size mentioned in the paper, the order here is [l, h, w]
dim_ref: [[3.88, 1.63, 1.53], [1.78, 1.70, 0.58], [0.88, 1.73, 0.67]]
max_detection: 50
pred_2d: True
export:
transforms:
- type: LoadImage
reader: pillow
to_chw: False
to_rgb: True
- type: Normalize
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/smoke/smoke_dla34_no_dcn_kitti_amp.yml
|
_base_: '../_base_/kitti_mono.yml'
batch_size: 8
iters: 70000
amp_cfg:
enable: True
level: O1
scaler:
init_loss_scaling: 1024.0
custom_black_list: ['matmul_v2', 'elementwise_mul']
train_dataset:
transforms:
- type: LoadImage
reader: pillow
to_chw: False
- type: Gt2SmokeTarget
mode: train
num_classes: 3
input_size: [1280, 384]
- type: Normalize
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
val_dataset:
transforms:
- type: LoadImage
reader: pillow
to_chw: False
- type: Gt2SmokeTarget
mode: val
num_classes: 3
input_size: [1280, 384]
- type: Normalize
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
optimizer:
type: Adam
lr_scheduler:
type: MultiStepDecay
milestones: [36000, 55000]
learning_rate: 1.25e-4
model:
type: SMOKE
backbone:
type: DLA34
# This will automatically save to ~/.paddle3d/pretrained/dla34/dla34.pdparams
pretrained: "https://bj.bcebos.com/paddle3d/pretrained/dla34.pdparams"
head:
type: SMOKEPredictor
num_classes: 3
reg_channels: [1, 2, 3, 2, 2]
num_chanels: 256
norm_type: "gn"
in_channels: 64
depth_ref: [28.01, 16.32]
# dim_ref is the reference size mentioned in the paper, the order here is [l, h, w]
dim_ref: [[3.88, 1.63, 1.53], [1.78, 1.70, 0.58], [0.88, 1.73, 0.67]]
max_detection: 50
pred_2d: True
export:
transforms:
- type: LoadImage
reader: pillow
to_chw: False
to_rgb: True
- type: Normalize
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/quant/centerpoint_kitti.yml
|
slim_type: QAT
quant_config:
weight_quantize_type: channel_wise_abs_max
activation_quantize_type: moving_average_abs_max
weight_bits: 8
activation_bits: 8
dtype: int8
window_size: 10000
moving_rate: 0.9
quantizable_layer_type: ['Conv2D', 'Linear']
finetune_config:
epochs: 80
lr_scheduler:
type: OneCycleWarmupDecayLr
base_learning_rate: 0.001
lr_ratio_peak: 10
lr_ratio_trough: 0.0001
step_ratio_peak: 0.4
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/quant/smoke_kitti.yml
|
slim_type: QAT
quant_config:
weight_quantize_type: channel_wise_abs_max
activation_quantize_type: moving_average_abs_max
weight_bits: 8
activation_bits: 8
dtype: int8
window_size: 10000
moving_rate: 0.9
quantizable_layer_type: ['Conv2D', 'Linear']
finetune_config:
iters: 40000
lr_scheduler:
type: MultiStepDecay
milestones: [32000, 36000]
learning_rate: 1.25e-4
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/petr/petrv2_vovnet_gridmask_p4_800x320_cos_epoch.yml
|
batch_size: 1
epochs: 24
train_dataset:
type: NuscenesMVDataset
dataset_root: data/nuscenes/
ann_file: data/nuscenes/petr_nuscenes_annotation_train.pkl
mode: train
class_names: [
'car', 'truck', 'construction_vehicle', 'bus', 'trailer',
'barrier', 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone'
]
transforms:
- type: LoadMultiViewImageFromFiles
to_float32: True
- type: LoadMultiViewImageFromMultiSweepsFiles
sweeps_num: 1
to_float32: True
pad_empty_sweeps: True
sweep_range: [3, 27]
test_mode: False
- type: LoadAnnotations3D
with_bbox_3d: True
with_label_3d: True
- type: SampleRangeFilter
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
- type: SampleNameFilter
classes: [
'car', 'truck', 'construction_vehicle', 'bus', 'trailer',
'barrier', 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone'
]
- type: ResizeCropFlipImage
sample_aug_cfg:
resize_lim: [0.47, 0.625]
final_dim: [320, 800]
bot_pct_lim: [0.0, 0.0]
rot_lim: [0.0, 0.0]
H: 900
W: 1600
rand_flip: True
training: True
- type: GlobalRotScaleTransImage
rot_range: [-0.3925, 0.3925]
translation_std: [0, 0, 0]
scale_ratio_range: [0.95, 1.05]
reverse_angle: True
training: True
- type: NormalizeMultiviewImage
mean: [103.530, 116.280, 123.675]
std: [57.375, 57.120, 58.395]
- type: PadMultiViewImage
size_divisor: 32
- type: SampleFilerByKey
keys: ['gt_bboxes_3d', 'gt_labels_3d', 'img']
meta_keys: ['filename', 'ori_shape', 'img_shape', 'lidar2img',
'intrinsics', 'extrinsics', 'pad_shape',
'scale_factor', 'flip', 'box_type_3d', 'img_norm_cfg', 'sample_idx',
'timestamp']
val_dataset:
type: NuscenesMVDataset
dataset_root: data/nuscenes/
ann_file: data/nuscenes/petr_nuscenes_annotation_val.pkl
mode: val
class_names: ['car', 'truck', 'construction_vehicle', 'bus', 'trailer',
'barrier', 'motorcycle', 'bicycle', 'pedestrian',
'traffic_cone']
transforms:
- type: LoadMultiViewImageFromFiles
to_float32: True
- type: LoadMultiViewImageFromMultiSweepsFiles
sweeps_num: 1
to_float32: True
pad_empty_sweeps: True
sweep_range: [3, 27]
- type: ResizeCropFlipImage
sample_aug_cfg:
resize_lim: [0.47, 0.625]
final_dim: [320, 800]
bot_pct_lim: [0.0, 0.0]
rot_lim: [0.0, 0.0]
H: 900
W: 1600
rand_flip: True
training: False
- type: NormalizeMultiviewImage
mean: [103.530, 116.280, 123.675]
std: [57.375, 57.120, 58.395]
- type: PadMultiViewImage
size_divisor: 32
- type: SampleFilerByKey
keys: ['img']
meta_keys: ['filename', 'ori_shape', 'img_shape', 'lidar2img',
'intrinsics', 'extrinsics', 'pad_shape',
'scale_factor', 'flip', 'box_type_3d', 'img_norm_cfg', 'sample_idx',
'timestamp']
optimizer:
type: AdamW
weight_decay: 0.01
grad_clip:
type: ClipGradByGlobalNorm
clip_norm: 35
# auto_skip_clip: True
lr_scheduler:
type: LinearWarmup
learning_rate:
# type: CosineAnnealingDecay
type: CosineAnnealingDecayByEpoch
learning_rate: 0.0002
# T_max: 84408 # 3517 * 24
T_max: 24
eta_min: 0.0000002
warmup_steps: 500
start_lr: 0.00006666666
end_lr: 0.0002
model:
type: Petr3D
use_recompute: True
use_grid_mask: True
backbone:
type: VoVNetCP ###use checkpoint to save memory
spec_name: V-99-eSE
norm_eval: True
frozen_stages: -1
input_ch: 3
out_features: ('stage4','stage5',)
neck:
type: CPFPN ###remove unused parameters
in_channels: [768, 1024]
out_channels: 256
num_outs: 2
pts_bbox_head:
type: PETRHead
num_classes: 10
in_channels: 256
num_query: 900
LID: true
with_multiview: true
with_position: true
with_fpe: true
with_time: true
with_multi: true
position_range: [-61.2, -61.2, -10.0, 61.2, 61.2, 10.0]
code_weights: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
normedlinear: False
transformer:
type: PETRTransformer
decoder_embed_dims: 256
decoder:
type: PETRTransformerDecoder
return_intermediate: True
num_layers: 6
transformerlayers:
type: PETRTransformerDecoderLayer
attns:
- type: MultiHeadAttention
embed_dims: 256
num_heads: 8
attn_drop: 0.1
drop_prob: 0.1
- type: PETRMultiheadAttention
embed_dims: 256
num_heads: 8
attn_drop: 0.1
drop_prob: 0.1
batch_first: True
feedforward_channels: 2048
ffn_dropout: 0.1
operation_order: ['self_attn', 'norm', 'cross_attn', 'norm', 'ffn', 'norm']
positional_encoding:
type: SinePositionalEncoding3D
num_feats: 128
normalize: True
bbox_coder:
type: NMSFreeCoder
post_center_range: [-61.2, -61.2, -10.0, 61.2, 61.2, 10.0]
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
max_num: 300
voxel_size: [0.2, 0.2, 8]
num_classes: 10
loss_cls:
type: WeightedFocalLoss
gamma: 2.0
alpha: 0.25
loss_weight: 2.0
reduction: sum
loss_bbox:
type: WeightedL1Loss
loss_weight: 0.25
reduction: sum
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/petr/petrv2_vovnet_gridmask_p4_800x320_dn_amp.yml
|
batch_size: 1
epochs: 24
amp_cfg:
# only enable backbone and fpn
enable: False
level: O1
scaler:
init_loss_scaling: 512.0
train_dataset:
type: NuscenesMVDataset
dataset_root: data/nuscenes/
ann_file: data/nuscenes/petr_nuscenes_annotation_train.pkl
mode: train
use_valid_flag: True
class_names: [
'car', 'truck', 'construction_vehicle', 'bus', 'trailer',
'barrier', 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone'
]
transforms:
- type: LoadMultiViewImageFromFiles
to_float32: True
- type: LoadMultiViewImageFromMultiSweepsFiles
sweeps_num: 1
to_float32: True
pad_empty_sweeps: True
sweep_range: [3, 27]
test_mode: False
- type: LoadAnnotations3D
with_bbox_3d: True
with_label_3d: True
- type: SampleRangeFilter
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
- type: SampleNameFilter
classes: [
'car', 'truck', 'construction_vehicle', 'bus', 'trailer',
'barrier', 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone'
]
- type: ResizeCropFlipImage
sample_aug_cfg:
resize_lim: [0.47, 0.625]
final_dim: [320, 800]
bot_pct_lim: [0.0, 0.0]
rot_lim: [0.0, 0.0]
H: 900
W: 1600
rand_flip: True
training: True
- type: GlobalRotScaleTransImage
rot_range: [-0.3925, 0.3925]
translation_std: [0, 0, 0]
scale_ratio_range: [0.95, 1.05]
reverse_angle: True
training: True
- type: NormalizeMultiviewImage
mean: [103.530, 116.280, 123.675]
std: [57.375, 57.120, 58.395]
- type: PadMultiViewImage
size_divisor: 32
- type: SampleFilerByKey
keys: ['gt_bboxes_3d', 'gt_labels_3d', 'img']
meta_keys: ['filename', 'ori_shape', 'img_shape', 'lidar2img',
'intrinsics', 'extrinsics', 'pad_shape',
'scale_factor', 'flip', 'box_type_3d', 'img_norm_cfg', 'sample_idx',
'timestamp', 'gt_bboxes_3d','gt_labels_3d']
val_dataset:
type: NuscenesMVDataset
dataset_root: data/nuscenes/
ann_file: data/nuscenes/petr_nuscenes_annotation_val.pkl
mode: val
class_names: ['car', 'truck', 'construction_vehicle', 'bus', 'trailer',
'barrier', 'motorcycle', 'bicycle', 'pedestrian',
'traffic_cone']
transforms:
- type: LoadMultiViewImageFromFiles
to_float32: True
- type: LoadMultiViewImageFromMultiSweepsFiles
sweeps_num: 1
to_float32: True
pad_empty_sweeps: True
sweep_range: [3, 27]
- type: ResizeCropFlipImage
sample_aug_cfg:
resize_lim: [0.47, 0.625]
final_dim: [320, 800]
bot_pct_lim: [0.0, 0.0]
rot_lim: [0.0, 0.0]
H: 900
W: 1600
rand_flip: True
training: False
- type: NormalizeMultiviewImage
mean: [103.530, 116.280, 123.675]
std: [57.375, 57.120, 58.395]
- type: PadMultiViewImage
size_divisor: 32
- type: SampleFilerByKey
keys: ['img']
meta_keys: ['filename', 'ori_shape', 'img_shape', 'lidar2img',
'intrinsics', 'extrinsics', 'pad_shape',
'scale_factor', 'flip', 'box_type_3d', 'img_norm_cfg', 'sample_idx',
'timestamp']
optimizer:
type: AdamW
weight_decay: 0.01
grad_clip:
type: ClipGradByGlobalNorm
clip_norm: 35
# auto_skip_clip: True
lr_scheduler:
type: LinearWarmup
learning_rate:
type: CosineAnnealingDecay
learning_rate: 0.0002
T_max: 84408 # 3517 * 24
eta_min: 0.0000002
warmup_steps: 500
start_lr: 0.00006666666
end_lr: 0.0002
model:
type: Petr3D
use_recompute: True
use_grid_mask: True
backbone:
type: VoVNetCP ###use checkpoint to save memory
spec_name: V-99-eSE
norm_eval: True
frozen_stages: -1
input_ch: 3
out_features: ('stage4','stage5',)
neck:
type: CPFPN ###remove unused parameters
in_channels: [768, 1024]
out_channels: 256
num_outs: 2
pts_bbox_head:
type: PETRHead
num_classes: 10
in_channels: 256
num_query: 900
LID: true
with_multiview: true
with_position: true
with_fpe: true
with_time: true
with_multi: true
with_denoise: true
scalar: 10 ##noise groups
noise_scale: 1.0
dn_weight: 1.0 ##dn loss weight
split: 0.75 ###positive rate
position_range: [-61.2, -61.2, -10.0, 61.2, 61.2, 10.0]
code_weights: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
normedlinear: False
transformer:
type: PETRDNTransformer
embed_dims: 256
decoder:
type: PETRTransformerDecoder
return_intermediate: True
num_layers: 6
transformerlayers:
type: PETRTransformerDecoderLayer
attns:
- type: MultiHeadAttention
embed_dims: 256
num_heads: 8
attn_drop: 0.1
drop_prob: 0.1
- type: PETRMultiheadAttention
embed_dims: 256
num_heads: 8
attn_drop: 0.1
drop_prob: 0.1
batch_first: True
feedforward_channels: 2048
ffn_dropout: 0.1
operation_order: ['self_attn', 'norm', 'cross_attn', 'norm', 'ffn', 'norm']
positional_encoding:
type: SinePositionalEncoding3D
num_feats: 128
normalize: True
bbox_coder:
type: NMSFreeCoder
post_center_range: [-61.2, -61.2, -10.0, 61.2, 61.2, 10.0]
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
max_num: 300
voxel_size: [0.2, 0.2, 8]
num_classes: 10
loss_cls:
type: WeightedFocalLoss
gamma: 2.0
alpha: 0.25
loss_weight: 2.0
reduction: sum
loss_bbox:
type: WeightedL1Loss
loss_weight: 0.25
reduction: sum
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/petr/petr_vovnet_gridmask_p4_800x320_amp.yml
|
batch_size: 1
epochs: 24
amp_cfg:
# only enable backbone and fpn
enable: False
level: O1
scaler:
init_loss_scaling: 512.0
train_dataset:
type: NuscenesMVDataset
dataset_root: data/nuscenes/
ann_file: data/nuscenes/petr_nuscenes_annotation_train.pkl
mode: train
class_names: [
'car', 'truck', 'construction_vehicle', 'bus', 'trailer',
'barrier', 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone'
]
transforms:
- type: LoadMultiViewImageFromFiles
to_float32: True
- type: LoadAnnotations3D
with_bbox_3d: True
with_label_3d: True
- type: SampleRangeFilter
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
- type: SampleNameFilter
classes: [
'car', 'truck', 'construction_vehicle', 'bus', 'trailer',
'barrier', 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone'
]
- type: ResizeCropFlipImage
sample_aug_cfg:
resize_lim: [0.47, 0.625]
final_dim: [320, 800]
bot_pct_lim: [0.0, 0.0]
rot_lim: [0.0, 0.0]
H: 900
W: 1600
rand_flip: True
training: True
- type: GlobalRotScaleTransImage
rot_range: [-0.3925, 0.3925]
translation_std: [0, 0, 0]
scale_ratio_range: [0.95, 1.05]
reverse_angle: True
training: True
- type: NormalizeMultiviewImage
mean: [103.530, 116.280, 123.675]
std: [57.375, 57.120, 58.395]
- type: PadMultiViewImage
size_divisor: 32
- type: SampleFilerByKey
keys: ['gt_bboxes_3d', 'gt_labels_3d', 'img']
val_dataset:
type: NuscenesMVDataset
dataset_root: data/nuscenes/
ann_file: data/nuscenes/petr_nuscenes_annotation_val.pkl
mode: val
class_names: ['car', 'truck', 'construction_vehicle', 'bus', 'trailer',
'barrier', 'motorcycle', 'bicycle', 'pedestrian',
'traffic_cone']
transforms:
- type: LoadMultiViewImageFromFiles
to_float32: True
- type: ResizeCropFlipImage
sample_aug_cfg:
resize_lim: [0.47, 0.625]
final_dim: [320, 800]
bot_pct_lim: [0.0, 0.0]
rot_lim: [0.0, 0.0]
H: 900
W: 1600
rand_flip: True
training: False
- type: NormalizeMultiviewImage
mean: [103.530, 116.280, 123.675]
std: [57.375, 57.120, 58.395]
- type: PadMultiViewImage
size_divisor: 32
- type: SampleFilerByKey
keys: ['img']
model:
type: Petr3D
use_recompute: True
use_grid_mask: True
backbone:
# use recompute to save memory
type: VoVNetCP
spec_name: V-99-eSE
norm_eval: True
frozen_stages: -1
input_ch: 3
out_features: ('stage4','stage5',)
neck:
# remove unused parameters
type: CPFPN
in_channels: [768, 1024]
out_channels: 256
num_outs: 2
pts_bbox_head:
type: PETRHead
num_classes: 10
in_channels: 256
num_query: 900
LID: true
with_multiview: true
position_range: [-61.2, -61.2, -10.0, 61.2, 61.2, 10.0]
normedlinear: False
transformer:
type: PETRTransformer
decoder_embed_dims: 256
decoder:
type: PETRTransformerDecoder
return_intermediate: True
num_layers: 6
transformerlayers:
type: PETRTransformerDecoderLayer
attns:
- type: MultiHeadAttention
embed_dims: 256
num_heads: 8
attn_drop: 0.1
drop_prob: 0.1
- type: PETRMultiheadAttention
embed_dims: 256
num_heads: 8
attn_drop: 0.1
drop_prob: 0.1
batch_first: True
feedforward_channels: 2048
ffn_dropout: 0.1
operation_order: ['self_attn', 'norm', 'cross_attn', 'norm', 'ffn', 'norm']
positional_encoding:
type: SinePositionalEncoding3D
num_feats: 128
normalize: True
bbox_coder:
type: NMSFreeCoder
post_center_range: [-61.2, -61.2, -10.0, 61.2, 61.2, 10.0]
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
max_num: 300
voxel_size: [0.2, 0.2, 8]
num_classes: 10
loss_cls:
type: WeightedFocalLoss
gamma: 2.0
alpha: 0.25
loss_weight: 2.0
reduction: sum
loss_bbox:
type: WeightedL1Loss
loss_weight: 0.25
reduction: sum
optimizer:
type: AdamW
weight_decay: 0.01
grad_clip:
type: ClipGradByGlobalNorm
clip_norm: 35
lr_scheduler:
type: LinearWarmup
learning_rate:
type: CosineAnnealingDecay
learning_rate: 0.0002
    T_max: 84408 # 3517 * 24
eta_min: 0.0000002
warmup_steps: 500
start_lr: 0.0
end_lr: 0.0002
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/petr/petrv2_vovnet_gridmask_p4_800x320.yml
|
batch_size: 1
epochs: 24
train_dataset:
type: NuscenesMVDataset
dataset_root: data/nuscenes/
ann_file: data/nuscenes/petr_nuscenes_annotation_train.pkl
mode: train
use_valid_flag: True
class_names: [
'car', 'truck', 'construction_vehicle', 'bus', 'trailer',
'barrier', 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone'
]
transforms:
- type: LoadMultiViewImageFromFiles
to_float32: True
- type: LoadMultiViewImageFromMultiSweepsFiles
sweeps_num: 1
to_float32: True
pad_empty_sweeps: True
sweep_range: [3, 27]
test_mode: False
- type: LoadAnnotations3D
with_bbox_3d: True
with_label_3d: True
- type: SampleRangeFilter
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
- type: SampleNameFilter
classes: [
'car', 'truck', 'construction_vehicle', 'bus', 'trailer',
'barrier', 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone'
]
- type: ResizeCropFlipImage
sample_aug_cfg:
resize_lim: [0.47, 0.625]
final_dim: [320, 800]
bot_pct_lim: [0.0, 0.0]
rot_lim: [0.0, 0.0]
H: 900
W: 1600
rand_flip: True
training: True
- type: GlobalRotScaleTransImage
rot_range: [-0.3925, 0.3925]
translation_std: [0, 0, 0]
scale_ratio_range: [0.95, 1.05]
reverse_angle: True
training: True
- type: NormalizeMultiviewImage
mean: [103.530, 116.280, 123.675]
std: [57.375, 57.120, 58.395]
- type: PadMultiViewImage
size_divisor: 32
- type: SampleFilerByKey
keys: ['gt_bboxes_3d', 'gt_labels_3d', 'img']
meta_keys: ['filename', 'ori_shape', 'img_shape', 'lidar2img',
'intrinsics', 'extrinsics', 'pad_shape',
'scale_factor', 'flip', 'box_type_3d', 'img_norm_cfg', 'sample_idx',
'timestamp']
val_dataset:
type: NuscenesMVDataset
dataset_root: data/nuscenes/
ann_file: data/nuscenes/petr_nuscenes_annotation_val.pkl
mode: val
class_names: ['car', 'truck', 'construction_vehicle', 'bus', 'trailer',
'barrier', 'motorcycle', 'bicycle', 'pedestrian',
'traffic_cone']
transforms:
- type: LoadMultiViewImageFromFiles
to_float32: True
- type: LoadMultiViewImageFromMultiSweepsFiles
sweeps_num: 1
to_float32: True
pad_empty_sweeps: True
sweep_range: [3, 27]
- type: ResizeCropFlipImage
sample_aug_cfg:
resize_lim: [0.47, 0.625]
final_dim: [320, 800]
bot_pct_lim: [0.0, 0.0]
rot_lim: [0.0, 0.0]
H: 900
W: 1600
rand_flip: True
training: False
- type: NormalizeMultiviewImage
mean: [103.530, 116.280, 123.675]
std: [57.375, 57.120, 58.395]
- type: PadMultiViewImage
size_divisor: 32
- type: SampleFilerByKey
keys: ['img']
meta_keys: ['filename', 'ori_shape', 'img_shape', 'lidar2img',
'intrinsics', 'extrinsics', 'pad_shape',
'scale_factor', 'flip', 'box_type_3d', 'img_norm_cfg', 'sample_idx',
'timestamp']
optimizer:
type: AdamW
weight_decay: 0.01
grad_clip:
type: ClipGradByGlobalNorm
clip_norm: 35
# auto_skip_clip: True
lr_scheduler:
type: LinearWarmup
learning_rate:
type: CosineAnnealingDecay
learning_rate: 0.0002
T_max: 84408 # 3517 * 24
eta_min: 0.0000002
warmup_steps: 500
start_lr: 0.00006666666
end_lr: 0.0002
model:
type: Petr3D
use_recompute: True
use_grid_mask: True
backbone:
type: VoVNetCP ###use checkpoint to save memory
spec_name: V-99-eSE
norm_eval: True
frozen_stages: -1
input_ch: 3
out_features: ('stage4','stage5',)
neck:
type: CPFPN ###remove unused parameters
in_channels: [768, 1024]
out_channels: 256
num_outs: 2
pts_bbox_head:
type: PETRHead
num_classes: 10
in_channels: 256
num_query: 900
LID: true
with_multiview: true
with_position: true
with_fpe: true
with_time: true
with_multi: true
position_range: [-61.2, -61.2, -10.0, 61.2, 61.2, 10.0]
code_weights: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
normedlinear: False
transformer:
type: PETRTransformer
decoder_embed_dims: 256
decoder:
type: PETRTransformerDecoder
return_intermediate: True
num_layers: 6
transformerlayers:
type: PETRTransformerDecoderLayer
attns:
- type: MultiHeadAttention
embed_dims: 256
num_heads: 8
attn_drop: 0.1
drop_prob: 0.1
- type: PETRMultiheadAttention
embed_dims: 256
num_heads: 8
attn_drop: 0.1
drop_prob: 0.1
batch_first: True
feedforward_channels: 2048
ffn_dropout: 0.1
operation_order: ['self_attn', 'norm', 'cross_attn', 'norm', 'ffn', 'norm']
positional_encoding:
type: SinePositionalEncoding3D
num_feats: 128
normalize: True
bbox_coder:
type: NMSFreeCoder
post_center_range: [-61.2, -61.2, -10.0, 61.2, 61.2, 10.0]
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
max_num: 300
voxel_size: [0.2, 0.2, 8]
num_classes: 10
loss_cls:
type: WeightedFocalLoss
gamma: 2.0
alpha: 0.25
loss_weight: 2.0
reduction: sum
loss_bbox:
type: WeightedL1Loss
loss_weight: 0.25
reduction: sum
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/petr/petrv2_vovnet_gridmask_p4_1600x640_dn_multiscale_amp.yml
|
batch_size: 1
epochs: 24
amp_cfg:
# only enable backbone and fpn
enable: False
level: O1
scaler:
init_loss_scaling: 512.0
train_dataset:
type: NuscenesMVDataset
dataset_root: data/nuscenes/
ann_file: data/nuscenes/petr_nuscenes_annotation_train.pkl
mode: train
use_valid_flag: True
class_names: [
'car', 'truck', 'construction_vehicle', 'bus', 'trailer',
'barrier', 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone'
]
transforms:
- type: LoadMultiViewImageFromFiles
to_float32: True
- type: LoadMultiViewImageFromMultiSweepsFiles
sweeps_num: 1
to_float32: True
pad_empty_sweeps: True
sweep_range: [3, 27]
test_mode: False
- type: LoadAnnotations3D
with_bbox_3d: True
with_label_3d: True
- type: SampleRangeFilter
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
- type: SampleNameFilter
classes: [
'car', 'truck', 'construction_vehicle', 'bus', 'trailer',
'barrier', 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone'
]
- type: ResizeCropFlipImage
sample_aug_cfg:
resize_lim: [0.94, 1.25]
final_dim: [640, 1600]
bot_pct_lim: [0.0, 0.0]
rot_lim: [0.0, 0.0]
H: 900
W: 1600
rand_flip: True
training: True
- type: GlobalRotScaleTransImage
rot_range: [-0.3925, 0.3925]
translation_std: [0, 0, 0]
scale_ratio_range: [0.95, 1.05]
reverse_angle: True
training: True
- type: NormalizeMultiviewImage
mean: [103.530, 116.280, 123.675]
std: [57.375, 57.120, 58.395]
- type: PadMultiViewImage
size_divisor: 32
- type: SampleFilerByKey
keys: ['gt_bboxes_3d', 'gt_labels_3d', 'img']
meta_keys: ['filename', 'ori_shape', 'img_shape', 'lidar2img',
'intrinsics', 'extrinsics', 'pad_shape',
'scale_factor', 'flip', 'box_type_3d', 'img_norm_cfg', 'sample_idx',
'timestamp', 'gt_bboxes_3d','gt_labels_3d']
val_dataset:
type: NuscenesMVDataset
dataset_root: data/nuscenes/
ann_file: data/nuscenes/petr_nuscenes_annotation_val.pkl
mode: val
class_names: ['car', 'truck', 'construction_vehicle', 'bus', 'trailer',
'barrier', 'motorcycle', 'bicycle', 'pedestrian',
'traffic_cone']
transforms:
- type: LoadMultiViewImageFromFiles
to_float32: True
- type: LoadMultiViewImageFromMultiSweepsFiles
sweeps_num: 1
to_float32: True
pad_empty_sweeps: True
sweep_range: [3, 27]
- type: ResizeCropFlipImage
sample_aug_cfg:
resize_lim: [0.94, 1.25]
final_dim: [640, 1600]
bot_pct_lim: [0.0, 0.0]
rot_lim: [0.0, 0.0]
H: 900
W: 1600
rand_flip: True
training: False
- type: NormalizeMultiviewImage
mean: [103.530, 116.280, 123.675]
std: [57.375, 57.120, 58.395]
- type: PadMultiViewImage
size_divisor: 32
- type: SampleFilerByKey
keys: ['img']
meta_keys: ['filename', 'ori_shape', 'img_shape', 'lidar2img',
'intrinsics', 'extrinsics', 'pad_shape',
'scale_factor', 'flip', 'box_type_3d', 'img_norm_cfg', 'sample_idx',
'timestamp']
optimizer:
type: AdamW
weight_decay: 0.01
grad_clip:
type: ClipGradByGlobalNorm
clip_norm: 35
# auto_skip_clip: True
lr_scheduler:
type: LinearWarmup
learning_rate:
type: CosineAnnealingDecay
learning_rate: 0.0002
T_max: 84408 # 3517 * 24
eta_min: 0.0000002
warmup_steps: 500
start_lr: 0.00006666666
end_lr: 0.0002
model:
type: Petr3D
use_recompute: True
use_grid_mask: True
us_ms: True
multi_scale: [0.5, 1.0]
backbone:
type: VoVNet ###can't use checkpoint here
spec_name: V-99-eSE
norm_eval: True
frozen_stages: -1
input_ch: 3
out_features: ('stage4','stage5',)
neck:
type: CPFPN ###remove unused parameters
in_channels: [768, 1024]
out_channels: 256
num_outs: 2
pts_bbox_head:
type: PETRHead
num_classes: 10
in_channels: 512 ###multi scale features concat
num_query: 900
LID: true
with_multiview: true
with_position: true
with_fpe: true
with_time: true
with_multi: true
with_denoise: true
scalar: 10 ##noise groups
noise_scale: 1.0
dn_weight: 1.0 ##dn loss weight
split: 0.75 ###positive rate
position_range: [-61.2, -61.2, -10.0, 61.2, 61.2, 10.0]
code_weights: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
normedlinear: False
transformer:
type: PETRDNTransformer
embed_dims: 256
decoder:
type: PETRTransformerDecoder
return_intermediate: True
num_layers: 6
transformerlayers:
type: PETRTransformerDecoderLayer
attns:
- type: MultiHeadAttention
embed_dims: 256
num_heads: 8
attn_drop: 0.1
drop_prob: 0.1
- type: PETRMultiheadAttention
embed_dims: 256
num_heads: 8
attn_drop: 0.1
drop_prob: 0.1
batch_first: True
feedforward_channels: 2048
ffn_dropout: 0.1
operation_order: ['self_attn', 'norm', 'cross_attn', 'norm', 'ffn', 'norm']
positional_encoding:
type: SinePositionalEncoding3D
num_feats: 128
normalize: True
bbox_coder:
type: NMSFreeCoder
post_center_range: [-61.2, -61.2, -10.0, 61.2, 61.2, 10.0]
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
max_num: 300
voxel_size: [0.2, 0.2, 8]
num_classes: 10
loss_cls:
type: WeightedFocalLoss
gamma: 2.0
alpha: 0.25
loss_weight: 2.0
reduction: sum
loss_bbox:
type: WeightedL1Loss
loss_weight: 0.25
reduction: sum
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/petr/petrv2_vovnet_gridmask_p4_800x320_dn_centerview_amp.yml
|
batch_size: 1
epochs: 24
amp_cfg:
# only enable backbone and fpn
enable: False
level: O1
scaler:
init_loss_scaling: 512.0
train_dataset:
type: NuscenesMVDataset
dataset_root: data/nuscenes/
ann_file: data/nuscenes/petr_nuscenes_annotation_train.pkl
mode: train
use_valid_flag: True
class_names: [
'car', 'truck', 'construction_vehicle', 'bus', 'trailer',
'barrier', 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone'
]
transforms:
- type: LoadMultiViewImageFromFiles
to_float32: True
- type: LoadMultiViewImageFromMultiSweepsFiles
sweeps_num: 1
to_float32: True
pad_empty_sweeps: True
sweep_range: [3, 27]
test_mode: False
- type: LoadAnnotations3D
with_bbox_3d: True
with_label_3d: True
- type: SampleRangeFilter
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
- type: SampleNameFilter
classes: [
'car', 'truck', 'construction_vehicle', 'bus', 'trailer',
'barrier', 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone'
]
- type: MSResizeCropFlipImage
sample_aug_cfg:
resize_lim: [0.47, 0.625]
final_dim: [320, 800]
bot_pct_lim: [0.0, 0.0]
rot_lim: [0.0, 0.0]
H: 900
W: 1600
rand_flip: True
training: True
view_num: 12
- type: GlobalRotScaleTransImage
rot_range: [-0.3925, 0.3925]
translation_std: [0, 0, 0]
scale_ratio_range: [0.95, 1.05]
reverse_angle: True
training: True
- type: NormalizeMultiviewImage
mean: [103.530, 116.280, 123.675]
std: [57.375, 57.120, 58.395]
- type: PadMultiViewImage
size_divisor: 32
- type: SampleFilerByKey
keys: ['gt_bboxes_3d', 'gt_labels_3d', 'img']
meta_keys: ['filename', 'ori_shape', 'img_shape', 'lidar2img',
'intrinsics', 'extrinsics', 'pad_shape',
'scale_factor', 'flip', 'box_type_3d', 'img_norm_cfg', 'sample_idx',
'timestamp', 'gt_bboxes_3d','gt_labels_3d']
val_dataset:
type: NuscenesMVDataset
dataset_root: data/nuscenes/
ann_file: data/nuscenes/petr_nuscenes_annotation_val.pkl
mode: val
class_names: ['car', 'truck', 'construction_vehicle', 'bus', 'trailer',
'barrier', 'motorcycle', 'bicycle', 'pedestrian',
'traffic_cone']
transforms:
- type: LoadMultiViewImageFromFiles
to_float32: True
- type: LoadMultiViewImageFromMultiSweepsFiles
sweeps_num: 1
to_float32: True
pad_empty_sweeps: True
sweep_range: [3, 27]
- type: MSResizeCropFlipImage
sample_aug_cfg:
resize_lim: [0.47, 0.625]
final_dim: [320, 800]
bot_pct_lim: [0.0, 0.0]
rot_lim: [0.0, 0.0]
H: 900
W: 1600
rand_flip: True
training: False
view_num: 12
- type: NormalizeMultiviewImage
mean: [103.530, 116.280, 123.675]
std: [57.375, 57.120, 58.395]
- type: PadMultiViewImage
size_divisor: 32
- type: SampleFilerByKey
keys: ['img']
meta_keys: ['filename', 'ori_shape', 'img_shape', 'lidar2img',
'intrinsics', 'extrinsics', 'pad_shape',
'scale_factor', 'flip', 'box_type_3d', 'img_norm_cfg', 'sample_idx',
'timestamp']
optimizer:
type: AdamW
weight_decay: 0.01
grad_clip:
type: ClipGradByGlobalNorm
clip_norm: 35
# auto_skip_clip: True
lr_scheduler:
type: LinearWarmup
learning_rate:
type: CosineAnnealingDecay
learning_rate: 0.0002
T_max: 84408 # 3517 * 24
eta_min: 0.0000002
warmup_steps: 500
start_lr: 0.00006666666
end_lr: 0.0002
model:
type: Petr3D
use_recompute: True
use_grid_mask: True
backbone:
type: VoVNetCP ###use checkpoint to save memory
spec_name: V-99-eSE
norm_eval: True
frozen_stages: -1
input_ch: 3
out_features: ('stage4','stage5',)
neck:
type: CPFPN ###remove unused parameters
in_channels: [768, 1024]
out_channels: 256
num_outs: 2
pts_bbox_head:
type: PETRHead
num_classes: 10
in_channels: 256
num_query: 900
LID: true
with_multiview: true
with_position: true
with_fpe: true
with_time: true
with_multi: true
with_denoise: true
scalar: 10 ##noise groups
noise_scale: 1.0
dn_weight: 1.0 ##dn loss weight
split: 0.75 ###positive rate
position_range: [-61.2, -61.2, -10.0, 61.2, 61.2, 10.0]
code_weights: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
normedlinear: False
transformer:
type: PETRDNTransformer
embed_dims: 256
decoder:
type: PETRTransformerDecoder
return_intermediate: True
num_layers: 6
transformerlayers:
type: PETRTransformerDecoderLayer
attns:
- type: MultiHeadAttention
embed_dims: 256
num_heads: 8
attn_drop: 0.1
drop_prob: 0.1
- type: PETRMultiheadAttention
embed_dims: 256
num_heads: 8
attn_drop: 0.1
drop_prob: 0.1
batch_first: True
feedforward_channels: 2048
ffn_dropout: 0.1
operation_order: ['self_attn', 'norm', 'cross_attn', 'norm', 'ffn', 'norm']
positional_encoding:
type: SinePositionalEncoding3D
num_feats: 128
normalize: True
bbox_coder:
type: NMSFreeCoder
post_center_range: [-61.2, -61.2, -10.0, 61.2, 61.2, 10.0]
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
max_num: 300
voxel_size: [0.2, 0.2, 8]
num_classes: 10
loss_cls:
type: WeightedFocalLoss
gamma: 2.0
alpha: 0.25
loss_weight: 2.0
reduction: sum
loss_bbox:
type: WeightedL1Loss
loss_weight: 0.25
reduction: sum
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/petr/petr_vovnet_gridmask_p4_800x320.yml
|
batch_size: 1
epochs: 24
train_dataset:
type: NuscenesMVDataset
dataset_root: data/nuscenes/
ann_file: data/nuscenes/petr_nuscenes_annotation_train.pkl
mode: train
class_names: [
'car', 'truck', 'construction_vehicle', 'bus', 'trailer',
'barrier', 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone'
]
transforms:
- type: LoadMultiViewImageFromFiles
to_float32: True
- type: LoadAnnotations3D
with_bbox_3d: True
with_label_3d: True
- type: SampleRangeFilter
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
- type: SampleNameFilter
classes: [
'car', 'truck', 'construction_vehicle', 'bus', 'trailer',
'barrier', 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone'
]
- type: ResizeCropFlipImage
sample_aug_cfg:
resize_lim: [0.47, 0.625]
final_dim: [320, 800]
bot_pct_lim: [0.0, 0.0]
rot_lim: [0.0, 0.0]
H: 900
W: 1600
rand_flip: True
training: True
- type: GlobalRotScaleTransImage
rot_range: [-0.3925, 0.3925]
translation_std: [0, 0, 0]
scale_ratio_range: [0.95, 1.05]
reverse_angle: True
training: True
- type: NormalizeMultiviewImage
mean: [103.530, 116.280, 123.675]
std: [57.375, 57.120, 58.395]
- type: PadMultiViewImage
size_divisor: 32
- type: SampleFilerByKey
keys: ['gt_bboxes_3d', 'gt_labels_3d', 'img']
val_dataset:
type: NuscenesMVDataset
dataset_root: data/nuscenes/
ann_file: data/nuscenes/petr_nuscenes_annotation_val.pkl
mode: val
class_names: ['car', 'truck', 'construction_vehicle', 'bus', 'trailer',
'barrier', 'motorcycle', 'bicycle', 'pedestrian',
'traffic_cone']
transforms:
- type: LoadMultiViewImageFromFiles
to_float32: True
- type: ResizeCropFlipImage
sample_aug_cfg:
resize_lim: [0.47, 0.625]
final_dim: [320, 800]
bot_pct_lim: [0.0, 0.0]
rot_lim: [0.0, 0.0]
H: 900
W: 1600
rand_flip: True
training: False
- type: NormalizeMultiviewImage
mean: [103.530, 116.280, 123.675]
std: [57.375, 57.120, 58.395]
- type: PadMultiViewImage
size_divisor: 32
- type: SampleFilerByKey
keys: ['img']
model:
type: Petr3D
use_recompute: True
use_grid_mask: True
backbone:
# use recompute to save memory
type: VoVNetCP
spec_name: V-99-eSE
norm_eval: True
frozen_stages: -1
input_ch: 3
out_features: ('stage4','stage5',)
neck:
# remove unused parameters
type: CPFPN
in_channels: [768, 1024]
out_channels: 256
num_outs: 2
pts_bbox_head:
type: PETRHead
num_classes: 10
in_channels: 256
num_query: 900
LID: true
with_multiview: true
position_range: [-61.2, -61.2, -10.0, 61.2, 61.2, 10.0]
normedlinear: False
transformer:
type: PETRTransformer
decoder_embed_dims: 256
decoder:
type: PETRTransformerDecoder
return_intermediate: True
num_layers: 6
transformerlayers:
type: PETRTransformerDecoderLayer
attns:
- type: MultiHeadAttention
embed_dims: 256
num_heads: 8
attn_drop: 0.1
drop_prob: 0.1
- type: PETRMultiheadAttention
embed_dims: 256
num_heads: 8
attn_drop: 0.1
drop_prob: 0.1
batch_first: True
feedforward_channels: 2048
ffn_dropout: 0.1
operation_order: ['self_attn', 'norm', 'cross_attn', 'norm', 'ffn', 'norm']
positional_encoding:
type: SinePositionalEncoding3D
num_feats: 128
normalize: True
bbox_coder:
type: NMSFreeCoder
post_center_range: [-61.2, -61.2, -10.0, 61.2, 61.2, 10.0]
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
max_num: 300
voxel_size: [0.2, 0.2, 8]
num_classes: 10
loss_cls:
type: WeightedFocalLoss
gamma: 2.0
alpha: 0.25
loss_weight: 2.0
reduction: sum
loss_bbox:
type: WeightedL1Loss
loss_weight: 0.25
reduction: sum
optimizer:
type: AdamW
weight_decay: 0.01
grad_clip:
type: ClipGradByGlobalNorm
clip_norm: 35
lr_scheduler:
type: LinearWarmup
learning_rate:
type: CosineAnnealingDecay
learning_rate: 0.0002
    T_max: 84408 # 3517 * 24
eta_min: 0.0000002
warmup_steps: 500
start_lr: 0.0
end_lr: 0.0002
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/petr/petrv2_vovnet_gridmask_p4_800x320_amp.yml
|
batch_size: 1
epochs: 24
amp_cfg:
# only enable backbone and fpn
enable: False
level: O1
scaler:
init_loss_scaling: 512.0
train_dataset:
type: NuscenesMVDataset
dataset_root: data/nuscenes/
ann_file: data/nuscenes/petr_nuscenes_annotation_train.pkl
mode: train
use_valid_flag: True
class_names: [
'car', 'truck', 'construction_vehicle', 'bus', 'trailer',
'barrier', 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone'
]
transforms:
- type: LoadMultiViewImageFromFiles
to_float32: True
- type: LoadMultiViewImageFromMultiSweepsFiles
sweeps_num: 1
to_float32: True
pad_empty_sweeps: True
sweep_range: [3, 27]
test_mode: False
- type: LoadAnnotations3D
with_bbox_3d: True
with_label_3d: True
- type: SampleRangeFilter
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
- type: SampleNameFilter
classes: [
'car', 'truck', 'construction_vehicle', 'bus', 'trailer',
'barrier', 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone'
]
- type: ResizeCropFlipImage
sample_aug_cfg:
resize_lim: [0.47, 0.625]
final_dim: [320, 800]
bot_pct_lim: [0.0, 0.0]
rot_lim: [0.0, 0.0]
H: 900
W: 1600
rand_flip: True
training: True
- type: GlobalRotScaleTransImage
rot_range: [-0.3925, 0.3925]
translation_std: [0, 0, 0]
scale_ratio_range: [0.95, 1.05]
reverse_angle: True
training: True
- type: NormalizeMultiviewImage
mean: [103.530, 116.280, 123.675]
std: [57.375, 57.120, 58.395]
- type: PadMultiViewImage
size_divisor: 32
- type: SampleFilerByKey
keys: ['gt_bboxes_3d', 'gt_labels_3d', 'img']
meta_keys: ['filename', 'ori_shape', 'img_shape', 'lidar2img',
'intrinsics', 'extrinsics', 'pad_shape',
'scale_factor', 'flip', 'box_type_3d', 'img_norm_cfg', 'sample_idx',
'timestamp']
val_dataset:
type: NuscenesMVDataset
dataset_root: data/nuscenes/
ann_file: data/nuscenes/petr_nuscenes_annotation_val.pkl
mode: val
class_names: ['car', 'truck', 'construction_vehicle', 'bus', 'trailer',
'barrier', 'motorcycle', 'bicycle', 'pedestrian',
'traffic_cone']
transforms:
- type: LoadMultiViewImageFromFiles
to_float32: True
- type: LoadMultiViewImageFromMultiSweepsFiles
sweeps_num: 1
to_float32: True
pad_empty_sweeps: True
sweep_range: [3, 27]
- type: ResizeCropFlipImage
sample_aug_cfg:
resize_lim: [0.47, 0.625]
final_dim: [320, 800]
bot_pct_lim: [0.0, 0.0]
rot_lim: [0.0, 0.0]
H: 900
W: 1600
rand_flip: True
training: False
- type: NormalizeMultiviewImage
mean: [103.530, 116.280, 123.675]
std: [57.375, 57.120, 58.395]
- type: PadMultiViewImage
size_divisor: 32
- type: SampleFilerByKey
keys: ['img']
meta_keys: ['filename', 'ori_shape', 'img_shape', 'lidar2img',
'intrinsics', 'extrinsics', 'pad_shape',
'scale_factor', 'flip', 'box_type_3d', 'img_norm_cfg', 'sample_idx',
'timestamp']
optimizer:
type: AdamW
weight_decay: 0.01
grad_clip:
type: ClipGradByGlobalNorm
clip_norm: 35
# auto_skip_clip: True
lr_scheduler:
type: LinearWarmup
learning_rate:
type: CosineAnnealingDecay
learning_rate: 0.0002
T_max: 84408 # 3517 * 24
eta_min: 0.0000002
warmup_steps: 500
start_lr: 0.00006666666
end_lr: 0.0002
model:
type: Petr3D
use_recompute: True
use_grid_mask: True
backbone:
type: VoVNetCP ###use checkpoint to save memory
spec_name: V-99-eSE
norm_eval: True
frozen_stages: -1
input_ch: 3
out_features: ('stage4','stage5',)
neck:
type: CPFPN ###remove unused parameters
in_channels: [768, 1024]
out_channels: 256
num_outs: 2
pts_bbox_head:
type: PETRHead
num_classes: 10
in_channels: 256
num_query: 900
LID: true
with_multiview: true
with_position: true
with_fpe: true
with_time: true
with_multi: true
position_range: [-61.2, -61.2, -10.0, 61.2, 61.2, 10.0]
code_weights: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
normedlinear: False
transformer:
type: PETRTransformer
decoder_embed_dims: 256
decoder:
type: PETRTransformerDecoder
return_intermediate: True
num_layers: 6
transformerlayers:
type: PETRTransformerDecoderLayer
attns:
- type: MultiHeadAttention
embed_dims: 256
num_heads: 8
attn_drop: 0.1
drop_prob: 0.1
- type: PETRMultiheadAttention
embed_dims: 256
num_heads: 8
attn_drop: 0.1
drop_prob: 0.1
batch_first: True
feedforward_channels: 2048
ffn_dropout: 0.1
operation_order: ['self_attn', 'norm', 'cross_attn', 'norm', 'ffn', 'norm']
positional_encoding:
type: SinePositionalEncoding3D
num_feats: 128
normalize: True
bbox_coder:
type: NMSFreeCoder
post_center_range: [-61.2, -61.2, -10.0, 61.2, 61.2, 10.0]
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
max_num: 300
voxel_size: [0.2, 0.2, 8]
num_classes: 10
loss_cls:
type: WeightedFocalLoss
gamma: 2.0
alpha: 0.25
loss_weight: 2.0
reduction: sum
loss_bbox:
type: WeightedL1Loss
loss_weight: 0.25
reduction: sum
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/centerpoint/centerpoint_pillars_016voxel_kitti_mini.yml
|
# This is a training configuration for a simplified version of KITTI. It is just for a quick start,
# all the hyperparameters are not strictly tuned, so the training result is not optimal
batch_size: 4
epochs: 20
train_dataset:
type: KittiPCDataset
dataset_root: datasets/KITTI
transforms:
- type: LoadPointCloud
dim: 4
use_dim: 4
- type: RemoveCameraInvisiblePointsKITTI
- type: SamplingDatabase
min_num_points_in_box_per_class:
Car: 5
Cyclist: 5
Pedestrian: 5
max_num_samples_per_class:
Car: 15
Cyclist: 10
ignored_difficulty: [-1]
database_anno_path: datasets/KITTI/kitti_train_gt_database/anno_info_train.pkl
database_root: datasets/KITTI/
class_names: ["Car", "Cyclist", "Pedestrian"]
- type: RandomObjectPerturb
rotation_range: [-0.15707963267, 0.15707963267]
translation_std: [0.25, 0.25, 0.25]
max_num_attempts: 100
- type: RandomVerticalFlip
- type: GlobalRotate
min_rot: -0.78539816
max_rot: 0.78539816
- type: GlobalScale
min_scale: 0.95
max_scale: 1.05
- type: GlobalTranslate
translation_std: [0.2, 0.2, 0.2]
- type: ShufflePoint
- type: FilterBBoxOutsideRange
point_cloud_range: [0, -39.68, -3, 69.12, 39.68, 1]
- type: Gt2CenterPointTarget
tasks:
- num_class: 1
class_names: ["Car"]
- num_class: 2
class_names: ["Cyclist", "Pedestrian"]
down_ratio: 2
point_cloud_range: [0, -39.68, -3, 69.12, 39.68, 1]
voxel_size: [0.16, 0.16, 4]
gaussian_overlap: 0.1
max_objs: 500
min_radius: 2
mode: train
class_balanced_sampling: False
class_names: ["Car", "Cyclist", "Pedestrian"]
val_dataset:
type: KittiPCDataset
dataset_root: datasets/KITTI
transforms:
- type: LoadPointCloud
dim: 4
use_dim: 4
- type: RemoveCameraInvisiblePointsKITTI
mode: val
class_names: ["Car", "Cyclist", "Pedestrian"]
optimizer:
type: OneCycleAdam
beta2: 0.99
weight_decay: 0.01
grad_clip:
type: ClipGradByGlobalNorm
clip_norm: 35
beta1:
type: OneCycleDecayWarmupMomentum
momentum_peak: 0.95
momentum_trough: 0.85
step_ratio_peak: 0.4
lr_scheduler:
type: OneCycleWarmupDecayLr
base_learning_rate: 0.001
lr_ratio_peak: 10
lr_ratio_trough: 0.0001
step_ratio_peak: 0.4
model:
type: CenterPoint
voxelizer:
type: HardVoxelizer
point_cloud_range: [0, -39.68, -3, 69.12, 39.68, 1]
voxel_size: [0.16, 0.16, 4]
max_num_points_in_voxel: 100
max_num_voxels: [12000, 40000]
voxel_encoder:
type: PillarFeatureNet
in_channels: 4
feat_channels: [64, 64]
with_distance: False
max_num_points_in_voxel: 100
voxel_size: [0.16, 0.16, 4]
point_cloud_range: [0, -39.68, -3, 69.12, 39.68, 1]
legacy: False
middle_encoder:
type: PointPillarsScatter
in_channels: 64
voxel_size: [0.16, 0.16, 4]
point_cloud_range: [0, -39.68, -3, 69.12, 39.68, 1]
backbone:
type: SecondBackbone
in_channels: 64
out_channels: [64, 128, 256]
layer_nums: [3, 5, 5]
downsample_strides: [1, 2, 2]
neck:
type: SecondFPN
in_channels: [64, 128, 256]
out_channels: [128, 128, 128]
upsample_strides: [0.5, 1, 2]
use_conv_for_no_stride: True
bbox_head:
type: CenterHead
in_channels: 384 # sum([128, 128, 128])
tasks:
- num_class: 1
class_names: ["Car"]
- num_class: 2
class_names: ["Cyclist", "Pedestrian"]
weight: 2.5 # loc_loss weight
code_weights: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] # [x, y, z, w, h, l, sin(angle), cos(angle)] weight in loc loss
common_heads:
reg: [2, 2] # classes, num_conv
height: [1, 2]
dim: [3, 2]
rot: [2, 2]
test_cfg:
post_center_limit_range: [-10., -50., -10., 80., 50., 10.]
nms:
nms_pre_max_size: 1000
nms_post_max_size: 83
nms_iou_threshold: 0.1
score_threshold: 0.1
point_cloud_range: [0, -39.68, -3, 69.12, 39.68, 1]
down_ratio: 2
voxel_size: [0.16, 0.16, 4]
export:
transforms:
- type: LoadPointCloud
dim: 4
use_dim: 4
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/centerpoint/centerpoint_pillars_02voxel_nuscenes_10sweep.yml
|
batch_size: 4
epochs: 20
train_dataset:
type: NuscenesPCDataset
dataset_root: datasets/nuscenes/
transforms:
- type: LoadPointCloud
dim: 5
use_dim: 4
use_time_lag: True
sweep_remove_radius: 1
- type: SamplingDatabase
min_num_points_in_box_per_class:
car: 5
truck: 5
bus: 5
trailer: 5
construction_vehicle: 5
traffic_cone: 5
barrier: 5
motorcycle: 5
bicycle: 5
pedestrian: 5
max_num_samples_per_class:
car: 2
truck: 3
construction_vehicle: 7
bus: 4
trailer: 6
barrier: 2
motorcycle: 6
bicycle: 6
pedestrian: 2
traffic_cone: 2
database_anno_path: datasets/nuscenes/gt_database_train_nsweeps10_withvelo/anno_info_train_nsweeps10_withvelo.pkl
database_root: datasets/nuscenes/
class_names: ["car", "truck", "construction_vehicle", "bus", "trailer", "barrier", "motorcycle", "bicycle", "pedestrian", "traffic_cone"]
- type: RandomVerticalFlip
- type: RandomHorizontalFlip
- type: GlobalRotate
min_rot: -0.3925
max_rot: 0.3925
- type: GlobalScale
min_scale: 0.95
max_scale: 1.05
- type: ShufflePoint
- type: FilterBBoxOutsideRange
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
- type: Gt2CenterPointTarget
tasks:
- num_class: 1
class_names: ["car"]
- num_class: 2
class_names: ["truck", "construction_vehicle"]
- num_class: 2
class_names: ["bus", "trailer"]
- num_class: 1
class_names: ["barrier"]
- num_class: 2
class_names: ["motorcycle", "bicycle"]
- num_class: 2
class_names: ["pedestrian", "traffic_cone"]
down_ratio: 4
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
voxel_size: [0.2, 0.2, 8]
gaussian_overlap: 0.1
max_objs: 500
min_radius: 2
mode: train
max_sweeps: 10
class_balanced_sampling: True
class_names: ["car", "truck", "construction_vehicle", "bus", "trailer", "barrier", "motorcycle", "bicycle", "pedestrian", "traffic_cone"]
val_dataset:
type: NuscenesPCDataset
dataset_root: datasets/nuscenes/
transforms:
- type: LoadPointCloud
dim: 5
use_dim: 4
use_time_lag: True
sweep_remove_radius: 1
mode: val
max_sweeps: 10
class_names: ["car", "truck", "construction_vehicle", "bus", "trailer", "barrier", "motorcycle", "bicycle", "pedestrian", "traffic_cone"]
optimizer:
type: OneCycleAdam
beta2: 0.99
weight_decay: 0.01
grad_clip:
type: ClipGradByGlobalNorm
clip_norm: 35
beta1:
type: OneCycleDecayWarmupMomentum
momentum_peak: 0.95
momentum_trough: 0.85
step_ratio_peak: 0.4
lr_scheduler:
type: OneCycleWarmupDecayLr
base_learning_rate: 0.0001
lr_ratio_peak: 10
lr_ratio_trough: 0.0001
step_ratio_peak: 0.4
model:
type: CenterPoint
voxelizer:
type: HardVoxelizer
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
voxel_size: [0.2, 0.2, 8]
max_num_points_in_voxel: 20
max_num_voxels: [30000, 60000]
voxel_encoder:
type: PillarFeatureNet
in_channels: 5
feat_channels: [64, 64]
with_distance: False
max_num_points_in_voxel: 20
voxel_size: [0.2, 0.2, 8]
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
legacy: False
middle_encoder:
type: PointPillarsScatter
in_channels: 64
voxel_size: [0.2, 0.2, 8]
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
backbone:
type: SecondBackbone
in_channels: 64
out_channels: [64, 128, 256]
layer_nums: [3, 5, 5]
downsample_strides: [2, 2, 2]
neck:
type: SecondFPN
in_channels: [64, 128, 256]
out_channels: [128, 128, 128]
upsample_strides: [0.5, 1, 2]
use_conv_for_no_stride: True
bbox_head:
type: CenterHead
in_channels: 384 # sum([128, 128, 128])
tasks:
- num_class: 1
class_names: ["car"]
- num_class: 2
class_names: ["truck", "construction_vehicle"]
- num_class: 2
class_names: ["bus", "trailer"]
- num_class: 1
class_names: ["barrier"]
- num_class: 2
class_names: ["motorcycle", "bicycle"]
- num_class: 2
class_names: ["pedestrian", "traffic_cone"]
weight: 0.25 # loc_loss weight
code_weights: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2, 1.0, 1.0] # [x, y, z, w, h, l, vx, vy, sin(angle), cos(angle)] weight in loc loss
common_heads:
reg: [2, 2] # classes, num_conv
height: [1, 2]
dim: [3, 2]
rot: [2, 2]
vel: [2, 2]
test_cfg:
post_center_limit_range: [-61.2, -61.2, -10.0, 61.2, 61.2, 10.0]
max_per_img: 500
nms:
nms_pre_max_size: 1000
nms_post_max_size: 83
nms_iou_threshold: 0.2
score_threshold: 0.1
point_cloud_range: [-51.2, -51.2]
down_ratio: 4
voxel_size: [0.2, 0.2]
box_with_velocity: True
export:
transforms:
- type: LoadPointCloud
dim: 5
use_dim: 4
use_time_lag: True
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/centerpoint/centerpoint_voxels_008voxel_kitti.yml
|
batch_size: 4
epochs: 160
train_dataset:
type: KittiPCDataset
dataset_root: datasets/KITTI
transforms:
- type: LoadPointCloud
dim: 4
use_dim: 4
- type: RemoveCameraInvisiblePointsKITTI
- type: SamplingDatabase
min_num_points_in_box_per_class:
Car: 5
Cyclist: 5
Pedestrian: 5
max_num_samples_per_class:
Car: 15
Cyclist: 10
ignored_difficulty: [-1]
database_anno_path: datasets/KITTI/kitti_train_gt_database/anno_info_train.pkl
database_root: datasets/KITTI/
class_names: ["Car", "Cyclist", "Pedestrian"]
- type: RandomObjectPerturb
rotation_range: [-0.15707963267, 0.15707963267]
translation_std: [0.25, 0.25, 0.25]
max_num_attempts: 100
- type: RandomVerticalFlip
- type: GlobalRotate
min_rot: -0.78539816
max_rot: 0.78539816
- type: GlobalScale
min_scale: 0.95
max_scale: 1.05
- type: GlobalTranslate
translation_std: [0.2, 0.2, 0.2]
- type: ShufflePoint
- type: FilterPointOutsideRange
point_cloud_range: [0, -39.68, -3, 69.12, 39.68, 1]
- type: FilterBBoxOutsideRange
point_cloud_range: [0, -39.68, -3, 69.12, 39.68, 1]
- type: Gt2CenterPointTarget
tasks:
- num_class: 1
class_names: ["Car"]
- num_class: 2
class_names: ["Cyclist", "Pedestrian"]
down_ratio: 8
point_cloud_range: [0, -39.68, -3, 69.12, 39.68, 1]
voxel_size: [0.08, 0.08, 0.1]
gaussian_overlap: 0.1
max_objs: 500
min_radius: 2
mode: train
class_balanced_sampling: False
class_names: ["Car", "Cyclist", "Pedestrian"]
val_dataset:
type: KittiPCDataset
dataset_root: datasets/KITTI
transforms:
- type: LoadPointCloud
dim: 4
use_dim: 4
- type: RemoveCameraInvisiblePointsKITTI
- type: FilterPointOutsideRange
point_cloud_range: [0, -39.68, -3, 69.12, 39.68, 1]
mode: val
class_names: ["Car", "Cyclist", "Pedestrian"]
optimizer:
type: OneCycleAdam
beta2: 0.99
weight_decay: 0.01
grad_clip:
type: ClipGradByGlobalNorm
clip_norm: 35
beta1:
type: OneCycleDecayWarmupMomentum
momentum_peak: 0.95
momentum_trough: 0.85
step_ratio_peak: 0.4
lr_scheduler:
type: OneCycleWarmupDecayLr
base_learning_rate: 0.001
lr_ratio_peak: 10
lr_ratio_trough: 0.0001
step_ratio_peak: 0.4
model:
type: CenterPoint
voxelizer:
type: HardVoxelizer
point_cloud_range: [0, -39.68, -3, 69.12, 39.68, 1]
voxel_size: [0.08, 0.08, 0.1]
max_num_points_in_voxel: 100
max_num_voxels: [12000, 40000]
voxel_encoder:
type: VoxelMean
in_channels: 4
middle_encoder:
type: SparseResNet3D
in_channels: 4
voxel_size: [0.08, 0.08, 0.1]
point_cloud_range: [0, -39.68, -3, 69.12, 39.68, 1]
backbone:
type: SecondBackbone
in_channels: 256
out_channels: [128, 256]
layer_nums: [5, 5]
downsample_strides: [1, 2]
neck:
type: SecondFPN
in_channels: [128, 256]
out_channels: [256, 256]
upsample_strides: [1, 2]
use_conv_for_no_stride: True
bbox_head:
type: CenterHead
in_channels: 512 # sum([256, 256])
tasks:
- num_class: 1
class_names: ["Car"]
- num_class: 2
class_names: ["Cyclist", "Pedestrian"]
weight: 2.5 # loc_loss weight
code_weights: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] # [x, y, z, w, h, l, sin(angle), cos(angle)] weight in loc loss
common_heads:
reg: [2, 2]
height: [1, 2]
dim: [3, 2]
rot: [2, 2]
test_cfg:
post_center_limit_range: [-10., -50., -10., 80., 50., 10.]
nms:
nms_pre_max_size: 1000
nms_post_max_size: 83
nms_iou_threshold: 0.1
score_threshold: 0.1
point_cloud_range: [0, -39.68, -3, 69.12, 39.68, 1]
down_ratio: 8
voxel_size: [0.08, 0.08, 0.1]
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/centerpoint/README.md
|
# CenterPoint:Center-based 3D Object Detection and Tracking
## 目录
* [引用](#1)
* [简介](#2)
* [模型库](#3)
* [训练 & 评估](#4)
* [nuScenes数据集](#41)
* [KITTI数据集](#42)
* [导出 & 部署](#8)
* [Apollo模型](#9)
* [训练自定义数据集](#10)
## <h2 id="1">引用</h2>
> Yin, Tianwei and Zhou, Xingyi and Krahenbuhl, Philipp. "Center-Based 3D Object Detection and Tracking." In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 11784-11793. 2021.
## <h2 id="2">简介</h2>
CenterPoint是Anchor-Free的三维物体检测器,以点云作为输入,将三维物体在Bird-View下的中心点作为关键点,基于关键点检测的方式回归物体的尺寸、方向和速度。相比于Anchor-Based的三维物体检测器,CenterPoint不需要人为设定Anchor尺寸,面向物体尺寸多样不一的场景时其精度表现更高,且简易的模型设计使其在性能上也表现更加高效。
Paddle3D实现的CenterPoint做了以下优化:
- 对模型的前后处理做了性能优化。CenterPoint-Pillars在[nuScenes](https://www.nuscenes.org/nuscenes) val set上精度有50.97mAP,速度在Tesla V100上达到了50.28FPS。
- 提供[KITTI数据集](http://www.cvlibs.net/datasets/kitti/eval_object.php?obj_benchmark=3d)上的训练配置和Baseline。CenterPoint-Pillars在KITTI val set上精度达到64.75 mAP,速度在Tesla V100上达到了43.96FPS。
跟原论文相比,Paddle3D实现的CenterPoint有以下差异:
- 未提供第二个阶段的实现。在原论文中,作者还设计了第二个阶段来进一步精炼物体的位置、尺寸和方向,并在[Waymo数据集](https://waymo.com/open/)上做了验证。Paddle3D目前还未适配Waymo数据集,所以第二个阶段暂未实现。
- 未提供在nuScenes数据集上将预测速度用于多目标跟踪的实现。
## <h2 id="3">模型库</h2>
- CenterPoint在nuScenes Val set数据集上的表现
| 模型 | 体素格式 | mAP | NDS | V100 TensorRT FP32(FPS) | V100 TensorRT FP16(FPS) | 模型下载 | 配置文件 | 日志 |
| ---- | ---------------- | --- | --- | ----------------------- | ----------------------- | -------- | -------- | ---- |
| CenterPoint | 2D-Pillars | 50.97 | 61.30 | 50.28 | 63.43 | [model](https://bj.bcebos.com/paddle3d/models/centerpoint//centerpoint_pillars_02voxel_nuscenes_10sweep/model.pdparams) | [config](../../../configs/centerpoint/centerpoint_pillars_02voxel_nuscenes_10sweep.yml) | [log](https://bj.bcebos.com/paddle3d/models/centerpoint//centerpoint_pillars_02voxel_nuscenes_10sweep/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=f150eb3b4db30c7bd4ff2dfac5ca4166) |
| CenterPoint | 3D-Voxels | 59.25 | 66.74 | 21.90 | 26.93 | [model]( https://bj.bcebos.com/paddle3d/models/centerpoint/centerpoint_voxels_0075voxel_nuscenes_10sweep/model.pdparams) | [config](../../../configs/centerpoint/centerpoint_voxels_0075voxel_nuscenes_10sweep.yml) | [log]( https://bj.bcebos.com/paddle3d/models/centerpoint/centerpoint_voxels_0075voxel_nuscenes_10sweep/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=2cf9f2123ea8393cf873e8f8ae907fdc) |
**注意:nuScenes benchmark使用4张V100 GPU训练得出。3D Sparse Conv功能需要安装Paddle 2.4版。**
- CenterPoint在KITTI Val set数据集上的表现
| 模型 | 体素格式 | 3DmAP Mod. | Car<br>Easy Mod. Hard | Pedestrian<br>Easy Mod. Hard | Cyclist<br>Easy Mod. Hard | V100 TensorRT FP32(FPS) | V100 TensorRT FP16(FPS) | 模型下载 | 配置文件 | 日志 |
| ---- | ---------------- | ---------- | ------------------ | ------------------------- | -----------------------| ----------------------- | ----------------------- | -------- | -------- | ---- |
| CenterPoint | 2D-Pillars | 64.75 | 85.99 76.69 73.62 | 57.66 54.03 49.75 | 84.30 63.52 59.47 | 43.96 | 74.21 | [model]( https://bj.bcebos.com/paddle3d/models/centerpoint//centerpoint_pillars_016voxel_kitti/model.pdparams) | [config](../../../configs/centerpoint/centerpoint_pillars_016voxel_kitti.yml)| [log]( https://bj.bcebos.com/paddle3d/models/centerpoint//centerpoint_pillars_016voxel_kitti/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=7f2b637cfce7995a55b915216b8b1171) |
| 模型 | 体素格式 | BEVmAP Mod. | Car<br>Easy Mod. Hard | Pedestrian<br>Easy Mod. Hard | Cyclist<br>Easy Mod. Hard | V100 TensorRT FP32(FPS) | V100 TensorRT FP16(FPS) | 模型下载 | 配置文件 | 日志 |
| ---- | ---------------- | ----------- | ------------------ | ------------------------- | ---------------------- | ----------------------- | ----------------------- | -------- | -------- | ---- |
| CenterPoint | 2D-Pillars | 71.87 | 93.03 87.33 86.21 | 66.46 62.66 58.54 | 86.59 65.62 61.58 | 43.96 | 74.21 | [model]( https://bj.bcebos.com/paddle3d/models/centerpoint//centerpoint_pillars_016voxel_kitti/model.pdparams) | [config](../../../configs/centerpoint/centerpoint_pillars_016voxel_kitti.yml)| [log]( https://bj.bcebos.com/paddle3d/models/centerpoint//centerpoint_pillars_016voxel_kitti/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=7f2b637cfce7995a55b915216b8b1171) |
**注意:** KITTI benchmark使用8张V100 GPU训练得出。
## <h2 id="4">训练 & 评估</h2>
### <h3 id="41">nuScenes数据集</h3>
#### 数据准备
- 目前Paddle3D中提供的CenterPoint模型支持在nuScenes数据集上训练,因此需要先准备nuScenes数据集,请在[官网](https://www.nuscenes.org/nuscenes)进行下载,并将数据集目录准备如下:
```
nuscenes_dataset_root
|—— samples
|—— sweeps
|—— maps
|—— v1.0-trainval
```
在Paddle3D的目录下创建软链接 `datasets/nuscenes`,指向到上面的数据集目录:
```
mkdir datasets
ln -s /path/to/nuscenes_dataset_root ./datasets
mv ./datasets/nuscenes_dataset_root ./datasets/nuscenes
```
- 生成训练时数据增强所需的真值库:
```
python tools/create_det_gt_database.py --dataset_name nuscenes --dataset_root ./datasets/nuscenes --save_dir ./datasets/nuscenes
```
`--dataset_root`指定nuScenes数据集所在路径,`--save_dir`指定用于保存所生成的真值库的路径。该命令执行后,`save_dir`生成的目录如下:
```
gt_database_train_nsweeps10_withvelo
|—— anno_info_train_nsweeps10_withvelo.pkl
|—— bicycle
| |—— 20646_bicycle_4.bin
| |—— ...
|—— car
|—— ...
```
#### 训练
nuScenes数据集上的训练使用4张GPU:
```
python -m paddle.distributed.launch --gpus 0,1,2,3 tools/train.py --config configs/centerpoint/centerpoint_pillars_02voxel_nuscenes_10sweep.yml --save_dir ./output_nuscenes --num_workers 3 --save_interval 5
```
训练启动参数介绍可参考文档[全流程速览](../../quickstart.md#模型训练)。
#### 评估
```
python tools/evaluate.py --config configs/centerpoint/centerpoint_pillars_02voxel_nuscenes_10sweep.yml --model ./output_nuscenes/epoch_20/model.pdparams --batch_size 1 --num_workers 3
```
**注意**:CenterPoint的评估目前只支持batch_size为1。
评估启动参数介绍可参考文档[全流程速览](../../quickstart.md#模型评估)。
### <h3 id="42">KITTI数据集</h3>
- 目前Paddle3D中提供的CenterPoint模型支持在KITTI数据集上训练,因此需要先准备KITTI数据集,请在[官网](http://www.cvlibs.net/datasets/kitti/eval_object.php?obj_benchmark=3d)进行下载:
1. Download Velodyne point clouds, if you want to use laser information (29 GB)
2. training labels of object data set (5 MB)
3. camera calibration matrices of object data set (16 MB)
并下载数据集的划分文件列表:
```
wget https://bj.bcebos.com/paddle3d/datasets/KITTI/ImageSets.tar.gz
```
将数据解压后按照下方的目录结构进行组织:
```
kitti_dataset_root
|—— training
| |—— label_2
| | |—— 000001.txt
| | |—— ...
| |—— calib
| | |—— 000001.txt
| | |—— ...
| |—— velodyne
| | |—— 000001.bin
| | |—— ...
|—— ImageSets
│ |—— test.txt
│ |—— train.txt
│ |—— trainval.txt
│ |—— val.txt
```
在Paddle3D的目录下创建软链接 `datasets/KITTI`,指向到上面的数据集目录:
```
mkdir datasets
ln -s /path/to/kitti_dataset_root ./datasets
mv ./datasets/kitti_dataset_root ./datasets/KITTI
```
- 生成训练时数据增强所需的真值库:
```
python tools/create_det_gt_database.py --dataset_name kitti --dataset_root ./datasets/KITTI --save_dir ./datasets/KITTI
```
`--dataset_root`指定KITTI数据集所在路径,`--save_dir`指定用于保存所生成的真值库的路径。该命令执行后,`save_dir`生成的目录如下:
```
kitti_train_gt_database
|—— anno_info_train.pkl
|—— Car
| |—— 4371_Car_7.bin
| |—— ...
|—— Cyclist
```
#### 训练
KITTI数据集上的训练使用8张GPU:
```
python -m paddle.distributed.launch --gpus 0,1,2,3,4,5,6,7 tools/train.py --config configs/centerpoint/centerpoint_pillars_016voxel_kitti.yml --save_dir ./output_kitti --num_workers 4 --save_interval 5
```
训练启动参数介绍可参考文档[全流程速览](../../quickstart.md#模型训练)。
#### 评估
```
python tools/evaluate.py --config configs/centerpoint/centerpoint_pillars_016voxel_kitti.yml --model ./output_kitti/epoch_160/model.pdparams --batch_size 1 --num_workers 4
```
**注意**:CenterPoint的评估目前只支持batch_size为1。
评估启动参数介绍可参考文档[全流程速览](../../quickstart.md#模型评估)。
## <h2 id="8">导出 & 部署</h2>
### <h3 id="81">模型导出</h3>
运行以下命令,将训练时保存的动态图模型文件导出成推理引擎能够加载的静态图模型文件。
```
python tools/export.py --config configs/centerpoint/centerpoint_pillars_02voxel_nuscenes_10sweep.yml --model /path/to/model.pdparams --save_dir /path/to/output
```
| 参数 | 说明 |
| -- | -- |
| config | **[必填]** 训练配置文件所在路径 |
| model | **[必填]** 训练时保存的模型文件`model.pdparams`所在路径 |
| save_dir | **[必填]** 保存导出模型的路径,`save_dir`下将会生成三个文件:`centerpoint.pdiparams `、`centerpoint.pdiparams.info`和`centerpoint.pdmodel` |
### C++部署
#### Linux系统
#### 环境依赖
- GCC >= 5.4.0
- Cmake >= 3.5.1
- Ubuntu 16.04/18.04
> 说明:本文档的部署环节在以下环境中进行过测试并通过:
测试环境一:
- GCC==8.2.0
- Cmake==3.16.0
- Ubuntu 18.04
- CUDA 11.2
- cuDNN==8.1.1
- Paddle Inference==2.3.1
- TensorRT-8.2.5.1.Linux.x86_64-gnu.cuda-11.4.cudnn8.2
测试环境二:
- GCC==7.5.0
- Cmake==3.19.6
- Ubuntu 18.04
- CUDA==11.1
- cuDNN==8.0.4
- Paddle Inference==2.3.1
- TensorRT-8.2.5.1.Linux.x86_64-gnu.cuda-11.4.cudnn8.2
#### 编译步骤
**注意:目前CenterPoint仅支持使用GPU进行推理。**
- step 1: 进入部署代码所在路径
```
cd deploy/centerpoint/cpp
```
- step 2: 下载Paddle Inference C++预编译库
Paddle Inference针对**是否使用GPU**、**是否支持TensorRT**、以及**不同的CUDA/cuDNN/GCC版本**均提供已经编译好的库文件,请至[Paddle Inference C++预编译库下载列表](https://www.paddlepaddle.org.cn/inference/user_guides/download_lib.html#c)选择符合的版本。
- step 3: 修改`compile.sh`中的编译参数
主要修改编译脚本`compile.sh`中的以下参数:
| 参数 | 说明 |
| -- | -- |
| WITH_GPU | 是否使用gpu。ON或OFF, OFF表示使用CPU,默认ON|
| USE_TENSORRT | 是否使用TensorRT加速。ON或OFF,默认OFF|
| LIB_DIR | Paddle Inference C++预编译包所在路径,该路径下的内容应有:`CMakeCache.txt`、`paddle`、`third_party`和`version.txt` |
| CUDNN_LIB | cuDNN`libcudnn.so`所在路径 |
| CUDA_LIB | CUDA`libcudart.so `所在路径 |
| TENSORRT_ROOT | TensorRT所在路径。**非必须**,如果`USE_TENSORRT`设置为`ON`时,需要填写该路径,该路径下的内容应有`bin`、`lib`和`include`等|
- step 4: 开始编译
```
sh compile.sh
```
### 执行预测
**注意:目前CenterPoint仅支持使用GPU进行推理。**
执行命令参数说明
| 参数 | 说明 |
| -- | -- |
| model_file | 导出模型的结构文件`centerpoint.pdmodel`所在路径 |
| params_file | 导出模型的参数文件`centerpoint.pdiparams`所在路径 |
| lidar_file | 待预测的点云文件所在路径 |
| num_point_dim | 点云文件中每个点的维度大小。例如,若每个点的信息是`x, y, z, intensity`,则`num_point_dim`填写为4 |
| with_timelag | 该参数仅针对由多帧融合而成的点云文件,融合后的点云文件通常每个点都会包含时间差(timelag)。若点云维度大于等于5且第5维信息是timelag,需设置为1,默认0 |
```
./build/main --model_file /path/to/centerpoint.pdmodel --params_file /path/to/centerpoint.pdiparams --lidar_file /path/to/lidar.pcd.bin --num_point_dim 5
```
**注意:** 请预先确认实际待测试点云文件的维度是否是5,如果不是5,`--num_point_dim`请修改为实际值。如果待测试的点云文件是由多帧融合而成且点云维度大于等于5且第5维信息是timelag,可将`--with_timelag`设置为1。
### 开启TensorRT加速预测【可选】
**注意:请根据编译步骤的step 3,修改`compile.sh`中TensorRT相关的编译参数,并重新编译。**
运行命令参数说明如下:
| 参数 | 说明 |
| -- | -- |
| model_file | 导出模型的结构文件`centerpoint.pdmodel`所在路径 |
| params_file | 导出模型的参数文件`centerpoint.pdiparams`所在路径 |
| lidar_file | 待预测的点云文件所在路径 |
| num_point_dim | 点云文件中每个点的维度大小。例如,若每个点的信息是`x, y, z, intensity`,则`num_point_dim`填写为4 |
| with_timelag | 仅针对`nuscenes`数据集,若使用`nuscenes`数据集训练的模型,需设置为1,默认0 |
| use_trt | 是否使用TensorRT进行加速,默认0|
| trt_precision | 当use_trt设置为1时,模型精度可设置0或1,0表示fp32, 1表示fp16。默认0 |
| trt_use_static | 当trt_use_static设置为1时,**在首次运行程序的时候会将TensorRT的优化信息进行序列化到磁盘上,下次运行时直接加载优化的序列化信息而不需要重新生成**。默认0 |
| trt_static_dir | 当trt_use_static设置为1时,保存优化信息的路径 |
| collect_shape_info | 是否收集模型动态shape信息。默认0。**只需首次运行,下次运行时直接加载生成的shape信息文件即可进行TensorRT加速推理** |
| dynamic_shape_file | 保存模型动态shape信息的文件路径。 |
* **首次运行TensorRT**,收集模型动态shape信息,并保存至`--dynamic_shape_file`指定的文件中
```
./build/main --model_file /path/to/centerpoint.pdmodel --params_file /path/to/centerpoint.pdiparams --lidar_file /path/to/lidar.pcd.bin --num_point_dim 5 --use_trt 1 --collect_shape_info 1 --dynamic_shape_file /path/to/shape_info.txt
```
* 加载`--dynamic_shape_file`指定的模型动态shape信息,使用FP32精度进行预测
```
./build/main --model_file /path/to/centerpoint.pdmodel --params_file /path/to/centerpoint.pdiparams --lidar_file /path/to/lidar.pcd.bin --num_point_dim 5 --use_trt 1 --dynamic_shape_file /path/to/shape_info.txt
```
* 加载`--dynamic_shape_file`指定的模型动态shape信息,使用FP16精度进行预测
```
./build/main --model_file /path/to/centerpoint.pdmodel --params_file /path/to/centerpoint.pdiparams --lidar_file /path/to/lidar.pcd.bin --num_point_dim 5 --use_trt 1 --dynamic_shape_file /path/to/shape_info.txt --trt_precision 1
```
* 如果觉得每次运行时模型加载的时间过长,可以设置`trt_use_static`和`trt_static_dir`,首次运行时将TensorRT的优化信息保存在硬盘中,后续直接反序列化优化信息即可
```
./build/main --model_file /path/to/centerpoint.pdmodel --params_file /path/to/centerpoint.pdiparams --lidar_file /path/to/lidar.pcd.bin --num_point_dim 4 --use_trt 1 --dynamic_shape_file /path/to/shape_info.txt --trt_precision 1 --trt_use_static 1 --trt_static_dir /path/to/OptimCacheDir
```
### Python部署
**注意:目前CenterPoint仅支持使用GPU进行推理。**
命令参数说明如下:
| 参数 | 说明 |
| -- | -- |
| model_file | 导出模型的结构文件`centerpoint.pdmodel`所在路径 |
| params_file | 导出模型的参数文件`centerpoint.pdiparams`所在路径 |
| lidar_file | 待预测的点云文件所在路径 |
| num_point_dim | 点云文件中每个点的维度大小。例如,若每个点的信息是`x, y, z, intensity`,则`num_point_dim`填写为4 |
| with_timelag | 该参数仅针对由多帧融合而成的点云文件,融合后的点云文件通常每个点都会包含时间差(timelag)。若点云维度大于等于5且第5维信息是timelag,需设置为1,默认0 |
| use_trt | 是否使用TensorRT进行加速,默认0|
| trt_precision | 当use_trt设置为1时,模型精度可设置0或1,0表示fp32, 1表示fp16。默认0 |
| trt_use_static | 当trt_use_static设置为1时,**在首次运行程序的时候会将TensorRT的优化信息进行序列化到磁盘上,下次运行时直接加载优化的序列化信息而不需要重新生成**。默认0 |
| trt_static_dir | 当trt_use_static设置为1时,保存优化信息的路径 |
| collect_shape_info | 是否收集模型动态shape信息。默认0。**只需首次运行,后续直接加载生成的shape信息文件即可进行TensorRT加速推理** |
| dynamic_shape_file | 保存模型动态shape信息的文件路径。 |
运行以下命令,执行预测:
```
python infer.py --model_file /path/to/centerpoint.pdmodel --params_file /path/to/centerpoint.pdiparams --lidar_file /path/to/lidar.pcd.bin --num_point_dim 5
```
Python开启TensorRT的推理步骤与C++开启TensorRT加速推理一致,请参考文档前面介绍【开启TensorRT加速预测】并将C++命令参数替换成Python的命令参数。推荐使用PaddlePaddle 的官方镜像,镜像内已经预安装TensorRT。官方镜像请至[Paddle官网](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/install/docker/linux-docker.html)进行下载。
## <h2 id="9">Apollo模型</h2>
Apollo使用百度自动驾驶数据对CenterPoint进行了训练和优化,检测效果和泛化能力都获得大幅提升,可以提供复杂城市道路场景下实时、准确、稳定的3D目标检测效果。
模型文件下载地址:
| 模型文件 | 下载地址 |
| -- | -- |
| Apollo CenterPoint训练权重文件 | [Link](https://apollo-pkg-beta.bj.bcebos.com/perception_model/centerpoint_core_pretrained_model.zip) |
| Apollo CenterPoint可部署文件 | [Link](https://apollo-pkg-beta.bj.bcebos.com/perception_model/center_point_paddle.zip) |
检测效果:
<div align=center>
<img src="../../../images/centerpoint_result2.png" width="1200"/>
</div>
<div align=center>
<img src="../../../images/centerpoint_result3.png" width="1200"/>
</div>
<div align=center>
<img src="../../../images/centerpoint_result1.png" width="1200"/>
</div>
## <h2 id="10">训练自定义数据集</h2>
下面将以apolloscape数据集为例,介绍用户训练自己数据集的完整流程
### 转化为KITTI标准格式
推荐用户将处理为标准的KITTI格式,数据组织形式如下所示:
```
|-- ImageSets
|-- train.txt
|-- val.txt
|-- training
|-- label_2
|-- 000000.txt
|-- 000001.txt
|-- ....
|-- velodyne
|-- 000000.bin
|-- 000001.bin
|-- ....
```
train.txt和val.txt分别保存着训练数据和评测数据的索引,下面是train.txt的内容示例:
```
000000
000001
000003
000004
000006
000008
000010
000011
000014
....
```
velodyne文件夹中存放着每一帧的点云数据,以.bin形式存储
label_2文件夹中存放着每一帧的标签信息,下面是标签文件示例:
```
pedestrian 0 0 0 0 0 0 0 1.018863 0.648392 0.348114 -32.12419891357422 40.14154434204102 -0.9670228362083435 -1.637705
pedestrian 0 0 0 0 0 0 0 0.661574 0.297775 0.735925 -18.38454437255859 -4.152974128723145 -1.521841764450073 1.564056
pedestrian 0 0 0 0 0 0 0 0.772804 0.287372 0.35712 -12.922926902771 25.13016510009766 -0.3287706673145294 0.02878607
pedestrian 0 0 0 0 0 0 0 0.620953 0.373367 0.447131 -12.88798904418945 25.85581016540527 -0.4463132917881012 0.07662772
cyclist 0 0 0 0 0 0 0 1.716547 0.619485 1.912308 7.602930545806885 -3.483364820480347 -0.9519524574279785 -0.03732504
```
上述标签文件仿照KITTI格式,但稍有不同,按顺序15个元素的含义如下所示:
| KITTI数据集 | 类别 |被截断程度 |被遮挡程度 | 观测角 |2d box 左、上、右、下边界坐标 |3d box 高度、宽度、长度 | 3d box在相机坐标系下的中心坐标 |3d box在相机坐标系下的旋转角 |
| -- | -- | -- | -- | -- | -- | -- | -- | -- |
| 用户自主数据集(例如apolloscape数据集) | 类别 | 0 |0 | 0 |0 |3d box 高度、宽度、长度 | 3d box在雷达坐标系下的中心坐标 |3d box在雷达坐标系下的旋转角 |
### 类别映射
当前centerpoint模型输出5种类别,如下所示
| 类别 | 包括类别 |
| -- | -- |
| smallMot | 小型车 |
| bigMot | 大型车 |
| nonMot | 三轮车 二轮车 骑摩托车的人 骑三轮车的人 骑自行车的人 |
| pedestrian | 行人 |
| TrafficCone | 交通锥筒 水马 施工牌 防撞筒 |
推荐用户将自主数据集的类别映射为上述5种类别
修改paddle3d/datasets/apollo/apollo_utils.py中的class_information进行类别映射,以apolloscape数据集为例
* apolloscape中小型车的类别为smallvehicle,需要映射为smallMot,也即'map_class': 'smallMot'
* difficulty_threshold表示根据点云数量定义目标困难程度,[20, 40]表示小于20个点时为hard,大于20小于40个点时为moderate,大于40个点为easy。用户可自定义设置
```
class_information = {
# smallMot
'smallmot': {'map_class': 'smallMot', 'difficulty_threshold': [20, 40]},
'midmot': {'map_class': 'smallMot', 'difficulty_threshold': [20, 40]},
'smallcar': {'map_class': 'smallMot', 'difficulty_threshold': [20, 40]},
'smallvehicle': {'map_class': 'smallMot', 'difficulty_threshold': [20, 40]},
.....
```
在Paddle3D的目录下创建软链接 `datasets/apolloscape`,指向到上面的数据集目录:
```
mkdir datasets
ln -s /path/to/apolloscape ./datasets/apolloscape
```
### 生成gt base
使用object sample数据增强方法可以显著增强检测效果,需要使用下面的脚本生成gt base
```
python tools/create_det_gt_database.py --config configs/centerpoint/centerpoint_pillars_02voxel_apolloscape.yml --dataset_name apollo
```
### 训练&评测&导出
训练apolloscape数据集
```
python -m paddle.distributed.launch --gpus 0,1,2,3,4,5,6,7 tools/train.py --config configs/centerpoint/centerpoint_pillars_02voxel_apolloscape.yml --save_dir ./output_apolloscape --num_workers 4 --save_interval 5
```
用户可使用Apollo官方提供的[预训练模型](https://apollo-pkg-beta.bj.bcebos.com/perception_model/centerpoint_core_pretrained_model.zip)进行训练,以获得更好的检测效果,预训练模型的模型配置可参考centerpoint_pillars_02voxel_apolloscape.yml,训练方式如下:
```
python -m paddle.distributed.launch --gpus 0,1,2,3,4,5,6,7 tools/train.py --config configs/centerpoint/centerpoint_pillars_02voxel_apolloscape.yml --model ./centerpoint_core_pretrained_model/model.pdparams --save_dir ./output_apolloscape --num_workers 4 --save_interval 5
```
评测apolloscape数据集
```
python tools/evaluate.py --config configs/centerpoint/centerpoint_pillars_02voxel_apolloscape.yml --model ./output_apolloscape/epoch_160/model.pdparams --batch_size 1 --num_workers 4
```
导出模型
```
python tools/export.py --config configs/centerpoint/centerpoint_pillars_02voxel_apolloscape.yml --model /path/to/model.pdparams --save_dir /path/to/output --export_for_apollo
```
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/centerpoint/centerpoint_voxels_010voxel_apolloscape.yml
|
batch_size: 8
epochs: 32
amp_cfg:
use_amp: False
enable: False
level: O1
scaler:
init_loss_scaling: 32.0
train_dataset:
type: ApolloPCDataset
dataset_root: datasets
dataset_list: ['apolloscape']
transforms:
- type: LoadPointCloud
dim: 5
use_dim: 4
sep: ''
- type: SamplingDatabaseV2
min_num_points_in_box_per_class:
smallMot: 20
bigMot: 20
nonMot: 10
pedestrian: 8
TrafficCone: 8
max_num_samples_per_class:
smallMot: 10
bigMot: 10
nonMot: 10
pedestrian: 10
TrafficCone: 10
ignored_difficulty: [-1]
database_anno_list: ['apolloscape']
database_root: datasets/
class_names: ['smallMot', 'bigMot', 'nonMot', 'pedestrian', 'TrafficCone']
# - type: RandomObjectPerturb
# rotation_range: [-0.15707963267, 0.15707963267]
# translation_std: [0.25, 0.25, 0.25]
# max_num_attempts: 100
- type: FilterSmallBBox
size_thr: [0.01, 0.01, 0.01]
- type: RandomVerticalFlip
- type: RandomHorizontalFlip
- type: GlobalRotate
min_rot: -0.78539816
max_rot: 0.78539816
- type: GlobalScale
min_scale: 0.9
max_scale: 1.1
- type: GlobalTranslate
translation_std: [0.5, 0.5, 0.5]
- type: ShufflePoint
- type: FilterPointOutsideRange
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
- type: FilterBBoxOutsideRange
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
- type: Gt2CenterPointTarget
tasks: # *
- num_class: 1
class_names: ["smallMot"]
- num_class: 1
class_names: ["bigMot"]
- num_class: 1
class_names: ['nonMot']
- num_class: 1
class_names: ['pedestrian']
- num_class: 1
class_names: ['TrafficCone']
down_ratio: 8
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
voxel_size: [0.1, 0.1, 0.2]
gaussian_overlap: 0.1
max_objs: 500
min_radius: 2
mode: train
class_balanced_sampling: False
class_names: ['smallMot', 'bigMot', 'nonMot', 'pedestrian', 'TrafficCone']
val_dataset:
type: ApolloPCDataset
dataset_root: datasets
dataset_list: ['apolloscape']
transforms:
- type: LoadPointCloud
dim: 5
use_dim: 4
sep: ''
- type: FilterPointOutsideRange
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
mode: val
class_names: ['smallMot', 'bigMot', 'nonMot', 'pedestrian', 'TrafficCone']
eval_class_map: {'smallMot': 'smallMot', 'bigMot': 'bigMot', 'nonMot': 'nonMot', 'Tricyclist': 'nonMot', 'pedestrian': 'pedestrian', 'TrafficCone': 'TrafficCone'}
distance_threshold: 51.2
optimizer:
type: OneCycleAdam
beta2: 0.99
weight_decay: 0.01
grad_clip:
type: ClipGradByGlobalNorm
clip_norm: 35
beta1:
type: OneCycleDecayWarmupMomentum
momentum_peak: 0.95
momentum_trough: 0.85
step_ratio_peak: 0.4
lr_scheduler:
type: OneCycleWarmupDecayLr
base_learning_rate: 0.0001
lr_ratio_peak: 10
lr_ratio_trough: 0.0001
step_ratio_peak: 0.4
model:
type: CenterPoint
voxelizer:
type: HardVoxelizer
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
voxel_size: [0.1, 0.1, 0.2]
max_num_points_in_voxel: 100
max_num_voxels: [120000, 160000]
voxel_encoder:
type: VoxelMean
in_channels: 4
middle_encoder:
type: SparseResNet3D
in_channels: 4
voxel_size: [0.1, 0.1, 0.2]
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
backbone:
type: SecondBackbone
in_channels: 256
out_channels: [128, 256]
layer_nums: [5, 5]
downsample_strides: [1, 2]
neck:
type: SecondFPN
in_channels: [128, 256]
out_channels: [256, 256]
upsample_strides: [1, 2]
use_conv_for_no_stride: True
bbox_head:
type: CenterHead
in_channels: 512
tasks:
- num_class: 1
class_names: ["smallMot"]
- num_class: 1
class_names: ["bigMot"]
- num_class: 1
class_names: ['nonMot']
- num_class: 1
class_names: ['pedestrian']
- num_class: 1
class_names: ['TrafficCone']
weight: 0.25
code_weights: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
common_heads:
reg: [2, 2]
height: [1, 2]
dim: [3, 2]
rot: [2, 2]
test_cfg:
post_center_limit_range: [-90., -90., -10., 90., 90., 10.]
nms:
nms_pre_max_size: 1000
nms_post_max_size: 83
nms_iou_threshold: 0.2
score_threshold: 0.1
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
down_ratio: 8
voxel_size: [0.1, 0.1, 0.2]
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/centerpoint/centerpoint_pillars_02voxel_apolloscape.yml
|
batch_size: 8
epochs: 5
amp_cfg:
use_amp: False
enable: False
level: O1
scaler:
init_loss_scaling: 32.0
train_dataset:
type: ApolloPCDataset
dataset_root: datasets/
dataset_list: ['apolloscape']
transforms:
- type: LoadPointCloud
dim: 5
use_dim: 4
sep: ''
- type: SamplingDatabaseV2
min_num_points_in_box_per_class:
smallMot: 20
bigMot: 20
nonMot: 10
pedestrian: 8
TrafficCone: 8
max_num_samples_per_class:
smallMot: 10
bigMot: 10
nonMot: 10
pedestrian: 10
TrafficCone: 10
ignored_difficulty: [-1]
database_anno_list: ['apolloscape']
database_root: datasets/
class_names: ['smallMot', 'bigMot', 'nonMot', 'pedestrian', 'TrafficCone']
# - type: RandomObjectPerturb
# rotation_range: [-0.15707963267, 0.15707963267]
# translation_std: [0.25, 0.25, 0.25]
# max_num_attempts: 100
- type: FilterSmallBBox
size_thr: [0.01, 0.01, 0.01]
- type: RandomVerticalFlip
- type: RandomHorizontalFlip
- type: GlobalRotate
min_rot: -0.78539816
max_rot: 0.78539816
- type: GlobalScale
min_scale: 0.9
max_scale: 1.1
- type: GlobalTranslate
translation_std: [0.5, 0.5, 0.5]
- type: ShufflePoint
- type: FilterPointOutsideRange
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
- type: FilterBBoxOutsideRange
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
- type: Gt2CenterPointTarget
tasks: # *
- num_class: 1
class_names: ["smallMot"]
- num_class: 1
class_names: ["bigMot"]
- num_class: 1
class_names: ['nonMot']
- num_class: 1
class_names: ['pedestrian']
- num_class: 1
class_names: ['TrafficCone']
down_ratio: 4
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
voxel_size: [0.20, 0.20, 8]
gaussian_overlap: 0.1
max_objs: 500
min_radius: 2
mode: train
class_balanced_sampling: False
class_names: ['smallMot', 'bigMot', 'nonMot', 'pedestrian', 'TrafficCone']
val_dataset:
type: ApolloPCDataset
dataset_root: datasets
dataset_list: ['apolloscape']
transforms:
- type: LoadPointCloud
dim: 5
use_dim: 4
sep: ''
- type: FilterPointOutsideRange
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
mode: val
class_names: ['smallMot', 'bigMot', 'nonMot', 'pedestrian', 'TrafficCone']
eval_class_map: {'smallMot': 'smallMot', 'bigMot': 'bigMot', 'nonMot': 'nonMot', 'Tricyclist': 'nonMot', 'pedestrian': 'pedestrian', 'TrafficCone': 'TrafficCone'}
distance_threshold: 51.2
optimizer:
type: OneCycleAdam
beta2: 0.99
weight_decay: 0.01
grad_clip:
type: ClipGradByGlobalNorm
clip_norm: 35
beta1:
type: OneCycleDecayWarmupMomentum
momentum_peak: 0.95
momentum_trough: 0.85
step_ratio_peak: 0.4
lr_scheduler:
type: OneCycleWarmupDecayLr
base_learning_rate: 0.0001
lr_ratio_peak: 10
lr_ratio_trough: 0.0001
step_ratio_peak: 0.4
model:
type: CenterPoint
voxelizer:
type: HardVoxelizer
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
voxel_size: [0.20, 0.20, 8]
max_num_points_in_voxel: 20
max_num_voxels: [30000, 60000]
voxel_encoder:
type: PillarFeatureNet
in_channels: 4
feat_channels: [64, 64]
with_distance: False
max_num_points_in_voxel: 20
voxel_size: [0.20, 0.20, 8]
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
legacy: False
middle_encoder:
type: PointPillarsScatter
in_channels: 64
voxel_size: [0.20, 0.20, 8]
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
backbone:
type: SecondBackbone
in_channels: 64
out_channels: [64, 128, 256]
layer_nums: [3, 5, 5]
downsample_strides: [2, 2, 2]
neck:
type: SecondFPN
in_channels: [64, 128, 256]
out_channels: [128, 128, 128]
upsample_strides: [0.5, 1, 2]
use_conv_for_no_stride: True
bbox_head:
type: CenterHead
in_channels: 384
tasks:
- num_class: 1
class_names: ["smallMot"]
- num_class: 1
class_names: ["bigMot"]
- num_class: 1
class_names: ['nonMot']
- num_class: 1
class_names: ['pedestrian']
- num_class: 1
class_names: ['TrafficCone']
weight: 0.25
code_weights: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] # [x, y, z, w, h, l, sin(angle), cos(angle)] weight in loc loss
common_heads:
reg: [2, 2]
height: [1, 2]
dim: [3, 2]
rot: [2, 2]
test_cfg:
post_center_limit_range: [-90., -90., -10., 90., 90., 10.]
nms:
nms_pre_max_size: 1000
nms_post_max_size: 83
nms_iou_threshold: 0.2
score_threshold: 0.5
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
down_ratio: 4
voxel_size: [0.20, 0.20, 8]
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/centerpoint/centerpoint_voxels_0075voxel_nuscenes_10sweep.yml
|
batch_size: 4
epochs: 20
train_dataset:
type: NuscenesPCDataset
dataset_root: datasets/nuscenes/
transforms:
- type: LoadPointCloud
dim: 5
use_dim: 4
use_time_lag: True
sweep_remove_radius: 1
- type: SamplingDatabase
min_num_points_in_box_per_class:
car: 5
truck: 5
bus: 5
trailer: 5
construction_vehicle: 5
traffic_cone: 5
barrier: 5
motorcycle: 5
bicycle: 5
pedestrian: 5
max_num_samples_per_class:
car: 2
truck: 3
construction_vehicle: 7
bus: 4
trailer: 6
barrier: 2
motorcycle: 6
bicycle: 6
pedestrian: 2
traffic_cone: 2
database_anno_path: datasets/nuscenes/gt_database_train_nsweeps10_withvelo/anno_info_train_nsweeps10_withvelo.pkl
database_root: datasets/nuscenes
class_names: ["car", "truck", "construction_vehicle", "bus", "trailer", "barrier", "motorcycle", "bicycle", "pedestrian", "traffic_cone"]
- type: RandomVerticalFlip
- type: RandomHorizontalFlip
- type: GlobalRotate
min_rot: -0.78539816
max_rot: 0.78539816
- type: GlobalScale
min_scale: 0.9
max_scale: 1.1
- type: GlobalTranslate
translation_std: [0.5, 0.5, 0.5]
- type: ShufflePoint
- type: FilterBBoxOutsideRange
point_cloud_range: [-54, -54, -5.0, 54, 54, 3.0]
- type: Gt2CenterPointTarget
tasks:
- num_class: 1
class_names: ["car"]
- num_class: 2
class_names: ["truck", "construction_vehicle"]
- num_class: 2
class_names: ["bus", "trailer"]
- num_class: 1
class_names: ["barrier"]
- num_class: 2
class_names: ["motorcycle", "bicycle"]
- num_class: 2
class_names: ["pedestrian", "traffic_cone"]
down_ratio: 8
point_cloud_range: [-54, -54, -5.0, 54, 54, 3.0]
voxel_size: [0.075, 0.075, 0.2]
gaussian_overlap: 0.1
max_objs: 500
min_radius: 2
mode: train
max_sweeps: 10
class_balanced_sampling: True
class_names: ["car", "truck", "construction_vehicle", "bus", "trailer", "barrier", "motorcycle", "bicycle", "pedestrian", "traffic_cone"]
val_dataset:
type: NuscenesPCDataset
dataset_root: datasets/nuscenes/
transforms:
- type: LoadPointCloud
dim: 5
use_dim: 4
use_time_lag: True
sweep_remove_radius: 1
mode: val
max_sweeps: 10
class_names: ["car", "truck", "construction_vehicle", "bus", "trailer", "barrier", "motorcycle", "bicycle", "pedestrian", "traffic_cone"]
optimizer:
type: OneCycleAdam
beta2: 0.99
weight_decay: 0.01
grad_clip:
type: ClipGradByGlobalNorm
clip_norm: 35
beta1:
type: OneCycleDecayWarmupMomentum
momentum_peak: 0.95
momentum_trough: 0.85
step_ratio_peak: 0.4
lr_scheduler:
type: OneCycleWarmupDecayLr
base_learning_rate: 0.0001
lr_ratio_peak: 10
lr_ratio_trough: 0.0001
step_ratio_peak: 0.4
model:
type: CenterPoint
voxelizer:
type: HardVoxelizer
point_cloud_range: [-54, -54, -5.0, 54, 54, 3.0]
voxel_size: [0.075, 0.075, 0.2]
max_num_points_in_voxel: 10
max_num_voxels: [120000, 160000]
voxel_encoder:
type: VoxelMean
in_channels: 5
middle_encoder:
type: SparseResNet3D
in_channels: 5
voxel_size: [0.075, 0.075, 0.2]
point_cloud_range: [-54, -54, -5.0, 54, 54, 3.0]
backbone:
type: SecondBackbone
in_channels: 256
out_channels: [128, 256]
layer_nums: [5, 5]
downsample_strides: [1, 2]
neck:
type: SecondFPN
in_channels: [128, 256]
out_channels: [256, 256]
upsample_strides: [1, 2]
use_conv_for_no_stride: True
bbox_head:
type: CenterHead
in_channels: 512 # sum([256, 256])
tasks:
- num_class: 1
class_names: ["car"]
- num_class: 2
class_names: ["truck", "construction_vehicle"]
- num_class: 2
class_names: ["bus", "trailer"]
- num_class: 1
class_names: ["barrier"]
- num_class: 2
class_names: ["motorcycle", "bicycle"]
- num_class: 2
class_names: ["pedestrian", "traffic_cone"]
weight: 0.25
code_weights: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2, 1.0, 1.0]
common_heads:
reg: [2, 2]
height: [1, 2]
dim: [3, 2]
rot: [2, 2]
vel: [2, 2]
test_cfg:
post_center_limit_range: [-61.2, -61.2, -10.0, 61.2, 61.2, 10.0]
nms:
nms_pre_max_size: 1000
nms_post_max_size: 83
nms_iou_threshold: 0.2
score_threshold: 0.1
point_cloud_range: [-54, -54, -5.0, 54, 54, 3.0]
down_ratio: 8
voxel_size: [0.075, 0.075]
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/centerpoint/centerpoint_pillars_016voxel_kitti.yml
|
batch_size: 4
epochs: 160
train_dataset:
type: KittiPCDataset
dataset_root: datasets/KITTI
transforms:
- type: LoadPointCloud
dim: 4
use_dim: 4
- type: RemoveCameraInvisiblePointsKITTI
- type: SamplingDatabase
min_num_points_in_box_per_class:
Car: 5
Cyclist: 5
Pedestrian: 5
max_num_samples_per_class:
Car: 15
Cyclist: 10
ignored_difficulty: [-1]
database_anno_path: datasets/KITTI/kitti_train_gt_database/anno_info_train.pkl
database_root: datasets/KITTI/
class_names: ["Car", "Cyclist", "Pedestrian"]
- type: RandomObjectPerturb
rotation_range: [-0.15707963267, 0.15707963267]
translation_std: [0.25, 0.25, 0.25]
max_num_attempts: 100
- type: RandomVerticalFlip
- type: GlobalRotate
min_rot: -0.78539816
max_rot: 0.78539816
- type: GlobalScale
min_scale: 0.95
max_scale: 1.05
- type: GlobalTranslate
translation_std: [0.2, 0.2, 0.2]
- type: ShufflePoint
- type: FilterBBoxOutsideRange
point_cloud_range: [0, -39.68, -3, 69.12, 39.68, 1]
- type: Gt2CenterPointTarget
tasks:
- num_class: 1
class_names: ["Car"]
- num_class: 2
class_names: ["Cyclist", "Pedestrian"]
down_ratio: 2
point_cloud_range: [0, -39.68, -3, 69.12, 39.68, 1]
voxel_size: [0.16, 0.16, 4]
gaussian_overlap: 0.1
max_objs: 500
min_radius: 2
mode: train
class_balanced_sampling: False
class_names: ["Car", "Cyclist", "Pedestrian"]
val_dataset:
type: KittiPCDataset
dataset_root: datasets/KITTI
transforms:
- type: LoadPointCloud
dim: 4
use_dim: 4
- type: RemoveCameraInvisiblePointsKITTI
mode: val
class_names: ["Car", "Cyclist", "Pedestrian"]
optimizer:
type: OneCycleAdam
beta2: 0.99
weight_decay: 0.01
grad_clip:
type: ClipGradByGlobalNorm
clip_norm: 35
beta1:
type: OneCycleDecayWarmupMomentum
momentum_peak: 0.95
momentum_trough: 0.85
step_ratio_peak: 0.4
lr_scheduler:
type: OneCycleWarmupDecayLr
base_learning_rate: 0.001
lr_ratio_peak: 10
lr_ratio_trough: 0.0001
step_ratio_peak: 0.4
model:
type: CenterPoint
voxelizer:
type: HardVoxelizer
point_cloud_range: [0, -39.68, -3, 69.12, 39.68, 1]
voxel_size: [0.16, 0.16, 4]
max_num_points_in_voxel: 100
max_num_voxels: [12000, 40000]
voxel_encoder:
type: PillarFeatureNet
in_channels: 4
feat_channels: [64, 64]
with_distance: False
max_num_points_in_voxel: 100
voxel_size: [0.16, 0.16, 4]
point_cloud_range: [0, -39.68, -3, 69.12, 39.68, 1]
legacy: False
middle_encoder:
type: PointPillarsScatter
in_channels: 64
voxel_size: [0.16, 0.16, 4]
point_cloud_range: [0, -39.68, -3, 69.12, 39.68, 1]
backbone:
type: SecondBackbone
in_channels: 64
out_channels: [64, 128, 256]
layer_nums: [3, 5, 5]
downsample_strides: [1, 2, 2]
neck:
type: SecondFPN
in_channels: [64, 128, 256]
out_channels: [128, 128, 128]
upsample_strides: [0.5, 1, 2]
use_conv_for_no_stride: True
bbox_head:
type: CenterHead
in_channels: 384 # sum([128, 128, 128])
tasks:
- num_class: 1
class_names: ["Car"]
- num_class: 2
class_names: ["Cyclist", "Pedestrian"]
weight: 2.5 # loc_loss weight
code_weights: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] # [x, y, z, w, h, l, sin(angle), cos(angle)] weight in loc loss
common_heads:
reg: [2, 2] # classes, num_conv
height: [1, 2]
dim: [3, 2]
rot: [2, 2]
test_cfg:
post_center_limit_range: [-10., -50., -10., 80., 50., 10.]
nms:
nms_pre_max_size: 1000
nms_post_max_size: 83
nms_iou_threshold: 0.1
score_threshold: 0.1
point_cloud_range: [0, -39.68, -3, 69.12, 39.68, 1]
down_ratio: 2
voxel_size: [0.16, 0.16, 4]
export:
transforms:
- type: LoadPointCloud
dim: 4
use_dim: 4
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/iassd/iassd_waymo.yaml
|
batch_size: 4 #on 4 gpus, total bs = 16
epochs: 30
train_dataset:
type: WaymoPCDataset
dataset_root: datasets/waymo
class_names: [ "Vehicle", "Pedestrian", "Cyclist" ]
sampled_interval: 5
transforms:
- type: SamplingDatabase
min_num_points_in_box_per_class:
Vehicle: 5
Pedestrian: 5
Cyclist: 5
max_num_samples_per_class:
Vehicle: 15
Pedestrian: 10
Cyclist: 10
ignored_difficulty: [ -1 ]
database_anno_path: datasets/waymo/waymo_train_gt_database/waymo_train_gt_database_infos.pkl
database_root: datasets/waymo
class_names: [ "Vehicle", "Pedestrian", "Cyclist" ]
- type: RandomVerticalFlip
- type: RandomHorizontalFlip
- type: GlobalRotate
min_rot: -0.78539816
max_rot: 0.78539816
- type: GlobalScale
min_scale: 0.95
max_scale: 1.05
- type: FilterBBoxOutsideRange
point_cloud_range: &point_cloud_range [-75.2, -75.2, -2, 75.2, 75.2, 4]
- type: FilterPointOutsideRange
point_cloud_range: *point_cloud_range
- type: ShufflePoint
- type: SamplePointByVoxels
voxel_size: [0.1, 0.1, 0.15]
max_points_per_voxel: 5
max_num_of_voxels: 80000
num_points: 65536
point_cloud_range: *point_cloud_range
- type: ConvertBoxFormat
mode: train
val_dataset:
type: WaymoPCDataset
dataset_root: datasets/waymo
class_names: [ "Vehicle", "Pedestrian", "Cyclist" ]
sampled_interval: 1
transforms:
- type: FilterPointOutsideRange
point_cloud_range: *point_cloud_range
- type: SamplePointByVoxels
voxel_size: [0.1, 0.1, 0.15]
max_points_per_voxel: 5
max_num_of_voxels: 90000
num_points: 65536
point_cloud_range: *point_cloud_range
mode: val
model:
type: IASSD
backbone:
type: IASSD_Backbone
npoint_list: [16384, 4096, 2048, 1024, null, 1024]
sample_method_list: &sample_method_list ["D-FPS", "D-FPS", "ctr_aware", "ctr_aware", null, null]
radius_list: [[0.2,0.8], [0.8,1.6], [1.6,4.8], [], [], [4.8, 6.4]]
nsample_list: [[16,32], [16,32], [16,32], [], [], [16, 32]]
mlps: [[[16,16,32], [32,32,64]],
[[64,64,128], [64,96,128]],
[[128,128,256], [128,256,256]],
[],
[128],
[[256,256,512], [256,512,1024]]]
layer_types: ["SA_Layer", "SA_Layer", "SA_Layer", "SA_Layer", "Vote_Layer", "SA_Layer"]
dilated_group: [False, False, False, False, False, False]
aggregation_mlps: [[64], [128], [256], [256], [], [512]]
confidence_mlps: [[], [128], [256], [], [], []]
layer_input: [0, 1, 2, 3, 4, 3]
ctr_index: [-1, -1, -1, -1, -1, 5]
max_translate_range: [3., 3., 2.]
input_channel: 5
num_classes: 3
head:
type: IASSD_Head
input_channel: 512 #last aggregation mlp
cls_fc: [256, 256]
reg_fc: [256, 256]
num_classes: 3
target_config:
gt_extra_width: [0.2, 0.2, 0.2]
extra_width: [1.0, 1.0, 1.0]
box_coder_config: {
'angle_bin_num': 12,
'use_mean_size': True,
'mean_size': [
[4.7, 2.1, 1.7],
[0.91, 0.86, 1.73],
[1.78, 0.84, 1.78]
]
}
loss_config:
loss_cls: WeightedClassificationLoss
loss_reg: WeightedSmoothL1Loss
loss_ins: WeightedClassificationLoss
sample_method_list: *sample_method_list
corner_loss_regularization: True
centerness_regularization: True
centerness_regularization_sa: True
loss_weight: {
'ins_aware_weight': [0, 1.0, 1.0],
'vote_weight': 1.0,
'point_cls_weight': 1.0,
'point_box_weight': 1.0,
'corner_weight': 1.0,
'code_weights': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
'dir_weight': 0.2
}
post_process_cfg:
score_thresh: 0.1
nms_config:
nms_thresh: 0.1
nms_pre_maxsize: 4096
nms_post_maxsize: 500
optimizer:
type: AdamWOnecycle
clip_grad_by_norm: 10.0
learning_rate: 0.01
beta1: 0.9
beta2: 0.99
weight_decay: 0.01
lr_scheduler:
type: OneCycle
total_step: 59280 #change to your correspondent total iters
lr_max: 0.01
moms: [0.95, 0.85]
div_factor: 10
pct_start: 0.4
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/iassd/iassd_kitti.yaml
|
batch_size: 8 #on 4 gpus, total bs = 32
epochs: 80
train_dataset:
type: KittiPCDataset
dataset_root: datasets/KITTI
class_names: [ "Car", "Pedestrian", "Cyclist"]
use_road_plane: True
transforms:
- type: LoadPointCloud
dim: 4
use_dim: 4
- type: RemoveCameraInvisiblePointsKITTIV2
- type: SamplingDatabase
min_num_points_in_box_per_class:
Car: 5
Pedestrian: 5
Cyclist: 5
max_num_samples_per_class:
Car: 20
Pedestrian: 15
Cyclist: 15
ignored_difficulty: [ -1 ]
database_anno_path: datasets/KITTI/kitti_train_gt_database/anno_info_train.pkl
database_root: datasets/KITTI
class_names: [ "Car", "Pedestrian", "Cyclist" ]
- type: RandomVerticalFlip
- type: GlobalRotate
min_rot: -0.78539816
max_rot: 0.78539816
- type: GlobalScale
min_scale: 0.95
max_scale: 1.05
- type: FilterBBoxOutsideRange
point_cloud_range: &point_cloud_range [ 0, -40, -3, 70.4, 40, 1 ]
- type: FilterPointOutsideRange
point_cloud_range: *point_cloud_range
- type: SamplePoint
num_points: 16384
- type: ShufflePoint
- type: ConvertBoxFormat
mode: train
val_dataset:
type: KittiPCDataset
dataset_root: datasets/KITTI
class_names: [ "Car", "Pedestrian", "Cyclist"]
transforms:
- type: LoadPointCloud
dim: 4
use_dim: 4
- type: RemoveCameraInvisiblePointsKITTIV2
- type: FilterPointOutsideRange
point_cloud_range: *point_cloud_range
- type: SamplePoint
num_points: 16384
mode: val
model:
type: IASSD
backbone:
type: IASSD_Backbone
npoint_list: [4096, 1024, 512, 256, null, 256]
sample_method_list: &sample_method_list ["D-FPS", "D-FPS", "ctr_aware", "ctr_aware", null, null]
radius_list: [[0.2,0.8], [0.8,1.6], [1.6,4.8], [], [], [4.8, 6.4]]
nsample_list: [[16,32], [16,32], [16,32], [], [], [16, 32]]
mlps: [[[16,16,32], [32,32,64]],
[[64,64,128], [64,96,128]],
[[128,128,256], [128,256,256]],
[],
[128],
[[256,256,512], [256,512,1024]]]
layer_types: ["SA_Layer", "SA_Layer", "SA_Layer", "SA_Layer", "Vote_Layer", "SA_Layer"]
dilated_group: [False, False, False, False, False, False]
aggregation_mlps: [[64], [128], [256], [256], [], [512]]
confidence_mlps: [[], [128], [256], [], [], []]
layer_input: [0, 1, 2, 3, 4, 3]
ctr_index: [-1, -1, -1, -1, -1, 5]
max_translate_range: [3., 3., 2.]
input_channel: 4
num_classes: 3
head:
type: IASSD_Head
input_channel: 512
cls_fc: [256, 256]
reg_fc: [256, 256]
num_classes: 3
target_config:
gt_extra_width: [0.2, 0.2, 0.2]
extra_width: [1.0, 1.0, 1.0]
box_coder_config: {
'angle_bin_num': 12,
'use_mean_size': True,
'mean_size': [
[3.9, 1.6, 1.56],
[0.8, 0.6, 1.73],
[1.76, 0.6, 1.73]
]
}
loss_config:
loss_cls: WeightedClassificationLoss
loss_reg: WeightedSmoothL1Loss
loss_ins: WeightedClassificationLoss
sample_method_list: *sample_method_list
corner_loss_regularization: True
centerness_regularization: True
centerness_regularization_sa: True
loss_weight: {
'ins_aware_weight': [0, 1.0, 1.0],
'vote_weight': 1.0,
'point_cls_weight': 1.0,
'point_box_weight': 1.0,
'corner_weight': 1.0,
'code_weights': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
'dir_weight': 0.2
}
post_process_cfg:
score_thresh: 0.1
nms_config:
nms_thresh: 0.01
nms_pre_maxsize: 4096
nms_post_maxsize: 500
optimizer:
type: AdamWOnecycle
clip_grad_by_norm: 10.0
learning_rate: 0.01
beta1: 0.9
beta2: 0.99
weight_decay: 0.01
lr_scheduler:
type: OneCycle
total_step: 9280 #change to your correspondent total iter
lr_max: 0.01
moms: [0.95, 0.85]
div_factor: 10
pct_start: 0.4
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/caddn/caddn_ocrnet_hrnet_w18_kitti.yml
|
batch_size: 4
iters: 74240 # 928*80
sync_bn: true
train_dataset:
type: KittiDepthDataset
dataset_root: data/kitti
point_cloud_range: [2, -30.08, -3.0, 46.8, 30.08, 1.0]
depth_downsample_factor: 4
voxel_size: [0.16, 0.16, 0.16]
class_names: ['Car', 'Pedestrian', 'Cyclist']
mode: train
val_dataset:
type: KittiDepthDataset
dataset_root: data/kitti
point_cloud_range: [2, -30.08, -3.0, 46.8, 30.08, 1.0]
depth_downsample_factor: 4
voxel_size: [0.16, 0.16, 0.16]
class_names: ['Car', 'Pedestrian', 'Cyclist']
mode: val
optimizer:
type: AdamWOnecycle
clip_grad_by_norm: 10.0
beta1: 0.95
beta2: 0.99
weight_decay: 0.01
lr_scheduler:
type: OneCycle
total_step: 74240 # 928 * 80
lr_max: 0.001
moms: [0.95, 0.85]
div_factor: 10
pct_start: 0.4
model:
type: CADDN
pretrained: "https://bj.bcebos.com/paddle3d/pretrained/hrnet18.pdparams"
backbone_3d:
type: HRNet_W18
class_head:
type: OCRHead
num_classes: 81
backbone_indices: [1]
in_channels: [270]
align_corners: False
bev_cfg:
layer_nums: [10, 10, 10]
layer_strides: [2, 2, 2]
num_filters: [64, 128, 256]
upsample_strides: [1, 2, 4]
num_upsample_filters: [128, 128, 128]
input_channels: 64
dense_head:
type: AnchorHeadSingle
model_cfg:
class_agnostic: False
use_direction_classifier: True
dir_offset: 0.78539
dir_limit_offset: 0.0
input_channels: 384
point_cloud_range: [2, -30.08, -3.0, 46.8, 30.08, 1.0]
class_names: ['Car', 'Pedestrian', 'Cyclist']
predict_boxes_when_training: False
voxel_size: [0.16, 0.16, 0.16]
anchor_generator_cfg: [
{
'class_name': 'Car',
'anchor_sizes': [[3.9, 1.6, 1.56]],
'anchor_rotations': [0, 1.57],
'anchor_bottom_heights': [-1.78],
'align_center': False,
'feature_map_stride': 2,
'matched_threshold': 0.6,
'unmatched_threshold': 0.45
},
{
'class_name': 'Pedestrian',
'anchor_sizes': [[0.8, 0.6, 1.73]],
'anchor_rotations': [0, 1.57],
'anchor_bottom_heights': [-0.6],
'align_center': False,
'feature_map_stride': 2,
'matched_threshold': 0.5,
'unmatched_threshold': 0.35
},
{
'class_name': 'Cyclist',
'anchor_sizes': [[1.76, 0.6, 1.73]],
'anchor_rotations': [0, 1.57],
'anchor_bottom_heights': [-0.6],
'align_center': False,
'feature_map_stride': 2,
'matched_threshold': 0.5,
'unmatched_threshold': 0.35
}
]
anchor_target_cfg:
pos_fraction: -1.0
sample_size: 512
norm_by_num_examples: False
match_height: False
num_dir_bins: 2
loss_weights:
cls_weight: 1.0
loc_weight: 2.0
dir_weight: 0.2
code_weights: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
map_to_bev_cfg:
in_channels: 1600
out_channels: 64
kernel_size: 1
stride: 1
bias_attr: False
padding: 0
ffe_cfg:
channel_reduce_cfg:
in_channels: 256
out_channels: 64
kernel_size: 1
stride: 1
bias_attr: False
padding: 0
downsample_factor: 4
ddn_loss:
weight: 3.0
alpha: 0.25
beta: 2.0
fg_weight: 13
bg_weight: 1
f2v_cfg:
pc_range: [2, -30.08, -3.0, 46.8, 30.08, 1.0]
voxel_size: [0.16, 0.16, 0.16]
sample_cfg:
mode: bilinear
padding_mode: zeros
disc_cfg:
mode: LID
num_bins: 80
depth_min: 2.0
depth_max: 46.8
post_process_cfg:
recall_thresh_list: [0.3, 0.5, 0.7]
score_thresh: 0.1
eval_metric: kitti
nms_config:
nms_thresh: 0.01
nms_pre_maxsize: 4096
nms_post_maxsize: 500
export:
transforms:
- type: LoadImage
to_chw: False
to_rgb: True
- type: Normalize
mean: [0, 0, 0]
std: [1, 1, 1]
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/caddn/README.md
|
# CADDN: Categorical Depth Distribution Network for Monocular 3D Object Detection
## 目录
* [引用](#1)
* [简介](#2)
* [训练配置](#3)
* [使用教程](#4)
* [数据准备](#5)
* [训练](#6)
* [评估](#7)
* [导出 & 部署](#8)
* [自定义数据集](#9)
* [Apollo使用教程](#10)
## <h2 id="1">引用</h2>
> Cody Reading, Ali Harakeh, Julia Chae, Steven L. Waslander. "Categorical Depth Distribution Network for Monocular 3D Object Detection." Computer Vision and Pattern Recognition (CVPR), 2021.
## <h2 id="2">简介</h2>
单目3D物体检测是自动驾驶汽车的关键问题,与典型的多传感器系统相比,单目3D检测提供了一种配置简单的解决方案。单目3D检测的主要挑战在于准确预测物体深度,由于缺乏直接的距离测量,必须从物体和场景线索中推断出物体深度。目前一些方法试图通过直接估计深度来辅助3D检测,但由于深度不准确,性能有限。而CaDDN模型提出了解决方案,它使用每个像素的预测分类深度分布,将丰富的上下文特征信息投射到3D空间中适当的深度区间。然后,CaDDN模型使用计算效率高的鸟瞰投影和单级检测器来生成最终的输出包围框。同时CaDDN模型被设计为一种完全可微的端到端联合深度估计和目标检测方法。在模型发布时,CaDDN在Kitti 3D对象检测基准上获得了已发表的单目方法中的第一名,到目前,CaDDN模型的指标仍具有竞争力。
## <h2 id="3">训练配置</h2>
我们提供了在开源数据集上的训练配置与结果,详见[CADDN训练配置](../../../configs/caddn)
## <h2 id="4">模型库</h2>
| 模型 | 骨干网络 | 3DmAP Mod. | Car<br>Easy Mod. Hard | Pedestrian<br>Easy Mod. Hard | Cyclist<br>Easy Mod. Hard | 模型下载 | 配置文件 | 日志 |
| :--: | :-------: | :--------: | :-------------------: | :--------------------------: | :-----------------------: | :------: | :-----: | :--: |
|CADDN | ocrnet_hrnet_w18 | 7.86 | 22.50 15.78 13.95 | 10.09 7.12 5.57 | 1.27 0.69 0.69 | [model](https://paddle3d.bj.bcebos.com/models/caddn/caddn_ocrnet_hrnet_w18_kitti/model.pdparams) | [config](../../../configs/caddn/caddn_ocrnet_hrnet_w18_kitti.yml) | [log](https://paddle3d.bj.bcebos.com/models/caddn/caddn_ocrnet_hrnet_w18_kitti/train.log) \| [vdl](https://www.paddlepaddle.org.cn/paddle/visualdl/service/app/scalar?id=36ff3161e13f37bb318fc2d78e679983) |
|CADDN | deeplabv3p_resnet101_os8 | 7.21 | 21.45 14.36 12.57 | 9.15 6.53 5.12 | 1.82 0.74 0.75 | [model](https://paddle3d.bj.bcebos.com/models/caddn/caddn_deeplabv3p_resnet101_os8_kitti/model.pdparams) | [config](../../../configs/caddn/caddn_deeplabv3p_resnet101_os8_kitti.yml) | [log](https://paddle3d.bj.bcebos.com/models/caddn/caddn_deeplabv3p_resnet101_os8_kitti/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=a56f45325b80ce7f7e29f185efaed28c) |
## <h2 id="5">使用教程</h2>
## <h2 id="6">数据准备</h2>
请下载KITTI单目3D检测数据集,数据集信息请参考[KITTI官网](http://www.cvlibs.net/datasets/kitti/)
*注意:KITTI官网只区分了训练集和测试集,我们遵循业界的普遍做法,将7481个训练集样本,进一步划分为3712个训练集样本和3769个验证集样本*
下载好后的数据集目录结构
```
kitti
├── gt_database
├── ImageSets
| ├── test.txt
| ├── train.txt
| └── val.txt
├── testing
| ├── calib
| └── image_2
├── training
| ├── calib
| ├── depth_2
| ├── image_2
| └── label_2
├── kitti_infos_test.pkl
├── kitti_infos_train.pkl
├── kitti_infos_val.pkl
...
```
将kitti数据软链至data/kitti,或更改配置文件数据集路径。
备注:准备好kitti数据集后,上述的.pkl是通过下列命令生成
```
python tools/create_caddn_kitti_infos.py
```
| 参数 | 说明 |
| -- | -- |
| dataset_root | **[选填]** kitti数据集路径,默认data/kitti |
| save_dir | **[选填]** 生成的.pkl文件保存路径,默认data/kitti |
## <h2 id="7">训练</h2>
运行以下命令,进行单卡训练
```
python -u tools/train.py --config configs/caddn/caddn_deeplabv3p_resnet101_os8_kitti.yml
```
运行以下命令,进行多卡训练
```
export CUDA_VISIBLE_DEVICES=0,1,2,3
fleetrun tools/train.py --config configs/caddn/caddn_deeplabv3p_resnet101_os8_kitti.yml
```
训练中断,可以通过`--resume`进行继续训练。
## <h2 id="8">评估</h2>
运行以下命令,进行评估
```
python tools/evaluate.py --config configs/caddn/caddn_deeplabv3p_resnet101_os8_kitti.yml --model pretrained_model_path
```
## <h2 id="9">导出 & 部署</h2>
### <h3 id="91">模型导出</h3>
运行以下命令,将训练时保存的动态图模型文件导出成推理引擎能够加载的静态图模型文件。
```
python tools/export.py --config configs/caddn/caddn_deeplabv3p_resnet101_os8_kitti.yml --model /path/to/model.pdparams --save_dir /path/to/output
```
| 参数 | 说明 |
| -- | -- |
| config | **[必填]** 训练配置文件所在路径 |
| model | **[必填]** 训练时保存的模型文件`model.pdparams`所在路径 |
| save_dir | **[必填]** 保存导出模型的路径,`save_dir`下将会生成三个文件:`caddn.pdiparams `、`caddn.pdiparams.info`和`caddn.pdmodel` |
提供训练好的导出模型
| 配置文件 | 下载 |
| -- | -- |
| caddn_ocrnet_hrnet_w18_kitti | [下载](https://paddle3d.bj.bcebos.com/models/caddn/caddn_ocrnet_hrnet_w18_kitti/model.zip) |
| caddn_deeplabv3p_resnet101_os8_kitti | [下载](https://paddle3d.bj.bcebos.com/models/caddn/caddn_deeplabv3p_resnet101_os8_kitti/model.zip) |
### C++部署
#### Linux系统
#### 环境依赖
- GCC >= 5.4.0
- Cmake >= 3.5.1
- Ubuntu 16.04/18.04
> 说明:本文档的部署环节在以下环境中进行过测试并通过:
测试环境:
- GCC==8.2.0
- Cmake==3.16.0
- Ubuntu 18.04
- CUDA 11.2
- cuDNN==8.1.1
- Paddle Inference==2.3.1
- TensorRT-8.2.5.1.Linux.x86_64-gnu.cuda-11.4.cudnn8.2
#### 编译步骤
**注意:目前CADDN仅支持使用GPU进行推理。**
- step 1: 进入部署代码所在路径
```
cd deploy/caddn/cpp
```
- step 2: 下载Paddle Inference C++预编译库
Paddle Inference针对**是否使用GPU**、**是否支持TensorRT**、以及**不同的CUDA/cuDNN/GCC版本**均提供已经编译好的库文件,请至[Paddle Inference C++预编译库下载列表](https://www.paddlepaddle.org.cn/inference/user_guides/download_lib.html#c)选择符合的版本。
- step 3: 修改`compile.sh`中的编译参数
主要修改编译脚本`compile.sh`中的以下参数:
| 参数 | 说明 |
| -- | -- |
| WITH_GPU | 是否使用gpu。ON或OFF, OFF表示使用CPU,默认ON|
| USE_TENSORRT | 是否使用TensorRT加速。ON或OFF,默认OFF|
| LIB_DIR | Paddle Inference C++预编译包所在路径,该路径下的内容应有:`CMakeCache.txt`、`paddle`、`third_party`和`version.txt` |
| CUDNN_LIB | cuDNN`libcudnn.so`所在路径 |
| CUDA_LIB | CUDA`libcudart.so `所在路径 |
| TENSORRT_ROOT | TensorRT所在路径。**非必须**,如果`USE_TENSORRT`设置为`ON`时,需要填写该路径,该路径下的内容应有`bin`、`lib`和`include`等|
- step 4: 开始编译
```
sh compile.sh
```
### 执行预测
**注意:目前CADDN仅支持使用GPU进行推理。**
执行命令参数说明
| 参数 | 说明 |
| -- | -- |
| model_file | 导出模型的结构文件`caddn.pdmodel`所在路径 |
| params_file | 导出模型的参数文件`caddn.pdiparams`所在路径 |
| image_file | 待预测的图像文件所在路径 |
执行命令:
```
./build/main --model_file /path/to/caddn.pdmodel --params_file /path/to/caddn.pdiparams --image_file /path/to/image.png
```
### 开启TensorRT加速预测【可选】
**注意:请根据编译步骤的step 3,修改`compile.sh`中TensorRT相关的编译参数,并重新编译。**
运行命令参数说明如下:
| 参数 | 说明 |
| -- | -- |
| model_file | 导出模型的结构文件`caddn.pdmodel`所在路径 |
| params_file | 导出模型的参数文件`caddn.pdiparams`所在路径 |
| image_file | 待预测的图像文件所在路径 |
| use_trt | 是否使用TensorRT进行加速,默认0|
| trt_precision | 当use_trt设置为1时,模型精度可设置0或1,0表示fp32, 1表示fp16。默认0 |
| trt_use_static | 当trt_use_static设置为1时,**在首次运行程序的时候会将TensorRT的优化信息进行序列化到磁盘上,下次运行时直接加载优化的序列化信息而不需要重新生成**。默认0 |
| trt_static_dir | 当trt_use_static设置为1时,保存优化信息的路径 |
| collect_shape_info | 是否收集模型动态shape信息。默认0。**只需首次运行,下次运行时直接加载生成的shape信息文件即可进行TensorRT加速推理** |
| dynamic_shape_file | 保存模型动态shape信息的文件路径。 |
* **首次运行TensorRT**,收集模型动态shape信息,并保存至`--dynamic_shape_file`指定的文件中
```
./build/main --model_file /path/to/caddn.pdmodel --params_file /path/to/caddn.pdiparams --image_file /path/to/image.png --use_trt 1 --collect_shape_info 1 --dynamic_shape_file /path/to/shape_info.txt
```
* 加载`--dynamic_shape_file`指定的模型动态shape信息,使用FP32精度进行预测
```
./build/main --model_file /path/to/caddn.pdmodel --params_file /path/to/caddn.pdiparams --image_file /path/to/image.png --use_trt 1 --dynamic_shape_file /path/to/shape_info.txt
```
* 加载`--dynamic_shape_file`指定的模型动态shape信息,使用FP16精度进行预测
```
./build/main --model_file /path/to/caddn.pdmodel --params_file /path/to/caddn.pdiparams --image_file /path/to/image.png --use_trt 1 --dynamic_shape_file /path/to/shape_info.txt --trt_precision 1
```
* 如果觉得每次运行时模型加载的时间过长,可以设置`trt_use_static`和`trt_static_dir`,首次运行时将TensorRT的优化信息保存在硬盘中,后续直接反序列化优化信息即可
```
./build/main --model_file /path/to/caddn.pdmodel --params_file /path/to/caddn.pdiparams --image_file /path/to/image.png --use_trt 1 --collect_shape_info 1 --dynamic_shape_file /path/to/shape_info.txt --trt_precision 1 --trt_use_static 1 --trt_static_dir /path/to/OptimCacheDir
```
### Python部署
进入部署代码所在路径
```
cd deploy/caddn/python
```
**注意:目前CADDN仅支持使用GPU进行推理。**
命令参数说明如下:
| 参数 | 说明 |
| -- | -- |
| model_file | 导出模型的结构文件`caddn.pdmodel`所在路径 |
| params_file | 导出模型的参数文件`caddn.pdiparams`所在路径 |
| img_path | 待预测的图像文件所在路径 |
| use_trt | 是否使用TensorRT进行加速,默认0|
| trt_precision | 当use_trt设置为1时,模型精度可设置0或1,0表示fp32, 1表示fp16。默认0 |
| trt_use_static | 当trt_use_static设置为1时,**在首次运行程序的时候会将TensorRT的优化信息进行序列化到磁盘上,下次运行时直接加载优化的序列化信息而不需要重新生成**。默认0 |
| trt_static_dir | 当trt_use_static设置为1时,保存优化信息的路径 |
| collect_shape_info | 是否收集模型动态shape信息。默认0。**只需首次运行,后续直接加载生成的shape信息文件即可进行TensorRT加速推理** |
| dynamic_shape_file | 保存模型动态shape信息的文件路径。 |
运行以下命令,执行预测:
```
python infer.py --model_file /path/to/caddn.pdmodel --params_file /path/to/caddn.pdiparams --img_path /path/to/image.png
```
## <h2 id="9">自定义数据集</h2>
## <h2 id="10">Apollo使用教程</h2>
基于Paddle3D训练完成的CADDN模型可以直接部署到Apollo架构中使用,请参考[教程](https://github.com/ApolloAuto/apollo/blob/master/modules/perception/README_paddle3D_CN.md)
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/caddn/caddn_deeplabv3p_resnet101_os8_kitti.yml
|
batch_size: 4
iters: 74240 # 928*80
sync_bn: true
train_dataset:
type: KittiDepthDataset
dataset_root: data/kitti
point_cloud_range: [2, -30.08, -3.0, 46.8, 30.08, 1.0]
depth_downsample_factor: 4
voxel_size: [0.16, 0.16, 0.16]
class_names: ['Car', 'Pedestrian', 'Cyclist']
mode: train
val_dataset:
type: KittiDepthDataset
dataset_root: data/kitti
point_cloud_range: [2, -30.08, -3.0, 46.8, 30.08, 1.0]
depth_downsample_factor: 4
voxel_size: [0.16, 0.16, 0.16]
class_names: ['Car', 'Pedestrian', 'Cyclist']
mode: val
optimizer:
type: AdamWOnecycle
clip_grad_by_norm: 10.0
beta1: 0.95
beta2: 0.99
weight_decay: 0.01
lr_scheduler:
type: OneCycle
total_step: 74240 # 928 * 80
lr_max: 0.001
moms: [0.95, 0.85]
div_factor: 10
pct_start: 0.4
model:
type: CADDN
pretrained: "https://bj.bcebos.com/paddle3d/pretrained/deeplabv3_resnet101.pdparams"
backbone_3d:
type: ResNet
layers: 101
return_idx: [0, 3]
class_head:
type: DeepLabV3Head
num_classes: 81
backbone_indices: [1]
backbone_channels: 2048
aspp_ratios: [1, 12, 24, 36]
aspp_out_channels: 256
align_corners: False
bev_cfg:
layer_nums: [10, 10, 10]
layer_strides: [2, 2, 2]
num_filters: [64, 128, 256]
upsample_strides: [1, 2, 4]
num_upsample_filters: [128, 128, 128]
input_channels: 64
dense_head:
type: AnchorHeadSingle
model_cfg:
class_agnostic: False
use_direction_classifier: True
dir_offset: 0.78539
dir_limit_offset: 0.0
input_channels: 384
point_cloud_range: [2, -30.08, -3.0, 46.8, 30.08, 1.0]
class_names: ['Car', 'Pedestrian', 'Cyclist']
predict_boxes_when_training: False
voxel_size: [0.16, 0.16, 0.16]
anchor_generator_cfg: [
{
'class_name': 'Car',
'anchor_sizes': [[3.9, 1.6, 1.56]],
'anchor_rotations': [0, 1.57],
'anchor_bottom_heights': [-1.78],
'align_center': False,
'feature_map_stride': 2,
'matched_threshold': 0.6,
'unmatched_threshold': 0.45
},
{
'class_name': 'Pedestrian',
'anchor_sizes': [[0.8, 0.6, 1.73]],
'anchor_rotations': [0, 1.57],
'anchor_bottom_heights': [-0.6],
'align_center': False,
'feature_map_stride': 2,
'matched_threshold': 0.5,
'unmatched_threshold': 0.35
},
{
'class_name': 'Cyclist',
'anchor_sizes': [[1.76, 0.6, 1.73]],
'anchor_rotations': [0, 1.57],
'anchor_bottom_heights': [-0.6],
'align_center': False,
'feature_map_stride': 2,
'matched_threshold': 0.5,
'unmatched_threshold': 0.35
}
]
anchor_target_cfg:
pos_fraction: -1.0
sample_size: 512
norm_by_num_examples: False
match_height: False
num_dir_bins: 2
loss_weights:
cls_weight: 1.0
loc_weight: 2.0
dir_weight: 0.2
code_weights: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
map_to_bev_cfg:
in_channels: 1600
out_channels: 64
kernel_size: 1
stride: 1
bias_attr: False
padding: 0
ffe_cfg:
channel_reduce_cfg:
in_channels: 256
out_channels: 64
kernel_size: 1
stride: 1
bias_attr: False
padding: 0
downsample_factor: 4
ddn_loss:
weight: 3.0
alpha: 0.25
beta: 2.0
fg_weight: 13
bg_weight: 1
f2v_cfg:
pc_range: [2, -30.08, -3.0, 46.8, 30.08, 1.0]
voxel_size: [0.16, 0.16, 0.16]
sample_cfg:
mode: bilinear
padding_mode: zeros
disc_cfg:
mode: LID
num_bins: 80
depth_min: 2.0
depth_max: 46.8
post_process_cfg:
recall_thresh_list: [0.3, 0.5, 0.7]
score_thresh: 0.1
eval_metric: kitti
nms_config:
nms_thresh: 0.01
nms_pre_maxsize: 4096
nms_post_maxsize: 500
export:
transforms:
- type: LoadImage
to_chw: False
to_rgb: True
- type: Normalize
mean: [0, 0, 0]
std: [1, 1, 1]
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/squeezesegv3/squeezesegv3_rangenet21_semantickitti.yml
|
_base_: '../_base_/semantickitti.yml'
batch_size: 2
iters: 179250 # 150 epochs
optimizer:
type: Momentum
momentum: 0.9
weight_decay: 0.0008
lr_scheduler:
type: LinearWarmup
learning_rate:
type: ExponentialDecay
learning_rate: 0.008
gamma: 0.999995805413129 # .995 ** (1 / steps_per_epoch)
warmup_steps: 1195 # 1 epoch
start_lr: 0.0
end_lr: 0.008
model:
type: SqueezeSegV3
num_classes: 20
backbone:
type: SACRangeNet21
in_channels: 5
encoder_dropout_prob: 0.01
decoder_dropout_prob: 0.01
loss:
type: SSGLossComputation
num_classes: 20
epsilon_w: 0.001
ignore_index: 0
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/squeezesegv3/README.md
|
# SqueezeSegV3: Spatially-Adaptive Convolution for Efficient Point-Cloud Segmentation
## 目录
* [引用](#h2-id1h2)
* [简介](#h2-id2h2)
* [模型库](#h2-id3h2)
* [训练配置](#h2-id4h2)
* [使用教程](#h2-id5h2)
* [数据准备](#h3-id51h3)
* [训练](#h3-id52h3)
* [评估](#h3-id53h3)
* [模型导出](#h3-id54h3)
* [模型部署](#h3-id55h3)
## <h2 id="1">引用</h2>
> Xu, Chenfeng, Bichen Wu, Zining Wang, Wei Zhan, Peter Vajda, Kurt Keutzer, and Masayoshi Tomizuka. 'SqueezeSegV3: Spatially-Adaptive Convolution for Efficient Point-Cloud Segmentation'. CoRR abs/2004.01803 (2020).
## <h2 id="2">简介</h2>
SqueezeSegV3是一个点云语义分割模型。该论文延续了SqueezeSeg系列将三维空间点云投影至二维空间进行特征提取的思想,并在RangeNet++模型结构的
基础上创新性地提出并应用了空间自适应卷积(Spatially-Adaptive Convolution)。
## <h2 id="3">模型库</h2>
- SqueezeSegV3在SemanticKITTI Val set数据集上的表现
| 模型 | mIoU (Point Cloud / Range View) | mAcc (Point Cloud / Range View) | 模型下载 | 配置文件 |
|:---------------:|:-------------------------------:|:-------------------------------:|:----------------------------------------------------------------------------------------------------------------:|:---------------------------------------------------------------------------------:|
| SqueezeSegV3-21 | 46.3 / 51.2 | 87.3 / 90.0 | [model](https://bj.bcebos.com/paddle3d/models/squeezesegv3/squeezesegv3_rangenet21_semantickitti/model.pdparams) | [config](../../../configs/squeezesegv3/squeezesegv3_rangenet21_semantickitti.yml) |
| SqueezeSegV3-53 | 48.8 / 54.2 | 88.4 / 91.2 | [model](https://bj.bcebos.com/paddle3d/models/squeezesegv3/squeezesegv3_rangenet53_semantickitti/model.pdparams) | [config](../../../configs/squeezesegv3/squeezesegv3_rangenet53_semantickitti.yml) |
## <h2 id="4">训练配置</h2>
我们提供了在开源数据集上的训练配置与结果,详见[SqueezeSegV3训练配置](../../../configs/squeezesegv3)。
## <h2 id="5">使用教程</h2>
### <h3 id="51">数据准备</h3>
1. 数据格式
SqueezeSegV3模型目前仅适配[SemanticKITTI](http://semantic-kitti.org/dataset.html)格式的数据集。需将数据集放置于
`Paddle3D/datasets/SemanticKITTI`目录下,或在[配置文件](../../../configs/_base_/semantickitti.yml)中指定数据集路径。数据集文件结构如下:
```
└── Paddle3D/datasets/SemanticKITTI
    ├── sequences
├── 00
├── velodyne
├── 000000.bin
├── ...
├── labels
├── 000000.label
├── ...
├── poses.txt
```
2. 数据划分
SemanticKITTI数据集共包含`00`至`21`共22个序列,其中官方默认的数据集划分为:
- 训练集:00, 01, 02, 03, 04, 05, 06, 07, 09, 10
- 验证集:08
- 测试集:11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21
如需使用自己的划分,可在[配置文件](../../../configs/_base_/semantickitti.yml)中指定。
### <h3 id="52">训练</h3>
位于`Paddle3D/`目录下,执行:
```shell
python -m paddle.distributed.launch --gpus 0,1,2,3,4,5,6,7 \
tools/train.py \
--config configs/squeezesegv3/squeezesegv3_rangenet53_semantickitti.yml \
--save_interval 1195 \
--keep_checkpoint_max 50 \
--save_dir outputs/squeezesegv3/rangenet21_semantickitti \
--do_eval
```
训练脚本支持设置如下参数:
| 参数名 | 用途 | 是否必选项 | 默认值 |
|:--------------------|:-------------------------------|:------|:---------|
| gpus | 使用的GPU编号 | 是 | - |
| config | 配置文件 | 是 | - |
| save_dir | 模型和visualdl日志文件的保存根路径 | 否 | output |
| num_workers | 用于异步读取数据的进程数量, 大于等于1时开启子进程读取数据 | 否 | 0 |
| save_interval | 模型保存的间隔步数 | 否 | 1000 |
| do_eval | 是否在保存模型时进行评估 | 否 | 否 |
| log_interval | 打印日志的间隔步数 | 否 | 10 |
| keep_checkpoint_max | 最新模型保存个数 | 否 | 5 |
| resume | 是否从断点恢复训练 | 否 | 否 |
| batch_size | mini-batch大小(每张GPU) | 否 | 在配置文件中指定 |
| iters | 训练轮数 | 否 | 在配置文件中指定 |
| learning_rate | 学习率 | 否 | 在配置文件中指定 |
| seed | 固定随机种子 | 否 | None |
### <h3 id="53">评估</h3>
位于`Paddle3D/`目录下,执行:
```shell
python tools/evaluate.py \
--config configs/squeezesegv3/squeezesegv3_rangenet53_semantickitti.yml \
--model /path/to/model.pdparams
```
评估脚本支持设置如下参数:
| 参数名 | 用途 | 是否必选项 | 默认值 |
|:--------------------|:-------------------------------|:------|:---------|
| config | 配置文件 | 是 | - |
| model | 待评估模型路径 | 是 | - |
| num_workers | 用于异步读取数据的进程数量, 大于等于1时开启子进程读取数据 | 否 | 0 |
| batch_size | mini-batch大小 | 否 | 在配置文件中指定 |
### <h3 id="54">模型导出</h3>
运行以下命令,将训练时保存的动态图模型文件导出成推理引擎能够加载的静态图模型文件。
```shell
python tools/export.py \
--config configs/squeezesegv3/squeezesegv3_rangenet53_semantickitti.yml \
--model /path/to/model.pdparams \
--input_shape 64 1024 \
--save_dir /path/to/output
```
模型导出脚本支持设置如下参数:
| 参数名 | 用途 | 是否必选项 | 默认值 |
|:------------|:-------------------------------------------------------------------------------------------------------------|:------|:---------|
| config | 配置文件 | 是 | - |
| model | 待导出模型参数`model.pdparams`路径 | 是 | - |
| input_shape | 指定模型的输入尺寸,支持`N, C, H, W`或`H, W`格式 | 是 | - |
| save_dir | 保存导出模型的路径,`save_dir`下将会生成三个文件:`squeezesegv3.pdiparams `、`squeezesegv3.pdiparams.info`和`squeezesegv3.pdmodel` | 否 | `deploy` |
### <h3 id="55">模型部署</h3>
#### C++部署
Coming soon...
#### Python部署
命令参数说明如下:
| 参数 | 说明 |
| -- | -- |
| model_file | 导出模型的结构文件`squeezesegv3.pdmodel`所在路径 |
| params_file | 导出模型的参数文件`squeezesegv3.pdiparams`所在路径 |
| lidar_file | 待预测的点云文件所在路径 |
| img_mean | 点云投影到range-view后所成图像的均值,例如为`12.12,10.88,0.23,-1.04,0.21` |
| img_std | 点云投影到range-view后所成图像的方差,例如为`12.32,11.47,6.91,0.86,0.16` |
| use_trt | 是否使用TensorRT进行加速,默认0|
| trt_precision | 当use_trt设置为1时,模型精度可设置0或1,0表示fp32, 1表示fp16。默认0 |
| trt_use_static | 当trt_use_static设置为1时,**在首次运行程序的时候会将TensorRT的优化信息进行序列化到磁盘上,下次运行时直接加载优化的序列化信息而不需要重新生成**。默认0 |
| trt_static_dir | 当trt_use_static设置为1时,保存优化信息的路径 |
运行以下命令,执行预测:
```
python infer.py --model_file /path/to/squeezesegv3.pdmodel --params_file /path/to/squeezesegv3.pdiparams --lidar_file /path/to/lidar.pcd.bin --img_mean 12.12,10.88,0.23,-1.04,0.21 --img_std 12.32,11.47,6.91,0.86,0.16
```
如果要开启TensorRT的话,请卸载掉原有的`paddlepaddle_gpu`,至[Paddle官网](https://paddleinference.paddlepaddle.org.cn/user_guides/download_lib.html#python)下载与TensorRT连编的预编译Paddle Inference安装包,选择符合本地环境CUDA/cuDNN/TensorRT版本的安装包完成安装即可。
运行以下命令,开启TensorRT加速模型预测:
```
python infer.py --model_file /path/to/squeezesegv3.pdmodel --params_file /path/to/squeezesegv3.pdiparams --lidar_file /path/to/lidar.pcd.bin --img_mean 12.12,10.88,0.23,-1.04,0.21 --img_std 12.32,11.47,6.91,0.86,0.16 --use_trt 1
```
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/squeezesegv3/squeezesegv3_rangenet53_semantickitti.yml
|
_base_: '../_base_/semantickitti.yml'
batch_size: 1
iters: 179250 # 150 epochs
optimizer:
type: Momentum
momentum: 0.9
weight_decay: 0.0008
lr_scheduler:
type: LinearWarmup
learning_rate:
type: ExponentialDecay
learning_rate: 0.004
gamma: 0.999995805413129 # .995 ** (1 / steps_per_epoch)
warmup_steps: 1195 # 1 epoch
start_lr: 0.0
end_lr: 0.004
model:
type: SqueezeSegV3
num_classes: 20
backbone:
type: SACRangeNet53
in_channels: 5
encoder_dropout_prob: 0.01
decoder_dropout_prob: 0.01
loss:
type: SSGLossComputation
num_classes: 20
epsilon_w: 0.001
ignore_index: 0
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/pv_rcnn/pv_rcnn_005voxel_kitti.yml
|
batch_size: 2
epochs: 80
train_dataset:
type: KittiPCDataset
dataset_root: datasets/KITTI
transforms:
- type: LoadPointCloud
dim: 4
use_dim: 4
- type: RemoveCameraInvisiblePointsKITTIV2
- type: SamplingDatabase
min_num_points_in_box_per_class:
Car: 5
Cyclist: 5
Pedestrian: 5
max_num_samples_per_class:
Car: 15
Cyclist: 10
Pedestrian: 10
ignored_difficulty: [-1]
database_anno_path: datasets/KITTI/kitti_train_gt_database/anno_info_train.pkl
database_root: datasets/KITTI
class_names: ["Car", "Pedestrian", "Cyclist"]
- type: RandomVerticalFlip
- type: GlobalRotate
min_rot: -0.78539816
max_rot: 0.78539816
- type: GlobalScale
min_scale: 0.95
max_scale: 1.05
- type: ShufflePoint
- type: FilterPointOutsideRange
point_cloud_range: [0, -40, -3, 70.4, 40, 1]
- type: FilterBBoxOutsideRange
point_cloud_range: [0, -40, -3, 70.4, 40, 1]
- type: Gt2PVRCNNTarget
mode: train
class_balanced_sampling: False
class_names: ["Car", "Pedestrian", "Cyclist"]
use_road_plane: True
val_dataset:
type: KittiPCDataset
dataset_root: datasets/KITTI
transforms:
- type: LoadPointCloud
dim: 4
use_dim: 4
- type: RemoveCameraInvisiblePointsKITTIV2
- type: FilterPointOutsideRange
point_cloud_range: [0, -40, -3, 70.4, 40, 1]
mode: val
class_names: ["Car", "Pedestrian", "Cyclist"]
model:
type: PVRCNN
num_class: 3
voxelizer:
type: HardVoxelizer
point_cloud_range: [0, -40, -3, 70.4, 40, 1]
voxel_size: [0.05, 0.05, 0.1]
max_num_points_in_voxel: 5
max_num_voxels: [16000, 40000]
voxel_encoder:
type: VoxelMean
in_channels: 4
middle_encoder:
type: SparseNet3D
in_channels: 4
voxel_size: [0.05, 0.05, 0.1]
point_cloud_range: [0, -40, -3, 70.4, 40, 1]
backbone:
type: SecondBackbone
in_channels: 256
out_channels: [128, 256]
layer_nums: [5, 5]
downsample_strides: [1, 2]
neck:
type: SecondFPN
in_channels: [128, 256]
out_channels: [256, 256]
upsample_strides: [1, 2]
use_conv_for_no_stride: False
dense_head:
type: AnchorHeadSingle
model_cfg:
use_direction_classifier: True
dir_offset: 0.78539
dir_limit_offset: 0.0
input_channels: 512
point_cloud_range: [0, -40, -3, 70.4, 40, 1]
class_names: ['Car', 'Pedestrian', 'Cyclist']
predict_boxes_when_training: True
voxel_size: [0.05, 0.05, 0.1]
anchor_generator_cfg: [
{
'class_name': 'Car',
'anchor_sizes': [[3.9, 1.6, 1.56]],
'anchor_rotations': [0, 1.57],
'anchor_bottom_heights': [-1.78],
'align_center': False,
'feature_map_stride': 8,
'matched_threshold': 0.6,
'unmatched_threshold': 0.45
},
{
'class_name': 'Pedestrian',
'anchor_sizes': [[0.8, 0.6, 1.73]],
'anchor_rotations': [0, 1.57],
'anchor_bottom_heights': [-0.6],
'align_center': False,
'feature_map_stride': 8,
'matched_threshold': 0.5,
'unmatched_threshold': 0.35
},
{
'class_name': 'Cyclist',
'anchor_sizes': [[1.76, 0.6, 1.73]],
'anchor_rotations': [0, 1.57],
'anchor_bottom_heights': [-0.6],
'align_center': False,
'feature_map_stride': 8,
'matched_threshold': 0.5,
'unmatched_threshold': 0.35
}
]
anchor_target_cfg:
pos_fraction: -1.0
sample_size: 512
norm_by_num_examples: False
match_height: False
num_dir_bins: 2
loss_weights:
cls_weight: 1.0
loc_weight: 2.0
dir_weight: 0.2
code_weights: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
point_encoder:
type: VoxelSetAbstraction
voxel_size: [0.05, 0.05, 0.1]
point_cloud_range: [0, -40, -3, 70.4, 40, 1]
num_bev_features: 256
num_rawpoint_features: 4
model_cfg:
point_source: 'raw_points'
num_keypoints: 2048
out_channels: 128
sample_method: 'FPS'
features_source: ['bev', 'x_conv1', 'x_conv2', 'x_conv3', 'x_conv4', 'raw_points']
sa_layer:
raw_points:
mlps: [[16, 16], [16, 16]]
pool_radius: [0.4, 0.8]
nsample: [16, 16]
x_conv1:
downsample_stride: 1
mlps: [[16, 16], [16, 16]]
pool_radius: [0.4, 0.8]
nsample: [16, 16]
x_conv2:
downsample_stride: 2
mlps: [[32, 32], [32, 32]]
pool_radius: [0.8, 1.2]
nsample: [16, 32]
x_conv3:
downsample_stride: 4
mlps: [[64, 64], [64, 64]]
pool_radius: [1.2, 2.4]
nsample: [16, 32]
x_conv4:
downsample_stride: 8
mlps: [[64, 64], [64, 64]]
pool_radius: [2.4, 4.8]
nsample: [16, 32]
point_head:
type: PointHeadSimple
num_class: 3
input_channels: 640
model_cfg:
cls_fc: [256, 256]
class_agnostic: True
use_point_features_before_fusion: True
target_config:
gt_extra_width: [0.2, 0.2, 0.2]
loss_config:
loss_weights:
point_cls_weight: 1.0
roi_head:
type: PVRCNNHead
input_channels: 128
num_class: 1
model_cfg:
class_agnostic: True
voxel_size: [0.05, 0.05, 0.1]
point_cloud_range: [0, -40, -3, 70.4, 40, 1]
shared_fc: [256, 256]
cls_fc: [256, 256]
reg_fc: [256, 256]
dp_ratio: 0.3
nms_config:
train:
nms_type: nms_gpu
multi_class_nms: False
nms_pre_maxsize: 9000
nms_post_maxsize: 512
nms_thresh: 0.8
test:
nms_type: nms_gpu
multi_class_nms: False
nms_pre_maxsize: 1024
nms_post_maxsize: 100
nms_thresh: 0.7
roi_grid_pool:
grid_size: 6
mlps: [[64, 64], [64, 64]]
pool_radius: [0.8, 1.6]
nsample: [16, 16]
pool_method: max_pool
target_config:
box_coder: ResidualCoder
roi_per_image: 128
fg_ratio: 0.5
sample_roi_by_each_class: True
cls_score_type: roi_iou
cls_fg_thresh: 0.75
cls_bg_thresh: 0.25
cls_bg_thresh_lo: 0.1
hard_bg_ratio: 0.8
reg_fg_thresh: 0.55
loss_config:
cls_loss: BinaryCrossEntropy
reg_loss: smooth-l1
corner_loss_regularization: True
loss_weights: {
'rcnn_cls_weight': 1.0,
'rcnn_reg_weight': 1.0,
'rcnn_corner_weight': 1.0,
'code_weights': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
}
post_process_cfg:
score_thresh: 0.1
output_raw_score: False
nms_config:
multi_classes_nms: False
nms_type: nms_gpu
nms_thresh: 0.1
nms_pre_maxsize: 4096
nms_post_maxsize: 500
optimizer:
type: OneCycleAdam
beta2: 0.99
weight_decay: 0.01
grad_clip:
type: ClipGradByGlobalNorm
clip_norm: 10
beta1:
type: OneCycleDecayWarmupMomentum
momentum_peak: 0.95
momentum_trough: 0.85
step_ratio_peak: 0.4
lr_scheduler:
type: OneCycleWarmupDecayLr
base_learning_rate: 0.001
lr_ratio_peak: 10
lr_ratio_trough: 0.0001
step_ratio_peak: 0.4
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/pv_rcnn/README.md
|
# PV-RCNN: Point-Voxel Feature Set Abstraction for 3D Object Detection
## 目录
* [引用](#1)
* [简介](#2)
* [模型库](#3)
* [训练 & 评估](#4)
* [KITTI数据集](#41)
* [导出 & 部署](#5)
* [自定义数据集](#6)
## <h2 id="1">引用</h2>
> Shi, Shaoshuai, et al. "Pv-rcnn: Point-voxel feature set abstraction for 3d object detection." Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2020.
## <h2 id="2">简介</h2>
PV-RCNN是Voxel-Based和Point-based相结合的Anchor-Based二阶段点云三维物体检测方法。在精度提优方面,PV-RCNN在Voxel-Branch中使用3D Sparse Conv学习有序体素特征,在Point-Branch中基于PointNet++提取无序点云的特征,将两种表示方法的优点充分结合,有效提升模型的精度。在性能提速方面,针对提取每个ROI内所有点云导致耗时太长的问题,PV-RCNN仅提取ROI内数量有限且能代表周围点云的关键点的特征,有效提升模型的性能。
## <h2 id="3">模型库</h2>
- PV-RCNN在KITTI Val set数据集上的表现:
| 模型 | Car Mod@0.7 AP_R11 / AP_R40 | Pedestrian Mod@0.5 AP_R11 / AP_R40| Cyclist Mod@0.5 AP_R11 / AP_R40 | V100 TensorRT FP32(FPS) | V100 TensorRT FP16(FPS) | 模型下载 | 配置文件 | 日志 |
| ---- | ---------- |----------- |------------------ | -------------- | ----------------| ------ | ------- | ----- |
| PV-RCNN | 83.78 / 84.72 | 58.91 / 58.30 | 73.29 / 73.27 | 10.14 | 10.74 | [model](https://paddle3d.bj.bcebos.com/models/pv_rcnn/pv_rcnn_005voxel_kitti/model.pdparams) | [config](../../../configs/pv_rcnn/pv_rcnn_005voxel_kitti.yml)| [log](https://paddle3d.bj.bcebos.com/models/pv_rcnn/pv_rcnn_005voxel_kitti/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=2166361cceed2624cee057ea583ee257) |
**注意:** KITTI benchmark使用8张V100 GPU训练得出。
## <h2 id="4">训练 & 评估</h2>
### <h3 id="41">KITTI数据集</h3>
- 目前Paddle3D中提供的PV-RCNN模型支持在KITTI数据集上训练,因此需要先准备KITTI数据集,请在[官网](http://www.cvlibs.net/datasets/kitti/eval_object.php?obj_benchmark=3d)进行下载:
1. Download Velodyne point clouds, if you want to use laser information (29 GB)
2. training labels of object data set (5 MB)
3. camera calibration matrices of object data set (16 MB)
并下载数据集的划分文件列表:
```
wget https://bj.bcebos.com/paddle3d/datasets/KITTI/ImageSets.tar.gz
```
将数据解压后按照下方的目录结构进行组织:
```
kitti_dataset_root
|—— training
| |—— label_2
| | |—— 000001.txt
| | |—— ...
| |—— calib
| | |—— 000001.txt
| | |—— ...
| |—— velodyne
| | |—— 000001.bin
| | |—— ...
|—— ImageSets
│ |—— test.txt
│ |—— train.txt
│ |—— trainval.txt
│ |—— val.txt
```
在Paddle3D的目录下创建软链接 `datasets/KITTI`,指向到上面的数据集目录:
```
mkdir datasets
ln -s /path/to/kitti_dataset_root ./datasets
mv ./datasets/kitti_dataset_root ./datasets/KITTI
```
- 生成训练时数据增强所需的真值库:
```
python tools/create_det_gt_database.py --dataset_name kitti --dataset_root ./datasets/KITTI --save_dir ./datasets/KITTI
```
`--dataset_root`指定KITTI数据集所在路径,`--save_dir`指定用于保存所生成的真值库的路径。该命令执行后,`save_dir`生成的目录如下:
```
kitti_train_gt_database
|—— anno_info_train.pkl
|—— Car
| |—— 4371_Car_7.bin
| |—— ...
|—— Cyclist
```
#### 训练
KITTI数据集上的训练使用8张GPU:
```
python -m paddle.distributed.launch --gpus 0,1,2,3,4,5,6,7 tools/train.py --config configs/pv_rcnn/pv_rcnn.yml --save_dir ./output_pv_rcnn --num_workers 4 --save_interval 1
```
训练启动参数介绍可参考文档[全流程速览](../../quickstart.md#模型训练)。
#### 评估
```
python tools/evaluate.py --config configs/pv_rcnn/pv_rcnn.yml --model ./output_pv_rcnn/epoch_80/model.pdparams --batch_size 1 --num_workers 4
```
**注意**:PV-RCNN的评估目前只支持batch_size为1。
评估启动参数介绍可参考文档[全流程速览](../../quickstart.md#模型评估)。
## <h2 id="5">导出 & 部署</h2>
### 模型导出
运行以下命令,将训练时保存的动态图模型文件导出成推理引擎能够加载的静态图模型文件。
```
python tools/export.py --config configs/pv_rcnn/pv_rcnn.yml --model /path/to/model.pdparams --save_dir /path/to/output
```
| 参数 | 说明 |
| -- | -- |
| config | **[必填]** 训练配置文件所在路径 |
| model | **[必填]** 训练时保存的模型文件`model.pdparams`所在路径 |
| save_dir | **[必填]** 保存导出模型的路径,`save_dir`下将会生成三个文件:`pv_rcnn.pdiparams `、`pv_rcnn.pdiparams.info`和`pv_rcnn.pdmodel` |
### C++部署
#### Linux系统
#### 环境依赖
- GCC >= 5.4.0
- Cmake >= 3.5.1
- Ubuntu 16.04/18.04
> 说明:本文档的部署环节在以下环境中进行过测试并通过:
测试环境:
- GCC==8.2.0
- Cmake==3.16.0
- Ubuntu 18.04
- CUDA 11.2
- cuDNN==8.1.1
- Paddle Inference==2.3.1
- TensorRT-8.2.5.1.Linux.x86_64-gnu.cuda-11.4.cudnn8.2
#### 编译步骤
**注意:目前PV-RCNN仅支持使用GPU进行推理。**
- step 1: 进入部署代码所在路径
```
cd deploy/pv_rcnn/cpp
```
- step 2: 下载Paddle Inference C++预编译库
Paddle Inference针对**是否使用GPU**、**是否支持TensorRT**、以及**不同的CUDA/cuDNN/GCC版本**均提供已经编译好的库文件,请至[Paddle Inference C++预编译库下载列表](https://www.paddlepaddle.org.cn/inference/user_guides/download_lib.html#c)选择符合的版本。
- step 3: 修改`compile.sh`中的编译参数
主要修改编译脚本`compile.sh`中的以下参数:
| 参数 | 说明 |
| -- | -- |
| WITH_GPU | 是否使用gpu。ON或OFF, OFF表示使用CPU,默认ON|
| USE_TENSORRT | 是否使用TensorRT加速。ON或OFF,默认OFF|
| LIB_DIR | Paddle Inference C++预编译包所在路径,该路径下的内容应有:`CMakeCache.txt`、`paddle`、`third_party`和`version.txt` |
| CUDNN_LIB | cuDNN`libcudnn.so`所在路径 |
| CUDA_LIB | CUDA`libcudart.so`所在路径 |
| TENSORRT_ROOT | TensorRT所在路径。**非必须**,如果`USE_TENSORRT`设置为`ON`时,需要填写该路径,该路径下的内容应有`bin`、`lib`和`include`等|
- step 4: 开始编译
```
sh compile.sh
```
### 执行预测
**注意:目前PV-RCNN仅支持使用GPU进行推理。**
执行命令参数说明
| 参数 | 说明 |
| -- | -- |
| model_file | 导出模型的结构文件`pv_rcnn.pdmodel`所在路径 |
| params_file | 导出模型的参数文件`pv_rcnn.pdiparams`所在路径 |
| lidar_file | 待预测的点云文件所在路径 |
| num_point_dim | 点云文件中每个点的维度大小。例如,若每个点的信息是`x, y, z, intensity`,则`num_point_dim`填写为4 |
| point_cloud_range | 输入模型的点云所处的空间范围,超出此范围内的点将被滤除。格式为`"X_min Y_min Z_min X_max Y_max Z_max"`|
```
./build/main --model_file /path/to/pv_rcnn.pdmodel --params_file /path/to/pv_rcnn.pdiparams --lidar_file /path/to/lidar.pcd.bin --num_point_dim 4 --point_cloud_range "0 -40 -3 70.4 40 1"
```
**注意:** 请预先确认实际待测试点云文件的维度是否是4,如果不是4,`--num_point_dim`请修改为实际值。
### 开启TensorRT加速预测【可选】
**注意:请根据编译步骤的step 3,修改`compile.sh`中TensorRT相关的编译参数,并重新编译。**
运行命令参数说明如下:
| 参数 | 说明 |
| -- | -- |
| model_file | 导出模型的结构文件`pv_rcnn.pdmodel`所在路径 |
| params_file | 导出模型的参数文件`pv_rcnn.pdiparams`所在路径 |
| lidar_file | 待预测的点云文件所在路径 |
| num_point_dim | 点云文件中每个点的维度大小。例如,若每个点的信息是`x, y, z, intensity`,则`num_point_dim`填写为4 |
| point_cloud_range | 输入模型的点云所处的空间范围,超出此范围内的点将被滤除。格式为`"X_min Y_min Z_min X_max Y_max Z_max"`|
| use_trt | 是否使用TensorRT进行加速,默认0|
| trt_precision | 当use_trt设置为1时,模型精度可设置0或1,0表示fp32, 1表示fp16。默认0 |
| trt_use_static | 当trt_use_static设置为1时,**在首次运行程序的时候会将TensorRT的优化信息进行序列化到磁盘上,下次运行时直接加载优化的序列化信息而不需要重新生成**。默认0 |
| trt_static_dir | 当trt_use_static设置为1时,保存优化信息的路径 |
| collect_shape_info | 是否收集模型动态shape信息。默认0。**只需首次运行,下次运行时直接加载生成的shape信息文件即可进行TensorRT加速推理** |
| dynamic_shape_file | 保存模型动态shape信息的文件路径。 |
* **首次运行TensorRT**,收集模型动态shape信息,并保存至`--dynamic_shape_file`指定的文件中
```
./build/main --model_file /path/to/pv_rcnn.pdmodel --params_file /path/to/pv_rcnn.pdiparams --lidar_file /path/to/lidar.pcd.bin --num_point_dim 4 --point_cloud_range "0 -40 -3 70.4 40 1" --use_trt 1 --collect_shape_info 1 --dynamic_shape_file /path/to/shape_info.txt
```
* 加载`--dynamic_shape_file`指定的模型动态shape信息,使用FP32精度进行预测
```
./build/main --model_file /path/to/pv_rcnn.pdmodel --params_file /path/to/pv_rcnn.pdiparams --lidar_file /path/to/lidar.pcd.bin --num_point_dim 4 --point_cloud_range "0 -40 -3 70.4 40 1" --use_trt 1 --dynamic_shape_file /path/to/shape_info.txt
```
* 加载`--dynamic_shape_file`指定的模型动态shape信息,使用FP16精度进行预测
```
./build/main --model_file /path/to/pv_rcnn.pdmodel --params_file /path/to/pv_rcnn.pdiparams --lidar_file /path/to/lidar.pcd.bin --num_point_dim 4 --point_cloud_range "0 -40 -3 70.4 40 1" --use_trt 1 --dynamic_shape_file /path/to/shape_info.txt --trt_precision 1
```
* 如果觉得每次运行时模型加载的时间过长,可以设置`trt_use_static`和`trt_static_dir`,首次运行时将TensorRT的优化信息保存在硬盘中,后续直接反序列化优化信息即可
```
./build/main --model_file /path/to/pv_rcnn.pdmodel --params_file /path/to/pv_rcnn.pdiparams --lidar_file /path/to/lidar.pcd.bin --num_point_dim 4 --point_cloud_range "0 -40 -3 70.4 40 1" --use_trt 1 --dynamic_shape_file /path/to/shape_info.txt --trt_precision 1 --trt_use_static 1 --trt_static_dir /path/to/OptimCacheDir
```
### Python部署
**注意:目前PV-RCNN仅支持使用GPU进行推理。**
命令参数说明如下:
| 参数 | 说明 |
| -- | -- |
| model_file | 导出模型的结构文件`pv_rcnn.pdmodel`所在路径 |
| params_file | 导出模型的参数文件`pv_rcnn.pdiparams`所在路径 |
| lidar_file | 待预测的点云文件所在路径 |
| num_point_dim | 点云文件中每个点的维度大小。例如,若每个点的信息是`x, y, z, intensity`,则`num_point_dim`填写为4 |
| point_cloud_range | 输入模型的点云所处的空间范围,超出此范围内的点将被滤除。格式为`X_min Y_min Z_min X_max Y_max Z_max`|
| use_trt | 是否使用TensorRT进行加速,默认0|
| trt_precision | 当use_trt设置为1时,模型精度可设置0或1,0表示fp32, 1表示fp16。默认0 |
| trt_use_static | 当trt_use_static设置为1时,**在首次运行程序的时候会将TensorRT的优化信息进行序列化到磁盘上,下次运行时直接加载优化的序列化信息而不需要重新生成**。默认0 |
| trt_static_dir | 当trt_use_static设置为1时,保存优化信息的路径 |
| collect_shape_info | 是否收集模型动态shape信息。默认0。**只需首次运行,后续直接加载生成的shape信息文件即可进行TensorRT加速推理** |
| dynamic_shape_file | 保存模型动态shape信息的文件路径。 |
运行以下命令,执行预测:
```
python infer.py --model_file /path/to/pv_rcnn.pdmodel --params_file /path/to/pv_rcnn.pdiparams --lidar_file /path/to/lidar.pcd.bin --num_point_dim 4 --point_cloud_range 0 -40 -3 70.4 40 1
```
## <h2 id="6">自定义数据集</h2>
请参考文档[自定义数据集格式说明](../../../datasets/custom.md)准备自定义数据集。
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/pointpillars/pointpillars_xyres16_kitti_cyclist_pedestrian.yml
|
batch_size: 2
iters: 296960 # 160 epochs
train_dataset:
type: KittiPCDataset
dataset_root: datasets/KITTI
class_names: [ "Cyclist", "Pedestrian" ]
transforms:
- type: LoadPointCloud
dim: 4
use_dim: 4
- type: RemoveCameraInvisiblePointsKITTI
- type: SamplingDatabase
min_num_points_in_box_per_class:
Cyclist: 5
max_num_samples_per_class:
Cyclist: 8
ignored_difficulty: [ -1 ]
database_anno_path: datasets/KITTI/kitti_train_gt_database/anno_info_train.pkl
database_root: datasets/KITTI/
class_names: [ "Cyclist", "Pedestrian" ]
- type: RandomObjectPerturb
rotation_range: [ -0.15707963267, 0.15707963267 ]
translation_std: [ 0.25, 0.25, 0.25 ]
max_num_attempts: 100
- type: RandomVerticalFlip
- type: GlobalRotate
min_rot: -0.78539816
max_rot: 0.78539816
- type: GlobalScale
min_scale: 0.95
max_scale: 1.05
- type: GlobalTranslate
translation_std: [ 0.2, 0.2, 0.2 ]
- type: FilterBBoxOutsideRange
point_cloud_range: [ 0, -19.84, -2.5, 47.36, 19.84, 0.5 ]
- type: ShufflePoint
- type: HardVoxelize
point_cloud_range: [ 0, -19.84, -2.5, 47.36, 19.84, 0.5 ]
voxel_size: [ 0.16, 0.16, 3 ]
max_points_in_voxel: 100
max_voxel_num: 12000
- type: GenerateAnchors
output_stride_factor: 1 # RPN `downsample_strides`[0] // `upsample_strides`[0]
point_cloud_range: [ 0, -19.84, -2.5, 47.36, 19.84, 0.5 ]
voxel_size: [ 0.16, 0.16, 3 ]
anchor_configs:
- sizes: [ 0.6, 1.76, 1.73 ] # wlh
anchor_strides: [ 0.16, 0.16, 0.0 ]
anchor_offsets: [ 0.08, -19.76, -1.465 ]
rotations: [ 0, 1.57 ]
matched_threshold: 0.5
unmatched_threshold: 0.35
- sizes: [ 0.6, 0.8, 1.73 ] # wlh
anchor_strides: [ 0.16, 0.16, 0.0 ]
anchor_offsets: [ 0.08, -19.76, -1.465 ]
rotations: [ 0, 1.57 ]
matched_threshold: 0.5
unmatched_threshold: 0.35
anchor_area_threshold: 1
- type: Gt2PointPillarsTarget
rpn_batch_size: 512
mode: train
val_dataset:
type: KittiPCDataset
dataset_root: datasets/KITTI
class_names: [ "Cyclist", "Pedestrian" ]
transforms:
- type: LoadPointCloud
dim: 4
use_dim: 4
- type: RemoveCameraInvisiblePointsKITTI
- type: HardVoxelize
point_cloud_range: [ 0, -19.84, -2.5, 47.36, 19.84, 0.5 ]
voxel_size: [ 0.16, 0.16, 3 ]
max_points_in_voxel: 100
max_voxel_num: 12000
- type: GenerateAnchors
output_stride_factor: 1
point_cloud_range: [ 0, -19.84, -2.5, 47.36, 19.84, 0.5 ]
voxel_size: [ 0.16, 0.16, 3 ]
anchor_configs:
- sizes: [ 0.6, 1.76, 1.73 ] # wlh
anchor_strides: [ 0.16, 0.16, 0.0 ]
anchor_offsets: [ 0.08, -19.76, -1.465 ]
rotations: [ 0, 1.57 ]
matched_threshold: 0.5
unmatched_threshold: 0.35
- sizes: [ 0.6, 0.8, 1.73 ] # wlh
anchor_strides: [ 0.16, 0.16, 0.0 ]
anchor_offsets: [ 0.08, -19.76, -1.465 ]
rotations: [ 0, 1.57 ]
matched_threshold: 0.5
unmatched_threshold: 0.35
anchor_area_threshold: 1
mode: val
model:
type: PointPillars
voxelizer:
type: HardVoxelizer
point_cloud_range: [ 0, -19.84, -2.5, 47.36, 19.84, 0.5 ]
voxel_size: [ 0.16, 0.16, 3 ]
max_num_points_in_voxel: 100
max_num_voxels: 12000
pillar_encoder:
type: PillarFeatureNet
in_channels: 4
feat_channels: [ 64 ]
with_distance: False
max_num_points_in_voxel: 100
voxel_size: [ 0.16, 0.16, 3 ]
point_cloud_range: [ 0, -19.84, -2.5, 47.36, 19.84, 0.5 ]
legacy: False
middle_encoder:
type: PointPillarsScatter
in_channels: 64
voxel_size: [ 0.16, 0.16, 3 ]
point_cloud_range: [ 0, -19.84, -2.5, 47.36, 19.84, 0.5 ]
backbone:
type: SecondBackbone
in_channels: 64
out_channels: [ 64, 128, 256 ]
layer_nums: [ 3, 5, 5 ]
downsample_strides: [ 1, 2, 2 ]
neck:
type: SecondFPN
in_channels: [ 64, 128, 256 ]
out_channels: [ 128, 128, 128 ]
upsample_strides: [ 1, 2, 4 ]
use_conv_for_no_stride: False
head:
type: SSDHead
num_classes: 2
feature_channels: 384 # sum(upsample_channels)
num_anchor_per_loc: 4
encode_background_as_zeros: True
use_direction_classifier: True
box_code_size: 7
nms_score_threshold: 0.05
nms_pre_max_size: 1000
nms_post_max_size: 300
nms_iou_threshold: 0.5
prediction_center_limit_range: [ 0, -19.84, -2.5, 47.36, 19.84, 0.5 ]
loss:
type: PointPillarsLoss
num_classes: 2
classification_loss:
type: SigmoidFocalClassificationLoss
gamma: 2.0
alpha: 0.25
regression_loss:
type: WeightedSmoothL1RegressionLoss
sigma: 3.0
code_weights: [ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 ]
direction_loss:
type: WeightedSoftmaxClassificationLoss
classification_loss_weight: 1.0
regression_loss_weight: 2.0
direction_loss_weight: 0.2
fg_cls_weight: 1.0
bg_cls_weight: 1.0
encode_rot_error_by_sin: True
use_direction_classifier: True
encode_background_as_zeros: True
box_code_size: 7
anchor_configs:
- sizes: [ 0.6, 1.76, 1.73 ] # wlh
anchor_strides: [ 0.16, 0.16, 0.0 ]
anchor_offsets: [ 0.08, -19.76, -1.465 ]
rotations: [ 0, 1.57 ]
matched_threshold: 0.5
unmatched_threshold: 0.35
- sizes: [ 0.6, 0.8, 1.73 ] # wlh
anchor_strides: [ 0.16, 0.16, 0.0 ]
anchor_offsets: [ 0.08, -19.76, -1.465 ]
rotations: [ 0, 1.57 ]
matched_threshold: 0.5
unmatched_threshold: 0.35
anchor_area_threshold: 1
optimizer:
type: Adam
weight_decay: 0.0001
grad_clip:
type: ClipGradByGlobalNorm
clip_norm: 10.0
lr_scheduler:
type: StepDecay
learning_rate: 0.0002
step_size: 27840 # decay every 15 epochs
gamma: 0.8
export:
transforms:
- type: LoadPointCloud
dim: 4
use_dim: 4
- type: HardVoxelize
point_cloud_range: [ 0, -39.68, -3, 69.12, 39.68, 1 ]
voxel_size: [ 0.16, 0.16, 4 ]
max_points_in_voxel: 32
max_voxel_num: 16000
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/pointpillars/README.md
|
# PointPillars: Fast Encoders for Object Detection from Point Clouds
## 目录
* [引用](#h2-id1h2)
* [简介](#h2-id2h2)
* [模型库](#h2-id3h2)
* [训练配置](#h2-id4h2)
* [使用教程](#h2-id5h2)
* [数据准备](#h3-id51h3)
* [训练](#h3-id52h3)
* [评估](#h3-id53h3)
* [模型导出](#h3-id54h3)
* [模型部署](#h3-id55h3)
## <h2 id="1">引用</h2>
> Lang, Alex H., Sourabh, Vora, Holger, Caesar, Lubing, Zhou, Jiong, Yang, and Oscar, Beijbom. "PointPillars: Fast Encoders for Object Detection From Point Clouds." . In 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (pp. 12689-12697).2019.
## <h2 id="2">简介</h2>
PointPillars是目前工业界应用广泛的点云检测模型,其最主要的特点是检测速度和精度的平衡。PointPillars 在 [VoxelNet](https://arxiv.org/abs/1711.06396) 和 [SECOND](https://pdfs.semanticscholar.org/5125/a16039cabc6320c908a4764f32596e018ad3.pdf)
的基础上针对性能进行了优化,将点云转化为柱体(Pillars)表示,从而使得编码后的点云特征可以使用2D卷积神经网络进行检测任务。
## <h2 id="3">模型库</h2>
- PointPillars在KITTI Val set数据集上Car类别的表现
| 模型 | Car<br>Easy Mod. Hard | V100 TensorRT FP32(FPS) | V100 TensorRT FP16(FPS) | 模型下载 | 配置文件 |
|:------------:|:---------------------:|:-----------------------:|:-----------------------:|:--------------------------------------------------------------------------------------------------------:|:--------------------------------------------------------------------------:|
| PointPillars | 86.90 75.21 71.57 | 37.3 | 40.5 | [model](https://bj.bcebos.com/paddle3d/models/pointpillars/pointpillars_xyres16_kitti_car/model.pdparams) | [config](../../../configs/pointpillars/pointpillars_xyres16_kitti_car.yml) |
- PointPillars在KITTI Val set数据集上Cyclist及Pedestrian类别的表现
| 模型 | Cyclist<br>Easy Mod. Hard | Pedestrian<br>Easy Mod. Hard | V100 TensorRT FP32(FPS) | V100 TensorRT FP16(FPS) | 模型下载 | 配置文件 |
|:------------:|:-------------------------:|:----------------------------:|:-----------------------:|:-----------------------:|:-----------------------------------------------------------------------------------------------------------------------:|:-----------------------------------------------------------------------------------------:|
| PointPillars | 84.36 64.66 60.53 | 66.13 60.36 54.40 | 30.0 | 30.2 | [model](https://bj.bcebos.com/paddle3d/models/pointpillars/pointpillars_xyres16_kitti_cyclist_pedestrian/model.pdparams) | [config](../../../configs/pointpillars/pointpillars_xyres16_kitti_cyclist_pedestrian.yml) |
## <h2 id="4">训练配置</h2>
我们提供了在开源数据集上的训练配置与结果,详见[PointPillars 训练配置](../../../configs/pointpillars)。
## <h2 id="5">使用教程</h2>
### <h3 id="51">数据准备</h3>
- 目前Paddle3D中提供的PointPillars模型支持在KITTI数据集上训练,因此需要先准备KITTI数据集,请在[官网](http://www.cvlibs.net/datasets/kitti/eval_object.php?obj_benchmark=3d)进行下载:
1. Download Velodyne point clouds, if you want to use laser information (29 GB)
2. training labels of object data set (5 MB)
3. camera calibration matrices of object data set (16 MB)
并下载数据集的划分文件列表:
```shell
wget https://bj.bcebos.com/paddle3d/datasets/KITTI/ImageSets.tar.gz
```
将数据解压后按照下方的目录结构进行组织:
```
└── kitti_dataset_root
|—— training
|—— label_2
|—— 000001.txt
|—— ...
|—— calib
|—— 000001.txt
|—— ...
|—— velodyne
|—— 000001.bin
|—— ...
|—— ImageSets
|—— test.txt
|—— train.txt
|—— trainval.txt
|—— val.txt
```
在Paddle3D的目录下创建软链接 `datasets/KITTI`,指向到上面的数据集目录:
```
mkdir datasets
ln -s /path/to/kitti_dataset_root ./datasets
mv ./datasets/kitti_dataset_root ./datasets/KITTI
```
- 生成训练时数据增强所需的真值库:
```
python tools/create_det_gt_database.py --dataset_name kitti --dataset_root ./datasets/KITTI --save_dir ./datasets/KITTI
```
`--dataset_root`指定KITTI数据集所在路径,`--save_dir`指定用于保存所生成的真值库的路径。该命令执行后,`save_dir`生成的目录如下:
```
└── kitti_train_gt_database
|—— anno_info_train.pkl
|—— Car
|—— 4371_Car_7.bin
|—— ...
|—— Cyclist
```
### <h3 id="52">训练</h3>
位于`Paddle3D/`目录下,执行:
```shell
python -m paddle.distributed.launch --gpus 0 \
tools/train.py \
--config configs/pointpillars/pointpillars_xyres16_kitti_car.yml \
--save_interval 1856 \
--keep_checkpoint_max 100 \
--save_dir outputs/pointpillars \
--do_eval \
--num_workers 8
```
训练脚本支持设置如下参数:
| 参数名 | 用途 | 是否必选项 | 默认值 |
|:--------------------|:-------------------------------|:------:|:---------:|
| gpus | 使用的GPU编号 | 是 | - |
| config | 配置文件 | 是 | - |
| save_dir | 模型和visualdl日志文件的保存根路径 | 否 | output |
| num_workers | 用于异步读取数据的进程数量, 大于等于1时开启子进程读取数据 | 否 | 2 |
| save_interval | 模型保存的间隔步数 | 否 | 1000 |
| do_eval | 是否在保存模型时进行评估 | 否 | 否 |
| log_interval | 打印日志的间隔步数 | 否 | 10 |
| keep_checkpoint_max | 最新模型保存个数 | 否 | 5 |
| resume | 是否从断点恢复训练 | 否 | 否 |
| batch_size | mini-batch大小(每张GPU) | 否 | 在配置文件中指定 |
| iters | 训练轮数 | 否 | 在配置文件中指定 |
| learning_rate | 学习率 | 否 | 在配置文件中指定 |
| seed | Paddle的全局随机种子值 | 否 | None |
### <h3 id="53">评估</h3>
位于`Paddle3D/`目录下,执行:
```shell
python tools/evaluate.py \
--config configs/pointpillars/pointpillars_xyres16_kitti_car.yml \
--model /path/to/model.pdparams \
--num_workers 8
```
评估脚本支持设置如下参数:
| 参数名 | 用途 | 是否必选项 | 默认值 |
|:--------------------|:-------------------------------|:------:|:---------:|
| config | 配置文件 | 是 | - |
| model | 待评估模型路径 | 是 | - |
| num_workers | 用于异步读取数据的进程数量, 大于等于1时开启子进程读取数据 | 否 | 2 |
| batch_size | mini-batch大小 | 否 | 在配置文件中指定 |
### <h3 id="54">模型导出</h3>
运行以下命令,将训练时保存的动态图模型文件导出成推理引擎能够加载的静态图模型文件。
```shell
python tools/export.py \
--config configs/pointpillars/pointpillars_xyres16_kitti_car.yml \
--model /path/to/model.pdparams \
--save_dir /path/to/output
```
模型导出脚本支持设置如下参数:
| 参数名 | 用途 | 是否必选项 | 默认值 |
|:------------|:-------------------------------------------------------------------------------------------------------------|:------:|:---------:|
| config | 配置文件 | 是 | - |
| model | 待导出模型参数`model.pdparams`路径 | 是 | - |
| save_dir | 保存导出模型的路径,`save_dir`下将会生成三个文件:`pointpillars.pdiparams `、`pointpillars.pdiparams.info`和`pointpillars.pdmodel` | 否 | `deploy` |
### <h3 id="55">模型部署</h3>
#### C++部署(Linux系统)
#### 环境依赖:
- GCC >= 5.4.0
- Cmake >= 3.5.1
- Ubuntu 16.04/18.04
> 说明:本文档的部署环节在以下环境中进行过测试并通过:
- GCC==8.2.0
- Cmake==3.16.0
- Ubuntu 18.04
- CUDA 11.2
- cuDNN==8.1.1
- PaddleInference==2.3.1
- TensorRT-8.2.5.1.Linux.x86_64-gnu.cuda-11.4.cudnn8.2
#### 编译步骤:
**注意:目前PointPillars仅支持使用GPU进行推理。**
- step 1: 进入部署代码所在路径
```commandline
cd deploy/pointpillars/cpp
```
- step 2: 下载Paddle Inference C++预编译库
Paddle Inference针对**是否使用GPU**、**是否支持TensorRT**、以及**不同的CUDA/cuDNN/GCC版本**
均提供已经编译好的库文件,请至[Paddle Inference C++预编译库下载列表](https://www.paddlepaddle.org.cn/inference/user_guides/download_lib.html#c)
选择符合的版本。
- step 3: 修改`compile.sh`中的编译参数
主要修改编译脚本`compile.sh`中的以下参数:
| 参数名 | 说明 | 是否必选项 | 默认值 |
|:--------------|:---------------------------------------------------------------------------------------------|:------:|:-----------------------------------------------------------------:|
| WITH_GPU | 是否使用GPU | 否 | ON |
| USE_TENSORRT | 是否使用TensorRT加速 | 否 | ON |
| LIB_DIR | Paddle Inference C++预编译包所在路径,该路径下的内容应有:`CMakeCache.txt`、`paddle`、`third_party`和`version.txt` | 是 | - |
| CUDNN_LIB | cuDNN`libcudnn.so`所在路径 | 否 | `/usr/lib/x86_64-linux-gnu` |
| CUDA_LIB | CUDA`libcudart.so`所在路径 | 否 | `/usr/local/cuda/lib64` |
| TENSORRT_ROOT | TensorRT所在路径 | 否 | 如果`USE_TENSORRT`设置为`ON`时,需要填写该路径,该路径下的内容应有`bin`、`lib`和`include`等 |
- step 4: 开始编译
```commandline
sh compile.sh
```
#### 执行预测:
**注意:目前PointPillars仅支持使用GPU进行推理。**
执行命令参数说明
| 参数名 | 说明 | 是否必选项 | 默认值 |
|:--------------------|:------------------------------------------------------------------------|:------:|:----:|
| model_file | 导出模型的结构文件`pointpillars.pdmodel`所在路径 | 是 | - |
| params_file | 导出模型的参数文件`pointpillars.pdiparams`所在路径 | 是 | - |
| lidar_file | 待预测的点云文件所在路径 | 是 | - |
| point_cloud_range | 模型中将点云划分为柱体(pillars)时选取的点云范围,格式为`"X_min Y_min Z_min X_max Y_max Z_max"` | 是 | - |
| voxel_size | 模型中将点云划分为柱体(pillars)时每个柱体的尺寸,格式为`"X_size Y_size Z_size"` | 是 | - |
| max_points_in_voxel | 模型中将点云划分为柱体(pillars)时每个柱体包含点数量上限 | 是 | - |
| max_voxel_num | 模型中将点云划分为柱体(pillars)时保留的柱体数量上限 | 是 | - |
| num_point_dim | 点云文件中每个点的维度大小。例如,若每个点的信息是`x, y, z, intensity`,则`num_point_dim`填写为4 | 否 | 4 |
执行命令:
```shell
./build/main \
--model_file /path/to/pointpillars.pdmodel \
--params_file /path/to/pointpillars.pdiparams \
--lidar_file /path/to/lidar.pcd.bin \
--point_cloud_range "0 -39.68 -3 69.12 39.68 1" \
--voxel_size ".16 .16 4" \
--max_points_in_voxel 32 \
--max_voxel_num 40000
```
#### 开启TensorRT加速预测【可选】:
**注意:请根据编译步骤的step 3,修改`compile.sh`中TensorRT相关的编译参数,并重新编译。**
运行命令参数说明如下:
| 参数名 | 说明 | 是否必选项 | 默认值 |
|:--------------------|:----------------------------------------------------------------------------------------|:------:|:----:|
| model_file | 导出模型的结构文件`pointpillars.pdmodel`所在路径 | 是 | - |
| params_file | 导出模型的参数文件`pointpillars.pdiparams`所在路径 | 是 | - |
| lidar_file | 待预测的点云文件所在路径 | 是 | - |
| point_cloud_range | 模型中将点云划分为柱体(pillars)时选取的点云范围,格式为`"X_min Y_min Z_min X_max Y_max Z_max"` | 是 | - |
| voxel_size | 模型中将点云划分为柱体(pillars)时每个柱体的尺寸,格式为`"X_size Y_size Z_size"` | 是 | - |
| max_points_in_voxel | 模型中将点云划分为柱体(pillars)时每个柱体包含点数量上限 | 是 | - |
| max_voxel_num | 模型中将点云划分为柱体(pillars)时保留的柱体数量上限 | 是 | - |
| num_point_dim | 点云文件中每个点的维度大小。例如,若每个点的信息是`x, y, z, intensity`,则`num_point_dim`填写为4 | 否 | 4 |
| use_trt | 是否开启TensorRT加速预测 | 否 | 0 |
| trt_precision | 当`use_trt`设置为1时,模型精度可设置0或1,0表示fp32, 1表示fp16 | 否 | 0 |
| trt_use_static | 当`trt_use_static`设置为1时,**在首次运行程序的时候会将TensorRT的优化信息进行序列化到磁盘上,下次运行时直接加载优化的序列化信息而不需要重新生成** | 否 | 0 |
| trt_static_dir | 当`trt_use_static`设置为1时,保存优化信息的路径 | 否 | - |
| collect_shape_info | 当`use_trt`设置为1时,是否收集模型动态shape信息。默认0。**只需首次运行,下次运行时直接加载生成的shape信息文件即可进行TensorRT加速推理** | 否 | 0 |
| dynamic_shape_file | 当`collect_shape_info`设置为1时,保存模型动态shape信息的文件路径 | 否 | - |
* **首次运行TensorRT**,收集模型动态shape信息,并保存至`--dynamic_shape_file`指定的文件中
```shell
./build/main \
--model_file /path/to/pointpillars.pdmodel \
--params_file /path/to/pointpillars.pdiparams \
--lidar_file /path/to/lidar.bin \
--num_point_dim 4 \
--point_cloud_range "0 -39.68 -3 69.12 39.68 1" \
--voxel_size ".16 .16 4" \
--max_points_in_voxel 32 \
--max_voxel_num 40000 \
--use_trt 1 \
--collect_shape_info 1 \
--dynamic_shape_file /path/to/shape_info.txt
```
* 加载`--dynamic_shape_file`指定的模型动态shape信息,使用FP32精度进行预测
```shell
./build/main \
--model_file /path/to/pointpillars.pdmodel \
--params_file /path/to/pointpillars.pdiparams \
--lidar_file /path/to/lidar.bin \
--num_point_dim 4 \
--point_cloud_range "0 -39.68 -3 69.12 39.68 1" \
--voxel_size ".16 .16 4" \
--max_points_in_voxel 32 \
--max_voxel_num 40000 \
--use_trt 1 \
--dynamic_shape_file /path/to/shape_info.txt
```
* 加载`--dynamic_shape_file`指定的模型动态shape信息,使用FP16精度进行预测
```shell
./build/main \
--model_file /path/to/pointpillars.pdmodel \
--params_file /path/to/pointpillars.pdiparams \
--lidar_file /path/to/lidar.bin \
--num_point_dim 4 \
--point_cloud_range "0 -39.68 -3 69.12 39.68 1" \
--voxel_size ".16 .16 4" \
--max_points_in_voxel 32 \
--max_voxel_num 40000 \
--use_trt 1 \
--dynamic_shape_file /path/to/shape_info.txt \
--trt_precision 1
```
* 如果觉得每次运行时模型加载的时间过长,可以设置`trt_use_static`和`trt_static_dir`,首次运行时将TensorRT的优化信息保存在硬盘中,后续直接反序列化优化信息即可
```shell
./build/main \
--model_file /path/to/pointpillars.pdmodel \
--params_file /path/to/pointpillars.pdiparams \
--lidar_file /path/to/lidar.bin \
--num_point_dim 4 \
--point_cloud_range "0 -39.68 -3 69.12 39.68 1" \
--voxel_size ".16 .16 4" \
--max_points_in_voxel 32 \
--max_voxel_num 40000 \
--use_trt 1 \
--dynamic_shape_file /path/to/shape_info.txt \
--trt_use_static 1 \
--trt_static_dir /path/to/OptimCacheDir
```
#### Python 部署
**注意:目前PointPillars仅支持使用GPU进行推理。**
运行命令参数说明如下:
| 参数名 | 用途 | 是否必选项 | 默认值 |
|:--------------------|:--------------------------------------------------------------------------------------|:------|:----|
| model_file | 导出模型的结构文件`pointpillars.pdmodel`所在路径 | 是 | - |
| params_file | 导出模型的参数文件`pointpillars.pdiparams`所在路径 | 是 | - |
| lidar_file | 待预测的点云所在路径 | 是 | - |
| point_cloud_range | 模型中将点云划分为柱体(pillars)时选取的点云范围,格式为`X_min Y_min Z_min X_max Y_Max Z_max` | 是 | - |
| voxel_size | 模型中将点云划分为柱体(pillars)时每个柱体的尺寸,格式为`X_size Y_size Z_size` | 是 | - |
| max_points_in_voxel | 模型中将点云划分为柱体(pillars)时每个柱体包含点数量上限 | 是 | - |
| max_voxel_num | 模型中将点云划分为柱体(pillars)时保留的柱体数量上限 | 是 | - |
| num_point_dim | 点云文件中每个点的维度大小。例如,若每个点的信息是`x, y, z, intensity`,则`num_point_dim`填写为4 | 否 | 4 |
| use_trt | 是否使用TensorRT进行加速 | 否 | 0 |
| trt_precision | 当use_trt设置为1时,模型精度可设置0或1,0表示fp32, 1表示fp16 | 否 | 0 |
| trt_use_static | 当trt_use_static设置为1时,**在首次运行程序的时候会将TensorRT的优化信息进行序列化到磁盘上,下次运行时直接加载优化的序列化信息而不需要重新生成** | 否 | 0 |
| trt_static_dir | 当trt_use_static设置为1时,保存优化信息的路径 | 否 | - |
| collect_shape_info | 是否收集模型动态shape信息。默认0。**只需首次运行,后续直接加载生成的shape信息文件即可进行TensorRT加速推理** | 否 | 0 |
| dynamic_shape_file | 保存模型动态shape信息的文件路径 | 否 | - |
运行以下命令,执行预测:
```shell
python infer.py \
--model_file /path/to/pointpillars.pdmodel \
--params_file /path/to/pointpillars.pdiparams \
--lidar_file /path/to/lidar.bin \
--point_cloud_range 0 -39.68 -3 69.12 39.68 1 \
--voxel_size .16 .16 4 \
--max_points_in_voxel 32 \
--max_voxel_num 40000
```
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/pointpillars/pointpillars_xyres16_kitti_car.yml
|
batch_size: 2
iters: 296960 # 160 epochs
train_dataset:
type: KittiPCDataset
dataset_root: datasets/KITTI
class_names: [ "Car" ]
transforms:
- type: LoadPointCloud
dim: 4
use_dim: 4
- type: RemoveCameraInvisiblePointsKITTI
- type: SamplingDatabase
min_num_points_in_box_per_class:
Car: 5
max_num_samples_per_class:
Car: 15
ignored_difficulty: [ -1 ]
database_anno_path: datasets/KITTI/kitti_train_gt_database/anno_info_train.pkl
database_root: datasets/KITTI/
class_names: [ "Car" ]
- type: RandomObjectPerturb
rotation_range: [ -0.15707963267, 0.15707963267 ]
translation_std: [ 0.25, 0.25, 0.25 ]
max_num_attempts: 100
- type: RandomVerticalFlip
- type: GlobalRotate
min_rot: -0.78539816
max_rot: 0.78539816
- type: GlobalScale
min_scale: 0.95
max_scale: 1.05
- type: GlobalTranslate
translation_std: [ 0.2, 0.2, 0.2 ]
- type: FilterBBoxOutsideRange
point_cloud_range: [ 0, -39.68, -3, 69.12, 39.68, 1 ]
- type: ShufflePoint
- type: HardVoxelize
point_cloud_range: [ 0, -39.68, -3, 69.12, 39.68, 1 ]
voxel_size: [ 0.16, 0.16, 4 ]
max_points_in_voxel: 32
max_voxel_num: 16000
- type: GenerateAnchors
output_stride_factor: 2
point_cloud_range: [ 0, -39.68, -3, 69.12, 39.68, 1 ]
voxel_size: [ 0.16, 0.16, 4 ]
anchor_configs:
- sizes: [ 1.6, 3.9, 1.56 ] # wlh
anchor_strides: [ 0.32, 0.32, 0.0 ]
anchor_offsets: [ 0.16, -39.52, -1.78 ]
rotations: [ 0, 1.57 ]
matched_threshold: 0.6
unmatched_threshold: 0.45
anchor_area_threshold: 1
- type: Gt2PointPillarsTarget
rpn_batch_size: 512
mode: train
val_dataset:
type: KittiPCDataset
dataset_root: datasets/KITTI
class_names: [ "Car" ]
transforms:
- type: LoadPointCloud
dim: 4
use_dim: 4
- type: RemoveCameraInvisiblePointsKITTI
- type: HardVoxelize
point_cloud_range: [ 0, -39.68, -3, 69.12, 39.68, 1 ]
voxel_size: [ 0.16, 0.16, 4 ]
max_points_in_voxel: 32
max_voxel_num: 40000
- type: GenerateAnchors
output_stride_factor: 2 # RPN `downsample_strides`[0] // `upsample_strides`[0]
point_cloud_range: [ 0, -39.68, -3, 69.12, 39.68, 1 ]
voxel_size: [ 0.16, 0.16, 4 ]
anchor_configs:
- sizes: [ 1.6, 3.9, 1.56 ] # wlh
anchor_strides: [ 0.32, 0.32, 0.0 ]
anchor_offsets: [ 0.16, -39.52, -1.78 ]
rotations: [ 0, 1.57 ]
matched_threshold: 0.6
unmatched_threshold: 0.45
anchor_area_threshold: 1
mode: val
model:
type: PointPillars
voxelizer:
type: HardVoxelizer
point_cloud_range: [ 0, -39.68, -3, 69.12, 39.68, 1 ]
voxel_size: [ 0.16, 0.16, 4 ]
max_num_points_in_voxel: 32
max_num_voxels: [ 16000, 40000 ] # train, test
pillar_encoder:
type: PillarFeatureNet
in_channels: 4
feat_channels: [ 64 ]
with_distance: False
max_num_points_in_voxel: 32
voxel_size: [ 0.16, 0.16, 4 ]
point_cloud_range: [ 0, -39.68, -3, 69.12, 39.68, 1 ]
legacy: False
middle_encoder:
type: PointPillarsScatter
in_channels: 64
voxel_size: [ 0.16, 0.16, 4 ]
point_cloud_range: [ 0, -39.68, -3, 69.12, 39.68, 1 ]
backbone:
type: SecondBackbone
in_channels: 64
out_channels: [ 64, 128, 256 ]
layer_nums: [ 3, 5, 5 ]
downsample_strides: [ 2, 2, 2 ]
neck:
type: SecondFPN
in_channels: [ 64, 128, 256 ]
out_channels: [ 128, 128, 128 ]
upsample_strides: [ 1, 2, 4 ]
use_conv_for_no_stride: False
head:
type: SSDHead
num_classes: 1
feature_channels: 384 # sum(upsample_channels)
num_anchor_per_loc: 2
encode_background_as_zeros: True
use_direction_classifier: True
box_code_size: 7
nms_score_threshold: 0.05
nms_pre_max_size: 1000
nms_post_max_size: 300
nms_iou_threshold: 0.5
prediction_center_limit_range: [ 0, -39.68, -5, 69.12, 39.68, 5 ]
loss:
type: PointPillarsLoss
num_classes: 1
classification_loss:
type: SigmoidFocalClassificationLoss
gamma: 2.0
alpha: 0.25
regression_loss:
type: WeightedSmoothL1RegressionLoss
sigma: 3.0
code_weights: [ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 ]
direction_loss:
type: WeightedSoftmaxClassificationLoss
classification_loss_weight: 1.0
regression_loss_weight: 2.0
direction_loss_weight: 0.2
fg_cls_weight: 1.0
bg_cls_weight: 1.0
encode_rot_error_by_sin: True
use_direction_classifier: True
encode_background_as_zeros: True
box_code_size: 7
anchor_configs:
- sizes: [ 1.6, 3.9, 1.56 ] # wlh
anchor_strides: [ 0.32, 0.32, 0.0 ]
anchor_offsets: [ 0.16, -39.52, -1.78 ]
rotations: [ 0, 1.57 ]
matched_threshold: 0.6
unmatched_threshold: 0.45
anchor_area_threshold: 1
optimizer:
type: Adam
weight_decay: 0.0001
grad_clip:
type: ClipGradByGlobalNorm
clip_norm: 10.0
lr_scheduler:
type: StepDecay
learning_rate: 0.0002
step_size: 27840 # decay every 15 epochs
gamma: 0.8
export:
transforms:
- type: LoadPointCloud
dim: 4
use_dim: 4
- type: HardVoxelize
point_cloud_range: [ 0, -39.68, -3, 69.12, 39.68, 1 ]
voxel_size: [ 0.16, 0.16, 4 ]
max_points_in_voxel: 32
max_voxel_num: 16000
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/bevformer/bevformer_tiny_r50_fpn_fp16_nuscenes.yml
|
batch_size: 2
epochs: 24
amp_cfg:
enable: False
level: O1
scaler:
init_loss_scaling: 512.0
train_dataset:
type: NuscenesMVDataset
dataset_root: ./datasets/nuscenes
ann_file: ./datasets/nuscenes/bevformer_nuscenes_annotation_train.pkl
queue_length: 3
use_valid_flag: True
mode: train
class_names: [
'car', 'truck', 'construction_vehicle', 'bus', 'trailer',
'barrier', 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone'
]
transforms:
- type: LoadMultiViewImageFromFiles
to_float32: True
- type: PhotoMetricDistortionMultiViewImage
- type: LoadAnnotations3D
with_bbox_3d: True
with_label_3d: True
- type: SampleRangeFilter
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
- type: SampleNameFilter
classes: [
'car', 'truck', 'construction_vehicle', 'bus', 'trailer',
'barrier', 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone'
]
- type: NormalizeMultiviewImage
mean: [123.675, 116.28, 103.53]
std: [58.395, 57.12, 57.375]
to_rgb: True
- type: RandomScaleImageMultiViewImage
scales: [0.5]
- type: PadMultiViewImage
size_divisor: 32
- type: SampleFilerByKey
keys: ['gt_bboxes_3d', 'gt_labels_3d', 'img']
val_dataset:
type: NuscenesMVDataset
dataset_root: ./datasets/nuscenes
ann_file: ./datasets/nuscenes/bevformer_nuscenes_annotation_val.pkl
queue_length: 3
mode: val
class_names: ['car', 'truck', 'construction_vehicle', 'bus', 'trailer',
'barrier', 'motorcycle', 'bicycle', 'pedestrian',
'traffic_cone']
transforms:
- type: LoadMultiViewImageFromFiles
to_float32: True
- type: NormalizeMultiviewImage
mean: [123.675, 116.28, 103.53]
std: [58.395, 57.12, 57.375]
to_rgb: True
- type: RandomScaleImageMultiViewImage
scales: [0.5]
- type: PadMultiViewImage
size_divisor: 32
- type: SampleFilerByKey
keys: ['img']
optimizer:
type: AdamW
weight_decay: 0.01
grad_clip:
type: ClipGradByGlobalNorm
clip_norm: 35
lr_scheduler:
type: LinearWarmup
learning_rate:
type: CosineAnnealingDecayByEpoch
learning_rate: 2.8e-4
T_max: 24
eta_min: 2.8e-7
warmup_steps: 500
start_lr: 0.00009333333
end_lr: 2.8e-4
model:
type: BEVFormer
use_grid_mask: True
video_test_mode: True
backbone:
type: $paddledet.ResNet
depth: 50
lr_mult_list: [0.1, 0.1, 0.1, 0.1]
return_idx: [3]
neck:
type: $paddledet.FPN
in_channels: [2048]
out_channel: 256
spatial_scales: [0.03125]
has_extra_convs: True
extra_stage: 0
use_c5: False
pts_bbox_head:
type: 'BEVFormerHead'
bev_h: 50
bev_w: 50
num_classes: 10
in_channels: 256
num_query: 900
sync_cls_avg_factor: True
with_box_refine: True
as_two_stage: False
positional_encoding:
type: 'LearnedPositionalEncoding'
num_feats: 128
row_num_embed: 50
col_num_embed: 50
transformer:
type: 'PerceptionTransformer'
rotate_prev_bev: True
use_shift: True
use_can_bus: True
embed_dims: 256
encoder:
type: 'BEVFormerEncoder'
num_layers: 3
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
num_points_in_pillar: 4
return_intermediate: False
transformerlayers:
type_name: 'BEVFormerLayer'
attn_cfgs: [
{
type_name: 'TemporalSelfAttention',
embed_dims: 256,
num_levels: 1
},
{
type_name: 'SpatialCrossAttention',
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0],
deformable_attention: {
type_name: 'MSDeformableAttention3D',
embed_dims: 256,
num_points: 8,
num_levels: 1
},
embed_dims: 256
}
]
feedforward_channels: 512
ffn_dropout: 0.1
operation_order: ['self_attn', 'norm', 'cross_attn', 'norm', 'ffn', 'norm']
decoder:
type: 'DetectionTransformerDecoder'
num_layers: 6
return_intermediate: True
transformerlayers:
type_name: 'DetrTransformerDecoderLayer'
attn_cfgs: [
{
type_name: 'MultiheadAttention',
embed_dims: 256,
num_heads: 8,
dropout: 0.1
},
{
type_name: 'CustomMSDeformableAttention',
embed_dims: 256,
num_levels: 1
},
]
feedforward_channels: 512
ffn_dropout: 0.1
operation_order: ['self_attn', 'norm', 'cross_attn', 'norm', 'ffn', 'norm']
bbox_coder:
type: 'NMSFreeCoder'
post_center_range: [-61.2, -61.2, -10.0, 61.2, 61.2, 10.0]
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
max_num: 300
voxel_size: [0.2, 0.2, 8]
num_classes: 10
loss_cls:
type: 'WeightedFocalLoss'
use_sigmoid: True
gamma: 2.0
alpha: 0.25
loss_weight: 2.0
loss_bbox:
type: 'L1Loss'
loss_weight: 0.25
loss_iou:
type: 'GIoULoss'
loss_weight: 0.0
assigner:
type: 'HungarianAssigner3D'
cls_cost:
type: 'FocalLossCost'
weight: 2.0
reg_cost:
type: 'BBox3DL1Cost'
weight: 0.25
iou_cost:
type: 'IoUCost'
weight: 0.0 # Fake cost. This is just to make it compatible with DETR head.
sampler:
type: 'PseudoSampler'
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/bevformer/README.md
|
# BEVFormer: Learning Bird's-Eye-View Representation from Multi-Camera Images via Spatiotemporal Transformers
## 目录
* [引用](#1)
* [简介](#2)
* [模型库](#3)
* [训练 & 评估](#4)
* [nuScenes数据集](#41)
* [导出 & 部署](#8)
## <h2 id="1">引用</h2>
```
@article{li2022bevformer,
title={BEVFormer: Learning Bird’s-Eye-View Representation from Multi-Camera Images via Spatiotemporal Transformers},
  author={Li, Zhiqi and Wang, Wenhai and Li, Hongyang and Xie, Enze and Sima, Chonghao and Lu, Tong and Qiao, Yu and Dai, Jifeng},
journal={arXiv preprint arXiv:2203.17270},
year={2022}
}
```
## <h2 id="2">简介</h2>
BEVFormer以多目图像作为输入,输出三维空间里目标物体的位置、大小、方向角以及类别。整体基于DETR3D的架构设计,分为Encoder和Decoder两个部分。Encoder部分以BEV query map、当前帧的多目图像、历史帧的BEV feature map作为输入,输出当前帧的BEV feature map。其中,设计的spatial-cross-attention使用BEV query map去聚合BEV每个3D位置投影到2D多目图像上的特征,设计的temporal-cross-attention使用BEV query map去聚合BEV每个3D位置在历史帧BEV feature map上的特征,使得当前帧的BEV feature map具备时空融合的特征。在Decoder部分,以object queries作为输入,输出其对应的3D bounding box和label。其中,object queries会聚合self-attention特征以及其在Encoder输出的BEV feature map上的特征。目前BEVFormer在nuScenes数据集上的精度依然处于领先水平。
## <h2 id="3">模型库</h2>
- BEVFormer在nuScenes Val set数据集上的表现
| 模型 | 骨干网络 | mAP | NDS | 模型下载 | 配置文件 | 日志 |
| ---- | ------ | --- | ----| ------- |------- | ---- |
| BEVFormer-tiny | ResNet50-FPN | 26.22 | 36.53 | [model](https://paddle3d.bj.bcebos.com/models/bevformer/bevformer_tiny_r50_fpn_nuscenes/model.pdparams) | [config](../../../configs/bevformer/bevformer_tiny_r50_fpn_nuscenes.yml) | [log](https://paddle3d.bj.bcebos.com/models/bevformer/bevformer_tiny_r50_fpn_nuscenes/train.log)\|[vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=062bfe8678693d3f5a63c43eab7a65aa) |
**注意:nuScenes benchmark使用8张V100 GPU训练得出。**
## <h2 id="4">训练 & 评估</h2>
### <h3 id="41">nuScenes数据集</h3>
#### 数据准备
- 目前Paddle3D中提供的BEVFormer模型支持在nuScenes数据集上训练,因此需要先准备nuScenes数据集,请在[官网](https://www.nuscenes.org/nuscenes)进行下载,并且需要下载CAN bus expansion数据,将数据集目录准备如下:
```
nuscenes_dataset_root
|-- can_bus
|—— samples
|—— sweeps
|—— maps
|—— v1.0-trainval
```
在Paddle3D的目录下创建软链接 `datasets/nuscenes`,指向到上面的数据集目录:
```
mkdir datasets
ln -s /path/to/nuscenes_dataset_root ./datasets
mv ./datasets/nuscenes_dataset_root ./datasets/nuscenes
```
为加速训练过程中Nuscenes数据集的加载和解析,需要事先将Nuscenes数据集里的标注信息存储在`pkl`后缀文件中。执行以下命令会生成`bevformer_nuscenes_annotation_train.pkl`和`bevformer_nuscenes_annotation_val.pkl`:
```
python tools/create_bevformer_nus_infos.py --dataset_root ./datasets/nuscenes --can_bus_root ./datasets/nuscenes --save_dir ./datasets/nuscenes
```
生成完后的数据集目录:
```
nuscenes_dataset_root
|-- can_bus
|—— samples
|—— sweeps
|—— maps
|—— v1.0-trainval
|—— bevformer_nuscenes_annotation_train.pkl
|—— bevformer_nuscenes_annotation_val.pkl
```
#### 训练
nuScenes数据集上的训练使用8张GPU:
下载骨干网络的预训练模型参数:
```
wget https://paddledet.bj.bcebos.com/models/pretrained/ResNet50_cos_pretrained.pdparams
```
```
python -m paddle.distributed.launch --gpus 0,1,2,3,4,5,6,7 tools/train.py --config configs/bevformer/bevformer_tiny_r50_fpn_nuscenes.yml --save_dir ./output_bevformer_tiny --num_workers 4 --save_interval 1 --model ./ResNet50_cos_pretrained.pdparams
```
训练启动参数介绍可参考文档[全流程速览](../../quickstart.md#模型训练)。
#### 评估
```
python tools/evaluate.py --config configs/bevformer/bevformer_tiny_r50_fpn_nuscenes.yml --model ./output_bevformer_tiny/epoch_24/model.pdparams --num_workers 4
```
评估启动参数介绍可参考文档[全流程速览](../../quickstart.md#模型评估)。
## <h2 id="8">导出 & 部署</h2>
### <h3 id="81">模型导出</h3>
运行以下命令,将训练时保存的动态图模型文件导出成推理引擎能够加载的静态图模型文件。
```
python tools/export.py --config configs/bevformer/bevformer_tiny.yml --model ./output_bevformer_tiny/epoch_24/model.pdparams --save_dir ./output_bevformer_tiny_inference
```
| 参数 | 说明 |
| -- | -- |
| config | **[必填]** 训练配置文件所在路径 |
| model | **[必填]** 训练时保存的模型文件`model.pdparams`所在路径 |
| save_dir | **[必填]** 保存导出模型的路径,`save_dir`下将会生成三个文件:`bevformer_inference.pdiparams `、`bevformer_inference.pdiparams.info`和`bevformer_inference.pdmodel` |
### <h3 id="82">模型部署</h3>
部署代码开发进行中。
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/bevformer/bevformer_tiny_r50_fpn_nuscenes.yml
|
batch_size: 1
epochs: 24
train_dataset:
type: NuscenesMVDataset
dataset_root: ./datasets/nuscenes
ann_file: ./datasets/nuscenes/bevformer_nuscenes_annotation_train.pkl
queue_length: 3
use_valid_flag: True
mode: train
class_names: [
'car', 'truck', 'construction_vehicle', 'bus', 'trailer',
'barrier', 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone'
]
transforms:
- type: LoadMultiViewImageFromFiles
to_float32: True
- type: PhotoMetricDistortionMultiViewImage
- type: LoadAnnotations3D
with_bbox_3d: True
with_label_3d: True
- type: SampleRangeFilter
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
- type: SampleNameFilter
classes: [
'car', 'truck', 'construction_vehicle', 'bus', 'trailer',
'barrier', 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone'
]
- type: NormalizeMultiviewImage
mean: [123.675, 116.28, 103.53]
std: [58.395, 57.12, 57.375]
to_rgb: True
- type: RandomScaleImageMultiViewImage
scales: [0.5]
- type: PadMultiViewImage
size_divisor: 32
- type: SampleFilerByKey
keys: ['gt_bboxes_3d', 'gt_labels_3d', 'img']
val_dataset:
type: NuscenesMVDataset
dataset_root: ./datasets/nuscenes
ann_file: ./datasets/nuscenes/bevformer_nuscenes_annotation_val.pkl
queue_length: 3
mode: val
class_names: ['car', 'truck', 'construction_vehicle', 'bus', 'trailer',
'barrier', 'motorcycle', 'bicycle', 'pedestrian',
'traffic_cone']
transforms:
- type: LoadMultiViewImageFromFiles
to_float32: True
- type: NormalizeMultiviewImage
mean: [123.675, 116.28, 103.53]
std: [58.395, 57.12, 57.375]
to_rgb: True
- type: RandomScaleImageMultiViewImage
scales: [0.5]
- type: PadMultiViewImage
size_divisor: 32
- type: SampleFilerByKey
keys: ['img']
optimizer:
type: AdamW
weight_decay: 0.01
grad_clip:
type: ClipGradByGlobalNorm
clip_norm: 35
lr_scheduler:
type: LinearWarmup
learning_rate:
type: CosineAnnealingDecayByEpoch
learning_rate: 0.0002
T_max: 24
eta_min: 0.0000002
warmup_steps: 500
start_lr: 0.00006666666
end_lr: 0.0002
model:
type: BEVFormer
use_grid_mask: True
video_test_mode: True
backbone:
type: $paddledet.ResNet
depth: 50
lr_mult_list: [0.1, 0.1, 0.1, 0.1]
return_idx: [3]
neck:
type: $paddledet.FPN
in_channels: [2048]
out_channel: 256
spatial_scales: [0.03125]
has_extra_convs: True
extra_stage: 0
use_c5: False
pts_bbox_head:
type: 'BEVFormerHead'
bev_h: 50
bev_w: 50
num_classes: 10
in_channels: 256
num_query: 900
sync_cls_avg_factor: True
with_box_refine: True
as_two_stage: False
positional_encoding:
type: 'LearnedPositionalEncoding'
num_feats: 128
row_num_embed: 50
col_num_embed: 50
transformer:
type: 'PerceptionTransformer'
rotate_prev_bev: True
use_shift: True
use_can_bus: True
embed_dims: 256
encoder:
type: 'BEVFormerEncoder'
num_layers: 3
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
num_points_in_pillar: 4
return_intermediate: False
transformerlayers:
type_name: 'BEVFormerLayer'
attn_cfgs: [
{
type_name: 'TemporalSelfAttention',
embed_dims: 256,
num_levels: 1
},
{
type_name: 'SpatialCrossAttention',
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0],
deformable_attention: {
type_name: 'MSDeformableAttention3D',
embed_dims: 256,
num_points: 8,
num_levels: 1
},
embed_dims: 256
}
]
feedforward_channels: 512
ffn_dropout: 0.1
operation_order: ['self_attn', 'norm', 'cross_attn', 'norm', 'ffn', 'norm']
decoder:
type: 'DetectionTransformerDecoder'
num_layers: 6
return_intermediate: True
transformerlayers:
type_name: 'DetrTransformerDecoderLayer'
attn_cfgs: [
{
type_name: 'MultiheadAttention',
embed_dims: 256,
num_heads: 8,
dropout: 0.1
},
{
type_name: 'CustomMSDeformableAttention',
embed_dims: 256,
num_levels: 1
},
]
feedforward_channels: 512
ffn_dropout: 0.1
operation_order: ['self_attn', 'norm', 'cross_attn', 'norm', 'ffn', 'norm']
bbox_coder:
type: 'NMSFreeCoder'
post_center_range: [-61.2, -61.2, -10.0, 61.2, 61.2, 10.0]
point_cloud_range: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
max_num: 300
voxel_size: [0.2, 0.2, 8]
num_classes: 10
loss_cls:
type: 'WeightedFocalLoss'
use_sigmoid: True
gamma: 2.0
alpha: 0.25
loss_weight: 2.0
loss_bbox:
type: 'L1Loss'
loss_weight: 0.25
loss_iou:
type: 'GIoULoss'
loss_weight: 0.0
assigner:
type: 'HungarianAssigner3D'
cls_cost:
type: 'FocalLossCost'
weight: 2.0
reg_cost:
type: 'BBox3DL1Cost'
weight: 0.25
iou_cost:
type: 'IoUCost'
weight: 0.0 # Fake cost. This is just to make it compatible with DETR head.
sampler:
type: 'PseudoSampler'
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/paconv/paconv_modelnet40.yml
|
batch_size: 32
epochs: 350
train_dataset:
type: ModelNet40
dataset_root: datasets/modelnet40_ply_hdf5_2048
num_points: 1024
transforms:
- type: GlobalScale
min_scale: 0.667
max_scale: 1.5
size: 3
- type: GlobalTranslate
translation_std: 0.2
distribution: uniform
- type: ShufflePoint
mode: train
val_dataset:
type: ModelNet40
dataset_root: datasets/modelnet40_ply_hdf5_2048
num_points: 1024
mode: test
optimizer:
type: Momentum
momentum: 0.9
weight_decay: 0.0001
lr_scheduler:
type: CosineAnnealingDecay
learning_rate: 0.1
T_max: 107450
eta_min: 0.001
model:
type: PAConv
k_neighbors: 20
calc_scores: softmax
num_matrices: [8, 8, 8, 8]
dropout: 0.5
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/paconv/README.md
|
# PAConv:Position Adaptive Convolution with Dynamic Kernel Assembling on Point Clouds
## 目录
* [引用](#1)
* [简介](#2)
* [模型库](#3)
* [使用教程](#4)
* [数据准备](#41)
* [训练](#42)
* [评估](#43)
* [导出部署](#5)
* [执行预测](#51)
* [python部署](#52)
* [自定义数据集](#6)
<br>
## <h2 id="1">引用</h2>
> Xu, Mutian and Ding, Runyu and Zhao, Hengshuang and Qi, Xiaojuan. "PAConv: Position Adaptive Convolution with Dynamic Kernel Assembling on Point Clouds." In Proceedings of CVPR 2021.
<br>
## <h2 id="2">简介</h2>
该论文介绍了位置自适应卷积(PAConv),一种用于三维点云处理的通用卷积运算。PAConv的关键是通过动态组合存储在权重库中的基本权重矩阵来构造卷积矩阵,其中这些权重矩阵的系数通过核心网从点位置自适应学习。通过这种方式,内核以数据驱动的方式构建,使PAConv比二维卷积具有更大的灵活性,可以更好地处理不规则和无序的点云数据。此外,通过组合权重矩阵而不是从点位置预测核,降低了学习过程的复杂性。
此外,与现有的点云卷积运算不同,它们的网络架构通常是经过精心设计的,该论文中的PAConv可以集成到基于经典MLP的点云处理网络中,而不需要改变网络配置。即使建立在简单的网络上,该论文中的的方法仍然接近甚至超过最先进的模型,并显著提高了分类和分割任务的基线性能并且效率相当高。
<br>
## <h2 id="3">模型库</h2>
| 模型 | Accuracy | Vote Accuracy | 模型下载 | 配置文件 | 日志 |
| :--: | :--------: | :-------------------:| :------: | :-----: | :--: |
|PAConv | 93.4 | 93.47 | [model]() | [config]() | [log]() \| [vdl]() |
<br>
## <h2 id="4">使用教程</h2>
下面的教程将从数据准备开始,说明如何训练PAConv模型
### <h3 id="41">数据准备</h3>
目前Paddle3D中提供的模型支持在ModelNet40数据集上训练,因此需要先准备ModelNet40数据集,请在[官网](https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip)进行下载。
将数据解压后按照下方的目录结构进行组织
```shell
$ tree modelnet40_ply_hdf5_2048
modelnet40_ply_hdf5_2048
├── ply_data_test0.h5
├── ply_data_test1.h5
├── ply_data_test_0_id2file.json
├── ply_data_test_1_id2file.json
├── ply_data_train0.h5
├── ply_data_train1.h5
├── ply_data_train2.h5
├── ply_data_train3.h5
├── ply_data_train4.h5
├── ply_data_train_0_id2file.json
├── ply_data_train_1_id2file.json
├── ply_data_train_2_id2file.json
├── ply_data_train_3_id2file.json
├── ply_data_train_4_id2file.json
├── shape_names.txt
├── test_files.txt
└── train_files.txt
```
在Paddle3D的目录下创建软链接 `datasets/modelnet40_ply_hdf5_2048`,指向到上面的数据集目录
```shell
mkdir datasets
ln -s path/to/modelnet40_ply_hdf5_2048 datasets/modelnet40_ply_hdf5_2048
```
### <h3 id="42">训练</h3>
使用如下命令启动训练
```shell
# 每隔10步打印一次训练进度
# 每隔300步保存一次模型,模型参数将被保存在output目录下
python tools/train.py --config configs/paconv/paconv_modelnet40.yml --num_workers 2 --log_interval 10 --save_interval 300 --do_eval
```
### <h3 id="43">评估</h3>
使用如下命令启动评估
```shell
export CUDA_VISIBLE_DEVICES=0
# 使用Paddle3D提供的预训练模型进行评估
python tools/evaluate.py --config configs/paconv/paconv_modelnet40.yml --num_workers 2 --batch_size 16 --model output/iter_3000/model.pdparams
```
<br>
## <h2 id="5">导出部署</h2>
使用如下命令导出训练完成的模型
```shell
# 导出Paddle3D提供的预训练模型
python tools/export.py --config configs/paconv/paconv_modelnet40.yml --model output/iter_70000/model.pdparams --input_shape 1 1024 3
```
### <h3 id="51">执行预测</h3>
命令参数说明如下:
| 参数 | 说明 |
| -- | -- |
| model_file | 导出模型的结构文件`paconv.pdmodel`所在路径 |
| params_file | 导出模型的参数文件`paconv.pdiparams`所在路径 |
| input_file | 待预测的点云文件路径 |
| use_gpu | 是否使用GPU进行预测,默认为False|
| precision | 模型精度可设置fp32或fp16。默认fp32 |
| enable_benchmark | 是否开启benchmark |
| batch_size | 批次大小 |
| cpu_threads | cpu线程数 |
| enable_mkldnn | 是否使用mkldnn |
### <h3 id="52">Python部署</h3>
进入代码目录 `deploy/paconv/python`,运行以下命令,执行预测:
* 执行CPU预测
```shell
python infer.py --model_file /path/to/paconv.pdmodel --params_file /path/to/paconv.pdiparams --input_file /path/to/pointcloud --use_gpu=False --batch_size=1
```
* 执行GPU预测
```shell
python infer.py --model_file /path/to/paconv.pdmodel --params_file /path/to/paconv.pdiparams --input_file /path/to/pointcloud --use_gpu=True --batch_size=1
```
<br>
## <h2 id="6">自定义数据集</h2>
如果您想在自定义数据集上进行训练,请参考[自定义数据准备教程](../datasets/custom.md)将数据组织成ModelNet40数据格式即可
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/voxel_rcnn/README.md
|
# Voxel r-cnn: Towards high performance voxel-based 3d object detection
## 目录
* [引用](#1)
* [简介](#2)
* [模型库](#3)
* [训练 & 评估](#4)
* [KITTI数据集](#41)
* [导出 & 部署](#5)
* [自定义数据集](#6)
## <h2 id="1">引用</h2>
> Deng, Jiajun, et al. "Voxel r-cnn: Towards high performance voxel-based 3d object detection." Proceedings of the AAAI Conference on Artificial Intelligence. Vol. 35. No. 2. 2021.
## <h2 id="2">简介</h2>
Voxel RCNN在仅使用Voxel-Based的情况下,通过调整模型参数达到当时Point-Based和Voxel-Based相结合的SOTA方法的精度。并对RCNN模型结构做了改进,使得模型速度得到了大幅提升。
## <h2 id="3">模型库</h2>
- Voxel-RCNN在KITTI Val set数据集上的表现:
| 模型 | Car Mod@0.7 AP_R11 / AP_R40 | V100 Paddle Inference FP32(FPS) | 模型下载 | 配置文件 | 日志 |
| --- | --------------------------- | -------------------------------- | ------ | --------|--------|
| Voxel-RCNN | 84.64 / 85.49 | 22.39 | [model](https://paddle3d.bj.bcebos.com/models/voxel_rcnn/voxel_rcnn_005voxel_kitti_car/model.pdparams) | [config](../../../configs/voxel_rcnn/voxel_rcnn_005voxel_kitti_car.yml) | [log](https://paddle3d.bj.bcebos.com/models/voxel_rcnn/voxel_rcnn_005voxel_kitti_car/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=15cbecb8132e91dfa4fbd6d8f904c0a7) |
**注意:** KITTI benchmark使用8张V100 GPU训练得出。
## <h2 id="4">训练 & 评估</h2>
### <h3 id="41">KITTI数据集</h3>
- 目前Paddle3D中提供的Voxel-RCNN模型支持在KITTI数据集上训练,因此需要先准备KITTI数据集,请在[官网](http://www.cvlibs.net/datasets/kitti/eval_object.php?obj_benchmark=3d)进行下载:
1. Download Velodyne point clouds, if you want to use laser information (29 GB)
2. training labels of object data set (5 MB)
3. camera calibration matrices of object data set (16 MB)
并下载数据集的划分文件列表:
```
wget https://bj.bcebos.com/paddle3d/datasets/KITTI/ImageSets.tar.gz
```
将数据解压后按照下方的目录结构进行组织:
```
kitti_dataset_root
|—— training
| |—— label_2
| | |—— 000001.txt
| | |—— ...
| |—— calib
| | |—— 000001.txt
| | |—— ...
| |—— velodyne
| | |—— 000001.bin
| | |—— ...
|—— ImageSets
│ |—— test.txt
│ |—— train.txt
│ |—— trainval.txt
│ |—— val.txt
```
在Paddle3D的目录下创建软链接 `datasets/KITTI`,指向到上面的数据集目录:
```
mkdir datasets
ln -s /path/to/kitti_dataset_root ./datasets
mv ./datasets/kitti_dataset_root ./datasets/KITTI
```
- 生成训练时数据增强所需的真值库:
```
python tools/create_det_gt_database.py --dataset_name kitti --dataset_root ./datasets/KITTI --save_dir ./datasets/KITTI
```
`--dataset_root`指定KITTI数据集所在路径,`--save_dir`指定用于保存所生成的真值库的路径。该命令执行后,`save_dir`生成的目录如下:
```
kitti_train_gt_database
|—— anno_info_train.pkl
|—— Car
| |—— 4371_Car_7.bin
| |—— ...
|—— Cyclist
```
#### 训练
KITTI数据集上的训练使用8张GPU:
```
python -m paddle.distributed.launch --gpus 0,1,2,3,4,5,6,7 tools/train.py --config configs/voxel_rcnn/voxel_rcnn_car.yml --save_dir ./output_voxel_rcnn --num_workers 4 --save_interval 1
```
训练启动参数介绍可参考文档[全流程速览](../../quickstart.md#模型训练)。
#### 评估
```
python tools/evaluate.py --config configs/voxel_rcnn/voxel_rcnn_car.yml --model ./output_voxel_rcnn/epoch_80/model.pdparams --batch_size 1 --num_workers 4
```
**注意**:Voxel-RCNN的评估目前只支持batch_size为1。
评估启动参数介绍可参考文档[全流程速览](../../quickstart.md#模型评估)。
## <h2 id="5">导出 & 部署</h2>
### 模型导出
运行以下命令,将训练时保存的动态图模型文件导出成推理引擎能够加载的静态图模型文件。
```
python tools/export.py --config configs/voxel_rcnn/voxel_rcnn_car.yml --model /path/to/model.pdparams --save_dir /path/to/output
```
| 参数 | 说明 |
| -- | -- |
| config | **[必填]** 训练配置文件所在路径 |
| model | **[必填]** 训练时保存的模型文件`model.pdparams`所在路径 |
| save_dir | **[必填]** 保存导出模型的路径,`save_dir`下将会生成三个文件:`voxel_rcnn.pdiparams `、`voxel_rcnn.pdiparams.info`和`voxel_rcnn.pdmodel` |
### C++部署
#### Linux系统
#### 环境依赖
- GCC >= 5.4.0
- Cmake >= 3.5.1
- Ubuntu 16.04/18.04
> 说明:本文档的部署环节在以下环境中进行过测试并通过:
测试环境:
- GCC==8.2.0
- Cmake==3.16.0
- Ubuntu 18.04
- CUDA 11.2
- cuDNN==8.1.1
- Paddle Inferece==2.3.1
- TensorRT-8.2.5.1.Linux.x86_64-gnu.cuda-11.4.cudnn8.2
#### 编译步骤
**注意:目前Voxel-RCNN仅支持使用GPU进行推理。**
- step 1: 进入部署代码所在路径
```
cd deploy/voxel_rcnn/cpp
```
- step 2: 下载Paddle Inference C++预编译库
Paddle Inference针对**是否使用GPU**、**是否支持TensorRT**、以及**不同的CUDA/cuDNN/GCC版本**均提供已经编译好的库文件,请至[Paddle Inference C++预编译库下载列表](https://www.paddlepaddle.org.cn/inference/user_guides/download_lib.html#c)选择符合的版本。
- step 3: 修改`compile.sh`中的编译参数
主要修改编译脚本`compile.sh`中的以下参数:
| 参数 | 说明 |
| -- | -- |
| WITH_GPU | 是否使用gpu。ON或OFF, OFF表示使用CPU,默认ON|
| USE_TENSORRT | 是否使用TensorRT加速。ON或OFF,默认OFF|
| LIB_DIR | Paddle Inference C++预编译包所在路径,该路径下的内容应有:`CMakeCache.txt`、`paddle`、`third_party`和`version.txt` |
| CUDNN_LIB | cuDNN`libcudnn.so`所在路径 |
| CUDA_LIB | CUDA`libcudart.so `所在路径 |
| TENSORRT_ROOT | TensorRT所在路径。**非必须**,如果`USE_TENSORRT`设置为`ON`时,需要填写该路径,该路径下的内容应有`bin`、`lib`和`include`等|
- step 4: 开始编译
```
sh compile.sh
```
### 执行预测
**注意:目前Voxel-RCNN仅支持使用GPU进行推理。**
执行命令参数说明
| 参数 | 说明 |
| -- | -- |
| model_file | 导出模型的结构文件`voxel_rcnn.pdmodel`所在路径 |
| params_file | 导出模型的参数文件`voxel_rcnn.pdiparams`所在路径 |
| lidar_file | 待预测的点云文件所在路径 |
| num_point_dim | 点云文件中每个点的维度大小。例如,若每个点的信息是`x, y, z, intensity`,则`num_point_dim`填写为4 |
| point_cloud_range | 输入模型的点云所处的空间范围,超出此范围内的点将被滤除。格式为`"X_min Y_min Z_min X_max Y_max Z_max"`|
```
./build/main --model_file /path/to/voxel_rcnn.pdmodel --params_file /path/to/voxel_rcnn.pdiparams --lidar_file /path/to/lidar.pcd.bin --num_point_dim 4 --point_cloud_range "0 -40 -3 70.4 40 1"
```
**注意:** 请预先确认实际待测试点云文件的维度是否是4,如果不是4,`--num_point_dim`请修改为实际值。
### 开启TensorRT加速预测【可选】
**注意:请根据编译步骤的step 3,修改`compile.sh`中TensorRT相关的编译参数,并重新编译。**
运行命令参数说明如下:
| 参数 | 说明 |
| -- | -- |
| model_file | 导出模型的结构文件`voxel_rcnn.pdmodel`所在路径 |
| params_file | 导出模型的参数文件`voxel_rcnn.pdiparams`所在路径 |
| lidar_file | 待预测的点云文件所在路径 |
| num_point_dim | 点云文件中每个点的维度大小。例如,若每个点的信息是`x, y, z, intensity`,则`num_point_dim`填写为4 |
| point_cloud_range | 输入模型的点云所处的空间范围,超出此范围内的点将被滤除。格式为`"X_min Y_min Z_min X_max Y_max Z_max"`|
| use_trt | 是否使用TensorRT进行加速,默认0|
| trt_precision | 当use_trt设置为1时,模型精度可设置0或1,0表示fp32, 1表示fp16。默认0 |
| trt_use_static | 当trt_use_static设置为1时,**在首次运行程序的时候会将TensorRT的优化信息进行序列化到磁盘上,下次运行时直接加载优化的序列化信息而不需要重新生成**。默认0 |
| trt_static_dir | 当trt_use_static设置为1时,保存优化信息的路径 |
| collect_shape_info | 是否收集模型动态shape信息。默认0。**只需首次运行,下次运行时直接加载生成的shape信息文件即可进行TensorRT加速推理** |
| dynamic_shape_file | 保存模型动态shape信息的文件路径。 |
* **首次运行TensorRT**,收集模型动态shape信息,并保存至`--dynamic_shape_file`指定的文件中
```
./build/main --model_file /path/to/voxel_rcnn.pdmodel --params_file /path/to/voxel_rcnn.pdiparams --lidar_file /path/to/lidar.pcd.bin --num_point_dim 4 --point_cloud_range "0 -40 -3 70.4 40 1" --use_trt 1 --collect_shape_info 1 --dynamic_shape_file /path/to/shape_info.txt
```
* 加载`--dynamic_shape_file`指定的模型动态shape信息,使用FP32精度进行预测
```
./build/main --model_file /path/to/voxel_rcnn.pdmodel --params_file /path/to/voxel_rcnn.pdiparams --lidar_file /path/to/lidar.pcd.bin --num_point_dim 4 --point_cloud_range "0 -40 -3 70.4 40 1" --use_trt 1 --dynamic_shape_file /path/to/shape_info.txt
```
* 加载`--dynamic_shape_file`指定的模型动态shape信息,使用FP16精度进行预测
```
./build/main --model_file /path/to/voxel_rcnn.pdmodel --params_file /path/to/voxel_rcnn.pdiparams --lidar_file /path/to/lidar.pcd.bin --num_point_dim 4 --point_cloud_range "0 -40 -3 70.4 40 1" --use_trt 1 --dynamic_shape_file /path/to/shape_info.txt --trt_precision 1
```
* 如果觉得每次运行时模型加载的时间过长,可以设置`trt_use_static`和`trt_static_dir`,首次运行时将TensorRT的优化信息保存在硬盘中,后续直接反序列化优化信息即可
```
./build/main --model_file /path/to/voxel_rcnn.pdmodel --params_file /path/to/voxel_rcnn.pdiparams --lidar_file /path/to/lidar.pcd.bin --num_point_dim 4 --point_cloud_range "0 -40 -3 70.4 40 1" --use_trt 1 --dynamic_shape_file /path/to/shape_info.txt --trt_precision 1 --trt_use_static 1 --trt_static_dir /path/to/OptimCacheDir
```
### Python部署
**注意:目前Voxel-RCNN仅支持使用GPU进行推理。**
命令参数说明如下:
| 参数 | 说明 |
| -- | -- |
| model_file | 导出模型的结构文件`voxel_rcnn.pdmodel`所在路径 |
| params_file | 导出模型的参数文件`voxel_rcnn.pdiparams`所在路径 |
| lidar_file | 待预测的点云文件所在路径 |
| num_point_dim | 点云文件中每个点的维度大小。例如,若每个点的信息是`x, y, z, intensity`,则`num_point_dim`填写为4 |
| point_cloud_range | 输入模型的点云所处的空间范围,超出此范围内的点将被滤除。格式为`X_min Y_min Z_min X_max Y_max Z_max`|
| use_trt | 是否使用TensorRT进行加速,默认0|
| trt_precision | 当use_trt设置为1时,模型精度可设置0或1,0表示fp32, 1表示fp16。默认0 |
| trt_use_static | 当trt_use_static设置为1时,**在首次运行程序的时候会将TensorRT的优化信息进行序列化到磁盘上,下次运行时直接加载优化的序列化信息而不需要重新生成**。默认0 |
| trt_static_dir | 当trt_use_static设置为1时,保存优化信息的路径 |
| collect_shape_info | 是否收集模型动态shape信息。默认0。**只需首次运行,后续直接加载生成的shape信息文件即可进行TensorRT加速推理** |
| dynamic_shape_file | 保存模型动态shape信息的文件路径。 |
运行以下命令,执行预测:
```
python infer.py --model_file /path/to/voxel_rcnn.pdmodel --params_file /path/to/voxel_rcnn.pdiparams --lidar_file /path/to/lidar.pcd.bin --num_point_dim 4 --point_cloud_range 0 -40 -3 70.4 40 1
```
## <h2 id="6">自定义数据集</h2>
请参考文档[自定义数据集格式说明](../../../datasets/custom.md)准备自定义数据集。
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/voxel_rcnn/voxel_rcnn_005voxel_kitti_car.yml
|
batch_size: 2
epochs: 80
train_dataset:
type: KittiPCDataset
dataset_root: datasets/KITTI
transforms:
- type: LoadPointCloud
dim: 4
use_dim: 4
- type: RemoveCameraInvisiblePointsKITTIV2
- type: SamplingDatabase
min_num_points_in_box_per_class:
Car: 5
max_num_samples_per_class:
Car: 15
ignored_difficulty: [-1]
database_anno_path: datasets/KITTI/kitti_train_gt_database/anno_info_train.pkl
database_root: datasets/KITTI/
class_names: ["Car"]
- type: RandomVerticalFlip
- type: GlobalRotate
min_rot: -0.78539816
max_rot: 0.78539816
- type: GlobalScale
min_scale: 0.95
max_scale: 1.05
- type: ShufflePoint
- type: FilterPointOutsideRange
point_cloud_range: [0, -40, -3, 70.4, 40, 1]
- type: FilterBBoxOutsideRange
point_cloud_range: [0, -40, -3, 70.4, 40, 1]
- type: Gt2PVRCNNTarget
mode: train
class_balanced_sampling: False
class_names: ["Car"]
use_road_plane: True
val_dataset:
type: KittiPCDataset
dataset_root: datasets/KITTI
transforms:
- type: LoadPointCloud
dim: 4
use_dim: 4
- type: RemoveCameraInvisiblePointsKITTIV2
- type: FilterPointOutsideRange
point_cloud_range: [0, -40, -3, 70.4, 40, 1]
mode: val
class_names: ["Car"]
model:
type: VoxelRCNN
num_class: 1
voxelizer:
type: HardVoxelizer
point_cloud_range: [0, -40, -3, 70.4, 40, 1]
voxel_size: [0.05, 0.05, 0.1]
max_num_points_in_voxel: 5
max_num_voxels: [16000, 40000]
voxel_encoder:
type: VoxelMean
in_channels: 4
middle_encoder:
type: SparseNet3D
in_channels: 4
voxel_size: [0.05, 0.05, 0.1]
point_cloud_range: [0, -40, -3, 70.4, 40, 1]
backbone:
type: SecondBackbone
in_channels: 256
out_channels: [64, 128]
layer_nums: [5, 5]
downsample_strides: [1, 2]
neck:
type: SecondFPN
in_channels: [64, 128]
out_channels: [128, 128]
upsample_strides: [1, 2]
use_conv_for_no_stride: False
dense_head:
type: AnchorHeadSingle
model_cfg:
use_direction_classifier: True
dir_offset: 0.78539
dir_limit_offset: 0.0
input_channels: 256
point_cloud_range: [0, -40, -3, 70.4, 40, 1]
class_names: ['Car']
predict_boxes_when_training: True
voxel_size: [0.05, 0.05, 0.1]
anchor_generator_cfg: [
{
'class_name': 'Car',
'anchor_sizes': [[3.9, 1.6, 1.56]],
'anchor_rotations': [0, 1.57],
'anchor_bottom_heights': [-1.78],
'align_center': False,
'feature_map_stride': 8,
'matched_threshold': 0.6,
'unmatched_threshold': 0.45
}
]
anchor_target_cfg:
pos_fraction: -1.0
sample_size: 512
norm_by_num_examples: False
match_height: False
num_dir_bins: 2
loss_weights:
cls_weight: 1.0
loc_weight: 2.0
dir_weight: 0.2
code_weights: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
roi_head:
type: VoxelRCNNHead
num_class: 1
input_channels:
x_conv1: 16
x_conv2: 32
x_conv3: 64
x_conv4: 64
point_cloud_range: [0, -40, -3, 70.4, 40, 1]
voxel_size: [0.05, 0.05, 0.1]
model_cfg:
class_agnostic: True
shared_fc: [256, 256]
cls_fc: [256, 256]
reg_fc: [256, 256]
dp_ratio: 0.3
nms_config:
train:
nms_type: nms_gpu
multi_class_nms: False
nms_pre_maxsize: 9000
nms_post_maxsize: 512
nms_thresh: 0.8
test:
nms_type: nms_gpu
multi_class_nms: False
use_fast_nms: False
score_thresh: 0.0
nms_pre_maxsize: 2048
nms_post_maxsize: 100
nms_thresh: 0.7
roi_grid_pool:
features_source: ['x_conv2', 'x_conv3', 'x_conv4']
pre_mlp: True
grid_size: 6
pool_layers:
x_conv2:
mlps: [[32, 32]]
query_ranges: [[4, 4, 4]]
pool_radius: [0.4]
nsample: [16]
pool_method: max_pool
x_conv3:
mlps: [[32, 32]]
query_ranges: [[4, 4, 4]]
pool_radius: [0.8]
nsample: [16]
pool_method: max_pool
x_conv4:
mlps: [[32, 32]]
query_ranges: [[4, 4, 4]]
pool_radius: [1.6]
nsample: [16]
pool_method: max_pool
target_config:
box_coder: ResidualCoder
roi_per_image: 128
fg_ratio: 0.5
sample_roi_by_each_class: True
cls_score_type: roi_iou
cls_fg_thresh: 0.75
cls_bg_thresh: 0.25
cls_bg_thresh_lo: 0.1
hard_bg_ratio: 0.8
reg_fg_thresh: 0.55
loss_config:
cls_loss: BinaryCrossEntropy
reg_loss: smooth-l1
corner_loss_regularization: True
grid_3d_iou_loss: False
loss_weights: {
'rcnn_cls_weight': 1.0,
'rcnn_reg_weight': 1.0,
'rcnn_corner_weight': 1.0,
'code_weights': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
}
post_process_cfg:
score_thresh: 0.3
output_raw_score: False
nms_config:
multi_classes_nms: False
nms_type: nms_gpu
nms_thresh: 0.1
nms_pre_maxsize: 4096
nms_post_maxsize: 500
optimizer:
type: OneCycleAdam
beta2: 0.99
weight_decay: 0.01
grad_clip:
type: ClipGradByGlobalNorm
clip_norm: 10
beta1:
type: OneCycleDecayWarmupMomentum
momentum_peak: 0.95
momentum_trough: 0.85
step_ratio_peak: 0.4
lr_scheduler:
type: OneCycleWarmupDecayLr
base_learning_rate: 0.001
lr_ratio_peak: 10
lr_ratio_trough: 0.0001
step_ratio_peak: 0.4
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/dd3d/dd3d_dla_34_kitti.yml
|
_base_: '../_base_/kitti_mono.yml'
batch_size: 8 #total bs 32
iters: 50000
train_dataset:
transforms:
- type: LoadImage
reader: pillow
to_chw: False
to_rgb: False
- type: ResizeShortestEdge
short_edge_length: [288, 304, 320, 336, 352, 368, 384, 400, 416, 448, 480, 512, 544, 576]
max_size: 10000
sample_style: choice
- type: ToVisionBasedBox
- type: RandomHorizontalFlip
input_type: floating_point_coordinates
- type: RandomBrightness
intensity_min: 0.8
intensity_max: 1.2
- type: RandomSaturation
intensity_min: 0.8
intensity_max: 1.2
- type: RandomContrast
intensity_min: 0.8
intensity_max: 1.2
class_names: ["Car", "Pedestrian", "Cyclist", "Van", "Person_sitting"]
CLASS_MAP: {'Car': 0, 'Pedestrian': 1, 'Cyclist': 2, 'Van': 3, 'Person_sitting': 4}
class_balanced_sampling: True
val_dataset:
transforms:
- type: LoadImage
reader: pillow
to_chw: False
to_rgb: False
- type: ResizeShortestEdge
short_edge_length: [384]
max_size: 10000
sample_style: choice
- type: ToVisionBasedBox
class_names: ["Car", "Pedestrian", "Cyclist", "Van", "Person_sitting"]
CLASS_MAP: {'Car': 0, 'Pedestrian': 1, 'Cyclist': 2, 'Van': 3, 'Person_sitting': 4}
optimizer:
type: Momentum
momentum: 0.9
use_nesterov: False
weight_decay: 0.0001
lr_scheduler:
type: MultiStepDecay
milestones: [43000, 48000]
learning_rate: 0.001
gamma: 0.1
model:
type: DD3D
backbone:
type: DLABase34
norm_type: 'frozen_bn'
out_features: [3, 4, 5]
feature_locations_offset: "none"
fpn:
type: FPN
in_strides: [8, 16, 32]
in_channels: [128, 256, 512]
out_channel: 256
norm: 'FrozenBN'
top_block:
type: LastLevelP6P7
in_channels: 256
out_channels: 256
in_feature: 'p5'
fuse_type: "sum"
fcos2d_head:
type: FCOS2DHead
in_strides: [8, 16, 32, 64, 128]
in_channels: [256, 256, 256, 256, 256]
num_classes: 5
use_scale: True
box2d_scale_init_factor: 1.0
version: "v2"
num_cls_convs: 4
num_box_convs: 4
use_deformable: False
norm: "BN"
fcos2d_loss:
type: FCOS2DLoss
alpha: 0.25
gamma: 2.0
loc_loss_type: 'giou'
num_classes: 5
fcos2d_inference:
type: FCOS2DInference
thresh_with_ctr: True
pre_nms_thresh: 0.05
pre_nms_topk: 1000
post_nms_topk: 100
nms_thresh: 0.75
num_classes: 5
fcos3d_head:
type: FCOS3DHead
in_strides: [8, 16, 32, 64, 128]
in_channels: [256, 256, 256, 256, 256]
num_classes: 5
use_scale: True
depth_scale_init_factor: 0.3
proj_ctr_scale_init_factor: 1.0
use_per_level_predictors: False
mean_depth_per_level: [32.594, 15.178, 8.424, 5.004, 4.662]
std_depth_per_level: [14.682, 7.139, 4.345, 2.399, 2.587]
num_convs: 4
use_deformable: False
norm: 'FrozenBN'
class_agnostic_box3d: False
per_level_predictors: False
fcos3d_loss:
type: FCOS3DLoss
canon_box_sizes: [[1.61876949, 3.89154523, 1.52969237], # Car
[0.62806586, 0.82038497, 1.76784787], # Pedestrian
[0.56898187, 1.77149234, 1.7237099], # Cyclist
[1.9134491 , 5.15499603, 2.18998422], # Van
[2.61168401, 9.22692319, 3.36492722], # Truck
[0.5390196 , 1.08098042, 1.28392158], # Person_sitting
[2.36044838, 15.56991038, 3.5289238], # Tram
[1.24489164, 2.51495357, 1.61402478], # Misc
] # (width, length, height)
min_depth: 0.1
max_depth: 80.0
predict_allocentric_rot: True
scale_depth_by_focal_lengths: True
scale_depth_by_focal_lengths_factor: 500.0
predict_distance: False
smooth_l1_loss_beta: 0.05
max_loss_per_group: 20.0
box3d_loss_weight: 2.0
conf3d_loss_weight: 1.0
conf_3d_temperature: 1.0
num_classes: 5
class_agnostic: False
fcos3d_inference:
type: FCOS3DInference
canon_box_sizes: [[1.61876949, 3.89154523, 1.52969237], # Car
[0.62806586, 0.82038497, 1.76784787], # Pedestrian
[0.56898187, 1.77149234, 1.7237099], # Cyclist
[1.9134491 , 5.15499603, 2.18998422], # Van
[2.61168401, 9.22692319, 3.36492722], # Truck
[0.5390196 , 1.08098042, 1.28392158], # Person_sitting
[2.36044838, 15.56991038, 3.5289238], # Tram
[1.24489164, 2.51495357, 1.61402478], # Misc
] # (width, length, height)
min_depth: 0.1
max_depth: 80.0
predict_allocentric_rot: True
scale_depth_by_focal_lengths: True
scale_depth_by_focal_lengths_factor: 500.0
predict_distance: False
num_classes: 5
class_agnostic: False
prepare_targets:
type: DD3DTargetPreparer
input_strides: [8, 16, 32, 64, 128]
num_classes: 5
center_sample: True
radius: 1.5
dd3d_on: True
sizes_of_interest: [64, 128, 256, 512]
do_nms: True
nusc_sample_aggregate: False
num_classes: 5
pixel_mean: [103.53, 116.28, 123.675]
pixel_std: [57.375, 57.12, 58.395]
input_strides: [8, 16, 32, 64, 128]
size_divisibility: 128
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/dd3d/dd3d_dla_34_kitti_warmup.yml
|
_base_: '../_base_/kitti_mono.yml'
batch_size: 8 #total bs 32
iters: 4000
train_dataset:
transforms:
- type: LoadImage
reader: pillow
to_chw: False
to_rgb: False
- type: ResizeShortestEdge
short_edge_length: [288, 304, 320, 336, 352, 368, 384, 400, 416, 448, 480, 512, 544, 576]
max_size: 10000
sample_style: choice
- type: ToVisionBasedBox
- type: RandomHorizontalFlip
input_type: floating_point_coordinates
- type: RandomBrightness
intensity_min: 0.8
intensity_max: 1.2
- type: RandomSaturation
intensity_min: 0.8
intensity_max: 1.2
- type: RandomContrast
intensity_min: 0.8
intensity_max: 1.2
class_names: ["Car", "Pedestrian", "Cyclist", "Van", "Person_sitting"]
CLASS_MAP: {'Car': 0, 'Pedestrian': 1, 'Cyclist': 2, 'Van': 3, 'Person_sitting': 4}
val_dataset:
transforms:
- type: LoadImage
reader: pillow
to_chw: False
to_rgb: False
- type: ResizeShortestEdge
short_edge_length: 384
max_size: 10000
sample_style: choice
- type: ToVisionBasedBox
class_names: ["Car", "Pedestrian", "Cyclist", "Van", "Person_sitting"]
CLASS_MAP: {'Car': 0, 'Pedestrian': 1, 'Cyclist': 2, 'Van': 3, 'Person_sitting': 4}
optimizer:
type: Momentum
momentum: 0.9
use_nesterov: False
weight_decay: 0.0001
lr_scheduler:
type: LinearWarmup
learning_rate: 0.001
warmup_steps: 4000
start_lr: 0.0000001
end_lr: 0.001
model:
type: DD3D
pretrained: https://paddle3d.bj.bcebos.com/models/dd3d/dd3d_dla_34_kitti/depth.pdparams
backbone:
type: DLABase34
norm_type: 'frozen_bn'
out_features: [3, 4, 5]
feature_locations_offset: "none"
fpn:
type: FPN
in_strides: [8, 16, 32]
in_channels: [128, 256, 512]
out_channel: 256
norm: 'FrozenBN'
top_block:
type: LastLevelP6P7
in_channels: 256
out_channels: 256
in_feature: 'p5'
fuse_type: "sum"
fcos2d_head:
type: FCOS2DHead
in_strides: [8, 16, 32, 64, 128]
in_channels: [256, 256, 256, 256, 256]
num_classes: 5
use_scale: True
box2d_scale_init_factor: 1.0
version: "v2"
num_cls_convs: 4
num_box_convs: 4
use_deformable: False
norm: "BN"
fcos2d_loss:
type: FCOS2DLoss
alpha: 0.25
gamma: 2.0
loc_loss_type: 'giou'
num_classes: 5
fcos2d_inference:
type: FCOS2DInference
thresh_with_ctr: True
pre_nms_thresh: 0.05
pre_nms_topk: 1000
post_nms_topk: 100
nms_thresh: 0.75
num_classes: 5
fcos3d_head:
type: FCOS3DHead
in_strides: [8, 16, 32, 64, 128]
in_channels: [256, 256, 256, 256, 256]
num_classes: 5
use_scale: True
depth_scale_init_factor: 0.3
proj_ctr_scale_init_factor: 1.0
use_per_level_predictors: False
mean_depth_per_level: [32.594, 15.178, 8.424, 5.004, 4.662]
std_depth_per_level: [14.682, 7.139, 4.345, 2.399, 2.587]
num_convs: 4
use_deformable: False
norm: 'FrozenBN'
class_agnostic_box3d: False
per_level_predictors: False
fcos3d_loss:
type: FCOS3DLoss
canon_box_sizes: [[1.61876949, 3.89154523, 1.52969237], # Car
[0.62806586, 0.82038497, 1.76784787], # Pedestrian
[0.56898187, 1.77149234, 1.7237099], # Cyclist
[1.9134491 , 5.15499603, 2.18998422], # Van
[2.61168401, 9.22692319, 3.36492722], # Truck
[0.5390196 , 1.08098042, 1.28392158], # Person_sitting
[2.36044838, 15.56991038, 3.5289238], # Tram
[1.24489164, 2.51495357, 1.61402478], # Misc
] # (width, length, height)
min_depth: 0.1
max_depth: 80.0
predict_allocentric_rot: True
scale_depth_by_focal_lengths: True
scale_depth_by_focal_lengths_factor: 500.0
predict_distance: False
smooth_l1_loss_beta: 0.05
max_loss_per_group: 20.0
box3d_loss_weight: 2.0
conf3d_loss_weight: 1.0
conf_3d_temperature: 1.0
num_classes: 5
class_agnostic: False
fcos3d_inference:
type: FCOS3DInference
canon_box_sizes: [[1.61876949, 3.89154523, 1.52969237], # Car
[0.62806586, 0.82038497, 1.76784787], # Pedestrian
[0.56898187, 1.77149234, 1.7237099], # Cyclist
[1.9134491 , 5.15499603, 2.18998422], # Van
[2.61168401, 9.22692319, 3.36492722], # Truck
[0.5390196 , 1.08098042, 1.28392158], # Person_sitting
[2.36044838, 15.56991038, 3.5289238], # Tram
[1.24489164, 2.51495357, 1.61402478], # Misc
] # (width, length, height)
min_depth: 0.1
max_depth: 80.0
predict_allocentric_rot: True
scale_depth_by_focal_lengths: True
scale_depth_by_focal_lengths_factor: 500.0
predict_distance: False
num_classes: 5
class_agnostic: False
prepare_targets:
type: DD3DTargetPreparer
input_strides: [8, 16, 32, 64, 128]
num_classes: 5
center_sample: True
radius: 1.5
dd3d_on: True
sizes_of_interest: [64, 128, 256, 512]
do_nms: True
nusc_sample_aggregate: False
num_classes: 5
pixel_mean: [103.53, 116.28, 123.675]
pixel_std: [57.375, 57.12, 58.395]
input_strides: [8, 16, 32, 64, 128]
size_divisibility: 128
| 0
|
apollo_public_repos/apollo-model-centerpoint/configs
|
apollo_public_repos/apollo-model-centerpoint/configs/dd3d/dd3d_v2_99_kitti_warmup.yml
|
_base_: '../_base_/kitti_mono.yml'
batch_size: 4  # per-device batch size; effective total batch size is 16
iters: 8000
train_dataset:
transforms:
- type: LoadImage
reader: pillow
to_chw: False
to_rgb: False
- type: ResizeShortestEdge
short_edge_length: [288, 304, 320, 336, 352, 368, 384, 400, 416, 448, 480, 512, 544, 576]
max_size: 10000
sample_style: choice
- type: ToVisionBasedBox
- type: RandomHorizontalFlip
input_type: floating_point_coordinates
- type: RandomBrightness
intensity_min: 0.8
intensity_max: 1.2
- type: RandomSaturation
intensity_min: 0.8
intensity_max: 1.2
- type: RandomContrast
intensity_min: 0.8
intensity_max: 1.2
class_names: ["Car", "Pedestrian", "Cyclist", "Van", "Person_sitting"]
CLASS_MAP: {'Car': 0, 'Pedestrian': 1, 'Cyclist': 2, 'Van': 3, 'Person_sitting': 4}
val_dataset:
transforms:
- type: LoadImage
reader: pillow
to_chw: False
to_rgb: False
- type: ResizeShortestEdge
short_edge_length: 384
max_size: 10000
sample_style: choice
- type: ToVisionBasedBox
class_names: ["Car", "Pedestrian", "Cyclist", "Van", "Person_sitting"]
CLASS_MAP: {'Car': 0, 'Pedestrian': 1, 'Cyclist': 2, 'Van': 3, 'Person_sitting': 4}
optimizer:
type: Momentum
momentum: 0.9
use_nesterov: False
weight_decay: 0.0001
lr_scheduler:
type: LinearWarmup
learning_rate: 0.0005
warmup_steps: 8000
start_lr: 0.00000005
end_lr: 0.0005
model:
type: DD3D
pretrained: https://paddle3d.bj.bcebos.com/models/dd3d/dd3d_v2_99_kitti/depth.pdparams
backbone:
type: VoVNet99_eSE
norm_type: 'frozen_bn'
input_ch: 3
out_features: ['stage2', 'stage3', 'stage4', 'stage5']
feature_locations_offset: "none"
fpn:
type: FPN
in_strides: [4, 8, 16, 32]
in_channels: [256, 512, 768, 1024]
out_channel: 256
norm: 'FrozenBN'
top_block:
type: LastLevelP6
in_channels: 256
out_channels: 256
in_feature: 'p5'
fuse_type: "sum"
fcos2d_head:
type: FCOS2DHead
in_strides: [4, 8, 16, 32, 64]
in_channels: [256, 256, 256, 256, 256]
num_classes: 5
use_scale: True
box2d_scale_init_factor: 1.0
version: "v2"
num_cls_convs: 4
num_box_convs: 4
use_deformable: False
norm: "BN"
fcos2d_loss:
type: FCOS2DLoss
alpha: 0.25
gamma: 2.0
loc_loss_type: 'giou'
num_classes: 5
fcos2d_inference:
type: FCOS2DInference
thresh_with_ctr: True
pre_nms_thresh: 0.05
pre_nms_topk: 1000
post_nms_topk: 100
nms_thresh: 0.75
num_classes: 5
fcos3d_head:
type: FCOS3DHead
in_strides: [4, 8, 16, 32, 64]
in_channels: [256, 256, 256, 256, 256]
num_classes: 5
use_scale: True
depth_scale_init_factor: 0.3
proj_ctr_scale_init_factor: 1.0
use_per_level_predictors: False
mean_depth_per_level: [32.594, 15.178, 8.424, 5.004, 4.662]
std_depth_per_level: [14.682, 7.139, 4.345, 2.399, 2.587]
num_convs: 4
use_deformable: False
norm: 'FrozenBN'
class_agnostic_box3d: False
per_level_predictors: False
fcos3d_loss:
type: FCOS3DLoss
canon_box_sizes: [[1.61876949, 3.89154523, 1.52969237], # Car
[0.62806586, 0.82038497, 1.76784787], # Pedestrian
[0.56898187, 1.77149234, 1.7237099], # Cyclist
[1.9134491 , 5.15499603, 2.18998422], # Van
[2.61168401, 9.22692319, 3.36492722], # Truck
[0.5390196 , 1.08098042, 1.28392158], # Person_sitting
[2.36044838, 15.56991038, 3.5289238], # Tram
[1.24489164, 2.51495357, 1.61402478], # Misc
] # (width, length, height)
min_depth: 0.1
max_depth: 80.0
predict_allocentric_rot: True
scale_depth_by_focal_lengths: True
scale_depth_by_focal_lengths_factor: 500.0
predict_distance: False
smooth_l1_loss_beta: 0.05
max_loss_per_group: 20.0
box3d_loss_weight: 2.0
conf3d_loss_weight: 1.0
conf_3d_temperature: 1.0
num_classes: 5
class_agnostic: False
fcos3d_inference:
type: FCOS3DInference
canon_box_sizes: [[1.61876949, 3.89154523, 1.52969237], # Car
[0.62806586, 0.82038497, 1.76784787], # Pedestrian
[0.56898187, 1.77149234, 1.7237099], # Cyclist
[1.9134491 , 5.15499603, 2.18998422], # Van
[2.61168401, 9.22692319, 3.36492722], # Truck
[0.5390196 , 1.08098042, 1.28392158], # Person_sitting
[2.36044838, 15.56991038, 3.5289238], # Tram
[1.24489164, 2.51495357, 1.61402478], # Misc
] # (width, length, height)
min_depth: 0.1
max_depth: 80.0
predict_allocentric_rot: True
scale_depth_by_focal_lengths: True
scale_depth_by_focal_lengths_factor: 500.0
predict_distance: False
num_classes: 5
class_agnostic: False
prepare_targets:
type: DD3DTargetPreparer
input_strides: [4, 8, 16, 32, 64]
num_classes: 5
center_sample: True
radius: 1.5
dd3d_on: True
sizes_of_interest: [64, 128, 256, 512]
do_nms: True
nusc_sample_aggregate: False
num_classes: 5
pixel_mean: [103.53, 116.28, 123.675]
pixel_std: [57.375, 57.12, 58.395]
input_strides: [4, 8, 16, 32, 64]
size_divisibility: 64
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.