repo_id
stringlengths 19
138
| file_path
stringlengths 32
200
| content
stringlengths 1
12.9M
| __index_level_0__
int64 0
0
|
|---|---|---|---|
apollo_public_repos/apollo/modules/tools/vehicle_calibration
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration/calibration_data_sample/t65b30.txt
|
a: gear_location = chassis_pb2.Chassis.GEAR_DRIVE
a: brake = 40.0
a: steering_target = 0.0
a: throttle = 0.0
c: vehicle_speed == 0.0
t: 2.0
a: brake = 0.0
a: throttle = 65.0
c: vehicle_speed >= 12.0
a: throttle = 0.0
t: 1.0
a: brake = 30.0
c: vehicle_speed == 0.0
| 0
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration/calibration_data_sample/t22b30.txt
|
a: gear_location = chassis_pb2.Chassis.GEAR_DRIVE
a: brake = 40.0
a: steering_target = 0.0
a: throttle = 0.0
c: vehicle_speed == 0.0
t: 2.0
a: brake = 0.0
a: throttle = 22.0
c: vehicle_speed >= 10.0
a: throttle = 0.0
t: 1.0
a: brake = 30.0
c: vehicle_speed == 0.0
| 0
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration/calibration_data_sample/t30b30.txt
|
a: gear_location = chassis_pb2.Chassis.GEAR_DRIVE
a: brake = 40.0
a: steering_target = 0.0
a: throttle = 0.0
c: vehicle_speed == 0.0
t: 2.0
a: brake = 0.0
a: throttle = 30.0
c: vehicle_speed >= 12.0
a: throttle = 0.0
t: 1.0
a: brake = 30.0
c: vehicle_speed == 0.0
| 0
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration/calibration_data_sample/t75b30.txt
|
a: gear_location = chassis_pb2.Chassis.GEAR_DRIVE
a: brake = 40.0
a: steering_target = 0.0
a: throttle = 0.0
c: vehicle_speed == 0.0
t: 2.0
a: brake = 0.0
a: throttle = 75.0
c: vehicle_speed >= 12.0
a: throttle = 0.0
t: 1.0
a: brake = 30.0
c: vehicle_speed == 0.0
| 0
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration/calibration_data_sample/b22_up.txt
|
a: gear_location = chassis_pb2.Chassis.GEAR_DRIVE
a: brake = 40.0
a: steering_target = 0.0
a: throttle = 0.0
c: vehicle_speed == 0.0
t: 2.0
a: brake = 22.0
a: throttle = 0.0
c: vehicle_speed >= 10.0
| 0
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration/calibration_data_sample/t70b30.txt
|
a: gear_location = chassis_pb2.Chassis.GEAR_DRIVE
a: brake = 40.0
a: steering_target = 0.0
a: throttle = 0.0
c: vehicle_speed == 0.0
t: 2.0
a: brake = 0.0
a: throttle = 70.0
c: vehicle_speed >= 12.0
a: throttle = 0.0
t: 1.0
a: brake = 30.0
c: vehicle_speed == 0.0
| 0
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration/calibration_data_sample/t25b30.txt
|
a: gear_location = chassis_pb2.Chassis.GEAR_DRIVE
a: brake = 40.0
a: steering_target = 0.0
a: throttle = 0.0
c: vehicle_speed == 0.0
t: 2.0
a: brake = 0.0
a: throttle = 25.0
c: vehicle_speed >= 12.0
a: throttle = 0.0
t: 1.0
a: brake = 30.0
c: vehicle_speed == 0.0
| 0
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration/calibration_data_sample/t60b30.txt
|
a: gear_location = chassis_pb2.Chassis.GEAR_DRIVE
a: brake = 40.0
a: steering_target = 0.0
a: throttle = 0.0
c: vehicle_speed == 0.0
t: 2.0
a: brake = 0.0
a: throttle = 60.0
c: vehicle_speed >= 12.0
a: throttle = 0.0
t: 1.0
a: brake = 30.0
c: vehicle_speed == 0.0
| 0
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration/calibration_data_sample/t35b30.txt
|
a: gear_location = chassis_pb2.Chassis.GEAR_DRIVE
a: brake = 40.0
a: steering_target = 0.0
a: throttle = 0.0
c: vehicle_speed == 0.0
t: 2.0
a: brake = 0.0
a: throttle = 35.0
c: vehicle_speed >= 12.0
a: throttle = 0.0
t: 1.0
a: brake = 30.0
c: vehicle_speed == 0.0
| 0
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration/calibration_data_sample/t27b30.txt
|
a: gear_location = chassis_pb2.Chassis.GEAR_DRIVE
a: brake = 40.0
a: steering_target = 0.0
a: throttle = 0.0
c: vehicle_speed == 0.0
t: 2.0
a: brake = 0.0
a: throttle = 27.0
c: vehicle_speed >= 12.0
a: throttle = 0.0
t: 1.0
a: brake = 30.0
c: vehicle_speed == 0.0
| 0
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration/calibration_data_sample/t20b13.txt
|
a: gear_location = chassis_pb2.Chassis.GEAR_DRIVE
a: brake = 40.0
a: steering_target = 0.0
a: throttle = 0.0
c: vehicle_speed == 0.0
t: 2.0
a: brake = 0.0
a: throttle = 20.0
c: vehicle_speed >= 10.0
a: throttle = 0.0
t: 1.0
a: brake = 13.0
c: vehicle_speed == 0.0
| 0
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration/calibration_data_sample/b13_up.txt
|
a: gear_location = chassis_pb2.Chassis.GEAR_DRIVE
a: brake = 40.0
a: steering_target = 0.0
a: throttle = 0.0
c: vehicle_speed == 0.0
t: 2.0
a: brake = 13.0
a: throttle = 0.0
c: vehicle_speed >= 10.0
| 0
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration/calibration_data_sample/t40b30.txt
|
a: gear_location = chassis_pb2.Chassis.GEAR_DRIVE
a: brake = 40.0
a: steering_target = 0.0
a: throttle = 0.0
c: vehicle_speed == 0.0
t: 2.0
a: brake = 0.0
a: throttle = 40.0
c: vehicle_speed >= 12.0
a: throttle = 0.0
t: 1.0
a: brake = 30.0
c: vehicle_speed == 0.0
| 0
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration/calibration_data_sample/t40b25.txt
|
a: gear_location = chassis_pb2.Chassis.GEAR_DRIVE
a: brake = 40.0
a: steering_target = 0.0
a: throttle = 0.0
c: vehicle_speed == 0.0
t: 2.0
a: brake = 0.0
a: throttle = 40.0
c: vehicle_speed >= 12.0
a: throttle = 0.0
t: 1.0
a: brake = 25.0
c: vehicle_speed == 0.0
| 0
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration/calibration_data_sample/b17_up.txt
|
a: gear_location = chassis_pb2.Chassis.GEAR_DRIVE
a: brake = 40.0
a: steering_target = 0.0
a: throttle = 0.0
c: vehicle_speed == 0.0
t: 2.0
a: brake = 17.0
a: throttle = 0.0
c: vehicle_speed >= 10.0
| 0
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration/calibration_data_sample/t40b27.txt
|
a: gear_location = chassis_pb2.Chassis.GEAR_DRIVE
a: brake = 40.0
a: steering_target = 0.0
a: throttle = 0.0
c: vehicle_speed == 0.0
t: 2.0
a: brake = 0.0
a: throttle = 40.0
c: vehicle_speed >= 12.0
a: throttle = 0.0
t: 1.0
a: brake = 27.0
c: vehicle_speed == 0.0
| 0
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration/calibration_data_sample/t40b33.txt
|
a: gear_location = chassis_pb2.Chassis.GEAR_DRIVE
a: brake = 40.0
a: steering_target = 0.0
a: throttle = 0.0
c: vehicle_speed == 0.0
t: 2.0
a: brake = 0.0
a: throttle = 40.0
c: vehicle_speed >= 12.0
a: throttle = 0.0
t: 1.0
a: brake = 33.0
c: vehicle_speed == 0.0
| 0
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration/calibration_data_sample/t40b22.txt
|
a: gear_location = chassis_pb2.Chassis.GEAR_DRIVE
a: brake = 40.0
a: steering_target = 0.0
a: throttle = 0.0
c: vehicle_speed == 0.0
t: 2.0
a: brake = 0.0
a: throttle = 40.0
c: vehicle_speed >= 12.0
a: throttle = 0.0
t: 1.0
a: brake = 22.0
c: vehicle_speed == 0.0
| 0
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration/calibration_data_sample/t20_down.txt
|
a: gear_location = chassis_pb2.Chassis.GEAR_DRIVE
a: brake = 40.0
a: steering_target = 0.0
a: throttle = 0.0
c: vehicle_speed == 0.0
t: 2.0
a: brake = 0.0
a: throttle = 50.0
c: vehicle_speed >= 10.4
a: throttle = 20.0
a: brake = 0.0
c: vehicle_speed == 0.0
| 0
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration/calibration_data_sample/b15_up.txt
|
a: gear_location = chassis_pb2.Chassis.GEAR_DRIVE
a: brake = 40.0
a: steering_target = 0.0
a: throttle = 0.0
c: vehicle_speed == 0.0
t: 2.0
a: brake = 15.0
a: throttle = 0.0
c: vehicle_speed >= 10.0
| 0
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration/calibration_data_sample/t40b35.txt
|
a: gear_location = chassis_pb2.Chassis.GEAR_DRIVE
a: brake = 40.0
a: steering_target = 0.0
a: throttle = 0.0
c: vehicle_speed == 0.0
t: 2.0
a: brake = 0.0
a: throttle = 40.0
c: vehicle_speed >= 12.0
a: throttle = 0.0
t: 1.0
a: brake = 35.0
c: vehicle_speed == 0.0
| 0
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration/calibration_data_sample/t50b30.txt
|
a: gear_location = chassis_pb2.Chassis.GEAR_DRIVE
a: brake = 40.0
a: steering_target = 0.0
a: throttle = 0.0
c: vehicle_speed == 0.0
t: 2.0
a: brake = 0.0
a: throttle = 50.0
c: vehicle_speed >= 12.0
a: throttle = 0.0
t: 1.0
a: brake = 30.0
c: vehicle_speed == 0.0
| 0
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration/calibration_data_sample/t15_down.txt
|
a: gear_location = chassis_pb2.Chassis.GEAR_DRIVE
a: brake = 40.0
a: steering_target = 0.0
a: throttle = 0.0
c: vehicle_speed == 0.0
t: 2.0
a: brake = 0.0
a: throttle = 60.0
c: vehicle_speed >= 11.0
a: throttle = 15.0
a: brake = 0.0
c: vehicle_speed == 0.0
| 0
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration
|
apollo_public_repos/apollo/modules/tools/vehicle_calibration/calibration_data_sample/t40b20.txt
|
a: gear_location = chassis_pb2.Chassis.GEAR_DRIVE
a: brake = 40.0
a: steering_target = 0.0
a: throttle = 0.0
c: vehicle_speed == 0.0
t: 2.0
a: brake = 0.0
a: throttle = 40.0
c: vehicle_speed >= 12.0
a: throttle = 0.0
t: 1.0
a: brake = 20.0
c: vehicle_speed == 0.0
| 0
|
apollo_public_repos/apollo/modules/tools
|
apollo_public_repos/apollo/modules/tools/manual_traffic_light/manual_traffic_light.dag
|
# Define all coms in DAG streaming.
module_config {
module_library : "/apollo/bazel-bin/modules/tools/manual_traffic_light/libmanual_traffic_light_component.so"
timer_components {
class_name : "ManualTrafficLight"
config {
name: "manual_traffic_light_componenet"
flag_file_path: "/apollo/modules/tools/manual_traffic_light/manual_traffic_light.conf"
interval: 200 # milliseconds
}
}
}
| 0
|
apollo_public_repos/apollo/modules/tools
|
apollo_public_repos/apollo/modules/tools/manual_traffic_light/manual_traffic_light.conf
|
--flagfile=/apollo/modules/common/data/global_flagfile.txt
| 0
|
apollo_public_repos/apollo/modules/tools
|
apollo_public_repos/apollo/modules/tools/manual_traffic_light/manual_traffic_light.cc
|
/******************************************************************************
* Copyright 2017 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
#include <poll.h>
#include "absl/strings/match.h"
#include "absl/strings/str_join.h"
#include "cyber/component/timer_component.h"
#include "cyber/cyber.h"
#include "modules/common/adapters/adapter_gflags.h"
#include "modules/common/util/color.h"
#include "modules/common/util/message_util.h"
#include "modules/map/hdmap/adapter/opendrive_adapter.h"
#include "modules/map/hdmap/hdmap_util.h"
#include "modules/common_msgs/perception_msgs/traffic_light_detection.pb.h"
using apollo::common::color::ANSI_GREEN;
using apollo::common::color::ANSI_RED;
using apollo::common::color::ANSI_RESET;
using apollo::hdmap::HDMapUtil;
using apollo::hdmap::SignalInfoConstPtr;
using apollo::localization::LocalizationEstimate;
using apollo::perception::TrafficLight;
using apollo::perception::TrafficLightDetection;
DEFINE_bool(all_lights, false, "set all lights on the map");
DEFINE_double(traffic_light_distance, 1000.0,
"only retrieves traffic lights within this distance");
class ManualTrafficLight final : public apollo::cyber::TimerComponent {
public:
bool Init() {
localization_reader_ = node_->CreateReader<LocalizationEstimate>(
FLAGS_localization_topic,
[this](const std::shared_ptr<LocalizationEstimate> &localization) {
ADEBUG << "Received chassis data: run chassis callback.";
OnLocalization(localization);
});
traffic_light_detection_writer_ =
node_->CreateWriter<TrafficLightDetection>(
FLAGS_traffic_light_detection_topic);
return true;
}
bool Proc() {
std::vector<SignalInfoConstPtr> signals;
bool result = false;
if (FLAGS_all_lights) {
result = GetAllTrafficLights(&signals);
} else {
result = GetTrafficLightsWithinDistance(&signals);
}
if (!result) {
ADEBUG << "Failed to get traffic signals from current location on map";
} else {
ADEBUG << "Received " << signals.size() << " traffic lights";
}
std::vector<std::string> signal_ids;
for (const auto &ptr : signals) {
signal_ids.emplace_back(ptr->id().id());
}
if (IsDifferent(prev_traffic_lights_, signal_ids)) {
prev_traffic_lights_ =
std::unordered_set<std::string>(signal_ids.begin(), signal_ids.end());
updated_ = true;
}
TrafficLightDetection traffic_light_detection;
TrafficLight::Color color = GetKeyBoardColorInput();
ADEBUG << "Color: " << TrafficLight::Color_Name(color);
if (updated_) {
updated_ = false;
const char *print_color = is_green_ ? ANSI_GREEN : ANSI_RED;
std::cout << print_color
<< "Current Light: " << (is_green_ ? "GREEN" : "RED");
if (signal_ids.empty()) {
if (FLAGS_all_lights) {
std::cout << " No lights in the map";
} else {
std::cout << " No lights in the next " << FLAGS_traffic_light_distance
<< " meters";
}
} else {
if (signal_ids.size() < 5) {
std::cout << " IDs: " << absl::StrJoin(signal_ids, " ");
} else {
std::cout << " IDs: "
<< absl::StrJoin(signal_ids.begin(), signal_ids.begin() + 4,
" ")
<< " ...";
}
}
std::cout << std::endl
<< ANSI_RESET << "Press 'c' to change" << std::endl
<< std::endl;
}
CreateTrafficLightDetection(signals, color, &traffic_light_detection);
traffic_light_detection_writer_->Write(traffic_light_detection);
return true;
}
private:
bool GetAllTrafficLights(std::vector<SignalInfoConstPtr> *traffic_lights) {
static auto map_filename = apollo::hdmap::BaseMapFile();
static apollo::hdmap::Map map_proto;
static std::vector<SignalInfoConstPtr> map_traffic_lights;
if (map_proto.lane().empty() && map_traffic_lights.empty()) {
AERROR << "signal size: " << map_proto.signal_size();
if (absl::EndsWith(map_filename, ".xml")) {
if (!apollo::hdmap::adapter::OpendriveAdapter::LoadData(map_filename,
&map_proto)) {
return false;
}
} else if (!apollo::cyber::common::GetProtoFromFile(map_filename,
&map_proto)) {
return false;
}
for (const auto &signal : map_proto.signal()) {
const auto *hdmap = HDMapUtil::BaseMapPtr();
if (!hdmap) {
AERROR << "Invalid HD Map.";
return false;
}
map_traffic_lights.push_back(hdmap->GetSignalById(signal.id()));
}
}
*traffic_lights = map_traffic_lights;
return true;
}
bool GetTrafficLightsWithinDistance(
std::vector<SignalInfoConstPtr> *traffic_lights) {
CHECK_NOTNULL(traffic_lights);
if (!has_localization_) {
AERROR << "No localization received";
return false;
}
const auto *hdmap = HDMapUtil::BaseMapPtr();
if (!hdmap) {
AERROR << "Invalid HD Map.";
return false;
}
auto position = localization_.pose().position();
int ret = hdmap->GetForwardNearestSignalsOnLane(
position, FLAGS_traffic_light_distance, traffic_lights);
if (ret != 0) {
AERROR << "failed to get signals from position: "
<< position.ShortDebugString()
<< " with distance: " << FLAGS_traffic_light_distance << " on map";
return false;
}
return true;
}
bool CreateTrafficLightDetection(
const std::vector<SignalInfoConstPtr> &signals, TrafficLight::Color color,
TrafficLightDetection *detection) {
CHECK_NOTNULL(detection);
for (const auto &iter : signals) {
auto *light = detection->add_traffic_light();
light->set_color(color);
light->set_confidence(1.0);
light->set_tracking_time(1.0);
light->set_id(iter->id().id());
}
apollo::common::util::FillHeader("manual_traffic_light", detection);
return true;
}
TrafficLight::Color GetKeyBoardColorInput() {
int8_t revent = 0; // short
struct pollfd fd = {STDIN_FILENO, POLLIN, revent};
switch (poll(&fd, 1, 100)) {
case -1:
AERROR << "Failed to read keybapoard";
break;
case 0:
break;
default:
char ch = 'x';
std::cin >> ch;
if (ch == 'c') {
is_green_ = !is_green_;
updated_ = true;
}
break;
}
if (is_green_) {
return TrafficLight::GREEN;
} else {
return TrafficLight::RED;
}
}
bool IsDifferent(const std::unordered_set<std::string> &str_set,
const std::vector<std::string> &str_vec) {
if (str_vec.size() != str_set.size()) {
return true;
}
for (const auto &ss : str_vec) {
if (str_set.count(ss) == 0) {
return true;
}
}
return false;
}
void OnLocalization(
const std::shared_ptr<LocalizationEstimate> &localization) {
localization_ = *localization;
has_localization_ = true;
}
private:
bool is_green_ = false;
bool updated_ = true;
bool has_localization_ = false;
LocalizationEstimate localization_;
std::unordered_set<std::string> prev_traffic_lights_;
std::shared_ptr<apollo::cyber::Writer<TrafficLightDetection>>
traffic_light_detection_writer_ = nullptr;
std::shared_ptr<apollo::cyber::Reader<LocalizationEstimate>>
localization_reader_ = nullptr;
};
CYBER_REGISTER_COMPONENT(ManualTrafficLight);
| 0
|
apollo_public_repos/apollo/modules/tools
|
apollo_public_repos/apollo/modules/tools/manual_traffic_light/manual_traffic_light.launch
|
<cyber>
<module>
<name>manual_traffic_light</name>
<dag_conf>/apollo/modules/tools/manual_traffic_light/manual_traffic_light.dag</dag_conf>
<process_name>manual_traffic_light</process_name>
</module>
</cyber>
| 0
|
apollo_public_repos/apollo/modules/tools
|
apollo_public_repos/apollo/modules/tools/manual_traffic_light/BUILD
|
load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_library")
load("//tools:cpplint.bzl", "cpplint")
load("//tools/install:install.bzl", "install")
package(default_visibility = ["//visibility:public"])
install(
name = "install",
library_dest = "tools/lib",
data_dest = "tools/manual_traffic_light",
data = [":conf_files"],
targets = [":libmanual_traffic_light_component.so"],
visibility = ["//visibility:public"],
)
filegroup(
name = "conf_files",
srcs = [
"manual_traffic_light.conf",
"manual_traffic_light.dag",
"manual_traffic_light.launch",
],
)
cc_library(
name = "manual_traffic_light",
srcs = ["manual_traffic_light.cc"],
copts = ["-DMODULE_NAME=\\\"manual_traffic_light\\\""],
deps = [
"//cyber",
"//modules/common/adapters:adapter_gflags",
"//modules/common/util:util_tool",
"//modules/map/hdmap:hdmap_util",
"//modules/common_msgs/perception_msgs:traffic_light_detection_cc_proto",
],
)
cc_binary(
name = "libmanual_traffic_light_component.so",
linkshared = True,
linkstatic = True,
deps = [":manual_traffic_light"],
)
cpplint()
| 0
|
apollo_public_repos/apollo/modules/tools/dataset
|
apollo_public_repos/apollo/modules/tools/dataset/kitti/sensor.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2022 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from datetime import datetime
def to_timestamp(sensor_time):
date_sec, nano_sec = sensor_time.split('.')
time_sec = datetime.strptime(date_sec, '%Y-%m-%d %H:%M:%S')
return datetime.timestamp(time_sec) + float(nano_sec)*1e-9
class Sensor(object):
def __init__(self, timestamp, file_path) -> None:
self.timestamp = to_timestamp(timestamp)
self.file_path = file_path
self.parse()
def parse(self):
raise NotImplementedError("Must override!")
class Lidar(Sensor):
def __init__(self, timestamp, file_path) -> None:
super().__init__(timestamp, file_path)
def parse(self):
pass
class Camera(Sensor):
def __init__(self, timestamp, file_path) -> None:
super().__init__(timestamp, file_path)
def parse(self):
pass
class IMU(Sensor):
def __init__(self, timestamp, file_path) -> None:
super().__init__(timestamp, file_path)
self.lat = None
self.lon = None
self.alt = None
self.roll = None
self.pitch = None
self.yaw = None
self.vn = None
self.ve = None
self.vf = None
self.vl = None
self.vu = None
self.ax = None
self.ay = None
self.az = None
self.af = None
self.al = None
self.au = None
self.wx = None
self.wy = None
self.wz = None
self.wf = None
self.wl = None
self.wu = None
self.pos_accuracy = None
self.vel_accuracy = None
self.navstat = None
self.numsats = None
self.posmode = None
self.velmode = None
self.orimode = None
self.parse()
def parse(self):
with open(self.file_path, 'r') as f:
for line in f:
data = line.strip().split()
if len(data) != 30:
# If length < 30 then we just read the required data
self.lat = float(data[0])
self.lon = float(data[1])
self.alt = float(data[2])
self.roll = float(data[3])
self.pitch = float(data[4])
self.yaw = float(data[5])
print("IMU data length error! require 30 but {}".format(len(data)))
continue
self.lat = float(data[0])
self.lon = float(data[1])
self.alt = float(data[2])
self.roll = float(data[3])
self.pitch = float(data[4])
self.yaw = float(data[5])
self.vn = float(data[6])
self.ve = float(data[7])
self.vf = float(data[8])
self.vl = float(data[9])
self.vu = float(data[10])
self.ax = float(data[11])
self.ay = float(data[12])
self.az = float(data[13])
self.af = float(data[14])
self.al = float(data[15])
self.au = float(data[16])
self.wx = float(data[17])
self.wy = float(data[18])
self.wz = float(data[19])
self.wf = float(data[20])
self.wl = float(data[21])
self.wu = float(data[22])
self.pos_accuracy = float(data[23])
self.vel_accuracy = float(data[24])
self.navstat = data[25]
self.numsats = data[26]
self.posmode = data[27]
self.velmode = data[28]
self.orimode = data[29]
| 0
|
apollo_public_repos/apollo/modules/tools/dataset
|
apollo_public_repos/apollo/modules/tools/dataset/kitti/proj_helper.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2022 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import pyproj
import math
def utm2latlon(x, y, zone):
"""utm to latlon"""
proj = pyproj.Proj(proj='utm', zone=zone, ellps='WGS84')
lon, lat = proj(x, y, inverse=True)
return lat, lon
def latlon2utm(lat, lon):
"""latlon to utm"""
zone = latlon2utmzone(lat, lon)
projector2 = pyproj.Proj(proj='utm', zone=zone, ellps='WGS84')
x, y = projector2(lon, lat)
return x, y, zone
def latlon2utmzone(lat, lon):
"""latlon to utm zone"""
zone_num = math.floor((lon + 180) / 6) + 1
if 56.0 <= lat < 64.0 and 3.0 <= lon < 12.0:
zone_num = 32
if 72.0 <= lat < 84.0:
if 0.0 <= lon < 9.0:
zone_num = 31
elif 9.0 <= lon < 21.0:
zone_num = 33
elif 21.0 <= lon < 33.0:
zone_num = 35
elif 33.0 <= lon < 42.0:
zone_num = 37
return zone_num
| 0
|
apollo_public_repos/apollo/modules/tools/dataset
|
apollo_public_repos/apollo/modules/tools/dataset/kitti/kitti.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2022 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import os
from common import Message, Pose
from sensor import Lidar, Camera, IMU
from geometry import Euler
from proj_helper import latlon2utm
class KITTISchema(object):
"""KITTI schema
Args:
object (_type_): _description_
"""
def __init__(self, dataroot=None) -> None:
self.dataroot = dataroot
self.camera_num = 4
def lidar_schemes(self):
path_name = 'velodyne_points'
timestamps = self._read_timestamps(path_name)
filenames = self._read_filenames(path_name)
assert len(timestamps) == len(filenames)
return [Lidar(t, f) for t, f in zip(timestamps, filenames)]
def camera_schemes(self):
schemes = dict()
for camera_id in range(self.camera_num):
path_name = 'image_{:02d}'.format(camera_id)
timestamps = self._read_timestamps(path_name)
filenames = self._read_filenames(path_name)
assert len(timestamps) == len(filenames)
schemes[path_name] = [Camera(t, f) for t, f in zip(timestamps, filenames)]
return schemes
def imu_schemes(self):
path_name = 'oxts'
timestamps = self._read_timestamps(path_name)
filenames = self._read_filenames(path_name)
assert len(timestamps) == len(filenames)
return [IMU(t, f) for t, f in zip(timestamps, filenames)]
def _read_timestamps(self, file_path, file_name='timestamps.txt'):
timestamps_file = os.path.join(self.dataroot, file_path, file_name)
timestamps = []
with open(timestamps_file, 'r') as f:
for line in f:
timestamps.append(line.strip())
return timestamps
def _read_filenames(self, file_path, sub_path='data'):
filenames = []
absolute_path = os.path.join(self.dataroot, file_path, sub_path)
for f in os.listdir(absolute_path):
file_name = os.path.join(absolute_path, f)
if os.path.isfile(file_name):
filenames.append(file_name)
# Need sorted by name
filenames.sort()
return filenames
class KITTI(object):
"""KITTI dataset
Args:
object (_type_): _description_
"""
def __init__(self, kitti_schema) -> None:
self._kitti_schema = kitti_schema
self._messages = []
self.read_messages()
def __iter__(self):
for message in self._messages:
yield message
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
def read_messages(self):
# read lidar
for lidar in self._kitti_schema.lidar_schemes():
msg = Message(channel='velodyne64', timestamp=lidar.timestamp, file_path=lidar.file_path)
self._messages.append(msg)
# read imu
for imu in self._kitti_schema.imu_schemes():
ego_pose = Pose()
utm_x, utm_y, _ = latlon2utm(imu.lat, imu.lon)
ego_pose.set_translation(utm_x, utm_y, 0)
euler = Euler(imu.roll, imu.pitch, imu.yaw)
q = euler.to_quaternion()
ego_pose.set_rotation(q.w, q.x, q.y, q.z)
msg = Message(channel='imu', timestamp=imu.timestamp, file_path=imu.file_path, ego_pose=ego_pose)
self._messages.append(msg)
# read camera
for camera_name, schemes in self._kitti_schema.camera_schemes().items():
for camera in schemes:
msg = Message(channel=camera_name, timestamp=camera.timestamp, file_path=camera.file_path)
self._messages.append(msg)
# sort by timestamp
self._messages.sort(key=lambda msg : msg.timestamp)
| 0
|
apollo_public_repos/apollo/modules/tools/dataset
|
apollo_public_repos/apollo/modules/tools/dataset/kitti/dataset_converter.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2022 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
'''Generate apollo record file by kitti raw sensor data.'''
import logging
import numpy as np
from cyber_record.record import Record
from record_msg.builder import (
ImageBuilder,
PointCloudBuilder,
LocalizationBuilder,
TransformBuilder)
from kitti import KITTISchema, KITTI
from geometry import Quaternion
LOCALIZATION_TOPIC = '/apollo/localization/pose'
TF_TOPIC= '/tf'
# Need to convert to apollo coordinate system
imu_to_velo = np.array(
[[ 9.999976e-01, 7.553071e-04,-2.035826e-03,-8.086759e-01],
[-7.854027e-04, 9.998898e-01,-1.482298e-02, 3.195559e-01],
[ 2.024406e-03, 1.482454e-02, 9.998881e-01,-7.997231e-01],
[ 0.0000000, 0.0000000, 0.0000000, 1.0000000]])
kitti_to_apollo = np.array([[ 0.0000000, 1.0000000, 0.0000000, 0.0000000],
[-1.0000000, 0.0000000, 0.0000000, 0.0000000],
[ 0.0000000, 0.0000000, 1.0000000, 0.0000000],
[ 0.0000000, 0.0000000, 0.0000000, 1.0000000]])
LIDAR_TRANSFORM = np.matmul(np.linalg.inv(imu_to_velo), kitti_to_apollo)
def dataset_to_record(kitti, record_root_path):
"""Construct record message and save it as record
Args:
kitti (_type_): kitti
record_root_path (str): record file saved path
"""
image_builder = ImageBuilder()
pc_builder = PointCloudBuilder()
localization_builder = LocalizationBuilder()
transform_builder = TransformBuilder()
with Record(record_root_path, mode='w') as record:
for msg in kitti:
c, f, ego_pose, t = msg.channel, msg.file_path, msg.ego_pose, msg.timestamp
logging.debug("{}, {}, {}, {}".format(c, f, ego_pose, t))
pb_msg = None
# There're mix gray and rgb image files, so we just choose rgb image
if c == 'image_02' or c == 'image_03':
# KITTI image types: 'gray', 'bgr8'
pb_msg = image_builder.build(f, 'camera', 'bgr8', t)
channel_name = "/apollo/sensor/camera/{}/image".format(c)
elif c.startswith('velodyne'):
pb_msg = pc_builder.build_nuscenes(f, 'velodyne', t, LIDAR_TRANSFORM)
channel_name = "/apollo/sensor/{}/compensator/PointCloud2".format(c)
if pb_msg:
record.write(channel_name, pb_msg, int(t*1e9))
if ego_pose:
rotation = ego_pose.rotation
quat = Quaternion(rotation[0], rotation[1], rotation[2], rotation[3])
heading = quat.to_euler().yaw
pb_msg = localization_builder.build(
ego_pose.translation, ego_pose.rotation, heading, t)
if pb_msg:
record.write(LOCALIZATION_TOPIC, pb_msg, int(t*1e9))
pb_msg = transform_builder.build('world', 'localization',
ego_pose.translation, ego_pose.rotation, t)
if pb_msg:
record.write(TF_TOPIC, pb_msg, int(t*1e9))
def convert_dataset(dataset_path, record_path):
    """Convert a KITTI dataset directory into an Apollo record file.

    Args:
        dataset_path (str): root directory of the KITTI dataset
        record_path (str): path the generated record is written to
    """
    schema = KITTISchema(dataroot=dataset_path)
    dataset = KITTI(schema)
    print("Start to convert scene, Pls wait!")
    dataset_to_record(dataset, record_path)
    print("Success! Records saved in '{}'".format(record_path))
| 0
|
apollo_public_repos/apollo/modules/tools/dataset
|
apollo_public_repos/apollo/modules/tools/dataset/kitti/pcd_converter.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2022 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
'''KITTI pcd file to pcl pcd file converter.'''
import logging
import numpy as np
from record_msg import pypcd
def convert_pcd(input_file, output_file):
    """Convert a KITTI binary lidar scan into a pcl-style pcd file.

    The input file stores float32 records laid out as (x, y, z, intensity).
    """
    point_dtype = np.dtype([
        ('x', np.float32),
        ('y', np.float32),
        ('z', np.float32),
        ('intensity', np.float32)])
    scan = np.fromfile(input_file, dtype=point_dtype)
    logging.debug("points: {},{}".format(np.shape(scan), scan.dtype))
    cloud = pypcd.PointCloud.from_array(scan)
    cloud.save(output_file)
    print("Success! Pcd file saved to '{}'".format(output_file))
| 0
|
apollo_public_repos/apollo/modules/tools/dataset
|
apollo_public_repos/apollo/modules/tools/dataset/kitti/common.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2022 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
class Pose(object):
    """Mutable pose holder: a translation vector plus a quaternion rotation."""

    def __init__(self) -> None:
        # Both fields start empty until explicitly set.
        self.translation = []
        self.rotation = []

    def set_translation(self, x, y, z):
        """Store the translation as [x, y, z]."""
        self.translation = [x, y, z]

    def set_rotation(self, qw, qx, qy, qz):
        """Store the rotation quaternion as [qw, qx, qy, qz] (scalar first)."""
        self.rotation = [qw, qx, qy, qz]
class Message(object):
    """One dataset frame: channel, file path, ego pose, calibration, time."""

    def __init__(self, channel=None, file_path=None, ego_pose=None,
                 calibrated_sensor=None, timestamp=None) -> None:
        # All fields are optional; callers fill in whatever they have.
        self.channel = channel
        self.file_path = file_path
        self.ego_pose = ego_pose
        self.calibrated_sensor = calibrated_sensor
        self.timestamp = timestamp
| 0
|
apollo_public_repos/apollo/modules/tools/dataset
|
apollo_public_repos/apollo/modules/tools/dataset/kitti/geometry.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2022 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import math
import numpy as np
def euler_to_rotation_matrix(theta1, theta2, theta3, order='xyz'):
    """Build a 3x3 rotation matrix from three Euler angles.

    Args:
        theta1, theta2, theta3: rotation angles in radians, applied in the
            axis sequence named by ``order``.
        order (str): one of the 12 axis sequences ('xyz', 'zyx', 'xzx', ...).

    Returns:
        np.ndarray: the 3x3 rotation matrix.

    Raises:
        ValueError: if ``order`` is not a recognized axis sequence.
            (Previously an unknown order fell through to ``return matrix``
            and crashed with UnboundLocalError.)
    """
    c1 = np.cos(theta1)
    s1 = np.sin(theta1)
    c2 = np.cos(theta2)
    s2 = np.sin(theta2)
    c3 = np.cos(theta3)
    s3 = np.sin(theta3)
    if order == 'xzx':
        matrix = np.array([[c2, -c3*s2, s2*s3],
                           [c1*s2, c1*c2*c3-s1*s3, -c3*s1-c1*c2*s3],
                           [s1*s2, c1*s3+c2*c3*s1, c1*c3-c2*s1*s3]])
    elif order == 'xyx':
        matrix = np.array([[c2, s2*s3, c3*s2],
                           [s1*s2, c1*c3-c2*s1*s3, -c1*s3-c2*c3*s1],
                           [-c1*s2, c3*s1+c1*c2*s3, c1*c2*c3-s1*s3]])
    elif order == 'yxy':
        matrix = np.array([[c1*c3-c2*s1*s3, s1*s2, c1*s3+c2*c3*s1],
                           [s2*s3, c2, -c3*s2],
                           [-c3*s1-c1*c2*s3, c1*s2, c1*c2*c3-s1*s3]])
    elif order == 'yzy':
        matrix = np.array([[c1*c2*c3-s1*s3, -c1*s2, c3*s1+c1*c2*s3],
                           [c3*s2, c2, s2*s3],
                           [-c1*s3-c2*c3*s1, s1*s2, c1*c3-c2*s1*s3]])
    elif order == 'zyz':
        matrix = np.array([[c1*c2*c3-s1*s3, -c3*s1-c1*c2*s3, c1*s2],
                           [c1*s3+c2*c3*s1, c1*c3-c2*s1*s3, s1*s2],
                           [-c3*s2, s2*s3, c2]])
    elif order == 'zxz':
        matrix = np.array([[c1*c3-c2*s1*s3, -c1*s3-c2*c3*s1, s1*s2],
                           [c3*s1+c1*c2*s3, c1*c2*c3-s1*s3, -c1*s2],
                           [s2*s3, c3*s2, c2]])
    elif order == 'xyz':
        matrix = np.array([[c2*c3, -c2*s3, s2],
                           [c1*s3+c3*s1*s2, c1*c3-s1*s2*s3, -c2*s1],
                           [s1*s3-c1*c3*s2, c3*s1+c1*s2*s3, c1*c2]])
    elif order == 'xzy':
        matrix = np.array([[c2*c3, -s2, c2*s3],
                           [s1*s3+c1*c3*s2, c1*c2, c1*s2*s3-c3*s1],
                           [c3*s1*s2-c1*s3, c2*s1, c1*c3+s1*s2*s3]])
    elif order == 'yxz':
        matrix = np.array([[c1*c3+s1*s2*s3, c3*s1*s2-c1*s3, c2*s1],
                           [c2*s3, c2*c3, -s2],
                           [c1*s2*s3-c3*s1, c1*c3*s2+s1*s3, c1*c2]])
    elif order == 'yzx':
        matrix = np.array([[c1*c2, s1*s3-c1*c3*s2, c3*s1+c1*s2*s3],
                           [s2, c2*c3, -c2*s3],
                           [-c2*s1, c1*s3+c3*s1*s2, c1*c3-s1*s2*s3]])
    elif order == 'zyx':
        matrix = np.array([[c1*c2, c1*s2*s3-c3*s1, s1*s3+c1*c3*s2],
                           [c2*s1, c1*c3+s1*s2*s3, c3*s1*s2-c1*s3],
                           [-s2, c2*s3, c2*c3]])
    elif order == 'zxy':
        matrix = np.array([[c1*c3-s1*s2*s3, -c2*s1, c1*s3+c3*s1*s2],
                           [c3*s1+c1*s2*s3, c1*c2, s1*s3-c1*c3*s2],
                           [-c2*s3, s2, c2*c3]])
    else:
        raise ValueError("Unknown rotation order '{}'".format(order))
    return matrix
def is_rotation_matrix(R):
    """Return True if R is numerically orthonormal, i.e. R^T @ R == I."""
    product = np.dot(np.transpose(R), R)
    identity = np.identity(3, dtype=R.dtype)
    return np.linalg.norm(identity - product) < 1e-6
def rotation_matrix_to_euler(R) :
    """Decompose a 3x3 rotation matrix into Euler angles [x, y, z] (radians).

    Raises AssertionError when R is not (numerically) a rotation matrix.
    """
    assert(is_rotation_matrix(R))
    # sy ~ cos(pitch); near zero means gimbal lock (pitch ~ +-90 degrees).
    sy = math.sqrt(R[0,0] * R[0,0] + R[1,0] * R[1,0])
    singular = sy < 1e-6
    if not singular :
        x = math.atan2(R[2,1] , R[2,2])
        y = math.atan2(-R[2,0], sy)
        z = math.atan2(R[1,0], R[0,0])
    else :
        # Gimbal-lock branch: z is not observable, conventionally set to 0.
        x = math.atan2(-R[1,2], R[1,1])
        y = math.atan2(-R[2,0], sy)
        z = 0
    return np.array([x, y, z])
class Euler(object):
    """Euler angles (roll, pitch, yaw) in radians."""

    def __init__(self, roll, pitch, yaw) -> None:
        self.roll = roll
        self.pitch = pitch
        self.yaw = yaw

    def to_quaternion(self):
        """Convert to a Quaternion (w, x, y, z) via half-angle products."""
        cr, sr = math.cos(self.roll * 0.5), math.sin(self.roll * 0.5)
        cp, sp = math.cos(self.pitch * 0.5), math.sin(self.pitch * 0.5)
        cy, sy = math.cos(self.yaw * 0.5), math.sin(self.yaw * 0.5)
        return Quaternion(
            cr * cp * cy + sr * sp * sy,
            sr * cp * cy - cr * sp * sy,
            cr * sp * cy + sr * cp * sy,
            cr * cp * sy - sr * sp * cy)
class Quaternion(object):
    """Hamilton quaternion (w, x, y, z) with w as the scalar part."""

    def __init__(self, w, x, y, z) -> None:
        self.w = w
        self.x = x
        self.y = y
        self.z = z

    def to_euler(self):
        """Convert to Euler angles (roll, pitch, yaw) in radians."""
        w, x, y, z = self.w, self.x, self.y, self.z
        roll = math.atan2(2 * (w * x + y * z), 1 - 2 * (x * x + y * y))
        pitch = math.asin(2 * (w * y - z * x))
        yaw = math.atan2(2 * (w * z + x * y), 1 - 2 * (y * y + z * z))
        return Euler(roll, pitch, yaw)

    def __mul__(self, other):
        """Hamilton product self * other."""
        w1, x1, y1, z1 = self.w, self.x, self.y, self.z
        w2, x2, y2, z2 = other.w, other.x, other.y, other.z
        return Quaternion(
            w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,
            w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,
            w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2,
            w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2)
| 0
|
apollo_public_repos/apollo/modules/tools/dataset
|
apollo_public_repos/apollo/modules/tools/dataset/kitti/calibration_converter.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2022 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
'''Generate apollo calibration files by kitti calibration data.'''
import os
import yaml
import numpy as np
from itertools import chain
from pathlib import Path
from geometry import Euler, rotation_matrix_to_euler
# Template yaml files shipped with this tool, resolved relative to the
# working directory of the script.
CALIBRATION_META_ROOT = '../calibration_meta'
# Lidar meta
VELODYNE_PARAMS_PATH = 'velodyne_params'
LIDAR_NOVATEL_EXTRINSICS = 'lidar_novatel_extrinsics.yaml'
# Camera meta
CAMERA_PARAMS_PATH ='camera_params'
CAMERA_EXTRINSICS = 'camera_extrinsics.yaml'
CAMERA_INTRINSICS = 'camera_intrinsics.yaml'
# Radar meta
RADAR_PARAMS_PATH = 'radar_params'
RADAR_EXTRINSICS = 'radar_extrinsics.yaml'
# Frame ID
# Parent frame ids written into the generated yaml headers: camera
# extrinsics are expressed in the lidar frame, radar/lidar in 'novatel'.
CAMERA_FRAME_ID = 'velodyne64'
RADAR_FRAME_ID = 'novatel'
LIDAR_FRAME_ID = 'novatel'
def load_yaml(file_path):
    """Parse a yaml file and return its content.

    Args:
        file_path (str): yaml file

    Returns:
        dict: parsed yaml object
    """
    with open(file_path, 'r') as f:
        return yaml.safe_load(f)
def save_yaml(file_path, content):
    """Serialize content to a yaml file, preserving key order.

    Args:
        file_path (str): destination file path
        content (dict): yaml object to dump
    """
    with open(file_path, 'w') as out:
        yaml.safe_dump(content, out, sort_keys=False)
def gen_camera_extrinsics(camera_name, calibrated_sensor, calibration_file_path):
    """Generate the camera extrinsic yaml file.

    Args:
        camera_name (str): camera name
        calibrated_sensor (_type_): kitti calibrated_sensor json object
        calibration_file_path (str): saved path
    """
    # Start from the template yaml and fill in frame ids and the transform.
    meta_file = os.path.join(
        CALIBRATION_META_ROOT, CAMERA_PARAMS_PATH, CAMERA_EXTRINSICS)
    extrinsics = load_yaml(meta_file)
    extrinsics['header']['frame_id'] = CAMERA_FRAME_ID
    extrinsics['child_frame_id'] = camera_name
    transform = extrinsics['transform']
    # translation is [x, y, z]; rotation is a quaternion [w, x, y, z].
    for axis, value in zip('xyz', calibrated_sensor['translation']):
        transform['translation'][axis] = value
    for axis, value in zip('wxyz', calibrated_sensor['rotation']):
        transform['rotation'][axis] = value
    out_dir = os.path.join(calibration_file_path, CAMERA_PARAMS_PATH)
    Path(out_dir).mkdir(parents=True, exist_ok=True)
    out_file = os.path.join(
        out_dir, CAMERA_EXTRINSICS.replace('camera', camera_name))
    save_yaml(out_file, extrinsics)
def gen_camera_intrinsics(camera_name, calibrated_sensor, calibration_file_path):
    """Generate the camera intrinsic yaml file.

    Args:
        camera_name (str): camera name
        calibrated_sensor (_type_): kitti calibrated_sensor json object
        calibration_file_path (str): saved path
    """
    camera_meta_intrinsics = os.path.join(
        CALIBRATION_META_ROOT, CAMERA_PARAMS_PATH, CAMERA_INTRINSICS)
    camera_intrinsics = load_yaml(camera_meta_intrinsics)
    camera_intrinsics['header']['frame_id'] = CAMERA_FRAME_ID
    if 'height' in calibrated_sensor:
        camera_intrinsics['height'] = calibrated_sensor['height']
    if 'width' in calibrated_sensor:
        camera_intrinsics['width'] = calibrated_sensor['width']
    camera_intrinsics['K'] = list(calibrated_sensor['K'])
    camera_intrinsics['D'] = list(calibrated_sensor['D'])
    # R/P (rectification/projection) are left at their template defaults.
    # camera_intrinsics['R'] =
    # camera_intrinsics['P'] =
    file_name = CAMERA_INTRINSICS.replace('camera', camera_name)
    file_path = os.path.join(calibration_file_path, CAMERA_PARAMS_PATH)
    # Fix: create the output directory, matching the other gen_* functions,
    # so this does not depend on gen_camera_extrinsics() running first.
    Path(file_path).mkdir(parents=True, exist_ok=True)
    camera_intrinsics_file = os.path.join(file_path, file_name)
    save_yaml(camera_intrinsics_file, camera_intrinsics)
def gen_radar_params(radar_name, calibrated_sensor, calibration_file_path):
    """Generate radar extrinsic file

    Args:
        radar_name (str): radar name
        calibrated_sensor (_type_): kitti calibrated_sensor json object
        calibration_file_path (_type_): saved path
    """
    # Start from the template yaml shipped under CALIBRATION_META_ROOT.
    radar_meta_extrinsics = os.path.join(
        CALIBRATION_META_ROOT, RADAR_PARAMS_PATH, RADAR_EXTRINSICS)
    radar_extrinsics = load_yaml(radar_meta_extrinsics)
    radar_extrinsics['header']['frame_id'] = RADAR_FRAME_ID
    radar_extrinsics['child_frame_id'] = radar_name
    # translation is [x, y, z]; rotation is a quaternion [w, x, y, z].
    radar_extrinsics['transform']['translation']['x'] = calibrated_sensor['translation'][0]
    radar_extrinsics['transform']['translation']['y'] = calibrated_sensor['translation'][1]
    radar_extrinsics['transform']['translation']['z'] = calibrated_sensor['translation'][2]
    radar_extrinsics['transform']['rotation']['w'] = calibrated_sensor['rotation'][0]
    radar_extrinsics['transform']['rotation']['x'] = calibrated_sensor['rotation'][1]
    radar_extrinsics['transform']['rotation']['y'] = calibrated_sensor['rotation'][2]
    radar_extrinsics['transform']['rotation']['z'] = calibrated_sensor['rotation'][3]
    # e.g. 'radar_extrinsics.yaml' -> '<radar_name>_extrinsics.yaml'
    file_name = RADAR_EXTRINSICS.replace('radar', radar_name)
    file_path = os.path.join(calibration_file_path, RADAR_PARAMS_PATH)
    Path(file_path).mkdir(parents=True, exist_ok=True)
    radar_extrinsics_file = os.path.join(file_path, file_name)
    save_yaml(radar_extrinsics_file, radar_extrinsics)
def gen_velodyne_params(lidar_name, calibrated_sensor, calibration_file_path):
    """Generate lidar extrinsic file

    Args:
        lidar_name (str): lidar name
        calibrated_sensor (_type_): kitti calibrated_sensor json object
        calibration_file_path (str): saved path
    """
    # Start from the template yaml shipped under CALIBRATION_META_ROOT.
    lidar_meta_extrinsics = os.path.join(
        CALIBRATION_META_ROOT, VELODYNE_PARAMS_PATH, LIDAR_NOVATEL_EXTRINSICS)
    lidar_extrinsics = load_yaml(lidar_meta_extrinsics)
    lidar_extrinsics['header']['frame_id'] = LIDAR_FRAME_ID
    lidar_extrinsics['child_frame_id'] = lidar_name
    # translation is [x, y, z]; rotation is a quaternion [w, x, y, z].
    lidar_extrinsics['transform']['translation']['x'] = calibrated_sensor['translation'][0]
    lidar_extrinsics['transform']['translation']['y'] = calibrated_sensor['translation'][1]
    lidar_extrinsics['transform']['translation']['z'] = calibrated_sensor['translation'][2]
    lidar_extrinsics['transform']['rotation']['w'] = calibrated_sensor['rotation'][0]
    lidar_extrinsics['transform']['rotation']['x'] = calibrated_sensor['rotation'][1]
    lidar_extrinsics['transform']['rotation']['y'] = calibrated_sensor['rotation'][2]
    lidar_extrinsics['transform']['rotation']['z'] = calibrated_sensor['rotation'][3]
    # e.g. 'lidar_novatel_extrinsics.yaml' -> '<lidar_name>_novatel_extrinsics.yaml'
    file_name = LIDAR_NOVATEL_EXTRINSICS.replace('lidar', lidar_name)
    file_path = os.path.join(calibration_file_path, VELODYNE_PARAMS_PATH)
    Path(file_path).mkdir(parents=True, exist_ok=True)
    lidar_novatel_extrinsics = os.path.join(file_path, file_name)
    save_yaml(lidar_novatel_extrinsics, lidar_extrinsics)
def process_calib_imu_to_velo(dataset_path, calibration_file_path):
    """Convert KITTI calib_imu_to_velo.txt into the lidar extrinsic yaml.

    The file holds a 'R: ...' line (3x3 rotation, row-major) and a
    'T: ...' line (translation).
    """
    absolute_path = os.path.join(dataset_path, 'calib_imu_to_velo.txt')
    calibrated_sensor = dict()
    with open(absolute_path, 'r') as f:
        lines = f.readlines()
    # Fix: str.partition(':') takes everything after the label. The previous
    # lstrip('R:') strips a *character set*, not a prefix, which is fragile.
    r = lines[1].partition(':')[2].split()
    roll, pitch, yaw = rotation_matrix_to_euler(
        np.array(r, dtype=np.float32).reshape((3, 3)))
    q = Euler(roll, pitch, yaw).to_quaternion()
    calibrated_sensor['rotation'] = [q.w, q.x, q.y, q.z]
    t = lines[2].partition(':')[2].split()
    calibrated_sensor['translation'] = np.array(t, dtype=np.float32).tolist()
    gen_velodyne_params('velodyne64', calibrated_sensor, calibration_file_path)
def process_calib_cam_to_cam(dataset_path, calibration_file_path):
    """Parse KITTI calib_cam_to_cam.txt and emit per-camera intrinsic yamls.

    Assumes two header lines followed by exactly 8 lines per camera, in the
    fixed order S, K, D, R, T, S_rect, R_rect, P_rect, for 4 cameras.
    """
    absolute_path = os.path.join(dataset_path, 'calib_cam_to_cam.txt')
    with open(absolute_path, 'r') as f:
        total_lines = f.readlines()
        # Skip the two header lines.
        total_lines = total_lines[2:]
        for i in range(4):
            lines = total_lines[i*8:(i+1)*8]
            calibrated_sensor = dict()
            # len('S_00:') == 5 is used as the prefix width for every
            # non-rectified tag (S/K/D/R/T labels are all 5 chars wide).
            s = lines[0][len('S_00:'):].strip().split()
            wh = np.array(s, dtype=np.float32).tolist()
            calibrated_sensor['width'] = wh[0]
            calibrated_sensor['height'] = wh[1]
            k = lines[1][len('S_00:'):].strip().split()
            calibrated_sensor['K'] = np.array(k, dtype=np.float32).tolist()
            d = lines[2][len('S_00:'):].strip().split()
            calibrated_sensor['D'] = np.array(d, dtype=np.float32).tolist()
            # R (3x3 rotation) is converted to a quaternion via Euler angles.
            r = lines[3][len('S_00:'):].strip().split()
            r_array = np.array(r, dtype=np.float32)
            roll, pitch, yaw = rotation_matrix_to_euler(r_array.reshape((3, 3)))
            q = Euler(roll, pitch, yaw).to_quaternion()
            calibrated_sensor['rotation'] = [q.w, q.x, q.y, q.z]
            t = lines[4][len('S_00:'):].strip().split()
            calibrated_sensor['translation'] = np.array(t, dtype=np.float32).tolist()
            # Rectified quantities are parsed but not written to the yaml yet.
            s_rect = lines[5][len('S_rect_01:'):].strip().split()
            s_rect_array = np.array(s_rect, dtype=np.float32).tolist()
            r_rect = lines[6][len('S_rect_01:'):].strip().split()
            r_rect_array = np.array(r_rect, dtype=np.float32).tolist()
            p_rect = lines[7][len('S_rect_01:'):].strip().split()
            p_rect_array = np.array(p_rect, dtype=np.float32).tolist()
            camera_name = 'camera_{:02d}'.format(i)
            gen_camera_intrinsics(camera_name, calibrated_sensor, calibration_file_path)
def process_calib_velo_to_cam(dataset_path, calibration_file_path):
    """Convert KITTI calib_velo_to_cam.txt into the camera extrinsic yaml.

    The file holds a 'R: ...' line (3x3 rotation, row-major) and a
    'T: ...' line (translation).
    """
    absolute_path = os.path.join(dataset_path, 'calib_velo_to_cam.txt')
    calibrated_sensor = dict()
    with open(absolute_path, 'r') as f:
        lines = f.readlines()
    # Fix: str.partition(':') takes everything after the label. The previous
    # lstrip('R:') strips a *character set*, not a prefix, which is fragile.
    r = lines[1].partition(':')[2].split()
    roll, pitch, yaw = rotation_matrix_to_euler(
        np.array(r, dtype=np.float32).reshape((3, 3)))
    q = Euler(roll, pitch, yaw).to_quaternion()
    calibrated_sensor['rotation'] = [q.w, q.x, q.y, q.z]
    t = lines[2].partition(':')[2].split()
    calibrated_sensor['translation'] = np.array(t, dtype=np.float32).tolist()
    gen_camera_extrinsics('camera', calibrated_sensor, calibration_file_path)
def convert_calibration(dataset_path, calibration_root_path):
    """Generate all Apollo calibration yaml files from a KITTI calib directory."""
    process_calib_imu_to_velo(dataset_path, calibration_root_path)
    process_calib_velo_to_cam(dataset_path, calibration_root_path)
    process_calib_cam_to_cam(dataset_path, calibration_root_path)
| 0
|
apollo_public_repos/apollo/modules/tools/dataset
|
apollo_public_repos/apollo/modules/tools/dataset/kitti/main.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2022 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import argparse
import os
import sys
import logging
from calibration_converter import convert_calibration
from dataset_converter import convert_dataset
from pcd_converter import convert_pcd
def main(args=sys.argv):
    """Entry point: parse CLI args and dispatch to the chosen converter.

    -i/--input: input file or directory (required)
    -o/--output: output path (a per-type default is used when omitted)
    -t/--type: 'rcd' (record), 'cal' (calibration) or 'pcd' (pointcloud)
    """
    parser = argparse.ArgumentParser(
        description="KITTI dataset convert to record tool.",
        prog="main.py")
    parser.add_argument(
        "-i", "--input", action="store", type=str, required=True,
        help="Input file or directory.")
    parser.add_argument(
        "-o", "--output", action="store", type=str, required=False,
        help="Output file or directory.")
    parser.add_argument(
        "-t", "--type", action="store", type=str, required=False,
        default="rcd", choices=['rcd', 'cal', 'pcd'],
        help="Conversion type. rcd:record, cal:calibration, pcd:pointcloud")
    args = parser.parse_args(args[1:])
    logging.debug(args)
    if args.type == 'rcd':
        if os.path.isdir(args.input):
            if args.output is None:
                args.output = 'result.record'
            convert_dataset(args.input, args.output)
        else:
            logging.error("Pls enter directory! Not '{}'".format(args.input))
    elif args.type == 'cal':
        if os.path.isdir(args.input):
            if args.output is None:
                args.output = '.'
            convert_calibration(args.input, args.output)
        else:
            logging.error("Pls enter directory! Not '{}'".format(args.input))
    elif args.type == 'pcd':
        if os.path.isfile(args.input):
            if args.output is None:
                args.output = 'result.pcd'
            convert_pcd(args.input, args.output)
        else:
            logging.error("Pls enter file! Not '{}'".format(args.input))
    else:
        # Unreachable while argparse 'choices' is in place. Fixed to report
        # the offending conversion type instead of the input path.
        logging.error("Type error! '{}'".format(args.type))


if __name__ == '__main__':
    main()
| 0
|
apollo_public_repos/apollo/modules/tools/dataset
|
apollo_public_repos/apollo/modules/tools/dataset/nuscenes/dataset_converter.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2022 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
'''Generate apollo record file by nuscenes raw sensor data.'''
import os
import logging
import numpy as np
from cyber_record.record import Record
from record_msg.builder import (
ImageBuilder,
PointCloudBuilder,
LocalizationBuilder,
TransformBuilder)
from nuscenes import NuScenesSchema, NuScenesHelper, NuScenes
from geometry import Quaternion
LOCALIZATION_TOPIC = '/apollo/localization/pose'
TF_TOPIC= '/tf'
# Rotates nuscenes lidar points into the Apollo coordinate convention (the
# two frames differ by 90 degrees). NOTE(review): the values presumably also
# fold in the LIDAR_TOP mounting transform — confirm against the dataset.
LIDAR_TRANSFORM = np.array([[ 0.0020333, 0.9997041, 0.0242417, 0.9437130],
                            [-0.9999805, 0.0021757,-0.0058486, 0.0000000],
                            [-0.0058997,-0.0242294, 0.9996890, 1.8402300],
                            [ 0.0000000, 0.0000000, 0.0000000, 1.0000000]])
def dataset_to_record(nuscenes, record_root_path):
    """Construct record message and save it as record

    Writes one record file named '<scene_token>.record' containing camera
    images, lidar point clouds, localization and tf messages.

    Args:
        nuscenes (_type_): nuscenes(one scene)
        record_root_path (str): record file saved path
    """
    image_builder = ImageBuilder()
    # nuscenes lidar points carry 5 fields (x, y, z, intensity, ring index)
    pc_builder = PointCloudBuilder(dim=5)
    localization_builder = LocalizationBuilder()
    transform_builder = TransformBuilder()
    record_file_name = "{}.record".format(nuscenes.scene_token)
    record_file_path = os.path.join(record_root_path, record_file_name)
    with Record(record_file_path, mode='w') as record:
        # t is a timestamp in microseconds; builders take seconds (t/1e6),
        # record.write takes nanoseconds (t*1000).
        for c, f, ego_pose, calibrated_sensor, t in nuscenes:
            logging.debug("{}, {}, {}, {}".format(c, f, ego_pose, t))
            pb_msg = None
            if c.startswith('CAM'):
                pb_msg = image_builder.build(f, 'camera', 'rgb8', t/1e6)
                channel_name = "/apollo/sensor/camera/{}/image".format(c)
            elif c.startswith('LIDAR'):
                pb_msg = pc_builder.build_nuscenes(f, 'velodyne', t/1e6, LIDAR_TRANSFORM)
                channel_name = "/apollo/sensor/{}/compensator/PointCloud2".format(c)
            if pb_msg:
                record.write(channel_name, pb_msg, t*1000)
            # ego_pose['rotation'] is a quaternion [w, x, y, z]; yaw is the
            # heading used by the localization message.
            rotation = ego_pose['rotation']
            quat = Quaternion(rotation[0], rotation[1], rotation[2], rotation[3])
            heading = quat.to_euler().yaw
            ego_pose_t = ego_pose['timestamp']
            pb_msg = localization_builder.build(
                ego_pose['translation'], ego_pose['rotation'], heading, ego_pose_t/1e6)
            if pb_msg:
                record.write(LOCALIZATION_TOPIC, pb_msg, ego_pose_t*1000)
            pb_msg = transform_builder.build('world', 'localization',
                ego_pose['translation'], ego_pose['rotation'], ego_pose_t/1e6)
            if pb_msg:
                record.write(TF_TOPIC, pb_msg, ego_pose_t*1000)
def convert_dataset(dataset_path, record_path):
    """Convert every scene of a nuscenes dataset into Apollo record files.

    Args:
        dataset_path (str): nuscenes dataset path
        record_path (str): directory the record files are written to
    """
    schema = NuScenesSchema(dataroot=dataset_path)
    helper = NuScenesHelper(schema)
    for scene_token in schema.scene.keys():
        print("Start to convert scene: {}, Pls wait!".format(scene_token))
        dataset_to_record(NuScenes(helper, scene_token), record_path)
    print("Success! Records saved in '{}'".format(record_path))
| 0
|
apollo_public_repos/apollo/modules/tools/dataset
|
apollo_public_repos/apollo/modules/tools/dataset/nuscenes/pcd_converter.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2022 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
'''NuScenes pcd file to pcl pcd file converter.'''
import logging
import numpy as np
from record_msg import pypcd
def convert_pcd(input_file, output_file):
    """Convert a nuscenes binary lidar scan into a pcl-style pcd file.

    The input file stores float32 records laid out as
    (x, y, z, intensity, ring index).
    """
    point_dtype = np.dtype([
        ('x', np.float32),
        ('y', np.float32),
        ('z', np.float32),
        ('intensity', np.float32),
        ('ring_index', np.float32)])
    scan = np.fromfile(input_file, dtype=point_dtype)
    logging.debug("points: {},{}".format(np.shape(scan), scan.dtype))
    cloud = pypcd.PointCloud.from_array(scan)
    cloud.save(output_file)
    print("Success! Pcd file saved to '{}'".format(output_file))
| 0
|
apollo_public_repos/apollo/modules/tools/dataset
|
apollo_public_repos/apollo/modules/tools/dataset/nuscenes/nuscenes.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2022 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
'''NuScenes schema class and helper class to read schema data.'''
import json
import os
class NuScenesSchema(object):
    """Loads the nuscenes json schema tables and indexes them by token.

    Every table in ``table_names`` is read from
    ``<dataroot>/<version>/<table>.json`` and exposed both as the raw list
    (``schema_list``) and as a token -> row dict (``schema_hash``); the
    properties below return the hashed form.
    """
    def __init__(self,
                 dataroot: str = '/data/sets/nuscenes',
                 version: str = 'v1.0-mini') -> None:
        self.dataroot = dataroot
        self.version = version
        self.table_names = ['category', 'attribute', 'visibility', 'instance',
            'sensor', 'calibrated_sensor','ego_pose', 'log', 'scene', 'sample',
            'sample_data', 'sample_annotation', 'map']
        self.schema_list = dict()
        self.schema_hash = dict()
        for table_name in self.table_names:
            self.schema_list[table_name] = self._load_data(table_name)
            self.schema_hash[table_name] = self._create_hash(
                self.schema_list[table_name])
    # One accessor per schema table; each returns the token-indexed dict.
    @property
    def category(self):
        return self.schema_hash['category']
    @property
    def attribute(self):
        return self.schema_hash['attribute']
    @property
    def visibility(self):
        return self.schema_hash['visibility']
    @property
    def instance(self):
        return self.schema_hash['instance']
    @property
    def sensor(self):
        return self.schema_hash['sensor']
    @property
    def calibrated_sensor(self):
        return self.schema_hash['calibrated_sensor']
    @property
    def ego_pose(self):
        return self.schema_hash['ego_pose']
    @property
    def log(self):
        return self.schema_hash['log']
    @property
    def scene(self):
        return self.schema_hash['scene']
    @property
    def sample(self):
        return self.schema_hash['sample']
    @property
    def sample_data(self):
        return self.schema_hash['sample_data']
    @property
    def sample_annotation(self):
        return self.schema_hash['sample_annotation']
    @property
    def map(self):
        return self.schema_hash['map']
    def _load_data(self, table_name) -> list:
        """Read one schema table json file and return its row list."""
        file_path = os.path.join(
            self.dataroot, self.version, "{}.json".format(table_name))
        with open(file_path, 'r') as f:
            data = json.load(f)
        return data
    def _create_hash(self, objects) -> dict:
        """Index a list of schema rows by their 'token' field."""
        schema_hash = dict()
        for obj in objects:
            schema_hash[obj['token']] = obj
        return schema_hash
class NuScenesHelper(object):
    """Query helper over a NuScenesSchema instance."""

    def __init__(self, nuscenes_schema) -> None:
        self._nuscenes_schema = nuscenes_schema

    def get_sample_by_scene(self, scene_token):
        """Return the ordered list of samples belonging to a scene.

        Follows each sample's 'next' link from first_sample_token to
        last_sample_token inclusive; returns [] for an unknown scene.
        """
        scene = self.get_scene(scene_token)
        if scene is None:
            return []
        last_sample_token = scene['last_sample_token']
        samples = []
        cur_sample_token = scene['first_sample_token']
        while cur_sample_token != last_sample_token:
            cur_sample = self.get_sample(cur_sample_token)
            samples.append(cur_sample)
            cur_sample_token = cur_sample['next']
        # The loop stops before appending the last sample; add it here.
        samples.append(self.get_sample(cur_sample_token))
        return samples

    def get_sample_data_by_sample(self, sample_token):
        """Return all sample_data rows referencing the given sample."""
        sample_datas = []
        for sample_data in self._nuscenes_schema.sample_data.values():
            if sample_data['sample_token'] == sample_token:
                sample_datas.append(sample_data)
        return sample_datas

    def get_sweep_data_by_sample(self, sample_data_token):
        """Return the full prev/next chain containing sample_data_token.

        Bug fix: the original started the forward walk from the *earliest*
        element reached by the backward walk (the loop variable was reused),
        duplicating every entry after it. The forward walk now starts from
        the seed element's 'next' link.
        """
        seed = self._nuscenes_schema.sample_data[sample_data_token]
        if seed is None:
            return []
        sweep_datas = [seed]
        # Walk backwards, inserting at the front ('' terminates the chain).
        prev_token = seed['prev']
        while prev_token:
            prev_data = self._nuscenes_schema.sample_data[prev_token]
            sweep_datas.insert(0, prev_data)
            prev_token = prev_data['prev']
        # Walk forwards from the seed element.
        next_token = seed['next']
        while next_token:
            next_data = self._nuscenes_schema.sample_data[next_token]
            sweep_datas.append(next_data)
            next_token = next_data['next']
        return sweep_datas

    def get_scene(self, scene_token):
        return self._nuscenes_schema.scene.get(scene_token)

    def get_sample(self, sample_token):
        return self._nuscenes_schema.sample.get(sample_token)

    def get_ego_pose(self, ego_pose_token):
        return self._nuscenes_schema.ego_pose.get(ego_pose_token)

    def get_calibrated_sensor(self, calibrated_sensor_token):
        return self._nuscenes_schema.calibrated_sensor.get(calibrated_sensor_token)

    def get_sensor(self, sensor_token):
        return self._nuscenes_schema.sensor.get(sensor_token)

    @property
    def dataset_root(self):
        return self._nuscenes_schema.dataroot
class NuScenes(object):
    """Iterable over all sample_data messages of one scene, time-ordered.

    Each iteration yields (channel, file_path, ego_pose, calibrated_sensor,
    timestamp) tuples sorted by timestamp.
    """
    def __init__(self, nuscenes_helper, scene_token) -> None:
        self.nuscenes_helper = nuscenes_helper
        self.scene_token = scene_token
    def __iter__(self):
        return self.read_messages(self.scene_token)
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # Nothing to release; context-manager support is for symmetry only.
        pass
    def read_messages(self, scene_token):
        """Yield the scene's sample_data rows joined with their metadata."""
        total_sample_datas = []
        samples = self.nuscenes_helper.get_sample_by_scene(scene_token)
        for sample in samples:
            sample_datas = self.nuscenes_helper.get_sample_data_by_sample(
                sample['token'])
            total_sample_datas.extend(sample_datas)
        # Interleave all sensors chronologically before yielding.
        total_sample_datas.sort(key=lambda sample_data: sample_data['timestamp'])
        for sample_data in total_sample_datas:
            timestamp = sample_data['timestamp']
            ego_pose_token = sample_data['ego_pose_token']
            ego_pose = self.nuscenes_helper.get_ego_pose(ego_pose_token)
            calibrated_sensor_token = sample_data['calibrated_sensor_token']
            calibrated_sensor = self.nuscenes_helper.get_calibrated_sensor(
                calibrated_sensor_token)
            sensor_token = calibrated_sensor['sensor_token']
            sensor = self.nuscenes_helper.get_sensor(sensor_token)
            channel = sensor['channel']
            file_name = sample_data['filename']
            file_path = os.path.join(self.nuscenes_helper.dataset_root, file_name)
            yield channel, file_path, ego_pose, calibrated_sensor, timestamp
| 0
|
apollo_public_repos/apollo/modules/tools/dataset
|
apollo_public_repos/apollo/modules/tools/dataset/nuscenes/readme.md
|
## Convert dataset
You can use the command below to convert the nuscenes dataset to an apollo record file. There may be multiple scenes in one dataset, and we create a record file for each scene.
```shell
python3 main.py -i nuscenes_dataset_path
```
The name of the record file is the `scene_token.record`. If you do not specify a path, the file will be saved in the current path.
## Convert calibration
You can use the command below to convert nuscenes calibration to apollo calibration files. There may be multiple scenes in one dataset, and we create calibration files for each scene.
```shell
python3 main.py -i nuscenes_dataset_path -t=cal
```
#### Camera intrinsics
Camera intrinsics matrix. ref [link](http://docs.ros.org/en/melodic/api/sensor_msgs/html/msg/CameraInfo.html)
- D: The distortion parameters, size depending on the distortion model. For "plumb_bob", the 5 parameters are: (k1, k2, t1, t2, k3).
- K: Intrinsic camera matrix for the raw (distorted) images.
- R: Rectification matrix (stereo cameras only)
- P: Projection/camera matrix
## Convert lidar pcd
You can use the command below to convert a nuscenes lidar pcd to a normal pcl file, which you can display in a visualizer like `pcl_viewer`.
```shell
python3 main.py -i nuscenes_lidar_pcd_file -t=pcd
```
If you do not specify a name, the default name of the file is `result.pcd`, which is saved in the current directory.
| 0
|
apollo_public_repos/apollo/modules/tools/dataset
|
apollo_public_repos/apollo/modules/tools/dataset/nuscenes/geometry.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2022 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import math
import numpy as np
def rotation_matrix(theta1, theta2, theta3, order='xyz'):
    """Build a 3x3 rotation matrix from three Euler angles.

    Args:
        theta1 (float): rotation angle (radians) about the 1st axis of `order`.
        theta2 (float): rotation angle (radians) about the 2nd axis of `order`.
        theta3 (float): rotation angle (radians) about the 3rd axis of `order`.
        order (str): Euler axis sequence; one of the 12 supported sequences
            ('xzx', 'xyx', 'yxy', 'yzy', 'zyz', 'zxz',
             'xyz', 'xzy', 'yxz', 'yzx', 'zyx', 'zxy').

    Returns:
        np.ndarray: 3x3 rotation matrix.

    Raises:
        ValueError: if `order` is not a supported axis sequence.
        (Previously an unsupported order crashed with UnboundLocalError
        on `return matrix`.)
    """
    c1 = np.cos(theta1)
    s1 = np.sin(theta1)
    c2 = np.cos(theta2)
    s2 = np.sin(theta2)
    c3 = np.cos(theta3)
    s3 = np.sin(theta3)
    if order == 'xzx':
        matrix = np.array([[c2, -c3*s2, s2*s3],
                           [c1*s2, c1*c2*c3-s1*s3, -c3*s1-c1*c2*s3],
                           [s1*s2, c1*s3+c2*c3*s1, c1*c3-c2*s1*s3]])
    elif order == 'xyx':
        matrix = np.array([[c2, s2*s3, c3*s2],
                           [s1*s2, c1*c3-c2*s1*s3, -c1*s3-c2*c3*s1],
                           [-c1*s2, c3*s1+c1*c2*s3, c1*c2*c3-s1*s3]])
    elif order == 'yxy':
        matrix = np.array([[c1*c3-c2*s1*s3, s1*s2, c1*s3+c2*c3*s1],
                           [s2*s3, c2, -c3*s2],
                           [-c3*s1-c1*c2*s3, c1*s2, c1*c2*c3-s1*s3]])
    elif order == 'yzy':
        matrix = np.array([[c1*c2*c3-s1*s3, -c1*s2, c3*s1+c1*c2*s3],
                           [c3*s2, c2, s2*s3],
                           [-c1*s3-c2*c3*s1, s1*s2, c1*c3-c2*s1*s3]])
    elif order == 'zyz':
        matrix = np.array([[c1*c2*c3-s1*s3, -c3*s1-c1*c2*s3, c1*s2],
                           [c1*s3+c2*c3*s1, c1*c3-c2*s1*s3, s1*s2],
                           [-c3*s2, s2*s3, c2]])
    elif order == 'zxz':
        matrix = np.array([[c1*c3-c2*s1*s3, -c1*s3-c2*c3*s1, s1*s2],
                           [c3*s1+c1*c2*s3, c1*c2*c3-s1*s3, -c1*s2],
                           [s2*s3, c3*s2, c2]])
    elif order == 'xyz':
        matrix = np.array([[c2*c3, -c2*s3, s2],
                           [c1*s3+c3*s1*s2, c1*c3-s1*s2*s3, -c2*s1],
                           [s1*s3-c1*c3*s2, c3*s1+c1*s2*s3, c1*c2]])
    elif order == 'xzy':
        matrix = np.array([[c2*c3, -s2, c2*s3],
                           [s1*s3+c1*c3*s2, c1*c2, c1*s2*s3-c3*s1],
                           [c3*s1*s2-c1*s3, c2*s1, c1*c3+s1*s2*s3]])
    elif order == 'yxz':
        matrix = np.array([[c1*c3+s1*s2*s3, c3*s1*s2-c1*s3, c2*s1],
                           [c2*s3, c2*c3, -s2],
                           [c1*s2*s3-c3*s1, c1*c3*s2+s1*s3, c1*c2]])
    elif order == 'yzx':
        matrix = np.array([[c1*c2, s1*s3-c1*c3*s2, c3*s1+c1*s2*s3],
                           [s2, c2*c3, -c2*s3],
                           [-c2*s1, c1*s3+c3*s1*s2, c1*c3-s1*s2*s3]])
    elif order == 'zyx':
        matrix = np.array([[c1*c2, c1*s2*s3-c3*s1, s1*s3+c1*c3*s2],
                           [c2*s1, c1*c3+s1*s2*s3, c3*s1*s2-c1*s3],
                           [-s2, c2*s3, c2*c3]])
    elif order == 'zxy':
        matrix = np.array([[c1*c3-s1*s2*s3, -c2*s1, c1*s3+c3*s1*s2],
                           [c3*s1+c1*s2*s3, c1*c2, s1*s3-c1*c3*s2],
                           [-c2*s3, s2, c2*c3]])
    else:
        raise ValueError("Unsupported rotation order: {}".format(order))
    return matrix
class Euler(object):
    """Euler angles (roll, pitch, yaw) in radians."""

    def __init__(self, roll, pitch, yaw) -> None:
        self.roll = roll
        self.pitch = pitch
        self.yaw = yaw

    def to_quaternion(self):
        """Convert to the equivalent Quaternion (w, x, y, z)."""
        half_roll = self.roll * 0.5
        half_pitch = self.pitch * 0.5
        half_yaw = self.yaw * 0.5
        cr, sr = math.cos(half_roll), math.sin(half_roll)
        cp, sp = math.cos(half_pitch), math.sin(half_pitch)
        cy, sy = math.cos(half_yaw), math.sin(half_yaw)
        return Quaternion(
            cr * cp * cy + sr * sp * sy,
            sr * cp * cy - cr * sp * sy,
            cr * sp * cy + sr * cp * sy,
            cr * cp * sy - sr * sp * cy)
class Quaternion(object):
    """Quaternion (w, x, y, z) with Hamilton-product multiplication."""

    def __init__(self, w, x, y, z) -> None:
        self.w = w
        self.x = x
        self.y = y
        self.z = z

    def to_euler(self):
        """Convert to Euler angles (roll, pitch, yaw) in radians."""
        w, x, y, z = self.w, self.x, self.y, self.z
        # roll (x-axis rotation)
        sinr_cosp = 2 * (w * x + y * z)
        cosr_cosp = 1 - 2 * (x * x + y * y)
        # pitch (y-axis rotation)
        sinp = 2 * (w * y - z * x)
        # yaw (z-axis rotation)
        siny_cosp = 2 * (w * z + x * y)
        cosy_cosp = 1 - 2 * (y * y + z * z)
        return Euler(math.atan2(sinr_cosp, cosr_cosp),
                     math.asin(sinp),
                     math.atan2(siny_cosp, cosy_cosp))

    def __mul__(self, other):
        """Hamilton product: self ⊗ other (composition of rotations)."""
        a, b, c, d = self.w, self.x, self.y, self.z
        e, f, g, h = other.w, other.x, other.y, other.z
        return Quaternion(
            a * e - b * f - c * g - d * h,
            a * f + b * e + c * h - d * g,
            a * g - b * h + c * e + d * f,
            a * h + b * g - c * f + d * e)
| 0
|
apollo_public_repos/apollo/modules/tools/dataset
|
apollo_public_repos/apollo/modules/tools/dataset/nuscenes/calibration_converter.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2022 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
'''Generate apollo calibration files by nuscenes calibration data.'''
import os
import yaml
from itertools import chain
from pathlib import Path
from nuscenes import NuScenesSchema, NuScenesHelper, NuScenes
CALIBRATION_META_ROOT = '../calibration_meta'
# Lidar meta
VELODYNE_PARAMS_PATH = 'velodyne_params'
LIDAR_NOVATEL_EXTRINSICS = 'lidar_novatel_extrinsics.yaml'
# Camera meta
CAMERA_PARAMS_PATH ='camera_params'
CAMERA_EXTRINSICS = 'camera_extrinsics.yaml'
CAMERA_INTRINSICS = 'camera_intrinsics.yaml'
# Radar meta
RADAR_PARAMS_PATH = 'radar_params'
RADAR_EXTRINSICS = 'radar_extrinsics.yaml'
# Frame ID
CAMERA_FRAME_ID = 'novatel'
RADAR_FRAME_ID = 'novatel'
LIDAR_FRAME_ID = 'novatel'
def load_yaml(file_path):
    """Read and parse a YAML file.

    Args:
        file_path (str): yaml file

    Returns:
        dict: parsed yaml object (None for an empty file)
    """
    with open(file_path, 'r') as stream:
        return yaml.safe_load(stream)
def save_yaml(file_path, content):
    """Serialize `content` to `file_path` as YAML.

    Key order is preserved (sort_keys=False) so output matches the
    calibration meta templates.

    Args:
        file_path (str): destination file path
        content (dict): yaml object
    """
    with open(file_path, 'w') as stream:
        yaml.safe_dump(content, stream, sort_keys=False)
def gen_camera_params(camera_name, calibrated_sensor, calibration_file_path):
    """Generate camera extrinsic and intrinsic file

    Args:
        camera_name (str): camera name
        calibrated_sensor (_type_): nuscenes calibrated_sensor json object
        calibration_file_path (str): saved path
    """
    # 1. Generate extrinsics: start from the meta template and fill in
    # this camera's pose.
    camera_meta_extrinsics = os.path.join(
        CALIBRATION_META_ROOT, CAMERA_PARAMS_PATH, CAMERA_EXTRINSICS)
    camera_extrinsics = load_yaml(camera_meta_extrinsics)
    camera_extrinsics['header']['frame_id'] = CAMERA_FRAME_ID
    camera_extrinsics['child_frame_id'] = camera_name
    # nuscenes stores translation as [x, y, z] and rotation as [w, x, y, z].
    camera_extrinsics['transform']['translation']['x'] = calibrated_sensor['translation'][0]
    camera_extrinsics['transform']['translation']['y'] = calibrated_sensor['translation'][1]
    camera_extrinsics['transform']['translation']['z'] = calibrated_sensor['translation'][2]
    camera_extrinsics['transform']['rotation']['w'] = calibrated_sensor['rotation'][0]
    camera_extrinsics['transform']['rotation']['x'] = calibrated_sensor['rotation'][1]
    camera_extrinsics['transform']['rotation']['y'] = calibrated_sensor['rotation'][2]
    camera_extrinsics['transform']['rotation']['z'] = calibrated_sensor['rotation'][3]
    # Output file name embeds the camera name, e.g. CAM_FRONT_extrinsics.yaml.
    file_name = CAMERA_EXTRINSICS.replace('camera', camera_name)
    file_path = os.path.join(calibration_file_path, CAMERA_PARAMS_PATH)
    Path(file_path).mkdir(parents=True, exist_ok=True)
    camera_extrinsics_file = os.path.join(file_path, file_name)
    save_yaml(camera_extrinsics_file, camera_extrinsics)
    # 2. Generate intrinsics into the same directory (file_path reused).
    camera_meta_intrinsics = os.path.join(
        CALIBRATION_META_ROOT, CAMERA_PARAMS_PATH, CAMERA_INTRINSICS)
    camera_intrinsics = load_yaml(camera_meta_intrinsics)
    # nuscenes camera_intrinsic is a 3x3 row-major matrix; flatten it to
    # the 9-element K list the apollo format expects.
    raw_camera_intrinsic = calibrated_sensor['camera_intrinsic']
    camera_intrinsics['header']['frame_id'] = CAMERA_FRAME_ID
    camera_intrinsics['K'] = list(chain.from_iterable(raw_camera_intrinsic))
    # Todo(zero): need to complete
    # camera_intrinsics['D'] =
    # camera_intrinsics['R'] =
    # camera_intrinsics['P'] =
    file_name = CAMERA_INTRINSICS.replace('camera', camera_name)
    camera_intrinsics_file = os.path.join(file_path, file_name)
    save_yaml(camera_intrinsics_file, camera_intrinsics)
def gen_radar_params(radar_name, calibrated_sensor, calibration_file_path):
    """Generate radar extrinsic file

    Args:
        radar_name (str): radar name
        calibrated_sensor (_type_): nuscenes calibrated_sensor json object
        calibration_file_path (_type_): saved path
    """
    template_path = os.path.join(
        CALIBRATION_META_ROOT, RADAR_PARAMS_PATH, RADAR_EXTRINSICS)
    radar_extrinsics = load_yaml(template_path)
    radar_extrinsics['header']['frame_id'] = RADAR_FRAME_ID
    radar_extrinsics['child_frame_id'] = radar_name
    # nuscenes stores translation as [x, y, z] and rotation as [w, x, y, z].
    transform = radar_extrinsics['transform']
    for index, axis in enumerate('xyz'):
        transform['translation'][axis] = calibrated_sensor['translation'][index]
    for index, component in enumerate('wxyz'):
        transform['rotation'][component] = calibrated_sensor['rotation'][index]
    output_dir = os.path.join(calibration_file_path, RADAR_PARAMS_PATH)
    Path(output_dir).mkdir(parents=True, exist_ok=True)
    output_file = os.path.join(
        output_dir, RADAR_EXTRINSICS.replace('radar', radar_name))
    save_yaml(output_file, radar_extrinsics)
def gen_velodyne_params(lidar_name, calibrated_sensor, calibration_file_path):
    """Generate lidar extrinsic file

    Args:
        lidar_name (str): lidar name
        calibrated_sensor (_type_): nuscenes calibrated_sensor json object
        calibration_file_path (str): saved path
    """
    # Start from the meta template and fill in this lidar's pose.
    lidar_meta_extrinsics = os.path.join(
        CALIBRATION_META_ROOT, VELODYNE_PARAMS_PATH, LIDAR_NOVATEL_EXTRINSICS)
    lidar_extrinsics = load_yaml(lidar_meta_extrinsics)
    lidar_extrinsics['header']['frame_id'] = LIDAR_FRAME_ID
    lidar_extrinsics['child_frame_id'] = lidar_name
    # nuscenes stores translation as [x, y, z] and rotation as [w, x, y, z].
    lidar_extrinsics['transform']['translation']['x'] = calibrated_sensor['translation'][0]
    lidar_extrinsics['transform']['translation']['y'] = calibrated_sensor['translation'][1]
    lidar_extrinsics['transform']['translation']['z'] = calibrated_sensor['translation'][2]
    lidar_extrinsics['transform']['rotation']['w'] = calibrated_sensor['rotation'][0]
    lidar_extrinsics['transform']['rotation']['x'] = calibrated_sensor['rotation'][1]
    lidar_extrinsics['transform']['rotation']['y'] = calibrated_sensor['rotation'][2]
    lidar_extrinsics['transform']['rotation']['z'] = calibrated_sensor['rotation'][3]
    # Output file name embeds the lidar name,
    # e.g. LIDAR_TOP_novatel_extrinsics.yaml.
    file_name = LIDAR_NOVATEL_EXTRINSICS.replace('lidar', lidar_name)
    file_path = os.path.join(calibration_file_path, VELODYNE_PARAMS_PATH)
    Path(file_path).mkdir(parents=True, exist_ok=True)
    lidar_novatel_extrinsics = os.path.join(file_path, file_name)
    save_yaml(lidar_novatel_extrinsics, lidar_extrinsics)
def gen_sensor_calibration(calibrations, calibration_file_path):
    """Generate camera/radar/lidar extrinsic file and camera intrinsic file

    Args:
        calibrations (dict): nuscenes calibrated_sensor json objects keyed
            by channel name (e.g. 'CAM_FRONT', 'LIDAR_TOP').
        calibration_file_path (str): saved path
    """
    # Dispatch each channel to its generator by channel-name prefix.
    dispatch = (
        ('CAM', gen_camera_params),
        ('LIDAR', gen_velodyne_params),
        ('RADAR', gen_radar_params),
    )
    for channel, calibrated_sensor in calibrations.items():
        for prefix, generator in dispatch:
            if channel.startswith(prefix):
                generator(channel, calibrated_sensor, calibration_file_path)
                break
        else:
            print("Unsupported sensor: {}".format(channel))
def dataset_to_calibration(nuscenes, calibration_root_path):
    """Generate sensor calibration files for one scene.

    Args:
        nuscenes (_type_): nuscenes(one scene)
        calibration_root_path (str): sensor calibrations saved path
    """
    output_path = os.path.join(calibration_root_path, nuscenes.scene_token)
    # Keep one calibrated_sensor per channel (later messages overwrite,
    # matching the original per-message assignment).
    calibrations = {
        channel: calibrated_sensor
        for channel, _, _, calibrated_sensor, _ in nuscenes
    }
    gen_sensor_calibration(calibrations, output_path)
def convert_calibration(dataset_path, calibration_root_path):
    """Generate sensor calibration

    Args:
        dataset_path (str): nuscenes dataset path
        calibration_root_path (str): sensor calibrations saved path
    """
    nuscenes_schema = NuScenesSchema(dataroot=dataset_path)
    n_helper = NuScenesHelper(nuscenes_schema)
    # One set of calibration files per scene in the dataset.
    for scene_token in nuscenes_schema.scene.keys():
        print("Start to convert scene: {}, Pls wait!".format(scene_token))
        nuscenes = NuScenes(n_helper, scene_token)
        dataset_to_calibration(nuscenes, calibration_root_path)
    print("Success! Calibrations saved in '{}'".format(calibration_root_path))
| 0
|
apollo_public_repos/apollo/modules/tools/dataset
|
apollo_public_repos/apollo/modules/tools/dataset/nuscenes/main.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2022 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import argparse
import os
import sys
import logging
from calibration_converter import convert_calibration
from dataset_converter import convert_dataset
from pcd_converter import convert_pcd
def main(args=sys.argv):
    """Entry point: dispatch input to the record/calibration/pcd converter.

    Args:
        args (list): command-line argument vector; defaults to sys.argv,
            and args[0] (program name) is stripped before parsing.
    """
    parser = argparse.ArgumentParser(
        description="nuScenes dataset convert to record tool.",
        prog="main.py")
    parser.add_argument(
        "-i", "--input", action="store", type=str, required=True,
        help="Input file or directory.")
    parser.add_argument(
        "-o", "--output", action="store", type=str, required=False,
        help="Output file or directory.")
    parser.add_argument(
        "-t", "--type", action="store", type=str, required=False,
        default="rcd", choices=['rcd', 'cal', 'pcd'],
        help="Conversion type. rcd:record, cal:calibration, pcd:pointcloud")
    args = parser.parse_args(args[1:])
    logging.debug(args)
    if args.type == 'rcd':
        # Record conversion consumes a dataset directory.
        if os.path.isdir(args.input):
            if args.output is None:
                args.output = '.'
            convert_dataset(args.input, args.output)
        else:
            logging.error("Pls enter directory! Not '{}'".format(args.input))
    elif args.type == 'cal':
        # Calibration conversion also consumes a dataset directory.
        if os.path.isdir(args.input):
            if args.output is None:
                args.output = '.'
            convert_calibration(args.input, args.output)
        else:
            logging.error("Pls enter directory! Not '{}'".format(args.input))
    elif args.type == 'pcd':
        # Pointcloud conversion consumes a single pcd file.
        if os.path.isfile(args.input):
            if args.output is None:
                args.output = 'result.pcd'
            convert_pcd(args.input, args.output)
        else:
            logging.error("Pls enter file! Not '{}'".format(args.input))
    else:
        # Unreachable in practice: argparse `choices` rejects other types.
        logging.error("Input error! '{}'".format(args.input))


if __name__ == '__main__':
    main()
| 0
|
apollo_public_repos/apollo/modules/tools/dataset/calibration_meta
|
apollo_public_repos/apollo/modules/tools/dataset/calibration_meta/gnss_params/ant_imu_leverarm.yaml
|
leverarm:
primary:
offset:
x: 0.0
y: 0.0
z: 0.0
uncertainty:
x: 0.05
y: 0.05
z: 0.08
secondary:
offset:
x: 0.0
y: 0.0
z: 0.0
uncertainty:
x: 0.05
y: 0.05
z: 0.08
| 0
|
apollo_public_repos/apollo/modules/tools/dataset/calibration_meta
|
apollo_public_repos/apollo/modules/tools/dataset/calibration_meta/radar_params/radar_extrinsics.yaml
|
header:
seq: 0
stamp:
secs: 0
nsecs: 0
frame_id: ''
child_frame_id: radar
transform:
translation:
x: 0.0
y: 0.0
z: 0.0
rotation:
x: 0.0
y: 0.0
z: 0.0
w: 1.0
| 0
|
apollo_public_repos/apollo/modules/tools/dataset/calibration_meta
|
apollo_public_repos/apollo/modules/tools/dataset/calibration_meta/velodyne_params/lidar_novatel_extrinsics.yaml
|
header:
seq: 0
stamp:
secs: 0
nsecs: 0
frame_id: novatel
child_frame_id: velodyne128
transform:
translation:
x: 0.0
y: 0.0
z: 0.0
rotation:
x: 0.0
y: 0.0
z: 0.0
w: 1.0
| 0
|
apollo_public_repos/apollo/modules/tools/dataset/calibration_meta
|
apollo_public_repos/apollo/modules/tools/dataset/calibration_meta/velodyne_params/lidar_height.yaml
|
vehicle:
parameters:
height: 0.0
height_var: 0.0
| 0
|
apollo_public_repos/apollo/modules/tools/dataset/calibration_meta
|
apollo_public_repos/apollo/modules/tools/dataset/calibration_meta/vehicle_params/vehicle_imu_extrinsics.yaml
|
transform:
translation:
x: 0.0
y: 0.0
z: 0.0
rotation:
x: 0.0
y: 0.0
z: 0.0
w: 1.0
| 0
|
apollo_public_repos/apollo/modules/tools/dataset/calibration_meta
|
apollo_public_repos/apollo/modules/tools/dataset/calibration_meta/camera_params/camera_intrinsics.yaml
|
header:
seq: 0
stamp:
secs: 0
nsecs: 0
frame_id: ''
height: 1080
width: 1920
distortion_model: 'plumb_bob'
D: [0.000000000000e+00, 0.000000000000e+00, 0.000000000000e+00, 0.000000000000e+00, 0.000000000000e+00]
K: [0.000000000000e+00, 0.000000000000e+00, 0.000000000000e+00, 0.000000000000e+00, 0.000000000000e+00, 0.000000000000e+00, 0.000000000000e+00, 0.000000000000e+00, 1.000000000000e+00]
R: [0.000000000000e+00, 0.000000000000e+00, 0.000000000000e+00, 0.000000000000e+00, 0.000000000000e+00, 0.000000000000e+00, 0.000000000000e+00, 0.000000000000e+00, 1.000000000000e+00]
P: [0.000000000000e+00, 0.000000000000e+00, 0.000000000000e+00, 0.000000000000e+00, 0.000000000000e+00, 0.000000000000e+00, 0.000000000000e+00, 0.000000000000e+00, 0.000000000000e+00, 0.000000000000e+00, 1.000000000000e+00, 0.000000000000e+00]
binning_x: 0
binning_y: 0
roi:
x_offset: 0
y_offset: 0
height: 0
width: 0
do_rectify: False
| 0
|
apollo_public_repos/apollo/modules/tools/dataset/calibration_meta
|
apollo_public_repos/apollo/modules/tools/dataset/calibration_meta/camera_params/camera_extrinsics.yaml
|
header:
seq: 0
stamp:
secs: 0
nsecs: 0
frame_id: ''
child_frame_id: front_6mm
transform:
translation:
x: 0.0
y: 0.0
z: 0.0
rotation:
x: 0.0
y: 0.0
z: 0.0
w: 0.0
| 0
|
apollo_public_repos/apollo/modules/tools
|
apollo_public_repos/apollo/modules/tools/mapshow/mapshow.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import argparse
import matplotlib.pyplot as plt
from modules.tools.mapshow.libs.localization import Localization
from modules.tools.mapshow.libs.map import Map
from modules.tools.mapshow.libs.path import Path
def draw(map):
    """Draw the selected HD-map layers onto the global matplotlib figure.

    Relies on the module-level `args` namespace (parsed in __main__) to
    decide which layers are enabled.

    Args:
        map: loaded Map object to render.
    """
    selected_lane_ids = args.laneid
    if selected_lane_ids is None:
        selected_lane_ids = []
    map.draw_lanes(plt, args.showlaneids, selected_lane_ids,
                   args.showlanedetails)
    # Optional overlays, in the original draw order.
    optional_layers = (
        (args.showsignals, map.draw_signal_lights),
        (args.showstopsigns, map.draw_stop_signs),
        (args.showjunctions, map.draw_pnc_junctions),
        (args.showcrosswalks, map.draw_crosswalks),
        (args.showyieldsigns, map.draw_yield_signs),
    )
    for enabled, draw_layer in optional_layers:
        if enabled:
            draw_layer(plt)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Mapshow is a tool to display hdmap info on a map.",
        prog="mapshow.py")
    parser.add_argument(
        "-m", "--map", action="store", type=str, required=True,
        help="Specify the map file in txt or binary format")
    parser.add_argument(
        "-m2", "--map2", action="store", type=str, required=False,
        help="Specify the map file in txt or binary format")
    parser.add_argument(
        "-sl", "--showlaneids", action="store_const", const=True,
        help="Show all lane ids in map")
    parser.add_argument(
        "-sld", "--showlanedetails", action="store_const", const=True,
        help="Show all lane ids in map")
    parser.add_argument(
        "-l", "--laneid", nargs='+',
        help="Show specific lane id(s) in map")
    parser.add_argument(
        "-signal", "--showsignals", action="store_const", const=True,
        help="Show all signal light stop lines with ids in map")
    parser.add_argument(
        "-stopsign", "--showstopsigns", action="store_const", const=True,
        help="Show all stop sign stop lines with ids in map")
    parser.add_argument(
        "-yieldsign", "--showyieldsigns", action="store_const", const=True,
        help="Show all yield sign stop lines with ids in map")
    parser.add_argument(
        "-junction", "--showjunctions", action="store_const", const=True,
        help="Show all pnc-junctions with ids in map")
    parser.add_argument(
        "-crosswalk", "--showcrosswalks", action="store_const", const=True,
        help="Show all crosswalks with ids in map")
    parser.add_argument(
        "--loc", action="store", type=str, required=False,
        help="Specify the localization pb file in txt format")
    parser.add_argument(
        "--position", action="store", type=str, required=False,
        help="Plot the x,y coordination in string format, e.g., 343.02,332.01")
    # driving path data files are text files with data format of
    # t,x,y,heading,speed
    parser.add_argument(
        "-dp", "--drivingpath", nargs='+',
        help="Show driving paths in map")
    args = parser.parse_args()
    # Draw the primary map with the requested layers.
    map = Map()
    map.load(args.map)
    draw(map)
    # Optionally overlay a second map for comparison.
    if args.map2 is not None:
        map2 = Map()
        map2.load(args.map2)
        draw(map2)
    # Optionally overlay recorded driving paths.
    if args.drivingpath is not None:
        path = Path(args.drivingpath)
        path.draw(plt)
    # Optionally overlay the vehicle pose from a localization pb file.
    if args.loc is not None:
        localization = Localization()
        localization.load(args.loc)
        localization.plot_vehicle(plt)
    # Optionally mark a single x,y position.
    if args.position is not None:
        x, y = args.position.split(",")
        x, y = float(x), float(y)
        plt.plot([x], [y], 'bo')
    plt.axis('equal')
    plt.show()
| 0
|
apollo_public_repos/apollo/modules/tools
|
apollo_public_repos/apollo/modules/tools/mapshow/roadshow.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import argparse
import matplotlib.pyplot as plt
from modules.tools.mapshow.libs.map import Map
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        # Fixed user-facing typo: "Raodshow" -> "Roadshow".
        description="Roadshow is a tool to display road info on a map.",
        prog="roadshow.py")
    parser.add_argument(
        "-m", "--map", action="store", type=str, required=True,
        help="Specify the map file in txt or binary format")
    args = parser.parse_args()
    # Load the HD-map and render only its road geometry.
    # (Renamed local from `map` to avoid shadowing the builtin.)
    road_map = Map()
    road_map.load(args.map)
    road_map.draw_roads(plt)
    plt.axis('equal')
    plt.show()
| 0
|
apollo_public_repos/apollo/modules/tools
|
apollo_public_repos/apollo/modules/tools/mapshow/README.md
|
# Map Show
## About
Mapshow is a tool to display hdmap info on a map.
## Setup
If you run mapshow inside docker, there is no setup for running the tool.
Otherwise, you have to run following command to setup python path.
```bash
# In apollo root dir:
source scripts/apollo_base.sh
```
## Usage
> usage: python mapshow.py \[-h] -m MAP \[-sl] [-l LANEID [LANEID ...]]
>
> optional arguments:
>
> -h, --help show this help message and exit
>
> -m MAP, --map MAP Specify the map file in txt or binary format
>
> -sl, --showlaneids Show all lane ids in map
>
> -l, --laneid Show specific lane id(s) in map
>
> -l LANEID \[LANEID ...], --laneid LANEID \[LANEID ...] Show specific lane id(s) in map
>
> -signal, --showsignals Show all signal light stop lines with ids in map
>
> -stopsign, --showstopsigns Show all stop sign stop lines with ids in map
>
> -junction, --showjunctions Show all pnc-junctions with ids in map
>
## Examples
Show basic map layout only
```bash
python mapshow.py -m /path/to/map/file
```
Show basic map layout with all lane ids
```bash
python mapshow.py -m /path/to/map/file -sl
```
Show basic map layout with specific lane ids
```bash
python mapshow.py -m /path/to/map/file -l 1474023788152_1_-1
```
| 0
|
apollo_public_repos/apollo/modules/tools
|
apollo_public_repos/apollo/modules/tools/mapshow/BUILD
|
load("@rules_python//python:defs.bzl", "py_binary")
load("//tools/install:install.bzl", "install")

package(default_visibility = ["//visibility:public"])

# Ship the README alongside the installed tools.
filegroup(
    name = "readme",
    srcs = [
        "README.md",
    ],
)

# Full map visualizer (lanes, signals, stop signs, junctions, ...).
py_binary(
    name = "mapshow",
    srcs = ["mapshow.py"],
    deps = [
        "//modules/tools/mapshow/libs:localization",
        "//modules/tools/mapshow/libs:map",
        "//modules/tools/mapshow/libs:path",
    ],
)

# Road-geometry-only visualizer.
py_binary(
    name = "roadshow",
    srcs = ["roadshow.py"],
    deps = [
        "//modules/tools/mapshow/libs:map",
    ],
)

# Install both binaries and the README under tools/mapshow.
install(
    name = "install",
    data = [":readme"],
    data_dest = "tools/mapshow",
    py_dest = "tools/mapshow",
    targets = [
        "mapshow",
        "roadshow",
    ]
)
| 0
|
apollo_public_repos/apollo/modules/tools/mapshow
|
apollo_public_repos/apollo/modules/tools/mapshow/libs/subplot_traj_path.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from matplotlib import cm as cmx
from matplotlib import colors as mcolors
import matplotlib.pyplot as plt
class TrajPathSubplot:
    """Subplot that overlays the recent history of planned trajectory paths.

    A fixed pool of `path_lines_size` matplotlib lines is pre-allocated;
    each history entry reuses one line, colored along the 'brg' colormap
    so older paths are visually distinguishable.
    """

    def __init__(self, ax):
        self.ax = ax
        self.path_lines = []        # pre-allocated Line2D pool
        self.path_lines_size = 30   # max number of history paths shown
        self.colors = []
        self.init_colors()
        # self.colors = ['b','r', 'y', 'k']
        for i in range(self.path_lines_size):
            line, = ax.plot(
                [0], [0],
                c=self.colors[i % len(self.colors)],
                ls="-",
                marker='',
                lw=8,
                alpha=0.3)
            self.path_lines.append(line)
        ax.set_xlabel("x (m)")
        # ax.set_xlim([-2, 10])
        # ax.set_ylim([-6, 6])
        self.ax.autoscale_view()
        # self.ax.relim()
        # ax.set_ylabel("y (m)")
        ax.set_title("PLANNING ACC")
        # Lines stay hidden until real data arrives in show().
        self.set_visible(False)

    def init_colors(self):
        """Build one color per pooled line from the 'brg' colormap."""
        self.colors = []
        values = list(range(self.path_lines_size))
        jet = plt.get_cmap('brg')
        color_norm = mcolors.Normalize(vmin=0, vmax=values[-1])
        scalar_map = cmx.ScalarMappable(norm=color_norm, cmap=jet)
        for val in values:
            color_val = scalar_map.to_rgba(val)
            self.colors.append(color_val)

    def set_visible(self, visible):
        """Show or hide every pooled path line."""
        for line in self.path_lines:
            line.set_visible(visible)

    def show(self, planning):
        """Redraw the path history from `planning` shared state.

        Holds planning.traj_data_lock while reading, because the history
        lists are mutated by the message-callback thread.
        """
        planning.traj_data_lock.acquire()
        for i in range(len(planning.traj_path_x_history)):
            if i >= self.path_lines_size:
                # More history entries than pooled lines; skip the excess.
                print("WARNING: number of path lines is more than "
                      + str(self.path_lines_size))
                continue
            # Newest entries take lines from the end of the pool.
            speed_line = self.path_lines[self.path_lines_size - i - 1]
            speed_line.set_xdata(planning.traj_path_x_history[i])
            speed_line.set_ydata(planning.traj_path_y_history[i])
            speed_line.set_visible(True)
        # self.ax.legend(loc="upper left", borderaxespad=0., ncol=5)
        # self.ax.axis('equal')
        planning.traj_data_lock.release()
        self.ax.autoscale_view()
        self.ax.relim()
| 0
|
apollo_public_repos/apollo/modules/tools/mapshow
|
apollo_public_repos/apollo/modules/tools/mapshow/libs/plot_smoothness.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import matplotlib.animation as animation
import matplotlib.pyplot as plt
from cyber.python.cyber_py3 import cyber
from modules.common_msgs.planning_msgs import planning_pb2
from modules.tools.mapshow.libs.planning import Planning
from modules.tools.mapshow.libs.subplot_traj_acc import TrajAccSubplot
from modules.tools.mapshow.libs.subplot_traj_path import TrajPathSubplot
from modules.tools.mapshow.libs.subplot_traj_speed import TrajSpeedSubplot
planning = Planning()
def update(frame_number):
    """FuncAnimation callback: redraw all subplots from shared planning state.

    Args:
        frame_number: frame index supplied by matplotlib (unused).
    """
    traj_speed_subplot.show(planning)
    traj_acc_subplot.show(planning)
    traj_path_subplot.show(planning)
def planning_callback(planning_pb):
planning.update_planning_pb(planning_pb)
planning.compute_traj_data()
def add_listener():
    """Create the cyber node and subscribe to the planning topic."""
    node = cyber.Node("st_plot")
    node.create_reader('/apollo/planning',
                       planning_pb2.ADCTrajectory,
                       planning_callback)
def press_key(event=None):
    """Key-press handler registered on the figure canvas.

    matplotlib's ``mpl_connect('key_press_event', ...)`` always calls the
    handler with a KeyEvent; the original zero-argument signature raised
    TypeError on every key press. Currently a no-op placeholder.

    Args:
        event: matplotlib KeyEvent (ignored).
    """
    pass
if __name__ == '__main__':
    # Start cyber, subscribe to /apollo/planning, then run the matplotlib
    # animation loop until the window is closed.
    cyber.init()
    add_listener()
    fig = plt.figure(figsize=(14, 6))
    fig.canvas.mpl_connect('key_press_event', press_key)
    # Layout: speed (top-left), acc (top-right), path (bottom-left).
    ax = plt.subplot2grid((2, 2), (0, 0))
    traj_speed_subplot = TrajSpeedSubplot(ax)
    ax2 = plt.subplot2grid((2, 2), (0, 1))
    traj_acc_subplot = TrajAccSubplot(ax2)
    ax3 = plt.subplot2grid((2, 2), (1, 0))
    traj_path_subplot = TrajPathSubplot(ax3)
    # interval is in milliseconds -> roughly 10 Hz refresh.
    ani = animation.FuncAnimation(fig, update, interval=100)
    plt.show()
    cyber.shutdown()
| 0
|
apollo_public_repos/apollo/modules/tools/mapshow
|
apollo_public_repos/apollo/modules/tools/mapshow/libs/plot_planning.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import argparse
import matplotlib.animation as animation
import matplotlib.pyplot as plt
from cyber.python.cyber_py3 import cyber
from modules.common_msgs.planning_msgs import planning_pb2
from modules.tools.mapshow.libs.localization import Localization
from modules.tools.mapshow.libs.planning import Planning
from modules.tools.mapshow.libs.subplot_path import PathSubplot
from modules.tools.mapshow.libs.subplot_sl_main import SlMainSubplot
from modules.tools.mapshow.libs.subplot_speed import SpeedSubplot
from modules.tools.mapshow.libs.subplot_st_main import StMainSubplot
from modules.tools.mapshow.libs.subplot_st_speed import StSpeedSubplot
# Shared state: written by the cyber subscriber callback and read by the
# matplotlib animation callback below.
planning = Planning()
localization = Localization()
def update(frame_number):
    """Animation callback: redraw all subplots from the latest planning data.

    Args:
        frame_number: frame index supplied by FuncAnimation (unused).
    """
    # st_main_subplot.show(planning)
    # st_speed_subplot.show(planning)
    map_path_subplot.show(planning, localization)
    dp_st_main_subplot.show(planning)
    qp_st_main_subplot.show(planning)
    speed_subplot.show(planning)
    sl_main_subplot.show(planning)
    st_speed_subplot.show(planning)
def planning_callback(planning_pb):
    """Cyber reader callback: cache the ADCTrajectory and derived plot data."""
    planning.update_planning_pb(planning_pb)
    # ADC pose is carried inside the planning debug payload.
    localization.update_localization_pb(
        planning_pb.debug.planning_data.adc_position)
    planning.compute_st_data()
    planning.compute_sl_data()
    planning.compute_path_data()
    planning.compute_speed_data()
    planning.compute_init_point()
def add_listener():
    """Create the cyber node and subscribe to the planning topic."""
    node = cyber.Node("st_plot")
    node.create_reader('/apollo/planning',
                       planning_pb2.ADCTrajectory,
                       planning_callback)
def press_key(event):
    """Zoom the map view: '+'/'=' zooms in, '-'/'_' zooms out."""
    if event.key in ('+', '='):
        map_path_subplot.zoom_in()
    if event.key in ('-', '_'):
        map_path_subplot.zoom_out()
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="plot_planning is a tool to display "
        "planning trajs on a map.",
        prog="plot_planning_old.py")
    parser.add_argument(
        "-m",
        "--map",
        action="store",
        type=str,
        required=False,
        default=None,
        help="Specify the map file in txt or binary format")
    args = parser.parse_args()
    cyber.init()
    add_listener()
    fig = plt.figure()
    fig.canvas.mpl_connect('key_press_event', press_key)
    # Map view occupies the top-left 2x2 cells of a 3x3 grid.
    ax = plt.subplot2grid((3, 3), (0, 0), rowspan=2, colspan=2)
    map_path_subplot = PathSubplot(ax, args.map)
    ax1 = plt.subplot2grid((3, 3), (0, 2))
    speed_subplot = SpeedSubplot(ax1)
    ax2 = plt.subplot2grid((3, 3), (2, 2))
    # NOTE(review): variable says dp_* but is constructed with the QP
    # optimizer name (and vice versa below) — looks swapped; confirm the
    # intended pairing before renaming.
    dp_st_main_subplot = StMainSubplot(ax2, 'QpSplineStSpeedOptimizer')
    ax3 = plt.subplot2grid((3, 3), (1, 2))
    qp_st_main_subplot = StMainSubplot(ax3, 'DpStSpeedOptimizer')
    ax4 = plt.subplot2grid((3, 3), (2, 0), colspan=1)
    sl_main_subplot = SlMainSubplot(ax4)
    ax5 = plt.subplot2grid((3, 3), (2, 1), colspan=1)
    st_speed_subplot = StSpeedSubplot(ax5, 'QpSplineStSpeedOptimizer')
    # interval is in milliseconds -> roughly 10 Hz refresh.
    ani = animation.FuncAnimation(fig, update, interval=100)
    ax.axis('equal')
    plt.show()
    cyber.shutdown()
| 0
|
apollo_public_repos/apollo/modules/tools/mapshow
|
apollo_public_repos/apollo/modules/tools/mapshow/libs/subplot_sl_main.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
class SlMainSubplot:
    """Subplot rendering the QP path problem in the s-l (Frenet) frame.

    Shows static/dynamic obstacle boundaries, map boundaries, the
    aggregated decision boundary, and the planned path, all versus the
    reference-line arc length s.
    """

    def __init__(self, ax):
        """Create all line artists on *ax* and hide them until data arrives."""
        self.ax = ax
        self.sl_static_obstacle_lower_boundary_line, = \
            ax.plot([0], [0], "r-", lw=0.3, alpha=0.8)
        self.sl_static_obstacle_upper_boundary_line, = \
            ax.plot([0], [0], "r-", lw=0.3, alpha=0.8)
        self.sl_dynamic_obstacle_lower_boundary_line, = \
            ax.plot([0], [0], "y-", lw=0.3, alpha=0.8)
        self.sl_dynamic_obstacle_upper_boundary_line, = \
            ax.plot([0], [0], "y-", lw=0.3, alpha=0.8)
        self.sl_map_lower_boundary_line, = \
            ax.plot([0], [0], "b-", lw=0.3, ms=2, alpha=0.8)
        self.sl_map_upper_boundary_line, = \
            ax.plot([0], [0], "b-", lw=0.3, ms=4, alpha=0.8)
        self.sl_path_line, = ax.plot([0], [0], "k--")
        self.sl_aggregated_boundary_low_line, = \
            ax.plot([0], [0], "k-", lw=1, ms=2)
        self.sl_aggregated_boundary_high_line, = \
            ax.plot([0], [0], "k-", lw=1, ms=2)
        ax.set_xlim([-10, 220])
        ax.set_ylim([-2.5, 2.5])
        ax.set_xlabel("s - ref_line (m)")
        ax.set_ylabel("l (m)")
        ax.set_title("QP Path - sl Graph")
        self.set_visible(False)

    def _all_lines(self):
        """Every line artist managed by this subplot."""
        return (self.sl_static_obstacle_lower_boundary_line,
                self.sl_static_obstacle_upper_boundary_line,
                self.sl_dynamic_obstacle_lower_boundary_line,
                self.sl_dynamic_obstacle_upper_boundary_line,
                self.sl_map_lower_boundary_line,
                self.sl_map_upper_boundary_line,
                self.sl_path_line,
                self.sl_aggregated_boundary_low_line,
                self.sl_aggregated_boundary_high_line)

    def set_visible(self, visible):
        """Toggle visibility of every managed line."""
        for line in self._all_lines():
            line.set_visible(visible)

    def show(self, planning):
        """Redraw the subplot from *planning*'s s-l data (lock-protected)."""
        planning.sl_data_lock.acquire()
        self.set_visible(True)
        # Each sampled s appears twice so the boundary polylines drop to a
        # sentinel (+/-11 m, beyond the +/-2.5 m y-range) between samples,
        # rendering as disconnected vertical spikes rather than one curve.
        new_sampled_s = [v for s in planning.sl_sampled_s for v in (s, s)]
        new_map_lower = [v for l in planning.sl_map_lower_boundary
                        for v in (l, -11)]
        new_map_upper = [v for l in planning.sl_map_upper_boundary
                        for v in (l, 11)]
        self.sl_map_lower_boundary_line.set_xdata(new_sampled_s)
        self.sl_map_lower_boundary_line.set_ydata(new_map_lower)
        self.sl_map_upper_boundary_line.set_xdata(new_sampled_s)
        self.sl_map_upper_boundary_line.set_ydata(new_map_upper)
        # Dynamic obstacle boundaries are plotted directly per sample.
        self.sl_dynamic_obstacle_lower_boundary_line.set_xdata(
            planning.sl_sampled_s)
        self.sl_dynamic_obstacle_lower_boundary_line.set_ydata(
            planning.sl_dynamic_obstacle_lower_boundary)
        self.sl_dynamic_obstacle_upper_boundary_line.set_xdata(
            planning.sl_sampled_s)
        self.sl_dynamic_obstacle_upper_boundary_line.set_ydata(
            planning.sl_dynamic_obstacle_upper_boundary)
        new_static_lower = [v for l in planning.sl_static_obstacle_lower_boundary
                           for v in (l, -11)]
        new_static_upper = [v for l in planning.sl_static_obstacle_upper_boundary
                           for v in (l, 11)]
        self.sl_static_obstacle_lower_boundary_line.set_xdata(new_sampled_s)
        self.sl_static_obstacle_lower_boundary_line.set_ydata(new_static_lower)
        self.sl_static_obstacle_upper_boundary_line.set_xdata(new_sampled_s)
        self.sl_static_obstacle_upper_boundary_line.set_ydata(new_static_upper)
        self.sl_path_line.set_xdata(planning.sl_path_s)
        self.sl_path_line.set_ydata(planning.sl_path_l)
        self.sl_aggregated_boundary_low_line.set_xdata(
            planning.sl_aggregated_boundary_s)
        self.sl_aggregated_boundary_low_line.set_ydata(
            planning.sl_aggregated_boundary_low_l)
        self.sl_aggregated_boundary_high_line.set_xdata(
            planning.sl_aggregated_boundary_s)
        self.sl_aggregated_boundary_high_line.set_ydata(
            planning.sl_aggregated_boundary_high_l)
        planning.sl_data_lock.release()
| 0
|
apollo_public_repos/apollo/modules/tools/mapshow
|
apollo_public_repos/apollo/modules/tools/mapshow/libs/subplot_speed.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
class SpeedSubplot:
    """Subplot rendering planned speed-vs-time curves, one line per source."""

    def __init__(self, ax):
        """Create up to `speed_lines_size` reusable line artists on *ax*."""
        self.ax = ax
        self.speed_lines = []
        self.speed_lines_size = 3
        colors = ['b', 'g', 'r', 'k']
        for i in range(self.speed_lines_size):
            # Progressively thicker, color-cycled dotted lines so
            # overlapping curves remain distinguishable.
            line, = ax.plot(
                [0], [0],
                colors[i % len(colors)] + ".",
                lw=3 + i * 3,
                alpha=0.4)
            self.speed_lines.append(line)
        ax.set_xlabel("t (second)")
        ax.set_xlim([-2, 10])
        ax.set_ylim([-1, 40])
        ax.set_ylabel("speed (m/s)")
        ax.set_title("PLANNING SPEED")
        self.set_visible(False)

    def set_visible(self, visible):
        """Toggle visibility of every managed line."""
        for line in self.speed_lines:
            line.set_visible(visible)

    def show(self, planning):
        """Redraw speed curves from *planning*'s speed data (lock-protected).

        Curves with fewer than two points are skipped; at most
        `speed_lines_size` curves are drawn.
        """
        cnt = 0
        planning.speed_data_lock.acquire()
        for name in planning.speed_data_time:
            if cnt >= self.speed_lines_size:
                # Fixed: message previously said "path lines", and the
                # warning repeated for every remaining curve; warn once.
                print("WARNING: number of speed lines is more than "
                      + str(self.speed_lines_size))
                break
            if len(planning.speed_data_time[name]) <= 1:
                continue
            speed_line = self.speed_lines[cnt]
            speed_line.set_visible(True)
            speed_line.set_xdata(planning.speed_data_time[name])
            speed_line.set_ydata(planning.speed_data_val[name])
            # Truncate long source names so the legend stays compact.
            speed_line.set_label(name[:5])
            cnt += 1
        self.ax.legend(loc="upper left", borderaxespad=0., ncol=5)
        planning.speed_data_lock.release()
| 0
|
apollo_public_repos/apollo/modules/tools/mapshow
|
apollo_public_repos/apollo/modules/tools/mapshow/libs/subplot_traj_acc.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from matplotlib import cm as cmx
from matplotlib import colors as mcolors
import matplotlib.pyplot as plt
class TrajAccSubplot:
    """Subplot rendering a rolling history of planned acceleration curves.

    History entry i is drawn on line (size - i - 1), so the oldest entries
    map to the lines at the start of the buffer.
    """

    def __init__(self, ax):
        """Create `acc_lines_size` color-graded line artists on *ax*."""
        self.ax = ax
        self.acc_lines = []
        self.acc_lines_size = 30
        self.colors = []
        self.init_colors()
        for i in range(self.acc_lines_size):
            line, = ax.plot(
                [0], [0],
                c=self.colors[i % len(self.colors)],
                ls="-",
                marker='',
                lw=3,
                alpha=0.8)
            self.acc_lines.append(line)
        ax.set_xlabel("t (second)")
        ax.set_ylim([-6, 6])
        self.ax.autoscale_view()
        ax.set_ylabel("acc (m/s^2)")
        ax.set_title("PLANNING ACC")
        self.set_visible(False)

    def init_colors(self):
        """Build one distinct color per history slot from the 'brg' colormap."""
        self.colors = []
        values = list(range(self.acc_lines_size))
        jet = plt.get_cmap('brg')
        color_norm = mcolors.Normalize(vmin=0, vmax=values[-1])
        scalar_map = cmx.ScalarMappable(norm=color_norm, cmap=jet)
        for val in values:
            self.colors.append(scalar_map.to_rgba(val))

    def set_visible(self, visible):
        """Toggle visibility of every managed line."""
        for line in self.acc_lines:
            line.set_visible(visible)

    def show(self, planning):
        """Redraw acceleration-history curves from *planning* (lock-protected)."""
        planning.traj_data_lock.acquire()
        # Fixed: the original measured len(planning.traj_speed_t_history)
        # here, i.e. the unrelated speed history, not the acc history.
        for i in range(len(planning.traj_acc_t_history)):
            if i >= self.acc_lines_size:
                print("WARNING: number of acc lines is more than "
                      + str(self.acc_lines_size))
                continue
            acc_line = self.acc_lines[self.acc_lines_size - i - 1]
            acc_line.set_xdata(planning.traj_acc_t_history[i])
            acc_line.set_ydata(planning.traj_acc_a_history[i])
            acc_line.set_visible(True)
        planning.traj_data_lock.release()
        self.ax.autoscale_view()
        self.ax.relim()
| 0
|
apollo_public_repos/apollo/modules/tools/mapshow
|
apollo_public_repos/apollo/modules/tools/mapshow/libs/subplot_traj_speed.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from matplotlib import cm as cmx
from matplotlib import colors as mcolors
import matplotlib.pyplot as plt
class TrajSpeedSubplot:
    """Subplot rendering a rolling history of planned speed curves.

    History entry i is drawn on line (size - i - 1), so the oldest entries
    map to the lines at the start of the buffer.
    """

    def __init__(self, ax):
        """Create `speed_lines_size` color-graded line artists on *ax*."""
        self.ax = ax
        self.speed_lines = []
        self.speed_lines_size = 30
        self.colors = []
        self.init_colors()
        for i in range(self.speed_lines_size):
            line, = ax.plot(
                [0], [0],
                c=self.colors[i % len(self.colors)],
                ls="-",
                marker='',
                lw=3,
                alpha=0.8)
            self.speed_lines.append(line)
        ax.set_xlabel("t (second)")
        ax.set_ylim([-1, 25])
        self.ax.autoscale_view()
        ax.set_ylabel("speed (m/s)")
        ax.set_title("PLANNING SPEED")
        self.set_visible(False)

    def init_colors(self):
        """Build one distinct color per history slot from the 'brg' colormap."""
        self.colors = []
        values = list(range(self.speed_lines_size))
        jet = plt.get_cmap('brg')
        color_norm = mcolors.Normalize(vmin=0, vmax=values[-1])
        scalar_map = cmx.ScalarMappable(norm=color_norm, cmap=jet)
        for val in values:
            self.colors.append(scalar_map.to_rgba(val))

    def set_visible(self, visible):
        """Toggle visibility of every managed line."""
        for line in self.speed_lines:
            line.set_visible(visible)

    def show(self, planning):
        """Redraw speed-history curves from *planning* (lock-protected)."""
        planning.traj_data_lock.acquire()
        for i in range(len(planning.traj_speed_t_history)):
            if i >= self.speed_lines_size:
                # Fixed: message previously said "path lines".
                print("WARNING: number of speed lines is more than "
                      + str(self.speed_lines_size))
                continue
            speed_line = self.speed_lines[self.speed_lines_size - i - 1]
            speed_line.set_xdata(planning.traj_speed_t_history[i])
            speed_line.set_ydata(planning.traj_speed_v_history[i])
            speed_line.set_visible(True)
        planning.traj_data_lock.release()
        self.ax.autoscale_view()
        self.ax.relim()
| 0
|
apollo_public_repos/apollo/modules/tools/mapshow
|
apollo_public_repos/apollo/modules/tools/mapshow/libs/localization.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import math
import threading
import modules.tools.common.proto_utils as proto_utils
from modules.common_msgs.localization_msgs.localization_pb2 import LocalizationEstimate
class Localization:
    """Holds a LocalizationEstimate and draws the vehicle on a map axes."""

    def __init__(self, localization_pb=None):
        """Optionally seed with a LocalizationEstimate protobuf."""
        self.localization_pb = localization_pb
        # Guards localization_pb against concurrent update/replot.
        self.localization_data_lock = threading.Lock()

    def update_localization_pb(self, localization_pb):
        """Replace the stored localization message (thread-safe)."""
        self.localization_data_lock.acquire()
        self.localization_pb = localization_pb
        self.localization_data_lock.release()

    def load(self, localization_file_name):
        """Load a LocalizationEstimate from a text-format protobuf file."""
        self.localization_pb = proto_utils.get_pb_from_text_file(
            localization_file_name, LocalizationEstimate())

    def plot_vehicle(self, ax):
        """Draw position marker, footprint polygon, and annotation on *ax*."""
        self.plot_vehicle_position(ax)
        self.plot_vehicle_polygon(ax)
        self.show_annotation(ax)

    def replot_vehicle(self, position_line, polygon_line):
        """Update existing line artists in place; no-op when no pose yet."""
        self.localization_data_lock.acquire()
        if self.localization_pb is None:
            self.localization_data_lock.release()
            return
        position_line.set_visible(True)
        polygon_line.set_visible(True)
        self._replot_vehicle_position(position_line)
        self._replot_vehicle_polygon(polygon_line)
        self.localization_data_lock.release()

    def plot_vehicle_position(self, ax):
        """Plot the vehicle position as a blue dot."""
        loc_x = [self.localization_pb.pose.position.x]
        loc_y = [self.localization_pb.pose.position.y]
        ax.plot(loc_x, loc_y, "bo")

    def _replot_vehicle_position(self, line):
        """Move the position marker to the current pose."""
        loc_x = [self.localization_pb.pose.position.x]
        loc_y = [self.localization_pb.pose.position.y]
        line.set_xdata(loc_x)
        line.set_ydata(loc_y)

    def _replot_vehicle_polygon(self, line):
        """Move the footprint polygon to the current pose and heading."""
        position = []
        position.append(self.localization_pb.pose.position.x)
        position.append(self.localization_pb.pose.position.y)
        position.append(self.localization_pb.pose.position.z)
        polygon = self.get_vehicle_polygon(position,
                                           self.localization_pb.pose.heading)
        px = []
        py = []
        for point in polygon:
            px.append(point[0])
            py.append(point[1])
        line.set_xdata(px)
        line.set_ydata(py)

    def plot_vehicle_polygon(self, ax):
        """Plot the vehicle footprint polygon at the current pose."""
        position = []
        position.append(self.localization_pb.pose.position.x)
        position.append(self.localization_pb.pose.position.y)
        position.append(self.localization_pb.pose.position.z)
        polygon = self.get_vehicle_polygon(position,
                                           self.localization_pb.pose.heading)
        self.plot_polygon(polygon, ax)

    def show_annotation(self, ax):
        """Annotate the vehicle with timestamp, y-speed, and y-acceleration."""
        current_t = self.localization_pb.header.timestamp_sec
        content = "t = " + str(current_t) + "\n"
        content += "speed @y = " + str(
            self.localization_pb.pose.linear_velocity.y) + "\n"
        content += "acc @y = " + str(
            self.localization_pb.pose.linear_acceleration_vrf.y)
        lxy = [-80, 80]
        ax.annotate(
            content,
            xy=(self.localization_pb.pose.position.x,
                self.localization_pb.pose.position.y),
            xytext=lxy,
            textcoords='offset points',
            ha='left',
            va='top',
            bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.3),
            arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'),
            alpha=0.8)

    def plot_polygon(self, polygon, ax):
        """Plot a closed polygon (list of [x, y, z]) as a green outline."""
        px = []
        py = []
        for point in polygon:
            px.append(point[0])
            py.append(point[1])
        ax.plot(px, py, "g-")

    def get_vehicle_polygon(self, position, heading):
        """Return the closed vehicle footprint in world coordinates.

        Args:
            position: [x, y, z] of the vehicle reference point.
            heading: yaw angle in radians.

        Returns:
            List of five [x, y, 0] corners (first corner repeated to close).
        """
        front_edge_to_center = 3.89
        back_edge_to_center = 1.043
        left_edge_to_center = 1.055
        right_edge_to_center = 1.055
        cos_h = math.cos(heading)
        sin_h = math.sin(heading)
        # Corners in the vehicle frame (x forward, y left):
        # (p3)  --------  (p0)
        #  |       o       |
        # (p2)  --------  (p1)
        # Fixed: the rear corners previously mixed up left/right half-widths
        # (p2 used -left, p3 used +right); harmless only while the two
        # half-widths are equal.
        p0_x, p0_y = front_edge_to_center, left_edge_to_center
        p1_x, p1_y = front_edge_to_center, -right_edge_to_center
        p2_x, p2_y = -back_edge_to_center, -right_edge_to_center
        p3_x, p3_y = -back_edge_to_center, left_edge_to_center
        # Rotate by heading, then translate to the world position.
        p0_x, p0_y = p0_x * cos_h - p0_y * sin_h, p0_x * sin_h + p0_y * cos_h
        p1_x, p1_y = p1_x * cos_h - p1_y * sin_h, p1_x * sin_h + p1_y * cos_h
        p2_x, p2_y = p2_x * cos_h - p2_y * sin_h, p2_x * sin_h + p2_y * cos_h
        p3_x, p3_y = p3_x * cos_h - p3_y * sin_h, p3_x * sin_h + p3_y * cos_h
        [x, y, z] = position
        polygon = []
        polygon.append([p0_x + x, p0_y + y, 0])
        polygon.append([p1_x + x, p1_y + y, 0])
        polygon.append([p2_x + x, p2_y + y, 0])
        polygon.append([p3_x + x, p3_y + y, 0])
        polygon.append([p0_x + x, p0_y + y, 0])
        return polygon
| 0
|
apollo_public_repos/apollo/modules/tools/mapshow
|
apollo_public_repos/apollo/modules/tools/mapshow/libs/map.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import random
from matplotlib import cm as cmx
from matplotlib import colors as mcolors
import matplotlib.pyplot as plt
from modules.common_msgs.map_msgs import map_pb2
import modules.tools.common.proto_utils as proto_utils
class Map:
    """Loads an HD-map protobuf and draws its elements on a matplotlib axes."""

    def __init__(self):
        self.map_pb = map_pb2.Map()
        self.colors = []
        self.init_colors()

    def init_colors(self):
        """Build a small palette of distinct colors from the 'brg' colormap."""
        color_num = 6
        self.colors = []
        values = list(range(color_num))
        jet = plt.get_cmap('brg')
        color_norm = mcolors.Normalize(vmin=0, vmax=values[-1])
        scalar_map = cmx.ScalarMappable(norm=color_norm, cmap=jet)
        for val in values:
            color_val = scalar_map.to_rgba(val)
            self.colors.append(color_val)

    def load(self, map_file_name):
        """Parse the map file into self.map_pb; return True on success."""
        res = proto_utils.get_pb_from_file(map_file_name, self.map_pb)
        return res is not None

    def draw_roads(self, ax):
        """Draw every road's outer boundary, cycling through the palette."""
        cnt = 1
        for road in self.map_pb.road:
            color_val = self.colors[cnt % len(self.colors)]
            self.draw_road(ax, road, color_val)
            cnt += 1

    def draw_road(self, ax, road, color_val):
        """Draw one road's outer-polygon line segments."""
        for section in road.section:
            for edge in section.boundary.outer_polygon.edge:
                for segment in edge.curve.segment:
                    if segment.HasField('line_segment'):
                        px = []
                        py = []
                        for p in segment.line_segment.point:
                            px.append(float(p.x))
                            py.append(float(p.y))
                        ax.plot(px, py, ls='-', c=color_val, alpha=0.5)

    def draw_lanes(self, ax, is_show_lane_ids, laneids, is_show_lane_details):
        """Draw lane boundaries/centerlines.

        Args:
            ax: target axes.
            is_show_lane_ids: annotate each drawn lane with its id.
            laneids: when non-empty, restrict drawing to these lane ids.
            is_show_lane_details: annotate each drawn lane with its topology.
        """
        cnt = 1
        for lane in self.map_pb.lane:
            color_val = self.colors[cnt % len(self.colors)]
            if len(laneids) == 0:
                self._draw_lane_boundary(lane, ax, color_val)
                self._draw_lane_central(lane, ax, color_val)
            else:
                if lane.id.id in laneids:
                    self._draw_lane_boundary(lane, ax, color_val)
                    self._draw_lane_central(lane, ax, color_val)
            if is_show_lane_ids:
                self._draw_lane_id(lane, ax, color_val)
            elif is_show_lane_details:
                self._draw_lane_details(lane, ax, color_val)
            elif lane.id.id in laneids:
                # Explicitly requested lanes also get dumped to stdout.
                print(str(lane))
                self._draw_lane_id(lane, ax, color_val)
            cnt += 1

    def _draw_lane_id(self, lane, ax, color_val):
        """Annotate a lane with its id near the lane's central point."""
        x, y = self._find_lane_central_point(lane)
        self._draw_label(lane.id.id, (x, y), ax, color_val)

    def _draw_lane_details(self, lane, ax, color_val):
        """Annotate a lane with its id and predecessor/successor/neighbor ids."""
        labelxys = []
        labelxys.append((40, -40))
        labelxys.append((-40, -40))
        labelxys.append((40, 40))
        labelxys.append((-40, 40))
        has = ['right', 'left', 'right', 'left']
        vas = ['bottom', 'bottom', 'top', 'top']
        # Random offset direction reduces overlap of neighboring labels.
        idx = random.randint(0, 3)
        lxy = labelxys[idx]
        x, y = self._find_lane_central_point(lane)
        details = str(lane.id.id)
        for predecessor_id in lane.predecessor_id:
            details += '\npre:' + str(predecessor_id.id)
        for successor_id in lane.successor_id:
            details += '\nsuc:' + str(successor_id.id)
        for left_neighbor_forward_lane_id in lane.left_neighbor_forward_lane_id:
            details += '\nlnf:' + str(left_neighbor_forward_lane_id.id)
        for right_neighbor_forward_lane_id in lane.right_neighbor_forward_lane_id:
            details += '\nrnf:' + str(right_neighbor_forward_lane_id.id)
        for left_neighbor_reverse_lane_id in lane.left_neighbor_reverse_lane_id:
            details += '\nlnr:' + str(left_neighbor_reverse_lane_id.id)
        for right_neighbor_reverse_lane_id in lane.right_neighbor_reverse_lane_id:
            details += '\nrnr:' + str(right_neighbor_reverse_lane_id.id)
        plt.annotate(
            details,
            xy=(x, y), xytext=lxy,
            textcoords='offset points', ha=has[idx], va=vas[idx],
            bbox=dict(boxstyle='round,pad=0.5', fc=color_val, alpha=0.5),
            arrowprops=dict(arrowstyle='-|>', connectionstyle='arc3,rad=-0.2',
                            fc=color_val, ec=color_val, alpha=0.5))

    def draw_pnc_junctions(self, ax):
        """Draw every PnC-junction polygon with an id label."""
        cnt = 1
        for pnc_junction in self.map_pb.pnc_junction:
            color_val = self.colors[cnt % len(self.colors)]
            self._draw_polygon_boundary(pnc_junction.polygon, ax, color_val)
            self._draw_pnc_junction_id(pnc_junction, ax, color_val)
            cnt += 1

    def _draw_pnc_junction_id(self, pnc_junction, ax, color_val):
        """Label a PnC junction at its first polygon vertex."""
        x = pnc_junction.polygon.point[0].x
        y = pnc_junction.polygon.point[0].y
        self._draw_label(pnc_junction.id.id, (x, y), ax, color_val)

    def draw_crosswalks(self, ax):
        """Draw every crosswalk polygon with an id label."""
        cnt = 1
        for crosswalk in self.map_pb.crosswalk:
            color_val = self.colors[cnt % len(self.colors)]
            self._draw_polygon_boundary(crosswalk.polygon, ax, color_val)
            self._draw_crosswalk_id(crosswalk, ax, color_val)
            cnt += 1

    def _draw_crosswalk_id(self, crosswalk, ax, color_val):
        """Label a crosswalk at its first polygon vertex."""
        x = crosswalk.polygon.point[0].x
        y = crosswalk.polygon.point[0].y
        self._draw_label(crosswalk.id.id, (x, y), ax, color_val)

    @staticmethod
    def _draw_label(label_id, point, ax, color_val):
        """Annotate *point* with *label_id* at a randomly chosen offset."""
        labelxys = []
        labelxys.append((40, -40))
        labelxys.append((-40, -40))
        labelxys.append((40, 40))
        labelxys.append((-40, 40))
        has = ['right', 'left', 'right', 'left']
        vas = ['bottom', 'bottom', 'top', 'top']
        idx = random.randint(0, 3)
        lxy = labelxys[idx]
        plt.annotate(
            label_id,
            xy=(point[0], point[1]), xytext=lxy,
            textcoords='offset points', ha=has[idx], va=vas[idx],
            bbox=dict(boxstyle='round,pad=0.5', fc=color_val, alpha=0.5),
            arrowprops=dict(arrowstyle='-|>', connectionstyle='arc3,rad=-0.2',
                            fc=color_val, ec=color_val, alpha=0.5))

    @staticmethod
    def _find_lane_central_point(lane):
        """Return (x, y) midway between the lane's median boundary points."""
        segment_idx = len(lane.left_boundary.curve.segment) // 2
        median_segment = lane.left_boundary.curve.segment[segment_idx]
        left_point_idx = len(median_segment.line_segment.point) // 2
        left_median_point = median_segment.line_segment.point[left_point_idx]
        segment_idx = len(lane.right_boundary.curve.segment) // 2
        median_segment = lane.right_boundary.curve.segment[segment_idx]
        right_point_idx = len(median_segment.line_segment.point) // 2
        right_median_point = median_segment.line_segment.point[right_point_idx]
        # Fixed: these averages previously used floor division (//), which
        # snapped float coordinates down; true division matches the intent
        # (cf. _get_median_point).
        x = (left_median_point.x + right_median_point.x) / 2
        y = (left_median_point.y + right_median_point.y) / 2
        return x, y

    @staticmethod
    def _get_median_point(points):
        """Return (x, y) of the median point of *points* (mean of middle two
        when the count is even)."""
        if len(points) % 2 == 1:
            point = points[len(points) // 2]
            return point.x, point.y
        else:
            point1 = points[len(points) // 2 - 1]
            point2 = points[len(points) // 2]
            return (point1.x + point2.x) / 2.0, (point1.y + point2.y) / 2.0

    @staticmethod
    def _draw_lane_boundary(lane, ax, color_val):
        """Draw the lane's left and right boundary polylines."""
        for curve in lane.left_boundary.curve.segment:
            if curve.HasField('line_segment'):
                px = []
                py = []
                for p in curve.line_segment.point:
                    px.append(float(p.x))
                    py.append(float(p.y))
                ax.plot(px, py, ls='-', c=color_val, alpha=0.5)
        for curve in lane.right_boundary.curve.segment:
            if curve.HasField('line_segment'):
                px = []
                py = []
                for p in curve.line_segment.point:
                    px.append(float(p.x))
                    py.append(float(p.y))
                ax.plot(px, py, ls='-', c=color_val, alpha=0.5)

    @staticmethod
    def _draw_lane_central(lane, ax, color_val):
        """Draw the lane's central curve as a dotted polyline."""
        for curve in lane.central_curve.segment:
            if curve.HasField('line_segment'):
                px = []
                py = []
                for p in curve.line_segment.point:
                    px.append(float(p.x))
                    py.append(float(p.y))
                ax.plot(px, py, ls=':', c=color_val, alpha=0.5)

    @staticmethod
    def _draw_polygon_boundary(polygon, ax, color_val):
        """Draw a polygon's vertices as a connected polyline."""
        px = []
        py = []
        for point in polygon.point:
            px.append(point.x)
            py.append(point.y)
        ax.plot(px, py, ls='-', c=color_val, alpha=0.5)

    def draw_signal_lights(self, ax):
        """Draw every traffic signal's stop lines."""
        for signal in self.map_pb.signal:
            for stop_line in signal.stop_line:
                for curve in stop_line.segment:
                    self._draw_stop_line(curve.line_segment, signal.id.id, ax, "mistyrose")

    def draw_stop_signs(self, ax):
        """Draw every stop sign's stop lines."""
        for stop_sign in self.map_pb.stop_sign:
            for stop_line in stop_sign.stop_line:
                for curve in stop_line.segment:
                    self._draw_stop_line(curve.line_segment, stop_sign.id.id, ax, "yellow")

    def draw_yield_signs(self, ax):
        """Draw every yield sign's stop lines."""
        # 'yield' is a Python keyword, so the proto field needs getattr.
        for yieldsign in getattr(self.map_pb, "yield"):
            for stop_line in yieldsign.stop_line:
                for curve in stop_line.segment:
                    self._draw_stop_line(curve.line_segment, yieldsign.id.id, ax, "powderblue")

    @staticmethod
    def _draw_stop_line(line_segment, label, ax, label_color_val):
        """Draw one stop line and annotate it at the segment's centroid."""
        px = []
        py = []
        for p in line_segment.point:
            px.append(float(p.x))
            py.append(float(p.y))
        ax.plot(px, py, 'o-')
        lxy = [random.randint(20, 80) * random.sample([-1, 1], 1)[0],
               random.randint(20, 80) * random.sample([-1, 1], 1)[0]]
        # Fixed: the centroid previously used floor division (//), snapping
        # the anchor down to whole units; use the true average.
        xy = (sum(px) / len(px), sum(py) / len(py))
        plt.annotate(
            label,
            xy=xy, xytext=lxy,
            textcoords='offset points',
            bbox=dict(boxstyle='round,pad=0.5', fc=label_color_val, alpha=0.5),
            arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'))
| 0
|
apollo_public_repos/apollo/modules/tools/mapshow
|
apollo_public_repos/apollo/modules/tools/mapshow/libs/subplot_st_speed.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
class StSpeedSubplot:
    """Subplot showing the planned speed profile versus path arc length s,
    together with the speed limit and the QP upper/lower speed bounds for
    one named speed optimizer."""

    def __init__(self, ax, st_name):
        """Create the four line artists on *ax*; hide them until data arrives.

        Args:
            ax: target matplotlib axes.
            st_name: optimizer name used as the key into planning's st data.
        """
        self.speed_limit_line, = ax.plot([0], [0], "r-",
                                         lw=6, alpha=0.5, label="limits")
        self.speed_line, = ax.plot([0], [0], "k-",
                                   lw=3, alpha=0.5, label="planned")
        self.speed_upper_bound_line, = ax.plot([0], [0], "b-",
                                               lw=1, alpha=1, label="upper")
        self.speed_lower_bound_line, = ax.plot([0], [0], "b-",
                                               lw=3, alpha=1, label="lower")
        self.st_name = st_name
        ax.set_xlim(-10, 220)
        ax.set_ylim(-1, 40)
        ax.set_xlabel("s - qp_path(m)")
        ax.set_ylabel("v (m/s)")
        ax.set_title("QP Speed - sv graph")
        ax.legend(loc="upper left", bbox_to_anchor=(0, 1), ncol=2,
                  borderaxespad=0.)
        self.set_visible(False)

    def set_visible(self, visible):
        """Toggle visibility of all four managed lines."""
        for line in (self.speed_limit_line,
                     self.speed_line,
                     self.speed_upper_bound_line,
                     self.speed_lower_bound_line):
            line.set_visible(visible)

    def show(self, planning):
        """Redraw from *planning*'s st data; hide everything when no data
        exists for this subplot's optimizer name."""
        self.set_visible(False)
        with planning.st_data_lock:
            if self.st_name not in planning.st_curve_s:
                return
            key = self.st_name
            updates = (
                (self.speed_line,
                 planning.st_curve_s[key],
                 planning.st_curve_v[key]),
                (self.speed_limit_line,
                 planning.st_speed_limit_s[key],
                 planning.st_speed_limit_v[key]),
                (self.speed_upper_bound_line,
                 planning.st_speed_constraint_s[key],
                 planning.st_speed_constraint_upper[key]),
                (self.speed_lower_bound_line,
                 planning.st_speed_constraint_s[key],
                 planning.st_speed_constraint_lower[key]),
            )
            for line, xs, ys in updates:
                line.set_xdata(xs)
                line.set_ydata(ys)
                line.set_visible(True)
| 0
|
apollo_public_repos/apollo/modules/tools/mapshow
|
apollo_public_repos/apollo/modules/tools/mapshow/libs/planning.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import threading
import numpy as np
from modules.common_msgs.planning_msgs import planning_internal_pb2
class Planning:
    """Caches the latest planning protobuf plus data derived from it.

    The ``compute_*`` methods run on the subscriber thread and translate
    the protobuf into plain python containers; the ``replot_*`` methods
    run on the UI/animation thread and copy the cached containers into
    matplotlib artists.  Each data group has its own ``threading.Lock``
    because the two threads touch the caches concurrently.
    """

    def __init__(self, planning_pb=None):
        """Initialize empty caches; *planning_pb* may be set later via
        update_planning_pb()."""
        self.data_lock = threading.Lock()
        self.init_point_lock = threading.Lock()
        self.planning_pb = planning_pb
        # Planned path (x, y) samples keyed by path name.
        self.path_data_lock = threading.Lock()
        self.path_data_x = {}
        self.path_data_y = {}
        # Speed-plan (t, v) samples keyed by plan name.
        self.speed_data_lock = threading.Lock()
        self.speed_data_time = {}
        self.speed_data_val = {}
        # Rolling histories of published trajectories (speed/acc/path),
        # each capped at the corresponding *_history_len.
        self.traj_data_lock = threading.Lock()
        self.traj_speed_history_len = 30
        self.traj_speed_t_history = []
        self.traj_speed_v_history = []
        self.traj_acc_history_len = 30
        self.traj_acc_t_history = []
        self.traj_acc_a_history = []
        self.traj_path_history_len = 30
        self.traj_path_x_history = []
        self.traj_path_y_history = []
        # st-graph data keyed by st-graph name.
        self.st_data_lock = threading.Lock()
        self.st_curve_s = {}
        self.st_curve_t = {}
        self.st_curve_v = {}
        self.st_data_boundary_s = {}
        self.st_data_boundary_t = {}
        self.st_data_boundary_type = {}
        self.st_speed_limit_s = {}
        self.st_speed_limit_v = {}
        self.st_speed_constraint_s = {}
        self.st_speed_constraint_lower = {}
        self.st_speed_constraint_upper = {}
        # sl-frame data (single lists, refreshed wholesale).
        self.sl_data_lock = threading.Lock()
        self.sl_sampled_s = []
        self.sl_static_obstacle_lower_boundary = []
        self.sl_static_obstacle_upper_boundary = []
        self.sl_dynamic_obstacle_lower_boundary = []
        self.sl_dynamic_obstacle_upper_boundary = []
        self.sl_map_lower_boundary = []
        self.sl_map_upper_boundary = []
        self.sl_path_s = []
        self.sl_path_l = []
        self.sl_aggregated_boundary_low_l = []
        self.sl_aggregated_boundary_high_l = []
        self.sl_aggregated_boundary_s = []
        # QP kernel cruise/follow reference lines keyed by st-graph name.
        self.kernel_cruise_t = {}
        self.kernel_cruise_s = {}
        self.kernel_follow_t = {}
        self.kernel_follow_s = {}
        self.init_point_x = []
        self.init_point_y = []

    def update_planning_pb(self, planning_pb):
        """Replace the cached planning protobuf with the newest message."""
        self.planning_pb = planning_pb

    def compute_init_point(self):
        """Cache the (x, y) of the trajectory init point."""
        with self.init_point_lock:
            init_point = self.planning_pb.debug.planning_data.init_point
            self.init_point_x = [init_point.path_point.x]
            self.init_point_y = [init_point.path_point.y]

    def compute_sl_data(self):
        """Extract sl-frame data from the protobuf into local lists, then
        swap them into the cache under sl_data_lock."""
        sl_sampled_s = []
        sl_map_lower_boundary = []
        sl_map_upper_boundary = []
        sl_static_obstacle_lower_boundary = []
        sl_static_obstacle_upper_boundary = []
        sl_dynamic_obstacle_lower_boundary = []
        sl_dynamic_obstacle_upper_boundary = []
        sl_path_s = []
        sl_path_l = []
        sl_aggregated_boundary_low_l = []
        sl_aggregated_boundary_high_l = []
        sl_aggregated_boundary_s = []
        for sl_frame in self.planning_pb.debug.planning_data.sl_frame:
            for s in sl_frame.sampled_s:
                sl_sampled_s.append(s)
            for l in sl_frame.map_lower_bound:
                # Out-of-range bounds are clamped to +/-100 with the sign
                # preserved (100 * l // abs(l) is +/-100 for nonzero l).
                if (l > 10 or l < -10):
                    sl_map_lower_boundary.append(100 * l // abs(l))
                else:
                    sl_map_lower_boundary.append(l)
            for l in sl_frame.map_upper_bound:
                if (l > 10 or l < -10):
                    sl_map_upper_boundary.append(100 * l // abs(l))
                else:
                    sl_map_upper_boundary.append(l)
            for l in sl_frame.static_obstacle_lower_bound:
                sl_static_obstacle_lower_boundary.append(l)
            for l in sl_frame.static_obstacle_upper_bound:
                sl_static_obstacle_upper_boundary.append(l)
            for l in sl_frame.dynamic_obstacle_lower_bound:
                sl_dynamic_obstacle_lower_boundary.append(l)
            for l in sl_frame.dynamic_obstacle_upper_bound:
                sl_dynamic_obstacle_upper_boundary.append(l)
            for slpoint in sl_frame.sl_path:
                sl_path_s.append(slpoint.s)
                sl_path_l.append(slpoint.l)
            for l in sl_frame.aggregated_boundary_low:
                sl_aggregated_boundary_low_l.append(l)
            for l in sl_frame.aggregated_boundary_high:
                sl_aggregated_boundary_high_l.append(l)
            for s in sl_frame.aggregated_boundary_s:
                sl_aggregated_boundary_s.append(s)
        with self.sl_data_lock:
            self.sl_sampled_s = sl_sampled_s
            self.sl_map_upper_boundary = sl_map_upper_boundary
            self.sl_map_lower_boundary = sl_map_lower_boundary
            self.sl_static_obstacle_lower_boundary = sl_static_obstacle_lower_boundary
            self.sl_static_obstacle_upper_boundary = sl_static_obstacle_upper_boundary
            self.sl_dynamic_obstacle_lower_boundary = sl_dynamic_obstacle_lower_boundary
            self.sl_dynamic_obstacle_upper_boundary = sl_dynamic_obstacle_upper_boundary
            self.sl_path_s = sl_path_s
            self.sl_path_l = sl_path_l
            self.sl_aggregated_boundary_low_l = sl_aggregated_boundary_low_l
            self.sl_aggregated_boundary_high_l = sl_aggregated_boundary_high_l
            self.sl_aggregated_boundary_s = sl_aggregated_boundary_s

    def compute_st_data(self):
        """Extract st-graph data (obstacle boundaries, speed profile,
        speed limits, speed constraints and qp kernel references)."""
        st_data_boundary_s = {}
        st_data_boundary_t = {}
        st_curve_s = {}
        st_curve_t = {}
        st_curve_v = {}
        st_data_boundary_type = {}
        st_speed_limit_s = {}
        st_speed_limit_v = {}
        st_speed_constraint_s = {}
        st_speed_constraint_lower = {}
        st_speed_constraint_upper = {}
        kernel_cruise_t = {}
        kernel_cruise_s = {}
        kernel_follow_t = {}
        kernel_follow_s = {}
        for st_graph in self.planning_pb.debug.planning_data.st_graph:
            st_data_boundary_s[st_graph.name] = {}
            st_data_boundary_t[st_graph.name] = {}
            st_data_boundary_type[st_graph.name] = {}
            for boundary in st_graph.boundary:
                st_data_boundary_type[st_graph.name][boundary.name] \
                    = planning_internal_pb2.StGraphBoundaryDebug.StBoundaryType.Name(
                        boundary.type)
                st_data_boundary_s[st_graph.name][boundary.name] = []
                st_data_boundary_t[st_graph.name][boundary.name] = []
                for point in boundary.point:
                    st_data_boundary_s[st_graph.name][boundary.name] \
                        .append(point.s)
                    st_data_boundary_t[st_graph.name][boundary.name] \
                        .append(point.t)
                # Close the boundary polygon by repeating the first vertex.
                st_data_boundary_s[st_graph.name][boundary.name].append(
                    st_data_boundary_s[st_graph.name][boundary.name][0])
                st_data_boundary_t[st_graph.name][boundary.name].append(
                    st_data_boundary_t[st_graph.name][boundary.name][0])
            st_curve_s[st_graph.name] = []
            st_curve_t[st_graph.name] = []
            st_curve_v[st_graph.name] = []
            for point in st_graph.speed_profile:
                st_curve_s[st_graph.name].append(point.s)
                st_curve_t[st_graph.name].append(point.t)
                st_curve_v[st_graph.name].append(point.v)
            st_speed_limit_s[st_graph.name] = []
            st_speed_limit_v[st_graph.name] = []
            for point in st_graph.speed_limit:
                st_speed_limit_s[st_graph.name].append(point.s)
                st_speed_limit_v[st_graph.name].append(point.v)
            st_speed_constraint_s[st_graph.name] = []
            st_speed_constraint_lower[st_graph.name] = []
            st_speed_constraint_upper[st_graph.name] = []
            speed_constraint = st_graph.speed_constraint
            # Constraints are sampled over t; interpolate the speed profile
            # to re-express them over s for the v-s subplot.
            interp_s_set = []
            for t in speed_constraint.t:
                interp_s = np.interp(t, st_curve_t[st_graph.name],
                                     st_curve_s[st_graph.name])
                interp_s_set.append(interp_s)
            st_speed_constraint_s[st_graph.name].extend(interp_s_set)
            st_speed_constraint_lower[st_graph.name].extend(
                speed_constraint.lower_bound)
            st_speed_constraint_upper[st_graph.name].extend(
                speed_constraint.upper_bound)
            kernel_cruise_t[st_graph.name] = []
            kernel_cruise_s[st_graph.name] = []
            kernel_cruise = st_graph.kernel_cruise_ref
            kernel_cruise_t[st_graph.name].append(kernel_cruise.t)
            kernel_cruise_s[st_graph.name].append(kernel_cruise.cruise_line_s)
            kernel_follow_t[st_graph.name] = []
            kernel_follow_s[st_graph.name] = []
            kernel_follow = st_graph.kernel_follow_ref
            kernel_follow_t[st_graph.name].append(kernel_follow.t)
            kernel_follow_s[st_graph.name].append(kernel_follow.follow_line_s)
        with self.st_data_lock:
            self.st_data_boundary_s = st_data_boundary_s
            self.st_data_boundary_t = st_data_boundary_t
            self.st_curve_s = st_curve_s
            self.st_curve_t = st_curve_t
            self.st_curve_v = st_curve_v
            self.st_speed_limit_v = st_speed_limit_v
            self.st_speed_limit_s = st_speed_limit_s
            self.st_data_boundary_type = st_data_boundary_type
            self.st_speed_constraint_s = st_speed_constraint_s
            self.st_speed_constraint_lower = st_speed_constraint_lower
            self.st_speed_constraint_upper = st_speed_constraint_upper
            self.kernel_cruise_t = kernel_cruise_t
            self.kernel_cruise_s = kernel_cruise_s
            self.kernel_follow_t = kernel_follow_t
            self.kernel_follow_s = kernel_follow_s

    @staticmethod
    def _trim_history(history, max_len):
        """Return *history* truncated to its newest *max_len* entries."""
        if len(history) > max_len:
            return history[-max_len:]
        return history

    def compute_traj_data(self):
        """Append the newest trajectory to the rolling speed/acc/path
        histories, trimming each to its configured length."""
        traj_speed_t = []
        traj_speed_v = []
        traj_acc_t = []
        traj_acc_a = []
        traj_path_x = []
        traj_path_y = []
        base_time = self.planning_pb.header.timestamp_sec
        for trajectory_point in self.planning_pb.trajectory_point:
            traj_acc_t.append(base_time + trajectory_point.relative_time)
            traj_acc_a.append(trajectory_point.a)
            traj_speed_t.append(base_time + trajectory_point.relative_time)
            traj_speed_v.append(trajectory_point.v)
            traj_path_x.append(trajectory_point.path_point.x)
            traj_path_y.append(trajectory_point.path_point.y)
        with self.traj_data_lock:
            self.traj_speed_t_history.append(traj_speed_t)
            self.traj_speed_v_history.append(traj_speed_v)
            self.traj_speed_t_history = self._trim_history(
                self.traj_speed_t_history, self.traj_speed_history_len)
            self.traj_speed_v_history = self._trim_history(
                self.traj_speed_v_history, self.traj_speed_history_len)
            self.traj_acc_t_history.append(traj_acc_t)
            self.traj_acc_a_history.append(traj_acc_a)
            self.traj_acc_t_history = self._trim_history(
                self.traj_acc_t_history, self.traj_acc_history_len)
            self.traj_acc_a_history = self._trim_history(
                self.traj_acc_a_history, self.traj_acc_history_len)
            self.traj_path_x_history.append(traj_path_x)
            self.traj_path_y_history.append(traj_path_y)
            self.traj_path_x_history = self._trim_history(
                self.traj_path_x_history, self.traj_path_history_len)
            self.traj_path_y_history = self._trim_history(
                self.traj_path_y_history, self.traj_path_history_len)

    def replot_sl_data(self,
                       sl_static_obstacle_lower_boundary,
                       sl_static_obstacle_upper_boundary,
                       sl_dynamic_obstacle_lower_boundary,
                       sl_dynamic_obstacle_upper_boundary,
                       sl_map_lower_boundary,
                       sl_map_upper_boundary, sl_path,
                       sl_aggregated_boundary_low_line,
                       sl_aggregated_boundary_high_line):
        """Copy cached sl data into the given matplotlib line artists."""
        with self.sl_data_lock:
            sl_static_obstacle_lower_boundary.set_visible(True)
            sl_static_obstacle_upper_boundary.set_visible(True)
            sl_dynamic_obstacle_lower_boundary.set_visible(True)
            sl_dynamic_obstacle_upper_boundary.set_visible(True)
            sl_map_lower_boundary.set_visible(True)
            sl_map_upper_boundary.set_visible(True)
            sl_path.set_visible(True)
            sl_aggregated_boundary_low_line.set_visible(True)
            sl_aggregated_boundary_high_line.set_visible(True)
            # Each s is duplicated, and each boundary value is followed by
            # a sentinel (-11 / 11), so bounds render as vertical spikes.
            new_sampled_s = []
            for s in self.sl_sampled_s:
                new_sampled_s.append(s)
                new_sampled_s.append(s)
            new_map_lower = []
            for l in self.sl_map_lower_boundary:
                new_map_lower.append(l)
                new_map_lower.append(-11)
            new_map_upper = []
            for l in self.sl_map_upper_boundary:
                new_map_upper.append(l)
                new_map_upper.append(11)
            sl_map_lower_boundary.set_xdata(new_sampled_s)
            sl_map_lower_boundary.set_ydata(new_map_lower)
            sl_map_upper_boundary.set_xdata(new_sampled_s)
            sl_map_upper_boundary.set_ydata(new_map_upper)
            sl_dynamic_obstacle_lower_boundary.set_xdata(self.sl_sampled_s)
            sl_dynamic_obstacle_lower_boundary.set_ydata(
                self.sl_dynamic_obstacle_lower_boundary)
            sl_dynamic_obstacle_upper_boundary.set_xdata(self.sl_sampled_s)
            sl_dynamic_obstacle_upper_boundary.set_ydata(
                self.sl_dynamic_obstacle_upper_boundary)
            new_static_lower = []
            for l in self.sl_static_obstacle_lower_boundary:
                new_static_lower.append(l)
                new_static_lower.append(-11)
            new_static_upper = []
            for l in self.sl_static_obstacle_upper_boundary:
                new_static_upper.append(l)
                new_static_upper.append(11)
            sl_static_obstacle_lower_boundary.set_xdata(new_sampled_s)
            sl_static_obstacle_lower_boundary.set_ydata(new_static_lower)
            sl_static_obstacle_upper_boundary.set_xdata(new_sampled_s)
            sl_static_obstacle_upper_boundary.set_ydata(new_static_upper)
            sl_path.set_xdata(self.sl_path_s)
            sl_path.set_ydata(self.sl_path_l)
            sl_aggregated_boundary_low_line.set_xdata(
                self.sl_aggregated_boundary_s)
            sl_aggregated_boundary_low_line.set_ydata(
                self.sl_aggregated_boundary_low_l)
            sl_aggregated_boundary_high_line.set_xdata(
                self.sl_aggregated_boundary_s)
            sl_aggregated_boundary_high_line.set_ydata(
                self.sl_aggregated_boundary_high_l)

    def replot_st_data(self, boundaries_pool, st_line,
                       obstacle_annotation_pool, st_graph_name):
        """Copy cached st data for *st_graph_name* into pooled artists.

        No-op when the graph has no cached boundary or curve data.
        """
        if st_graph_name not in self.st_data_boundary_s:
            return
        if st_graph_name not in self.st_curve_s:
            return
        cnt = 0
        with self.st_data_lock:
            st_graph_boundary_s = self.st_data_boundary_s[st_graph_name]
            st_graph_boundary_t = self.st_data_boundary_t[st_graph_name]
            st_boundary_type = self.st_data_boundary_type[st_graph_name]
            for boundary_name in st_graph_boundary_s.keys():
                if cnt >= len(boundaries_pool):
                    # str() fix: the original concatenated str + int, which
                    # raised TypeError while holding st_data_lock.
                    print("WARNING: number of path lines is more than "
                          + str(len(boundaries_pool)))
                    continue
                boundary = boundaries_pool[cnt]
                boundary.set_visible(True)
                boundary.set_xdata(st_graph_boundary_t[boundary_name])
                boundary.set_ydata(st_graph_boundary_s[boundary_name])
                # Annotate at the polygon centroid; the last vertex repeats
                # the first (see compute_st_data), hence len - 1.
                center_t = 0
                center_s = 0
                for i in range(len(st_graph_boundary_t[boundary_name]) - 1):
                    center_s += st_graph_boundary_s[boundary_name][i]
                    center_t += st_graph_boundary_t[boundary_name][i]
                center_s /= float(len(st_graph_boundary_s[boundary_name]) - 1)
                center_t /= float(len(st_graph_boundary_t[boundary_name]) - 1)
                annotation = obstacle_annotation_pool[cnt]
                annotation.set_visible(True)
                annotation.set_text(boundary_name + "_"
                                    + st_boundary_type[boundary_name]
                                    .replace("ST_BOUNDARY_TYPE_", ""))
                annotation.set_x(center_t)
                annotation.set_y(center_s)
                cnt += 1
            st_line.set_visible(True)
            st_line.set_xdata(self.st_curve_t[st_graph_name])
            st_line.set_ydata(self.st_curve_s[st_graph_name])
            st_line.set_label(st_graph_name[0:5])

    def compute_path_data(self):
        """Cache (x, y) samples of every debug path, keyed by path name."""
        path_data_x = {}
        path_data_y = {}
        for path_debug in self.planning_pb.debug.planning_data.path:
            name = path_debug.name
            path_data_x[name] = []
            path_data_y[name] = []
            for path_point in path_debug.path_point:
                path_data_x[name].append(path_point.x)
                path_data_y[name].append(path_point.y)
        with self.path_data_lock:
            self.path_data_x = path_data_x
            self.path_data_y = path_data_y

    def replot_path_data(self, path_lines):
        """Copy cached path data into the pooled *path_lines* artists.

        Paths beyond the pool size trigger a warning; paths with fewer
        than two points are skipped.
        """
        cnt = 0
        with self.path_data_lock:
            for name in self.path_data_x.keys():
                if cnt >= len(path_lines):
                    # str() fix: original raised TypeError (str + int).
                    print("WARNING: number of path lines is more than "
                          + str(len(path_lines)))
                    continue
                if len(self.path_data_x[name]) <= 1:
                    continue
                line = path_lines[cnt]
                line.set_visible(True)
                line.set_xdata(self.path_data_x[name])
                line.set_ydata(self.path_data_y[name])
                line.set_label(name[0:5])
                cnt += 1

    def compute_speed_data(self):
        """Cache (t, v) samples of each speed plan plus the published
        trajectory under the key "final_speed_output"."""
        speed_data_time = {}
        speed_data_val = {}
        for speed_plan in self.planning_pb.debug.planning_data.speed_plan:
            name = speed_plan.name
            speed_data_time[name] = []
            speed_data_val[name] = []
            for speed_point in speed_plan.speed_point:
                speed_data_time[name].append(speed_point.t)
                speed_data_val[name].append(speed_point.v)
        name = "final_speed_output"
        speed_data_time[name] = []
        speed_data_val[name] = []
        for traj_point in self.planning_pb.trajectory_point:
            speed_data_time[name].append(traj_point.relative_time)
            speed_data_val[name].append(traj_point.v)
        with self.speed_data_lock:
            self.speed_data_time = speed_data_time
            self.speed_data_val = speed_data_val

    def replot_speed_data(self, speed_lines):
        """Copy cached speed data into the pooled *speed_lines* artists."""
        cnt = 0
        with self.speed_data_lock:
            for name in self.speed_data_time.keys():
                if cnt >= len(speed_lines):
                    # str() fix: original raised TypeError (str + int).
                    print("WARNING: number of speed lines is more than "
                          + str(len(speed_lines)))
                    continue
                if len(self.speed_data_time[name]) <= 1:
                    continue
                line = speed_lines[cnt]
                line.set_visible(True)
                line.set_xdata(self.speed_data_time[name])
                line.set_ydata(self.speed_data_val[name])
                line.set_label(name[0:5])
                cnt += 1
| 0
|
apollo_public_repos/apollo/modules/tools/mapshow
|
apollo_public_repos/apollo/modules/tools/mapshow/libs/subplot_st_main.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
class StMainSubplot:
    """s-t subplot: obstacle st-boundaries, the planned st curve and the
    qp kernel cruise/follow reference lines for one named st-graph."""

    def __init__(self, ax, st_name):
        """Create all artists on *ax* for st-graph *st_name*."""
        self.st_curve_line, = ax.plot([0], [0], "k.", lw=3, alpha=0.5)
        self.kernel_cruise_line, = ax.plot([0], [0], "g.", lw=3, alpha=0.5)
        self.kernel_follow_line, = ax.plot([0], [0], "y.", lw=3, alpha=0.5)
        # Fixed-size pools of artists reused for obstacle boundaries.
        self.obstacle_boundary_lines = []
        self.obstacle_annotations = []
        self.obstacle_boundary_size = 10
        for i in range(self.obstacle_boundary_size):
            self.obstacle_boundary_lines.append(
                ax.plot([0], [0], "r-", lw=1, alpha=1)[0])
            self.obstacle_annotations.append(ax.text(0, 0, ""))
        self.st_name = st_name
        ax.set_xlim(-3, 9)
        ax.set_ylim(-10, 220)
        ax.set_xlabel("t (second)")
        ax.set_ylabel("s (m)")
        ax.set_title(st_name)
        self.set_visible(False)

    def set_visible(self, visible):
        """Toggle visibility of every artist owned by this subplot."""
        self.st_curve_line.set_visible(visible)
        self.kernel_cruise_line.set_visible(visible)
        self.kernel_follow_line.set_visible(visible)
        for line in self.obstacle_boundary_lines:
            line.set_visible(visible)
        for text in self.obstacle_annotations:
            text.set_visible(visible)

    def show(self, planning):
        """Refresh every artist from *planning*'s cached st data.

        No-op when this subplot's st-graph is absent from the cache.
        """
        self.set_visible(False)
        # "with" guarantees the lock is released on every exit path.
        with planning.st_data_lock:
            if self.st_name not in planning.st_data_boundary_s:
                return
            obstacles_boundary_s = planning.st_data_boundary_s[self.st_name]
            obstacles_boundary_t = planning.st_data_boundary_t[self.st_name]
            obstacles_type = planning.st_data_boundary_type[self.st_name]
            # NOTE(review): cnt starts at 1, so pool slot 0 is never used
            # and capacity is effectively 9 — confirm this is intentional.
            cnt = 1
            for boundary_name in obstacles_boundary_s.keys():
                if cnt >= self.obstacle_boundary_size:
                    # str() fix: the original concatenated str + int, which
                    # raised TypeError on pool overflow.
                    print("WARNING: number of path lines is more than "
                          + str(self.obstacle_boundary_size))
                    continue
                boundary = self.obstacle_boundary_lines[cnt]
                boundary.set_visible(True)
                boundary.set_xdata(obstacles_boundary_t[boundary_name])
                boundary.set_ydata(obstacles_boundary_s[boundary_name])
                # Annotate at the polygon centroid; the cached boundary
                # repeats its first vertex at the end, hence len - 1.
                center_t = 0
                center_s = 0
                for i in range(len(obstacles_boundary_t[boundary_name]) - 1):
                    center_s += obstacles_boundary_s[boundary_name][i]
                    center_t += obstacles_boundary_t[boundary_name][i]
                center_s /= float(len(obstacles_boundary_s[boundary_name]) - 1)
                center_t /= float(len(obstacles_boundary_t[boundary_name]) - 1)
                annotation = self.obstacle_annotations[cnt]
                annotation.set_visible(True)
                annotation.set_text(boundary_name + "_"
                                    + obstacles_type[boundary_name]
                                    .replace("ST_BOUNDARY_TYPE_", ""))
                annotation.set_x(center_t)
                annotation.set_y(center_s)
                cnt += 1
            self.st_curve_line.set_visible(True)
            self.st_curve_line.set_xdata(planning.st_curve_t[self.st_name])
            self.st_curve_line.set_ydata(planning.st_curve_s[self.st_name])
            self.st_curve_line.set_label(self.st_name[0:5])
            self.kernel_cruise_line.set_visible(True)
            self.kernel_cruise_line.set_xdata(
                planning.kernel_cruise_t[self.st_name])
            self.kernel_cruise_line.set_ydata(
                planning.kernel_cruise_s[self.st_name])
            self.kernel_follow_line.set_visible(True)
            self.kernel_follow_line.set_xdata(
                planning.kernel_follow_t[self.st_name])
            self.kernel_follow_line.set_ydata(
                planning.kernel_follow_s[self.st_name])
| 0
|
apollo_public_repos/apollo/modules/tools/mapshow
|
apollo_public_repos/apollo/modules/tools/mapshow/libs/subplot_path.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from modules.tools.mapshow.libs.map import Map
class PathSubplot:
    """Bird's-eye subplot: map lanes, planned paths, the planning init
    point and the vehicle pose, with a zoomable viewport."""

    def __init__(self, ax, map_file=None):
        """Create artists on *ax*; optionally draw lanes from *map_file*."""
        self.ax = ax
        # Half-width (m) of the viewport centered on the vehicle.
        self.map_width = 20
        if map_file is not None:
            # Renamed from "map" to avoid shadowing the builtin.
            base_map = Map()
            base_map.load(map_file)
            base_map.draw_lanes(ax, False, [])
        self.path_lines = []
        self.path_lines_size = 3
        colors = ['b', 'g', 'r', 'k']
        for i in range(self.path_lines_size):
            line, = ax.plot(
                [0], [0],
                colors[i % len(colors)],
                lw=3 + i * 3,
                alpha=0.4)
            self.path_lines.append(line)
        self.vehicle_position_line, = ax.plot([0], [0], 'go', alpha=0.3)
        self.vehicle_polygon_line, = ax.plot([0], [0], 'g-')
        self.init_point_line, = ax.plot([0], [0], 'ro', alpha=0.3)
        self.set_visible(False)
        ax.set_title("PLANNING PATH")

    def set_visible(self, visible):
        """Apply *visible* to the path lines; vehicle and init-point
        artists are always hidden here and re-shown by show()."""
        for line in self.path_lines:
            line.set_visible(visible)
        self.vehicle_position_line.set_visible(False)
        self.vehicle_polygon_line.set_visible(False)
        self.init_point_line.set_visible(False)

    def show(self, planning, localization):
        """Redraw planned paths, init point and vehicle pose, then center
        the viewport on the latest localization position."""
        cnt = 0
        with planning.path_data_lock:
            for name in planning.path_data_x.keys():
                if cnt >= self.path_lines_size:
                    print("WARNING: number of path lines is more than "
                          + str(self.path_lines_size))
                    continue
                if len(planning.path_data_x[name]) <= 1:
                    continue
                path_line = self.path_lines[cnt]
                path_line.set_visible(True)
                path_line.set_xdata(planning.path_data_x[name])
                path_line.set_ydata(planning.path_data_y[name])
                path_line.set_label(name[0:5])
                cnt += 1
        with planning.init_point_lock:
            self.init_point_line.set_xdata(planning.init_point_x)
            self.init_point_line.set_ydata(planning.init_point_y)
            self.init_point_line.set_visible(True)
        with localization.localization_data_lock:
            self.draw_vehicle(localization)
            try:
                self.ax.set_xlim(
                    localization.localization_pb.pose.position.x
                    - self.map_width,
                    localization.localization_pb.pose.position.x
                    + self.map_width)
                self.ax.set_ylim(
                    localization.localization_pb.pose.position.y
                    - self.map_width,
                    localization.localization_pb.pose.position.y
                    + self.map_width)
            except Exception:
                # No pose yet (localization_pb may be None): keep the old
                # view.  Narrowed from a bare except that also swallowed
                # KeyboardInterrupt/SystemExit.
                pass
        self.ax.autoscale_view()
        self.ax.relim()
        self.ax.legend(loc="upper left", borderaxespad=0., ncol=5)

    def zoom_in(self):
        """Shrink the viewport by 20 m, down to a 20 m half-width."""
        if self.map_width > 20:
            self.map_width -= 20

    def zoom_out(self):
        """Grow the viewport by 20 m, up to a 200 m half-width."""
        if self.map_width < 200:
            self.map_width += 20

    def draw_vehicle(self, localization):
        """Draw the vehicle position dot and footprint polygon; no-op
        when no localization message has arrived yet."""
        if localization.localization_pb is None:
            return
        self.vehicle_position_line.set_visible(True)
        self.vehicle_polygon_line.set_visible(True)
        pose = localization.localization_pb.pose
        self.vehicle_position_line.set_xdata([pose.position.x])
        self.vehicle_position_line.set_ydata([pose.position.y])
        position = [pose.position.x, pose.position.y, pose.position.z]
        polygon = localization.get_vehicle_polygon(position, pose.heading)
        px = [point[0] for point in polygon]
        py = [point[1] for point in polygon]
        self.vehicle_polygon_line.set_xdata(px)
        self.vehicle_polygon_line.set_ydata(py)
| 0
|
apollo_public_repos/apollo/modules/tools/mapshow
|
apollo_public_repos/apollo/modules/tools/mapshow/libs/plot_st.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import matplotlib.animation as animation
import matplotlib.pyplot as plt
from cyber.python.cyber_py3 import cyber
from modules.common_msgs.planning_msgs import planning_pb2
from modules.tools.mapshow.libs.planning import Planning
from modules.tools.mapshow.libs.subplot_st_main import StMainSubplot
from modules.tools.mapshow.libs.subplot_st_speed import StSpeedSubplot
# Shared Planning cache: written by the cyber subscriber callback,
# read by the matplotlib animation callback.
planning = Planning()
def update(frame_number):
    """Animation callback: redraw both st subplots from the shared cache.

    frame_number is supplied by FuncAnimation and unused here.
    """
    st_main_subplot.show(planning)
    st_speed_subplot.show(planning)
def planning_callback(planning_pb):
    """Reader callback: cache the new planning message and refresh the
    derived st data used by the subplots."""
    planning.update_planning_pb(planning_pb)
    planning.compute_st_data()
def add_listener():
    """Create the cyber node and subscribe to /apollo/planning."""
    planning_sub = cyber.Node("st_plot")
    planning_sub.create_reader('/apollo/planning', planning_pb2.ADCTrajectory,
                               planning_callback)
def press_key():
    """Key-press handler placeholder; no keyboard shortcuts are bound."""
    pass
if __name__ == '__main__':
    # Set up cyber, build a 1x2 figure (st main | st speed) and animate
    # the subplots every 100 ms until the window is closed.
    cyber.init()
    add_listener()
    fig = plt.figure(figsize=(14, 6))
    fig.canvas.mpl_connect('key_press_event', press_key)
    ax = plt.subplot2grid((1, 2), (0, 0))
    st_main_subplot = StMainSubplot(ax, 'QpSplineStSpeedOptimizer')
    ax2 = plt.subplot2grid((1, 2), (0, 1))
    st_speed_subplot = StSpeedSubplot(ax2, "QpSplineStSpeedOptimizer")
    ani = animation.FuncAnimation(fig, update, interval=100)
    plt.show()
    cyber.shutdown()
| 0
|
apollo_public_repos/apollo/modules/tools/mapshow
|
apollo_public_repos/apollo/modules/tools/mapshow/libs/BUILD
|
# Bazel targets for the mapshow plotting libraries: one py_library per
# module, with the plot_* aggregator targets depending on the subplot
# libraries they compose.
load("@rules_python//python:defs.bzl", "py_library")
package(default_visibility = ["//visibility:public"])
py_library(
    name = "localization",
    srcs = ["localization.py"],
    deps = [
        "//modules/common_msgs/localization_msgs:localization_py_pb2",
        "//modules/tools/common:proto_utils",
    ],
)
py_library(
    name = "map",
    srcs = ["map.py"],
    deps = [
        "//modules/common_msgs/map_msgs:map_py_pb2",
        "//modules/tools/common:proto_utils",
    ],
)
py_library(
    name = "path",
    srcs = ["path.py"],
)
py_library(
    name = "planning",
    srcs = ["planning.py"],
    deps = [
        "//modules/common_msgs/planning_msgs:planning_internal_py_pb2",
    ],
)
py_library(
    name = "plot_planning",
    srcs = ["plot_planning.py"],
    deps = [
        ":localization",
        ":planning",
        ":subplot_path",
        ":subplot_sl_main",
        ":subplot_speed",
        ":subplot_st_main",
        ":subplot_st_speed",
        "//cyber/python/cyber_py3:cyber",
        "//modules/common_msgs/planning_msgs:planning_py_pb2",
    ],
)
py_library(
    name = "plot_smoothness",
    srcs = ["plot_smoothness.py"],
    deps = [
        ":planning",
        ":subplot_traj_acc",
        ":subplot_traj_path",
        ":subplot_traj_speed",
        "//cyber/python/cyber_py3:cyber",
        "//modules/common_msgs/planning_msgs:planning_py_pb2",
    ],
)
py_library(
    name = "plot_st",
    srcs = ["plot_st.py"],
    deps = [
        ":planning",
        ":subplot_st_main",
        ":subplot_st_speed",
        "//cyber/python/cyber_py3:cyber",
        "//modules/common_msgs/planning_msgs:planning_py_pb2",
    ],
)
py_library(
    name = "subplot_path",
    srcs = ["subplot_path.py"],
    deps = [
        ":map",
    ],
)
py_library(
    name = "subplot_sl_main",
    srcs = ["subplot_sl_main.py"],
)
py_library(
    name = "subplot_speed",
    srcs = ["subplot_speed.py"],
)
py_library(
    name = "subplot_st_main",
    srcs = ["subplot_st_main.py"],
)
py_library(
    name = "subplot_st_speed",
    srcs = ["subplot_st_speed.py"],
)
py_library(
    name = "subplot_traj_acc",
    srcs = ["subplot_traj_acc.py"],
)
py_library(
    name = "subplot_traj_path",
    srcs = ["subplot_traj_path.py"],
)
py_library(
    name = "subplot_traj_speed",
    srcs = ["subplot_traj_speed.py"],
)
| 0
|
apollo_public_repos/apollo/modules/tools/mapshow
|
apollo_public_repos/apollo/modules/tools/mapshow/libs/path.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
class Path:
    """Draws recorded paths from CSV-like files onto a matplotlib axis."""

    def __init__(self, path_files):
        # Files whose rows look like "<tag>,<x>,<y>,..." — columns 1 and 2
        # are the coordinates plotted by draw().
        self.path_files = path_files

    def draw(self, ax):
        """Plot every point of every path file as one dashed black line."""
        all_x = []
        all_y = []
        for file_name in self.path_files:
            with open(file_name, 'r') as source:
                for row in source:
                    fields = row.split(',')
                    all_x.append(float(fields[1]))
                    all_y.append(float(fields[2]))
        ax.plot(all_x, all_y, ls='--', c='k', alpha=0.5)
| 0
|
apollo_public_repos/apollo/modules/tools
|
apollo_public_repos/apollo/modules/tools/mock_routing/mock_routing_request.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
Insert routing request
Usage:
mock_routing_request.py
"""
import argparse
import os
import sys
import time
from cyber.python.cyber_py3 import cyber
from cyber.python.cyber_py3 import cyber_time
from modules.common_msgs.routing_msgs import routing_pb2
def main():
    """Publish one hard-coded two-waypoint RoutingRequest on the routing topic."""
    cyber.init()
    node = cyber.Node("mock_routing_requester")

    request = routing_pb2.RoutingRequest()
    request.header.timestamp_sec = cyber_time.Time.now().to_sec()
    request.header.module_name = 'routing_request'
    sequence_num = 0
    request.header.sequence_num = sequence_num
    sequence_num = sequence_num + 1

    # Start waypoint on lane '1-1'.
    start = request.waypoint.add()
    start.pose.x = 587696.82286
    start.pose.y = 4141446.66696
    start.id = '1-1'
    start.s = 1

    # End waypoint further along the same lane.
    end = request.waypoint.add()
    end.pose.x = 586948.740120
    end.pose.y = 4141171.118641
    end.id = '1-1'
    end.s = 80

    writer = node.create_writer('/apollo/routing_request',
                                routing_pb2.RoutingRequest)
    # Give the writer time to establish its connection before publishing.
    time.sleep(2.0)
    print("routing_request", request)
    writer.write(request)
if __name__ == '__main__':
main()
| 0
|
apollo_public_repos/apollo/modules/tools
|
apollo_public_repos/apollo/modules/tools/mock_routing/BUILD
|
# Bazel build rules for the mock routing request helper tool.
load("@rules_python//python:defs.bzl", "py_binary")
load("//tools/install:install.bzl", "install")

package(default_visibility = ["//visibility:public"])

# Stand-alone script that publishes a hard-coded routing request.
py_binary(
    name = "mock_routing_request",
    srcs = ["mock_routing_request.py"],
    deps = [
        "//cyber/python/cyber_py3:cyber",
        "//cyber/python/cyber_py3:cyber_time",
        "//modules/common_msgs/routing_msgs:routing_py_pb2",
    ],
)

# Installs the binary under tools/mock_routing in release images.
install(
    name = "install",
    py_dest = "tools/mock_routing",
    targets = [
        ":mock_routing_request",
    ]
)
| 0
|
apollo_public_repos/apollo/modules/tools
|
apollo_public_repos/apollo/modules/tools/amodel/amodel.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2022 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
amodel main function
"""
import argparse
import logging
import sys
from model_manage import (
amodel_install,
amodel_remove,
amodel_list,
amodel_info)
def main(args=sys.argv):
    """Parse the amodel command line and dispatch to the chosen sub-command.

    Args:
        args (list): argv-style argument list; defaults to sys.argv.
    """
    parser = argparse.ArgumentParser(
        description="Apollo perception model management tool.",
        prog="main.py")
    parser.add_argument(
        "command", action="store", choices=['list', 'info', 'install', 'remove'],
        type=str, nargs="?", const="", help="amodel command list.")
    parser.add_argument(
        "model_name", action="store", type=str, nargs="?", const="",
        help="model name or install path.")
    parser.add_argument(
        "-a", "--all", action="store", type=str, required=False,
        nargs="?", const="", help="Show all models.")
    parsed = parser.parse_args(args[1:])
    logging.debug(parsed)

    command = parsed.command
    if command == "list":
        amodel_list()
    elif command == "info":
        amodel_info(parsed.model_name)
    elif command == "install":
        amodel_install(parsed.model_name)
    elif command == "remove":
        amodel_remove(parsed.model_name)
    else:
        # No (or unrecognized) command: show usage.
        parser.print_help()
if __name__ == "__main__":
main()
| 0
|
apollo_public_repos/apollo/modules/tools
|
apollo_public_repos/apollo/modules/tools/amodel/readme.md
|
## amodel
amodel is Apollo's model deployment and management tool.
## Set environment
If you are running in Apollo docker, you can skip this step. If you are running outside of docker, the following environment needs to be set up.
```shell
export APOLLO_ROOT_DIR=your_apollo_dir
```
## How it works
amodel provides the following commands.
- list. Show models installed in Apollo.
- info. Show details of a model.
- install. Install the model to Apollo.
- remove. Remove the model from Apollo.
#### List
You can get the installed models in Apollo through the `list` command.
```shell
$ python3 modules/tools/amodel/main.py list
Name |Task_type |Sensor_type |Framework |Date
mask_pillars |3d_detection |lidar |paddlepaddle |2021-07-30
center_point |3d_detection |lidar |paddlepaddle |2022-07-22
point_pillars |3d_detection |lidar |paddlepaddle |2020-12-15
cnnseg16 |3d_segmentation |lidar |paddlepaddle |2018-10-14
cnnseg128 |3d_segmentation |lidar |paddlepaddle |2020-06-17
cnnseg64 |3d_segmentation |lidar |paddlepaddle |2019-05-29
smoke |3d_detection |camera |paddlepaddle |2019-06-27
3d-yolo |3d_detection |camera |paddlepaddle |2019-12-08
denseline |lane_detection |camera |paddlepaddle |2019-05-29
darkSCNN |lane_detection |camera |paddlepaddle |2020-12-15
tl_detection |tl_detection |camera |paddlepaddle |2021-01-15
tl_recognition |tl_recognition |camera |paddlepaddle |2021-01-15
```
#### Info
Then you can use the `info` command to learn more about the details of the model.
```shell
$ python3 modules/tools/amodel/main.py info point_pillars
name: point_pillars
date: 2020-12-15
task_type: 3d_detection
sensor_type: lidar
framework: paddlepaddle
model_files:
- name: pfe.onnx
size: 4125
- name: pts_backbone.zip
size: 16945051
- name: pts_bbox_head.zip
size: 121150
- name: pts_middle_encoder.zip
size: 3763
- name: pts_neck.zip
size: 2420625
- name: pts_voxel_encoder.zip
size: 17575
- name: rpn.onnx
size: 18300546
dataset:
- waymo
- kitti
- nusense
```
#### Install
You can deploy the model using the `install` command.
```shell
# Install from local
python3 modules/tools/amodel/main.py install xxx.zip
# Install from http
python3 modules/tools/amodel/main.py install https://xxx.zip
```
#### Remove
You can delete models installed in Apollo with the `remove` command.
```shell
python3 modules/tools/amodel/main.py remove point_pillars
```
| 0
|
apollo_public_repos/apollo/modules/tools
|
apollo_public_repos/apollo/modules/tools/amodel/model_meta.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2022 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
amodel meta implement
"""
import logging
import os
import yaml
class ModelMeta(object):
    """In-memory representation of a model's deploy yaml metadata.

    Args:
        object (_type_): base object
    """

    def __init__(self, name=None, date=None, task_type=None, sensor_type=None,
                 framework=None, model=None, dataset=None) -> None:
        self.name = name
        self.date = date
        self.task_type = task_type
        self.sensor_type = sensor_type
        self.framework = framework
        self.model = model
        self.dataset = dataset
        # Raw yaml document, retained so save_to/__str__ round-trip losslessly.
        self._raw_model_meta = None

    def parse_from(self, meta_file_path):
        """Parse model meta from a yaml file.

        Args:
            meta_file_path (str): meta yaml file

        Returns:
            bool: success or fail
        """
        if not os.path.isfile(meta_file_path):
            return False
        with open(meta_file_path, 'r') as meta_fp:
            self._raw_model_meta = yaml.safe_load(meta_fp)
        logging.debug(self._raw_model_meta)
        raw = self._raw_model_meta
        self.name = raw["name"]
        self.date = raw["date"]
        self.task_type = raw["task_type"]
        self.sensor_type = raw["sensor_type"]
        self.framework = raw["framework"]
        self.model = raw["model"]
        self.dataset = raw["dataset"]
        return True

    def save_to(self, meta_file_path):
        """Write the raw meta document back out as yaml.

        Args:
            meta_file_path (str): meta yaml file
        """
        with open(meta_file_path, 'w') as meta_fp:
            yaml.safe_dump(self._raw_model_meta, meta_fp, sort_keys=False)

    def __str__(self) -> str:
        """Yaml rendering of the raw meta document.

        Returns:
            str: model meta content
        """
        return yaml.safe_dump(self._raw_model_meta, sort_keys=False)
| 0
|
apollo_public_repos/apollo/modules/tools
|
apollo_public_repos/apollo/modules/tools/amodel/model_manage.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2022 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
amodel command implement
"""
import logging
import os
import requests
import shutil
import zipfile
from model_meta import ModelMeta
# APOLLO_ROOT_DIR
# Workspace root; falls back to the in-container default /apollo.
WORKSPACE_PATH = os.getenv('APOLLO_ROOT_DIR', '/apollo')
# MODEL_META_FILE_NAME
# Every deployable model archive must carry this yaml at its top level.
MODEL_META_FILE_NAME = "apollo_deploy.yaml"
# Install tmp path
DOWNLOAD_TMP_DIR = "/tmp/"
UNZIP_TMP_DIR = "/tmp/amodel/extract_path"
# Model install paths
# Keyed by "<task_type>_<sensor_type>", the same key built in
# get_install_path_by_meta().
MODEL_INSTALL_PATH = {
    "3d_detection_lidar": "modules/perception/production/data/perception/lidar/models/detection",
    "3d_segmentation_lidar": "modules/perception/production/data/perception/lidar/models/cnnseg",
    "3d_detection_camera": "modules/perception/production/data/perception/camera/models/yolo_obstacle_detector",
    "lane_detection_camera": "modules/perception/production/data/perception/camera/models/lane_detector",
    "tl_detection_camera": "modules/perception/production/data/perception/camera/models/traffic_light_detection",
    "tl_recognition_camera": "modules/perception/production/data/perception/camera/models/traffic_light_recognition",
}
# Frame abbreviation
# Maps the framework string from the meta yaml to a directory-name suffix.
# NOTE(review): keys here are CamelCase (e.g. "PaddlePaddle") while sample
# metas elsewhere show lowercase "paddlepaddle" — confirm which form the
# deploy yaml actually uses; a mismatch raises KeyError at install time.
FRAMEWORK_ABBREVIATION = {
    "Caffe": "caffe",
    "PaddlePaddle": "paddle",
    "PyTorch": "torch",
    "TensorFlow": "tf"}
'''
amodel_list
'''
def get_model_metas(model_install_path):
    """Collect model metas from every immediate child directory of a path.

    Args:
        model_install_path (str): directory holding one sub-directory per model

    Returns:
        list: parsed model meta objects (empty when the path does not exist)
    """
    metas = []
    if not os.path.isdir(model_install_path):
        return metas
    # Each installed model lives in its own sub-directory that carries
    # a MODEL_META_FILE_NAME yaml describing it.
    for entry in os.listdir(model_install_path):
        candidate_dir = os.path.join(model_install_path, entry)
        if not os.path.isdir(candidate_dir):
            continue
        meta = ModelMeta()
        if meta.parse_from(os.path.join(candidate_dir, MODEL_META_FILE_NAME)):
            metas.append(meta)
    return metas
def get_all_model_metas():
    """Gather the metas of every model under all known install roots.

    Returns:
        list: meta objects for all installed models
    """
    all_metas = []
    for install_path in MODEL_INSTALL_PATH.values():
        absolute_path = os.path.join(WORKSPACE_PATH, install_path)
        all_metas.extend(get_model_metas(absolute_path))
    return all_metas
def display_model_list(model_metas):
    """Print a fixed-width table of the installed models.

    Args:
        model_metas (list): meta objects to display
    """
    prefix_fmt = "{:<20}|{:<20}|{:<20}|{:<20}|"
    # Header row.
    print((prefix_fmt + "{:<20}").format(
        "Name", "Task_type", "Sensor_type", "Framework", "Date"))
    # One row per model; the date column is rendered as YYYY-MM-DD.
    for meta in model_metas:
        print((prefix_fmt + "{:%Y-%m-%d}").format(
            meta.name, meta.task_type, meta.sensor_type,
            meta.framework, meta.date))
def amodel_list():
    """amodel list command: print a table of every installed model."""
    display_model_list(get_all_model_metas())
'''
amodel_install
'''
def is_url(url_str):
    """Return True when the argument looks like an http(s) URL.

    Args:
        url_str (str): candidate local path or URL

    Returns:
        bool: True for an http:// or https:// prefix, False otherwise
    """
    return url_str.startswith(('https://', 'http://'))
def download_from_url(url):
    """Download file from url

    Streams the response into DOWNLOAD_TMP_DIR so large model archives do
    not have to fit in memory; the local file is named after the last URL
    path segment.

    Args:
        url (str): url to download

    Returns:
        file: download file's path

    Raises:
        requests.HTTPError: when the server responds with an error status.
    """
    local_filename = url.split('/')[-1]
    download_file = os.path.join(DOWNLOAD_TMP_DIR, local_filename)
    with requests.get(url, stream=True) as r:
        r.raise_for_status()
        with open(download_file, 'wb') as f:
            # 8 KiB chunks keep memory bounded while writing to disk.
            for chunk in r.iter_content(chunk_size=8192):
                f.write(chunk)
    return download_file
def unzip_file(file_path, extract_path):
    """Extract a zip archive into extract_path, replacing any previous copy.

    Args:
        file_path (str): zip archive to extract
        extract_path (str): destination directory (recreated from scratch)

    Returns:
        bool: False when file_path is not a regular file, True otherwise
    """
    if not os.path.isfile(file_path):
        return False
    # Drop stale content left over from a previous extraction.
    if os.path.isdir(extract_path):
        shutil.rmtree(extract_path)
    with zipfile.ZipFile(file_path, 'r') as archive:
        archive.extractall(extract_path)
    return True
def get_install_path_by_meta(model_meta):
    """Compute the final install directory for a model.

    The layout is <workspace>/<per-task root>/<name>_<framework abbreviation>.

    Args:
        model_meta (object): model's meta

    Returns:
        str: model's install path
    """
    task_key = "{}_{}".format(model_meta.task_type, model_meta.sensor_type)
    dir_name = "{}_{}".format(
        model_meta.name, FRAMEWORK_ABBREVIATION[model_meta.framework])
    return os.path.join(WORKSPACE_PATH, MODEL_INSTALL_PATH[task_key], dir_name)
def install_model(model_meta, extract_path):
    """Move an extracted model directory into its final install location.

    Prompts for confirmation before overwriting an already-installed model.

    Args:
        model_meta (object): model's meta
        extract_path (str): directory the archive was extracted to

    Returns:
        bool: True on success, False when the user declines the overwrite
    """
    install_path = get_install_path_by_meta(model_meta)
    logging.debug("install_model: {} -> {}".format(extract_path, install_path))
    if os.path.isdir(install_path):
        prompt = "Model already exists!!! Do you want to override {}? [y/n]:".format(
            model_meta.name)
        if not user_confirmation(prompt):
            return False
        shutil.rmtree(install_path)
    shutil.move(extract_path, install_path)
    return True
def amodel_install(model_path):
    """amodel install command: deploy a model zip from a local path or URL.

    Args:
        model_path (str): model's zip file path or http(s) URL
    """
    if not model_path:
        print("Input file is empty!!!")
        return
    # Fetch remote archives first so the rest of the flow sees a local file.
    if is_url(model_path):
        try:
            model_path = download_from_url(model_path)
        except Exception as e:
            print("Download {} failed! {}".format(model_path, e))
            return
    # The archive is expected to contain a top-level directory named after
    # the zip file (without extension).
    model_name = os.path.split(model_path)[1].split('.')[0]
    if not unzip_file(model_path, UNZIP_TMP_DIR):
        print("Zip file {} not found.".format(model_path))
        return
    # Read the deploy meta shipped inside the archive.
    model_meta = ModelMeta()
    meta_file = os.path.join(UNZIP_TMP_DIR, model_name, MODEL_META_FILE_NAME)
    if not model_meta.parse_from(meta_file):
        print("Meta file {} not found!".format(meta_file))
        return
    # Move the extracted tree into its install location.
    extract_path = os.path.join(UNZIP_TMP_DIR, model_name)
    if install_model(model_meta, extract_path):
        print("Successed install {}.".format(model_meta.name))
    else:
        print("Failed install {}.".format(model_meta.name))
'''
amodel_remove
'''
def remove_model_from_path(model_meta):
    """Delete an installed model's directory.

    Args:
        model_meta (object): model's meta

    Returns:
        bool: True when the directory existed and was removed
    """
    install_path = get_install_path_by_meta(model_meta)
    logging.debug(install_path)
    if not os.path.isdir(install_path):
        return False
    shutil.rmtree(install_path)
    return True
def user_confirmation(question):
    """Ask a yes/no question on stdin, allowing up to three attempts.

    Args:
        question (str): prompt shown to the user

    Returns:
        bool: True on yes, False on no or after three unrecognized answers
    """
    for _ in range(3):
        answer = input(question).lower()
        if answer in ('yes', 'y'):
            return True
        if answer in ('no', 'n'):
            return False
    # Three unrecognized answers count as a refusal.
    return False
def amodel_remove(model_name):
    """amodel remove command: interactively uninstall a model by name.

    Args:
        model_name (str): the model need to remove
    """
    for meta in get_all_model_metas():
        if meta.name != model_name:
            continue
        if not user_confirmation(
                "Do you want to remove {}? [y/n]:".format(model_name)):
            print("Canceled remove {}.".format(model_name))
            return
        if remove_model_from_path(meta):
            print("Successed remove {}.".format(model_name))
        else:
            print("Failed remove {}.".format(model_name))
        return
    # Not found
    print("Not found {}, Please check if the name is correct.".format(model_name))
'''
amodel_info
'''
def display_model_info(model_meta):
    """Display model info

    Prints the meta's string form (its yaml rendering via ModelMeta.__str__).

    Args:
        model_meta (object): model's meta
    """
    print(model_meta)
def amodel_info(model_name):
    """amodel info command: print details of one installed model.

    Args:
        model_name (str): model's name
    """
    for meta in get_all_model_metas():
        if meta.name == model_name:
            display_model_info(meta)
            return
    # Not found
    print("Not found {}, Please check if the name is correct.".format(model_name))
| 0
|
apollo_public_repos/apollo/modules/tools/prediction
|
apollo_public_repos/apollo/modules/tools/prediction/fake_prediction/fake_prediction.cc
|
/******************************************************************************
* Copyright 2017 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
/**
* @file
*/
#include "cyber/component/timer_component.h"
#include "cyber/cyber.h"
#include "modules/common/adapters/adapter_gflags.h"
#include "modules/common/util/message_util.h"
#include "modules/common_msgs/prediction_msgs/prediction_obstacle.pb.h"
namespace apollo {
namespace prediction {
/**
* class FakePredictionComponent
* This class generates fake prediction messages. The prediction message only
* has valid headers.
*
* This tool is used to trigger modules that depends on prediction message.
*/
class FakePredictionComponent : public apollo::cyber::TimerComponent {
 public:
  // Creates the writer on the prediction topic; called once at startup.
  bool Init() override {
    prediction_writer_ =
        node_->CreateWriter<PredictionObstacles>(FLAGS_prediction_topic);
    return true;
  }

  // Timer callback: publishes an empty PredictionObstacles message with a
  // freshly filled header, so downstream modules see a live topic.
  bool Proc() override {
    auto prediction = std::make_shared<PredictionObstacles>();
    common::util::FillHeader("fake_prediction", prediction.get());
    prediction_writer_->Write(prediction);
    return true;
  }

 private:
  // Writer created in Init() and reused on every Proc() tick.
  std::shared_ptr<apollo::cyber::Writer<PredictionObstacles>>
      prediction_writer_;
};
CYBER_REGISTER_COMPONENT(FakePredictionComponent);
} // namespace prediction
} // namespace apollo
| 0
|
apollo_public_repos/apollo/modules/tools/prediction
|
apollo_public_repos/apollo/modules/tools/prediction/fake_prediction/fake_prediction.launch
|
<cyber>
<module>
<name>fake_prediction</name>
<dag_conf>/apollo/modules/tools/prediction/fake_prediction/fake_prediction.dag</dag_conf>
<process_name></process_name>
</module>
</cyber>
| 0
|
apollo_public_repos/apollo/modules/tools/prediction
|
apollo_public_repos/apollo/modules/tools/prediction/fake_prediction/fake_prediction.dag
|
module_config {
module_library : "/apollo/bazel-bin/modules/tools/prediction/fake_prediction/libfake_prediction_component.so"
timer_components {
class_name : "FakePredictionComponent"
config {
name: "fake_prediction"
interval: 100
}
}
}
| 0
|
apollo_public_repos/apollo/modules/tools/prediction
|
apollo_public_repos/apollo/modules/tools/prediction/fake_prediction/BUILD
|
# Bazel build rules for the fake prediction component.
load("@rules_cc//cc:defs.bzl", "cc_binary")
load("//tools:cpplint.bzl", "cpplint")
load("//tools/install:install.bzl", "install")

package(default_visibility = ["//visibility:public"])

# Ships the shared library into tools/lib in release images.
install(
    name = "install",
    library_dest = "tools/lib",
    targets = [":libfake_prediction_component.so"],
    visibility = ["//visibility:public"],
)

# Built as a shared object so the cyber class loader can dlopen it.
cc_binary(
    name = "libfake_prediction_component.so",
    srcs = ["fake_prediction.cc"],
    linkshared = True,
    linkstatic = False,
    deps = [
        "//cyber",
        "//modules/common/adapters:adapter_gflags",
        "//modules/common/util:util_tool",
        "//modules/common_msgs/prediction_msgs:prediction_obstacle_cc_proto",
    ],
)

cpplint()
| 0
|
apollo_public_repos/apollo/modules/tools/prediction
|
apollo_public_repos/apollo/modules/tools/prediction/data_pipelines/merge_h5.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import argparse
import datetime
import os
import numpy as np
import h5py
def getListOfFiles(dirName):
    """Recursively collect the full paths of all files under dirName."""
    collected = []
    for entry in os.listdir(dirName):
        full_path = os.path.join(dirName, entry)
        if os.path.isdir(full_path):
            collected.extend(getListOfFiles(full_path))
        else:
            collected.append(full_path)
    return collected
def load_hdf5(filename):
    """
    load training samples from *.hdf5 file

    Exits the process (os._exit) when the file is missing or does not have
    a .h5 extension. Returns the first dataset stored in the file.
    """
    if not(os.path.exists(filename)):
        print("file:", filename, "does not exist")
        os._exit(1)
    if os.path.splitext(filename)[1] != '.h5':
        print("file:", filename, "is not an hdf5 file")
        os._exit(1)
    h5_file = h5py.File(filename, 'r')
    # NOTE(review): the file handle is intentionally left open — the returned
    # h5py dataset is lazy and would become invalid if the file were closed.
    values = list(h5_file.values())[0]
    print("load data size:", values.shape[0])
    return values
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='generate training samples\
        from a specified directory')
    parser.add_argument('directory', type=str,
                        help='directory contains feature files in .h5')
    parser.add_argument('-n', '--npy', action='store_true',
                        help='if is .npy rather than .h5, use this.')
    args = parser.parse_args()
    path = args.directory
    if not args.npy:
        # Default mode: concatenate every .h5 feature file under `path`
        # into one merged dataset.
        print("load h5 from directory: {}".format(path))
        if os.path.isdir(path):
            features = None
            labels = None
            h5_files = getListOfFiles(path)
            print("Total number of files:", len(h5_files))
            for i, h5_file in enumerate(h5_files):
                print("Process File", i, ":", h5_file)
                feature = load_hdf5(h5_file)
                # NOTE(review): inf rows are only reported, not filtered out.
                if np.any(np.isinf(feature)):
                    print("inf data found")
                features = np.concatenate((features, feature), axis=0) if features is not None \
                    else feature
        else:
            print("Fail to find", path)
            os._exit(-1)
        # Output file is date-stamped, e.g. <path>/merged2020-12-15.h5.
        date = datetime.datetime.now().strftime('%Y-%m-%d')
        sample_file = path + '/merged' + date + '.h5'
        print("Save samples file to:", sample_file)
        h5_file = h5py.File(sample_file, 'w')
        h5_file.create_dataset('data', data=features)
        h5_file.close()
    else:
        # .npy mode: split rows into "go" (even last-column label) and
        # "cutin" (odd label), then write one merged .h5 per category.
        print("load npy from directory: {}".format(path))
        if os.path.isdir(path):
            features_go = None
            features_cutin = None
            npy_files = getListOfFiles(path)
            print("Total number of files:", len(npy_files))
            for i, npy_file in enumerate(npy_files):
                print("Process File", i, ":", npy_file)
                temp_features = np.load(npy_file)
                # Pre-size per-category buffers, then trim to the used rows.
                feature_go = np.zeros((temp_features.shape[0], 157))
                feature_cutin = np.zeros((temp_features.shape[0], 157))
                count_go = 0
                count_cutin = 0
                for j in range(temp_features.shape[0]):
                    fea = np.asarray(temp_features[j])
                    # Keep only well-formed rows: 157 columns, label in [-1, 4].
                    if fea.shape[0] != 157:
                        continue
                    if fea[-1] < -1 or fea[-1] > 4:
                        continue
                    fea = fea.reshape((1, 157))
                    if fea[0, -1] % 2 == 0:
                        feature_go[count_go] = fea
                        count_go += 1
                    else:
                        feature_cutin[count_cutin] = fea
                        count_cutin += 1
                feature_go = feature_go[:count_go]
                feature_cutin = feature_cutin[:count_cutin]
                features_go = np.concatenate((features_go, feature_go), axis=0) if features_go is not None \
                    else feature_go
                features_cutin = np.concatenate((features_cutin, feature_cutin), axis=0) if features_cutin is not None \
                    else feature_cutin
        else:
            print("Fail to find", path)
            os._exit(-1)
        print(features_go.shape)
        print(features_cutin.shape)
        date = datetime.datetime.now().strftime('%Y-%m-%d')
        sample_file_go = path + '/merged_go_' + date + '.h5'
        sample_file_cutin = path + '/merged_cutin_' + date + '.h5'
        h5_file_go = h5py.File(sample_file_go, 'w')
        h5_file_go.create_dataset('data', data=features_go)
        h5_file_go.close()
        h5_file_cutin = h5py.File(sample_file_cutin, 'w')
        h5_file_cutin.create_dataset('data', data=features_cutin)
        h5_file_cutin.close()
| 0
|
apollo_public_repos/apollo/modules/tools/prediction
|
apollo_public_repos/apollo/modules/tools/prediction/data_pipelines/cruise_h5_preprocessing.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
'''
This .py file includes functions for data preprocessing:
- splitting data into two categories: go and cut-in for separate training.
- balancing the datasets
'''
import argparse
import datetime
import os
import h5py
import numpy as np
def getListOfFiles(dirName):
    '''
    Given a directory (dirName), return a list containing the full-path
    of all files inside that directory (including all hierarchy).
    '''
    collected = []
    for entry in os.listdir(dirName):
        full_path = os.path.join(dirName, entry)
        if os.path.isdir(full_path):
            collected.extend(getListOfFiles(full_path))
        else:
            collected.append(full_path)
    return collected
def load_hdf5(filename):
    """
    load training samples from *.hdf5 file

    Exits the process (os._exit) when the file is missing or does not have
    a .h5 extension. Returns the first dataset stored in the file.
    """
    if not(os.path.exists(filename)):
        print("file:", filename, "does not exist")
        os._exit(1)
    if os.path.splitext(filename)[1] != '.h5':
        print("file:", filename, "is not an hdf5 file")
        os._exit(1)
    h5_file = h5py.File(filename, 'r')
    # NOTE(review): the file handle is intentionally left open — the returned
    # h5py dataset is lazy and would become invalid if the file were closed.
    values = h5_file[list(h5_file.keys())[0]]
    print("load data size:", values.shape[0])
    return values
def data_splitting(feature):
    '''
    Split data into two categories: go (even label) and cut-in (odd label).
    The label lives in column -3; rows labeled -10 are anomalies and are
    dropped from both outputs.
    '''
    labels = feature[:, -3]
    valid = labels != -10
    go_mask = np.logical_and(labels % 2 == 0, valid)
    cutin_mask = np.logical_and(labels % 2 == 1, valid)
    data = np.asarray(feature)
    return data[go_mask], data[cutin_mask]
def down_sample(feature, label, drop_rate):
    '''
    Randomly drop rows per label category.

    feature: the input data (label read from column -2)
    label and drop_rate: one-to-one mapping of the drop-rate for each
    specific label; rows whose label is not listed are always dropped
    '''
    row_labels = feature[:, -2]
    keep_mask = np.zeros(row_labels.shape[0], dtype=bool)
    # One random draw per row, shared across all label categories.
    rand_vals = np.random.random(row_labels.shape[0])
    for lbl, rate in zip(label, drop_rate):
        # Keep a (1 - rate) fraction of the rows carrying this label.
        keep_here = np.logical_and(row_labels == lbl, rand_vals > rate)
        keep_mask = np.logical_or(keep_mask, keep_here)
    return feature[keep_mask, :]
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Data preprocessing.')
    parser.add_argument('directory', type=str,
                        help='directory contains feature files in .h5')
    parser.add_argument('-m', '--merge_files', action='store_true',
                        help='Merge output files into one.')
    parser.add_argument('-s', '--split_category', action='store_true',
                        help='Split the output into Go and Cutin.')
    args = parser.parse_args()
    path = args.directory
    # NOTE(review): output paths are built as `path + 'go/'` etc., which
    # assumes `path` ends with '/' — confirm the expected invocation.
    print("Loading h5 from directory: {}".format(path))
    if not args.merge_files:
        # Per-file mode: write one go/ and one cutin/ file per input file.
        if os.path.isdir(path):
            h5_files = getListOfFiles(path)
            print("Total number of files:", len(h5_files))
            # For each file in the total list of files:
            for i, file in enumerate(h5_files):
                print("Process File", i, ":", file)
                feature = load_hdf5(file)
                # NOTE(review): inf rows are only reported, not filtered out.
                if np.any(np.isinf(feature)):
                    print("inf data found")
                if args.split_category:
                    # Split data into two categories:
                    fea_go, fea_cutin = data_splitting(feature)
                    # Balance data by down-sampling oversized bins:
                    #fea_go = down_sample(fea_go, [0, 1, 4], [0.0, 0.95, 0.83])
                    #fea_cutin = down_sample(fea_cutin, [-1, 2, 3], [0.985 ,0.0, 0.0])
                    go_path = path + 'go/' + \
                        file.split('/')[-2] + '-' + file.split('/')[-1]
                    h5_file = h5py.File(go_path, 'w')
                    h5_file.create_dataset('data', data=fea_go)
                    h5_file.close()
                    cutin_path = path + 'cutin/' + \
                        file.split('/')[-2] + '-' + file.split('/')[-1]
                    h5_file = h5py.File(cutin_path, 'w')
                    h5_file.create_dataset('data', data=fea_cutin)
                    h5_file.close()
                else:
                    print(None)
                    # TODO: implement those non-splitting category
        else:
            print("Fail to find", path)
            os._exit(-1)
    else:
        # Merge mode: accumulate every file's categories and write two
        # merged outputs at the end.
        if os.path.isdir(path):
            features_go = None
            features_cutin = None
            features = None
            labels = None
            h5_files = getListOfFiles(path)
            print("Total number of files:", len(h5_files))
            # For each file in the total list of files:
            for i, file in enumerate(h5_files):
                print("Process File", i, ":", file)
                feature = load_hdf5(file)
                if np.any(np.isinf(feature)):
                    print("inf data found")
                if args.split_category:
                    # Split data into two categories:
                    fea_go, fea_cutin = data_splitting(feature)
                    # Balance data by down-sampling oversized bins:
                    #fea_go = down_sample(fea_go, [0, 1, 4], [0.0, 0.95, 0.83])
                    #fea_cutin = down_sample(fea_cutin, [-1, 2, 3], [0.985 ,0.0, 0.0])
                    features_go = np.concatenate((features_go, fea_go), axis=0) if features_go is not None \
                        else fea_go
                    features_cutin = np.concatenate((features_cutin, fea_cutin), axis=0) if features_cutin is not None \
                        else fea_cutin
        else:
            print("Fail to find", path)
            os._exit(-1)
        # NOTE(review): merge mode without --split_category accumulates
        # nothing and writes no output — confirm that is intended.
        if args.split_category:
            date = datetime.datetime.now().strftime('%Y-%m-%d')
            sample_file = path + 'merged_go' + date + '.h5'
            print("Save samples file to:", sample_file)
            h5_file = h5py.File(sample_file, 'w')
            h5_file.create_dataset('data', data=features_go)
            h5_file.close()
            sample_file = path + 'merged_cutin' + date + '.h5'
            print("Save samples file to:", sample_file)
            h5_file = h5py.File(sample_file, 'w')
            h5_file.create_dataset('data', data=features_cutin)
            h5_file.close()
| 0
|
apollo_public_repos/apollo/modules/tools/prediction
|
apollo_public_repos/apollo/modules/tools/prediction/data_pipelines/junctionMLP_train.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
@requirement:
tensorflow 1.11
"""
import os
import h5py
import logging
import argparse
import numpy as np
import tensorflow as tf
from modules.tools.prediction.data_pipelines.proto import fnn_model_pb2
from fnn_model_pb2 import FnnModel, Layer
from sklearn.model_selection import train_test_split
dim_input = 7 + 72
dim_output = 12
def load_data(filename):
    """Load the 'data' dataset from an HDF5 file as a numpy array.

    Exits the process (os._exit) if the file is missing or is not a
    .h5 file, mirroring the pipeline's fail-fast convention.
    """
    if not (os.path.exists(filename)):
        logging.error("file: {}, does not exist".format(filename))
        os._exit(1)
    if os.path.splitext(filename)[1] != '.h5':
        logging.error("file: {} is not an hdf5 file".format(filename))
        os._exit(1)
    samples = dict()
    # FIX: use a context manager so the HDF5 handle is always released
    # (the original opened the file and never closed it).
    with h5py.File(filename, 'r') as h5_file:
        for key in h5_file.keys():
            samples[key] = h5_file[key][:]
    print("load file success")
    return samples['data']
def data_preprocessing(data):
    """Split a sample matrix into network inputs and one-hot targets.

    The first `dim_input` columns are features; the trailing
    `dim_output` columns are the labels.
    """
    features = data[:, :dim_input]
    targets = data[:, -dim_output:]
    return features, targets
def save_model(model, filename):
    """
    Save the trained model parameters into protobuf binary format file

    Walks every Keras layer, copying its activation, bias vector and
    weight matrix (row by row) into an FnnModel proto, then writes the
    serialized proto to `filename`.
    """
    net_params = FnnModel()
    net_params.num_layer = 0
    for layer in model.layers:
        net_params.num_layer += 1
        net_layer = net_params.layer.add()
        config = layer.get_config()
        # NOTE(review): every layer is stamped with the *network-level*
        # input/output dims, not this layer's own dims — looks suspicious;
        # confirm the C++ loader ignores these per-layer fields.
        net_layer.layer_input_dim = dim_input
        net_layer.layer_output_dim = dim_output
        # Map the Keras activation name onto the proto enum; an
        # unrecognized activation leaves the field unset.
        if config['activation'] == 'relu':
            net_layer.layer_activation_func = fnn_model_pb2.Layer.RELU
        elif config['activation'] == 'tanh':
            net_layer.layer_activation_func = fnn_model_pb2.Layer.TANH
        elif config['activation'] == 'sigmoid':
            net_layer.layer_activation_func = fnn_model_pb2.Layer.SIGMOID
        elif config['activation'] == 'softmax':
            net_layer.layer_activation_func = fnn_model_pb2.Layer.SOFTMAX
        weights, bias = layer.get_weights()
        net_layer.layer_bias.columns.extend(bias.reshape(-1).tolist())
        # Each row of the weight matrix becomes one proto row.
        for col in weights.tolist():
            row = net_layer.layer_input_weight.rows.add()
            row.columns.extend(col)
    net_params.dim_input = dim_input
    net_params.dim_output = dim_output
    with open(filename, 'wb') as params_file:
        params_file.write(net_params.SerializeToString())
if __name__ == "__main__":
    # CLI entry: train the junction MLP on an h5 sample file and dump
    # the weights as a protobuf binary next to the current directory.
    parser = argparse.ArgumentParser(
        description='train neural network based on feature files and save parameters')
    parser.add_argument('filename', type=str, help='h5 file of data.')
    args = parser.parse_args()
    file = args.filename
    # load_data
    data = load_data(file)
    print("Data load success, with data shape: " + str(data.shape))
    # 80/20 random train/test split.
    train_data, test_data = train_test_split(data, test_size=0.2)
    X_train, Y_train = data_preprocessing(train_data)
    X_test, Y_test = data_preprocessing(test_data)
    # Two ReLU hidden layers; the 12-way softmax head matches the
    # dim_output one-hot label columns.
    model = tf.keras.models.Sequential([
        tf.keras.layers.Dense(30, activation=tf.nn.relu),
        tf.keras.layers.Dense(20, activation=tf.nn.relu),
        tf.keras.layers.Dense(12, activation=tf.nn.softmax)])
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  # loss='MSE',
                  metrics=['accuracy'])
    model.fit(X_train, Y_train, epochs=5)
    # NOTE(review): the model is serialized *before* evaluation, so the
    # saved file is written even if evaluation fails.
    model_path = os.path.join(os.getcwd(), "junction_mlp_vehicle_model.bin")
    save_model(model, model_path)
    print("Model saved to: " + model_path)
    # score is [loss, accuracy] per the compiled metrics — printed raw.
    score = model.evaluate(X_test, Y_test)
    print("Testing accuracy is: " + str(score))
| 0
|
apollo_public_repos/apollo/modules/tools/prediction
|
apollo_public_repos/apollo/modules/tools/prediction/data_pipelines/mlp_train.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
@requirement:
tensorflow- 1.3.0
Keras-1.2.2
"""
import argparse
import logging
import os
import h5py
import numpy as np
from keras.callbacks import ModelCheckpoint
from keras.metrics import mse
from keras.models import Sequential, Model
from keras.layers.normalization import BatchNormalization
from keras.layers import Activation
from keras.layers import Dense, Input
from keras.layers import Dropout
from keras.regularizers import l2, l1
from keras.utils import np_utils
from sklearn.model_selection import train_test_split
import google.protobuf.text_format as text_format
from modules.tools.prediction.data_pipelines.proto import fnn_model_pb2
from modules.tools.prediction.data_pipelines.common import log
from modules.tools.prediction.data_pipelines.common.data_preprocess import load_h5
from modules.tools.prediction.data_pipelines.common.data_preprocess import down_sample
from modules.tools.prediction.data_pipelines.common.data_preprocess import train_test_split
from modules.tools.prediction.data_pipelines.common.configure import parameters
from modules.tools.prediction.data_pipelines.common.configure import labels
from fnn_model_pb2 import FnnModel, Layer
# Constants
dim_input = parameters['mlp']['dim_input']
dim_hidden_1 = parameters['mlp']['dim_hidden_1']
dim_hidden_2 = parameters['mlp']['dim_hidden_2']
dim_output = parameters['mlp']['dim_output']
train_data_rate = parameters['mlp']['train_data_rate']
evaluation_log_path = os.path.join(os.getcwd(), "evaluation_report")
log.init_log(evaluation_log_path, level=logging.DEBUG)
def load_data(filename):
    """Read the 'data' dataset out of an HDF5 feature file.

    Validates existence and the .h5 extension first, exiting the
    process on failure (pipeline fail-fast convention).
    """
    if not (os.path.exists(filename)):
        logging.error("file: {}, does not exist".format(filename))
        os._exit(1)
    if os.path.splitext(filename)[1] != '.h5':
        logging.error("file: {} is not an hdf5 file".format(filename))
        os._exit(1)
    samples = dict()
    # FIX: the original never closed the file; a context manager
    # guarantees the handle is released even if a read raises.
    with h5py.File(filename, 'r') as h5_file:
        for key in h5_file.keys():
            samples[key] = h5_file[key][:]
    print("load file success")
    return samples['data']
def down_sample(data):
    """Randomly drop samples per label class to rebalance the data.

    Drop rates (probability of discarding a sample of that class):
    cutin_false (-1): 0.9, go_false (0): 0.9, go_true (1): 0.7,
    cutin_true (2): 0.0 (kept entirely). The last column is the label.
    """
    drop_rate_by_label = {
        -1: 0.9,  # cutin_false
        0: 0.9,   # go_false
        1: 0.7,   # go_true
        2: 0.0,   # cutin_true
    }
    label = data[:, -1]
    size = np.shape(label)[0]
    # One uniform draw per sample, shared by all per-class thresholds.
    rand = np.random.random((size))
    keep = np.zeros(size, dtype=bool)
    for value, drop_rate in drop_rate_by_label.items():
        keep = np.logical_or(
            keep, np.logical_and(label == value, rand > drop_rate))
    return data[keep, :]
def get_param_norm(feature):
    """Compute per-column normalization parameters for a sample matrix.

    Returns:
        (mean, std) tuple of 1-D arrays; std carries a 1e-6 offset so
        constant columns never divide by zero.
    """
    return (np.mean(feature, axis=0), np.std(feature, axis=0) + 1e-6)
def setup_model():
    """
    Set up neural network based on keras.Sequential

    Three Dense layers (he_normal init, L2(0.01) penalty, Keras 1.x
    keyword spellings): relu -> relu -> sigmoid, compiled with binary
    cross-entropy and rmsprop.
    """
    model = Sequential()
    model.add(
        Dense(
            dim_hidden_1,
            input_dim=dim_input,
            init='he_normal',
            activation='relu',
            W_regularizer=l2(0.01)))
    # Remaining layers share everything but units and activation.
    for units, act in ((dim_hidden_2, 'relu'), (dim_output, 'sigmoid')):
        model.add(
            Dense(
                units,
                init='he_normal',
                activation=act,
                W_regularizer=l2(0.01)))
    model.compile(
        loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    return model
def evaluate_model(y, pred):
    """
    give the performance [recall, precision] of nn model
    Parameters
    ----------
    y: numpy.array; real classess
    pred: numpy.array; prediction classes
    Returns
    -------
    performance dict, store the performance in log file
    """
    # Flatten so the class comparisons below are element-wise 1-D.
    y = y.reshape(-1)
    pred = pred.reshape(-1)
    # Ground-truth counts per class and predicted positives/negatives,
    # computed separately over the "go" and "cutin" label subsets.
    go_true = (y == labels['go_true']).sum()
    go_false = (y == labels['go_false']).sum()
    index_go = np.logical_or(y == labels['go_false'], y == labels['go_true'])
    go_positive = (pred[index_go] == 1).sum()
    go_negative = (pred[index_go] == 0).sum()
    cutin_true = (y == labels['cutin_true']).sum()
    cutin_false = (y == labels['cutin_false']).sum()
    index_cutin = np.logical_or(y == labels['cutin_false'],
                                y == labels['cutin_true'])
    cutin_positive = (pred[index_cutin] == 1).sum()
    cutin_negative = (pred[index_cutin] == 0).sum()
    logging.info("data size: {}, included:".format(y.shape[0]))
    logging.info("\t True False Positive Negative")
    logging.info(" Go: {:7} {:7} {:7} {:7}".format(go_true, go_false,
                                                   go_positive, go_negative))
    logging.info("Cutin:{:7} {:7} {:7} {:7}".format(
        cutin_true, cutin_false, cutin_positive, cutin_negative))
    logging.info("--------------------SCORE-----------------------------")
    logging.info("     recall precision F1-score")
    # Overall: true positives are positive-class (y > 0.1) samples
    # predicted as 1; guard every ratio against a zero denominator.
    ctrue = float(go_true + cutin_true)
    positive = float(go_positive + cutin_positive)
    tp = float((pred[y > 0.1] == 1).sum())
    recall = tp / ctrue if ctrue != 0 else 0.0
    precision = tp / positive if positive != 0 else 0.0
    fscore = 2 * precision * recall / (
        precision + recall) if precision + recall != 0 else 0.0
    logging.info("Positive:{:6.3} {:6.3} {:6.3}".format(
        recall, precision, fscore))
    # Per-class scores: "go" uses label value 1, "cutin" label value 2.
    go_tp = float((pred[y == 1] == 1).sum())
    go_recall = go_tp / go_true if go_true != 0 else 0.0
    go_precision = go_tp / go_positive if go_positive != 0 else 0.0
    go_fscore = 2 * go_precision * go_recall / (
        go_precision + go_recall) if go_precision + go_recall != 0 else 0.0
    logging.info(" Go:{:6.3} {:6.3} {:6.3}".format(
        go_recall, go_precision, go_fscore))
    cutin_tp = float((pred[y == 2] == 1).sum())
    cutin_recall = cutin_tp / cutin_true if cutin_true != 0 else 0.0
    cutin_precision = cutin_tp / cutin_positive if cutin_positive != 0 else 0.0
    cutin_fscore = 2 * cutin_precision * cutin_recall / (
        cutin_precision +
        cutin_recall) if cutin_precision + cutin_recall != 0 else 0.0
    logging.info(" Cutin:{:6.3} {:6.3} {:6.3}".format(
        cutin_recall, cutin_precision, cutin_fscore))
    logging.info("-----------------------------------------------------\n\n")
    performance = {
        'recall': [recall, go_recall, cutin_recall],
        'precision': [precision, go_precision, cutin_precision]
    }
    return performance
def save_model(model, param_norm, filename):
    """
    Save the trained model parameters into protobuf binary format file

    Also embeds the (mean, std) normalization vectors so inference can
    reproduce the training-time feature scaling.
    """
    activation_enum = {
        'relu': fnn_model_pb2.Layer.RELU,
        'tanh': fnn_model_pb2.Layer.TANH,
        'sigmoid': fnn_model_pb2.Layer.SIGMOID,
    }
    net_params = FnnModel()
    net_params.samples_mean.columns.extend(param_norm[0].reshape(-1).tolist())
    net_params.samples_std.columns.extend(param_norm[1].reshape(-1).tolist())
    net_params.num_layer = 0
    for layer in model.flattened_layers:
        net_params.num_layer += 1
        net_layer = net_params.layer.add()
        # Network-level dims are stamped onto every layer, matching the
        # serialization format the loader expects.
        net_layer.layer_input_dim = dim_input
        net_layer.layer_output_dim = dim_output
        activation = layer.get_config()['activation']
        if activation in activation_enum:
            net_layer.layer_activation_func = activation_enum[activation]
        weights, bias = layer.get_weights()
        net_layer.layer_bias.columns.extend(bias.reshape(-1).tolist())
        for weight_row in weights.tolist():
            row = net_layer.layer_input_weight.rows.add()
            row.columns.extend(weight_row)
    net_params.dim_input = dim_input
    net_params.dim_output = dim_output
    with open(filename, 'wb') as params_file:
        params_file.write(net_params.SerializeToString())
if __name__ == "__main__":
    # CLI entry: train the go/cutin MLP on an h5 sample file, log an
    # evaluation report, and dump the weights as a protobuf binary.
    parser = argparse.ArgumentParser(
        description='train neural network based on feature files and save parameters')
    parser.add_argument('filename', type=str, help='h5 file of data.')
    args = parser.parse_args()
    file = args.filename
    # Load the raw samples and rebalance the label distribution.
    data = load_data(file)
    data = down_sample(data)
    print("Data load success.")
    print("data size =", data.shape)
    train_data, test_data = train_test_split(data, train_data_rate)
    print("training size =", train_data.shape)
    # Last column is the label; binarize (go_true=1 / cutin_true=2 -> True).
    X_train = train_data[:, 0:dim_input]
    Y_train = train_data[:, -1]
    Y_trainc = Y_train > 0.1
    X_test = test_data[:, 0:dim_input]
    Y_test = test_data[:, -1]
    Y_testc = Y_test > 0.1
    # Normalize both splits with statistics from the training split only.
    param_norm = get_param_norm(X_train)
    X_train = (X_train - param_norm[0]) / param_norm[1]
    X_test = (X_test - param_norm[0]) / param_norm[1]
    model = setup_model()
    model.fit(X_train, Y_trainc, shuffle=True, nb_epoch=20, batch_size=32)
    print("Model trained success.")
    # BUG FIX: X_test used to be normalized a *second* time right here,
    # so evaluation and prediction ran on doubly-normalized features;
    # the duplicate normalization line was removed.
    score = model.evaluate(X_test, Y_testc)
    print("\nThe accuracy on testing dat is", score[1])
    logging.info("Test data loss: {}, accuracy: {} ".format(
        score[0], score[1]))
    Y_train_hat = model.predict_classes(X_train, batch_size=32)
    Y_test_hat = model.predict_proba(X_test, batch_size=32)
    logging.info("## Training Data:")
    evaluate_model(Y_train, Y_train_hat)
    # Sweep the decision threshold on the held-out split.
    for thres in [x / 100.0 for x in range(20, 80, 5)]:
        logging.info("##threshond = {} Testing Data:".format(thres))
        performance = evaluate_model(Y_test, Y_test_hat > thres)
    performance['accuracy'] = [score[1]]
    print("\nFor more detailed evaluation results, please refer to",
          evaluation_log_path + ".log")
    model_path = os.path.join(os.getcwd(), "mlp_model.bin")
    save_model(model, param_norm, model_path)
    print("Model has been saved to", model_path)
| 0
|
apollo_public_repos/apollo/modules/tools/prediction
|
apollo_public_repos/apollo/modules/tools/prediction/data_pipelines/cruise_models.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import argparse
import logging
import os
from sklearn.model_selection import train_test_split
from sklearn.utils import class_weight
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader, sampler
import h5py
import numpy as np
import sklearn
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from modules.tools.prediction.data_pipelines.common.configure import parameters
from proto.cruise_model_pb2 import TensorParameter, InputParameter,\
Conv1dParameter, DenseParameter, ActivationParameter, MaxPool1dParameter,\
AvgPool1dParameter, LaneFeatureConvParameter, ObsFeatureFCParameter,\
ClassifyParameter, RegressParameter, CruiseModelParameter
"""
@requirement:
pytorch 0.4.1
"""
'''
This file includes all model definitions and related loss functions.
'''
'''
Model details:
- Fully-connected layers for classification and regression, respectively.
- It will compute a classification score indicating the probability
of the obstacle choosing the given lane.
- It will also compute a time indicating how soon the obstacle will reach
the center of the given lane.
'''
class FullyConn_NN(torch.nn.Module):
    """Two-headed MLP over a 174-dim feature vector.

    `classify` ends in a Sigmoid (lane-selection probability);
    `regress` ends in a ReLU (non-negative time-to-lane-center).
    """

    def __init__(self):
        super(FullyConn_NN, self).__init__()
        # (in, out, dropout) per Sigmoid-activated classification stage;
        # layers are created in the same order as the original so seeded
        # weight initialization is identical.
        classify_layers = []
        for n_in, n_out, p_drop in ((174, 88, 0.3), (88, 55, 0.2),
                                    (55, 23, 0.3), (23, 10, 0.0)):
            classify_layers += [nn.Linear(n_in, n_out), nn.Sigmoid(),
                                nn.Dropout(p_drop)]
        classify_layers += [nn.Linear(10, 1), nn.Sigmoid()]
        self.classify = torch.nn.Sequential(*classify_layers)

        regress_layers = []
        for n_in, n_out in ((174, 88), (88, 23)):
            regress_layers += [nn.Linear(n_in, n_out), nn.ReLU(),
                               nn.Dropout(0.1)]
        regress_layers += [nn.Linear(23, 1), nn.ReLU()]
        self.regress = torch.nn.Sequential(*regress_layers)

    def forward(self, x):
        """Return (classification score, regression value) for input x."""
        return self.classify(x), self.regress(x)
class FCNN_CNN1D(torch.nn.Module):
    """Two-headed net: 1-D conv over lane features + FC over obstacle
    features, fused into classification and regression heads.

    Expects x of width 148: the last 80 columns are lane features
    (reshaped to 4 channels x 20 points in forward), the first 68 are
    obstacle features.
    """

    def __init__(self):
        super(FCNN_CNN1D, self).__init__()
        # Lane branch: Conv1d(4->10, k3, s1) gives length 18, then
        # Conv1d(10->25, k3, s2) gives length 8 per sample.
        self.lane_feature_conv = torch.nn.Sequential(
            nn.Conv1d(4, 10, 3, stride=1),\
            # nn.BatchNorm1d(10),\
            nn.ReLU(),\
            #nn.Conv1d(10, 16, 3, stride=2),\
            # nn.BatchNorm1d(16),\
            # nn.ReLU(),\
            nn.Conv1d(10, 25, 3, stride=2),\
            # nn.BatchNorm1d(25)
        )
        # k=4 pooling over length 8 -> 2 values per channel; max and avg
        # outputs are concatenated in forward (25*2 + 25*2 = 100 dims).
        self.lane_feature_maxpool = nn.MaxPool1d(4)
        self.lane_feature_avgpool = nn.AvgPool1d(4)
        self.lane_feature_dropout = nn.Dropout(0.0)
        # Obstacle branch: 68 -> 40 -> 24 with Sigmoid activations.
        self.obs_feature_fc = torch.nn.Sequential(
            nn.Linear(68, 40),
            nn.Sigmoid(),
            nn.Dropout(0.0),
            nn.Linear(40, 24),
            nn.Sigmoid(),
            nn.Dropout(0.0),
        )
        # Classification head input: 100 (lane) + 24 (obs) = 124.
        # Output is a raw logit (final Sigmoid is commented out).
        self.classify = torch.nn.Sequential(
            nn.Linear(124, 66),
            nn.Sigmoid(),
            nn.Dropout(0.3),
            nn.Linear(66, 48),
            nn.Sigmoid(),
            nn.Dropout(0.1),
            nn.Linear(48, 11),
            nn.Sigmoid(),
            nn.Dropout(0.1),
            nn.Linear(11, 1),\
            # nn.Sigmoid()
        )
        # Regression head input: 124 fused features + 1 classify output.
        self.regress = torch.nn.Sequential(
            nn.Linear(125, 77),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(77, 46),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(46, 12),
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Linear(12, 1),
            nn.ReLU()
        )

    def forward(self, x):
        # Split input: last 80 columns are lane features (4 ch x 20 pts).
        lane_fea = x[:, -80:]
        lane_fea = lane_fea.view(lane_fea.size(0), 4, 20)
        obs_fea = x[:, :-80]
        lane_fea = self.lane_feature_conv(lane_fea)
        # Pool both ways and concatenate the flattened results.
        lane_fea_max = self.lane_feature_maxpool(lane_fea)
        lane_fea_avg = self.lane_feature_avgpool(lane_fea)
        lane_fea = torch.cat([lane_fea_max.view(lane_fea_max.size(0), -1),
                              lane_fea_avg.view(lane_fea_avg.size(0), -1)], 1)
        lane_fea = self.lane_feature_dropout(lane_fea)
        obs_fea = self.obs_feature_fc(obs_fea)
        tot_fea = torch.cat([lane_fea, obs_fea], 1)
        out_c = self.classify(tot_fea)
        # Regression also conditions on the classification output.
        out_r = self.regress(torch.cat([tot_fea, out_c], 1))
        return out_c, out_r
| 0
|
apollo_public_repos/apollo/modules/tools/prediction
|
apollo_public_repos/apollo/modules/tools/prediction/data_pipelines/BUILD
|
# Bazel targets for the prediction data pipelines (training tools).
load("@rules_python//python:defs.bzl", "py_binary", "py_library")
package(default_visibility = ["//visibility:public"])
# Preprocesses raw cruise .h5 feature dumps.
py_binary(
    name = "cruise_h5_preprocessing",
    srcs = ["cruise_h5_preprocessing.py"],
)
# Model definitions shared by the cruise training binary.
py_library(
    name = "cruise_models",
    srcs = ["cruise_models.py"],
    deps = [
        "//modules/tools/prediction/data_pipelines/common:configure",
        "//modules/tools/prediction/data_pipelines/proto:cruise_model_py_pb2",
    ],
)
# Trains the cruise MLP/CNN1D models.
py_binary(
    name = "cruiseMLP_train",
    srcs = ["cruiseMLP_train.py"],
    deps = [
        ":cruise_models",
        "//modules/tools/prediction/data_pipelines/common:configure",
        "//modules/tools/prediction/data_pipelines/proto:cruise_model_py_pb2",
    ],
)
# Trains the junction MLP model.
py_binary(
    name = "junctionMLP_train",
    srcs = ["junctionMLP_train.py"],
    deps = [
        "//modules/tools/prediction/data_pipelines/proto:fnn_model_py_pb2",
    ],
)
# Merges multiple .h5 sample files into one.
py_binary(
    name = "merge_h5",
    srcs = ["merge_h5.py"],
)
# Trains the go/cutin MLP model.
py_binary(
    name = "mlp_train",
    srcs = ["mlp_train.py"],
    deps = [
        "//modules/tools/prediction/data_pipelines/common:configure",
        "//modules/tools/prediction/data_pipelines/common:data_preprocess",
        "//modules/tools/prediction/data_pipelines/common:log",
        "//modules/tools/prediction/data_pipelines/proto:fnn_model_py_pb2",
    ],
)
| 0
|
apollo_public_repos/apollo/modules/tools/prediction
|
apollo_public_repos/apollo/modules/tools/prediction/data_pipelines/cruiseMLP_train.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
@requirement:
pytorch 0.4.1
"""
import argparse
import logging
import os
from sklearn.model_selection import train_test_split
from sklearn.utils import class_weight
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader, sampler
import h5py
import numpy as np
import sklearn
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from modules.tools.prediction.data_pipelines.common.configure import parameters
from modules.tools.prediction.data_pipelines.cruise_models import FullyConn_NN, FCNN_CNN1D
from modules.tools.prediction.data_pipelines.proto.cruise_model_pb2 import TensorParameter, InputParameter,\
Conv1dParameter, DenseParameter, ActivationParameter, MaxPool1dParameter,\
AvgPool1dParameter, LaneFeatureConvParameter, ObsFeatureFCParameter,\
ClassifyParameter, RegressParameter, CruiseModelParameter
# TODO(panjiacheng): the data-loader part needs to be modified.
# Constants
dim_input = parameters['cruise_mlp']['dim_input']
dim_hidden_1 = parameters['cruise_mlp']['dim_hidden_1']
dim_hidden_2 = parameters['cruise_mlp']['dim_hidden_2']
dim_output = parameters['cruise_mlp']['dim_output']
# Setup
cuda_is_available = torch.cuda.is_available()
logging.basicConfig(filename='training.log', level=logging.INFO)
def load_Conv1dParameter(model, key, stride=1):
    """Serialize one torch Conv1d layer into a Conv1dParameter proto.

    `key` is the state_dict prefix (e.g. 'lane_feature_conv.0');
    weight and bias tensors are flattened row-major into the proto.
    """
    state = model.state_dict()
    weight = state[key + '.weight']
    bias = state[key + '.bias']

    model_pb = Conv1dParameter()
    model_pb.shape.extend(list(weight.shape))
    model_pb.use_bias = True

    kernel_param = TensorParameter()
    kernel_param.shape.extend(list(weight.shape))
    kernel_param.data.extend(list(weight.numpy().reshape(-1)))
    model_pb.kernel.CopyFrom(kernel_param)

    bias_param = TensorParameter()
    bias_param.shape.extend(list(bias.shape))
    bias_param.data.extend(list(bias.numpy().reshape(-1)))
    model_pb.bias.CopyFrom(bias_param)

    model_pb.stride = stride
    return model_pb
def load_DenseParameter(model, key):
    """Serialize one torch Linear layer into a DenseParameter proto.

    Weights are transposed (torch stores out x in; the proto expects
    in x out), and `units` is derived from the bias length.
    """
    state = model.state_dict()
    weight_T = state[key + '.weight'].numpy().T
    bias = state[key + '.bias'].numpy()

    model_pb = DenseParameter()
    model_pb.use_bias = True

    weights_param = TensorParameter()
    weights_param.shape.extend(list(weight_T.shape))
    weights_param.data.extend(list(weight_T.reshape(-1)))
    model_pb.weights.CopyFrom(weights_param)

    bias_param = TensorParameter()
    bias_param.shape.extend(list(bias.shape))
    bias_param.data.extend(list(bias))
    model_pb.bias.CopyFrom(bias_param)

    model_pb.units = model_pb.bias.shape[0]
    return model_pb
def save_FCNN_CNN1D(model, filename):
    """Serialize a trained FCNN_CNN1D model into a CruiseModelParameter
    protobuf binary at `filename`.

    Field indices (conv1d_0, linear_3, ...) mirror the positions of the
    corresponding modules inside each nn.Sequential of FCNN_CNN1D, so
    this function must stay in sync with that class definition.
    """
    model_pb = CruiseModelParameter()
    # Lane branch: two convs (strides 1 and 2) with ReLU in between.
    lane_feature_conv = LaneFeatureConvParameter()
    lane_feature_conv.conv1d_0.CopyFrom(
        load_Conv1dParameter(model, 'lane_feature_conv.0', stride=1))
    lane_feature_conv.activation_1.activation = 'relu'
    lane_feature_conv.conv1d_2.CopyFrom(
        load_Conv1dParameter(model, 'lane_feature_conv.2', stride=2))
    lane_feature_conv.activation_3.activation = 'relu'
    lane_feature_conv.conv1d_4.CopyFrom(
        load_Conv1dParameter(model, 'lane_feature_conv.4', stride=2))
    # NOTE(review): pooling kernel/stride are hard-coded to 3 here while
    # FCNN_CNN1D uses MaxPool1d(4)/AvgPool1d(4) — confirm which is right.
    lane_feature_maxpool = MaxPool1dParameter()
    lane_feature_maxpool.kernel_size = 3
    lane_feature_maxpool.stride = 3
    lane_feature_avgpool = AvgPool1dParameter()
    lane_feature_avgpool.kernel_size = 3
    lane_feature_avgpool.stride = 3
    # Obstacle branch: two sigmoid-activated dense layers.
    obs_feature_fc = ObsFeatureFCParameter()
    obs_feature_fc.linear_0.CopyFrom(
        load_DenseParameter(model, 'obs_feature_fc.0'))
    obs_feature_fc.activation_1.activation = 'sigmoid'
    obs_feature_fc.linear_3.CopyFrom(
        load_DenseParameter(model, 'obs_feature_fc.3'))
    obs_feature_fc.activation_4.activation = 'sigmoid'
    # Classification head: four dense layers, sigmoid activations.
    classify = ClassifyParameter()
    classify.linear_0.CopyFrom(load_DenseParameter(model, 'classify.0'))
    classify.activation_1.activation = 'sigmoid'
    classify.linear_3.CopyFrom(load_DenseParameter(model, 'classify.3'))
    classify.activation_4.activation = 'sigmoid'
    classify.linear_6.CopyFrom(load_DenseParameter(model, 'classify.6'))
    classify.activation_7.activation = 'sigmoid'
    classify.linear_9.CopyFrom(load_DenseParameter(model, 'classify.9'))
    classify.activation_10.activation = 'sigmoid'
    # Regression head: four dense layers, relu activations.
    regress = RegressParameter()
    regress.linear_0.CopyFrom(load_DenseParameter(model, 'regress.0'))
    regress.activation_1.activation = 'relu'
    regress.linear_3.CopyFrom(load_DenseParameter(model, 'regress.3'))
    regress.activation_4.activation = 'relu'
    regress.linear_6.CopyFrom(load_DenseParameter(model, 'regress.6'))
    regress.activation_7.activation = 'relu'
    regress.linear_9.CopyFrom(load_DenseParameter(model, 'regress.9'))
    regress.activation_10.activation = 'relu'
    # Assemble the top-level message and write the binary file.
    model_pb.lane_feature_conv.CopyFrom(lane_feature_conv)
    model_pb.lane_feature_maxpool.CopyFrom(lane_feature_maxpool)
    model_pb.lane_feature_avgpool.CopyFrom(lane_feature_avgpool)
    model_pb.obs_feature_fc.CopyFrom(obs_feature_fc)
    model_pb.classify.CopyFrom(classify)
    model_pb.regress.CopyFrom(regress)
    with open(filename, 'wb') as params_file:
        params_file.write(model_pb.SerializeToString())
'''
Custom defined loss function that lumps the loss of classification and
of regression together.
'''
def loss_fn(c_pred, r_pred, target, balance):
    """Combined classification + regression loss.

    Classification: BCE-with-logits on target[:, 0], positive class
    re-weighted by `balance`, scaled by 4.
    Regression: MSE on target[:, 2], but only where 0 < target <= 3 —
    outside that range the prediction is replaced by the target itself,
    zeroing that element's squared error (it still counts in the mean's
    denominator).
    """
    # FIX: only move pos_weight to the GPU when CUDA is available; the
    # original called .cuda() unconditionally and crashed on CPU-only
    # hosts. GPU behavior is unchanged.
    pos_weight = torch.FloatTensor([balance])
    if torch.cuda.is_available():
        pos_weight = pos_weight.cuda()
    loss_C = nn.BCEWithLogitsLoss(pos_weight=pos_weight)  # nn.BCELoss()
    loss_R = nn.MSELoss()
    #loss = loss_C(c_pred, target[:,0].view(target.shape[0],1))
    loss = 4 * loss_C(c_pred, target[:, 0].view(target.shape[0], 1)) + \
        loss_R(((target[:, 2] > 0.0) * (target[:, 2] <= 3.0)).float().view(target.shape[0], 1) * r_pred +
               ((target[:, 2] <= 0.0) + (target[:, 2] > 3.0)).float().view(
                   target.shape[0], 1) * target[:, 2].view(target.shape[0], 1),
               target[:, 2].view(target.shape[0], 1))
    return loss
# ========================================================================
# Helper functions
'''
Get the full path of all files under the directory: 'dirName'
'''
def getListOfFiles(dirName):
    """Recursively collect the full paths of all files under `dirName`."""
    collected = []
    for entry in os.listdir(dirName):
        full_path = os.path.join(dirName, entry)
        if os.path.isdir(full_path):
            # Descend into subdirectories and splice in their files.
            collected.extend(getListOfFiles(full_path))
        else:
            collected.append(full_path)
    return collected
'''
Print the distribution of data labels.
'''
def print_dist(label):
    """Print the percentage share of each distinct value in `label`."""
    total = len(label)
    for value in np.unique(label):
        share = np.sum(label == value) / total * 100
        print('Label = {}: {}%'.format(value, share))
# ========================================================================
# ========================================================================
# Data Loading and preprocessing (Non Data-Loader case)
def load_data(filename):
    '''
    Load the data from h5 file to the numpy format.
    (Only for non data-loader case)

    Exits the process if the file is missing or not a .h5 file.
    '''
    if not (os.path.exists(filename)):
        logging.error("file: {}, does not exist".format(filename))
        os._exit(1)
    if os.path.splitext(filename)[1] != '.h5':
        logging.error("file: {} is not an hdf5 file".format(filename))
        os._exit(1)
    samples = dict()
    # FIX: close the HDF5 handle deterministically via a context
    # manager (the original leaked the open file).
    with h5py.File(filename, 'r') as h5_file:
        for key in h5_file.keys():
            samples[key] = h5_file[key][:]
    print("load file success")
    return samples['data']
def load_npy_data(dir):
    '''
    Load all .npy files under a certain dir;
    merge them together into one;
    return.
    '''
    # FIX: this was an empty stub that silently returned None. It now
    # implements its own docstring: walk the tree, load every .npy file
    # in sorted path order, and concatenate along axis 0. Returns None
    # when no .npy file is found (preserving the stub's old result).
    npy_files = []
    for dirpath, _, filenames in os.walk(dir):
        for fname in filenames:
            if fname.endswith('.npy'):
                npy_files.append(os.path.join(dirpath, fname))
    if not npy_files:
        return None
    arrays = [np.load(path) for path in sorted(npy_files)]
    return np.concatenate(arrays, axis=0)
def data_preprocessing(data):
    '''
    Preprocess the data.
    (Only for non data-loader case)
        - separate input X and output y
        - process output label from {-1,0,1,2,3,4} to {0,1}
        - Take out only those meaningful features
        - shuffle data
    '''
    # Various input features separation
    # Column layout (per this slicing): 0-22 legacy obstacle features,
    # 23-67 current + 5-step history, 68..-(dim_output+8) lane features,
    # last 8 before the labels are surrounding-obstacle features.
    X_obs_old_features = data[:, 0:23]
    # NOTE(review): X_surround_obs and X_obs_now are computed but never
    # used below — presumably kept for experimentation; confirm.
    X_surround_obs = data[:, -dim_output-8:-dim_output]
    X_obs_now = data[:, 23:32]
    X_obs_hist_5 = data[:, 23:68]
    X_lane = data[:, 68:-dim_output-8]
    # mask out those that don't have any history
    # mask5 = (data[:,53] != 100)
    X = np.concatenate((X_obs_old_features, X_obs_hist_5, X_lane), axis=1)
    # X = X[mask5, :]
    y = data[:, -dim_output:]
    # y = y[mask5, :]
    # Binary classification
    # Any positive raw label becomes class 1.0, everything else 0.0.
    y[:, 0] = (y[:, 0] > 0).astype(float)
    #y[:, 0] = np.logical_and((y[:, 0] > 0), (y[:, 1] < 1.0))
    # Random shuffling
    # test_size=0.0 keeps all rows; the split is used purely as a
    # seeded shuffle (random_state=233 for reproducibility).
    X_new, X_dummy, y_new, y_dummy = train_test_split(
        X, y, test_size=0.0, random_state=233)
    return X_new, y_new  # , X_dummy, y_dummy
# ========================================================================
# ========================================================================
# Data Loading and preprocessing (Data-Loader case)
'''
TODO: implement custom collate_fn to incorporate down-sampling function
for certain labels.
'''
def collate_wDownSample(batch):
    # Stub: custom batch assembly with per-label down-sampling is not
    # implemented yet (see the TODO above); always returns None.
    return None
'''
If datasets are too large, use Dataloader to load from disk.
'''
class TrainValidDataset(Dataset):
    '''
    Args:
      - root_dir (string): Directory containing all folders with different
        dates, each folder containing .cruise.h5 data files.

    Lazily serves samples spread across many .h5 files: only per-file
    sizes are read at construction; the actual rows are read from disk
    on each __getitem__ call.
    '''

    def __init__(self, list_of_files):
        self.list_of_files_ = list_of_files
        # Cumulative sample count after each file, used to map a global
        # index to (file, local offset) via binary search.
        self.data_size_until_this_file_ = []
        self.dataset_size = 0
        for file in self.list_of_files_:
            with h5py.File(file, 'r') as h5_file:
                data_size = h5_file[list(h5_file.keys())[0]].shape[0]
            self.dataset_size += data_size
            self.data_size_until_this_file_.append(self.dataset_size)
        #print ('Total size of dataset: {}'.format(self.data_size_until_this_file_))

    def __len__(self):
        return self.dataset_size

    def __getitem__(self, index):
        # Locate which file holds this global index, then convert it to
        # a row offset within that file.
        bin_idx = self.FindBin(index, 0, len(
            self.data_size_until_this_file_)-1)
        with h5py.File(self.list_of_files_[bin_idx], 'r') as h5_file:
            idx_offset = self.data_size_until_this_file_[bin_idx] - \
                h5_file[list(h5_file.keys())[0]].shape[0]
            data = h5_file[list(h5_file.keys())[0]][index-idx_offset]
        # Trailing dim_output columns are the labels; binarize label 0.
        label = data[-dim_output:]
        label[0] = (label[0] > 0.0).astype(float)
        return data[:-dim_output], label

    # Binary search to expedite the data-loading process.
    def FindBin(self, index, start, end):
        if (start == end):
            return start
        mid = int((start+end)/2.0)
        if (self.data_size_until_this_file_[mid] <= index):
            return self.FindBin(index, mid+1, end)
        else:
            return self.FindBin(index, start, mid)
# ========================================================================
# ========================================================================
# Data training and validation
'''
Train the data. (vanilla version without dataloader)
'''
def train_vanilla(train_X, train_y, model, optimizer, epoch, batch_size=2048, balance=1.0):
    """Train `model` for one epoch on in-memory tensors.

    Args:
        train_X, train_y: full training tensors (assumed already on the
            model's device — TODO confirm against the caller).
        balance: positive-class weight forwarded to loss_fn.
    Logs and prints mean loss and classification accuracy per epoch.
    """
    model.train()
    loss_history = []
    logging.info('Epoch: {}'.format(epoch+1))
    print('Epoch: {}.'.format(epoch+1))
    num_of_data = train_X.shape[0]
    # NOTE(review): the +1 yields an empty final slice whenever
    # num_of_data is an exact multiple of batch_size — verify the model
    # tolerates an empty batch.
    num_of_batch = int(num_of_data / batch_size) + 1
    pred_y = None
    for i in range(num_of_batch):
        optimizer.zero_grad()
        X = train_X[i*batch_size: min(num_of_data, (i+1)*batch_size), ]
        y = train_y[i*batch_size: min(num_of_data, (i+1)*batch_size), ]
        c_pred, r_pred = model(X)
        loss = loss_fn(c_pred, r_pred, y, balance)
        loss_history.append(loss.data)
        loss.backward()
        optimizer.step()
        # Accumulate classification logits for the whole-epoch accuracy.
        c_pred = c_pred.data.cpu().numpy()
        c_pred = c_pred.reshape(c_pred.shape[0], 1)
        pred_y = np.concatenate((pred_y, c_pred), axis=0) if pred_y is not None \
            else c_pred
        if (i > 0) and (i % 100 == 0):
            logging.info('Step: {}, train_loss: {}'.format(
                i, np.mean(loss_history[-100:])))
            print("Step: {}, training loss: {}".format(
                i, np.mean(loss_history[-100:])))
    # Threshold at 0: predictions are raw logits (no final sigmoid).
    pred_y = (pred_y > 0.0)
    train_y = train_y.data.cpu().numpy()
    training_accuracy = sklearn.metrics.accuracy_score(
        train_y[:, 0], pred_y.reshape(-1))
    train_loss = np.mean(loss_history)
    logging.info('Training loss: {}'.format(train_loss))
    logging.info('Training Accuracy: {}.'.format(training_accuracy))
    print('Training Loss: {}. Training Accuracy: {}'
          .format(train_loss, training_accuracy))
'''
Validation (vanilla version without dataloader)
'''
def validate_vanilla(valid_X, valid_y, model, batch_size=2048, balance=1.0, pos_label=1.0):
    """Evaluate `model` on in-memory validation tensors.

    Computes accuracy, precision, recall (w.r.t. `pos_label`) and AUC
    over the classification head, and returns the mean batch loss.
    """
    model.eval()
    loss_history = []
    num_of_data = valid_X.shape[0]
    num_of_batch = int(num_of_data / batch_size) + 1
    pred_y = None
    for i in range(num_of_batch):
        X = valid_X[i*batch_size: min(num_of_data, (i+1)*batch_size), ]
        y = valid_y[i*batch_size: min(num_of_data, (i+1)*batch_size), ]
        c_pred, r_pred = model(X)
        valid_loss = loss_fn(c_pred, r_pred, y, balance)
        loss_history.append(valid_loss.data)
        c_pred = c_pred.data.cpu().numpy()
        c_pred = c_pred.reshape(c_pred.shape[0], 1)
        pred_y = np.concatenate((pred_y, c_pred), axis=0) if pred_y is not None \
            else c_pred
    valid_y = valid_y.data.cpu().numpy()
    # AUC is computed on the raw logits before thresholding.
    valid_auc = sklearn.metrics.roc_auc_score(
        valid_y[:, 0], pred_y.reshape(-1))
    # Threshold at 0: predictions are raw logits (no final sigmoid).
    pred_y = (pred_y > 0.0)
    valid_accuracy = sklearn.metrics.accuracy_score(
        valid_y[:, 0], pred_y.reshape(-1))
    valid_precision = sklearn.metrics.precision_score(
        valid_y[:, 0], pred_y.reshape(-1), pos_label=pos_label)
    valid_recall = sklearn.metrics.recall_score(
        valid_y[:, 0], pred_y.reshape(-1), pos_label=pos_label)
    logging.info('Validation loss: {}. Accuracy: {}.\
        Precision: {}. Recall: {}. AUC: {}.'
                 .format(np.mean(loss_history), valid_accuracy, valid_precision,
                         valid_recall, valid_auc))
    print('Validation loss: {}. Accuracy: {}.\
        Precision: {}. Recall: {}. AUC: {}.'
          .format(np.mean(loss_history), valid_accuracy, valid_precision,
                  valid_recall, valid_auc))
    return np.mean(loss_history)
'''
Train the data. (using dataloader)
'''
def train_dataloader(train_loader, model, optimizer, epoch, balance=1.0):
    """Train `model` for one epoch over `train_loader`.

    Args:
        train_loader: iterable yielding (inputs, targets) mini-batches.
        model: network returning (classification_pred, regression_pred).
        optimizer: torch optimizer updated once per batch.
        epoch: epoch index, used for logging only.
        balance: weight for positive samples, forwarded to the global loss_fn.
    """
    model.train()
    loss_history = []
    train_correct_class = 0
    total_size = 0
    logging.info('Epoch: {}'.format(epoch))
    for i, (inputs, targets) in enumerate(train_loader):
        total_size += targets.shape[0]
        optimizer.zero_grad()
        # Convert unconditionally so the CPU-only path also works; the
        # previous code only assigned X/y under CUDA, raising NameError
        # on machines without a GPU.
        X = inputs.float()
        y = targets.float()
        if cuda_is_available:
            X = X.cuda()
            y = y.cuda()
        c_pred, r_pred = model(X)
        loss = loss_fn(c_pred, r_pred, y, balance)
        loss_history.append(loss.data)
        loss.backward()
        optimizer.step()
        # Running count of correct hard classifications (threshold 0.5).
        train_correct_class += \
            np.sum((c_pred.data.cpu().numpy() > 0.5).astype(float) ==
                   y[:, 0].data.cpu().numpy().reshape(c_pred.data.cpu().numpy().shape[0], 1))
        if i % 100 == 0:
            logging.info('Step: {}, train_loss: {}'.format(
                i, np.mean(loss_history[-100:])))
            print("Step: {}, training loss: {}".format(
                i, np.mean(loss_history[-100:])))
    train_loss = np.mean(loss_history)
    logging.info('Training loss: {}'.format(train_loss))
    print('Epoch: {}. Training Loss: {}'.format(epoch, train_loss))
'''
Validation (using dataloader)
'''
def validate_dataloader(valid_loader, model, balance=1.0):
    """Evaluate `model` over `valid_loader` and log classification accuracy.

    Returns:
        Mean per-batch validation loss. (Previously the LAST batch's loss
        was returned, which gave the LR scheduler a noisy signal and was
        inconsistent with validate_vanilla.)
    """
    model.eval()
    loss_history = []
    valid_correct_class = 0.0
    total_size = 0
    for i, (X, y) in enumerate(valid_loader):
        total_size += y.shape[0]
        # Convert unconditionally so the CPU-only path also works; the
        # previous code only converted dtypes under CUDA.
        X = X.float()
        y = y.float()
        if cuda_is_available:
            X = X.cuda()
            y = y.cuda()
        c_pred, r_pred = model(X)
        valid_loss = loss_fn(c_pred, r_pred, y, balance)
        loss_history.append(valid_loss.data)
        valid_correct_class += \
            np.sum((c_pred.data.cpu().numpy() > 0.5).astype(float) ==
                   y[:, 0].data.cpu().numpy().reshape(c_pred.data.cpu().numpy().shape[0], 1))
    valid_classification_accuracy = valid_correct_class / total_size
    mean_valid_loss = np.mean(loss_history)
    logging.info('Validation loss: {}. Validation classification accuracy: {}'
                 .format(mean_valid_loss, valid_classification_accuracy))
    print('Validation loss: {}. Classification accuracy: {}.'
          .format(mean_valid_loss, valid_classification_accuracy))
    return mean_valid_loss
# ========================================================================
# ========================================================================
# Main function:
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='train neural network based on feature files and save parameters')
    parser.add_argument('train_file', type=str, help='training data (h5)')
    parser.add_argument('valid_file', type=str, help='validation data (h5)')
    parser.add_argument('-n', '--network-structure', type=int, default=1,
                        help='Specify which network to use:\n \
                        \t 0: Fully connected neural network.\n \
                        \t 1: 1D-CNN for lane feature extraction.')
    parser.add_argument('-d', '--data-loader', action='store_true',
                        help='Use the dataloader (when memory size is smaller than dataset size)')
    parser.add_argument('-s', '--save-path', type=str, default='./',
                        help='Specify the directory to save trained models.')
    parser.add_argument('-g', '--go', action='store_true',
                        help='It is training lane-follow (go) cases.')
    parser.add_argument('-b', '--balance', type=float, default=1.0,
                        help='Specify the weight for positive predictions.')
    args = parser.parse_args()

    # CUDA set-up. Hoisted above the branch: the dataloader path below also
    # reads this flag; previously it was only defined in the non-dataloader
    # branch, causing a NameError whenever -d/--data-loader was passed.
    cuda_is_available = torch.cuda.is_available()

    if not args.data_loader:
        # ---- In-memory path: whole dataset fits in (GPU) memory. ----
        # Load from file and print out general information of the data.
        train_file = args.train_file
        valid_file = args.valid_file
        train_data = load_data(train_file)
        valid_data = load_data(valid_file)
        print('Data loaded successfully.')
        classes_train = np.asarray(train_data[:, -dim_output])
        print('Total number of training samples: {}'.format(len(classes_train)))
        print('Training set distribution:')
        print_dist(classes_train)
        classes_valid = np.asarray(valid_data[:, -dim_output])
        print('Total number of validation samples: {}'.format(len(classes_valid)))
        print('Validation set distribution:')
        print_dist(classes_valid)
        # Data preprocessing
        X_train, y_train = data_preprocessing(train_data)
        X_valid, y_valid = data_preprocessing(valid_data)
        # Model declaration
        model = None
        if args.network_structure == 0:
            model = FullyConn_NN()
        elif args.network_structure == 1:
            model = FCNN_CNN1D()
        print("The model used is: ")
        print(model)
        learning_rate = 6.561e-4
        optimizer = optim.Adam(model.parameters(), lr=learning_rate)
        scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            optimizer, factor=0.3, patience=2, min_lr=1e-8, verbose=1, mode='min')
        if (cuda_is_available):
            print("Using CUDA to speed up training.")
            model.cuda()
            X_train = Variable(torch.FloatTensor(X_train).cuda())
            X_valid = Variable(torch.FloatTensor(X_valid).cuda())
            y_train = Variable(torch.FloatTensor(y_train).cuda())
            y_valid = Variable(torch.FloatTensor(y_valid).cuda())
        # Model training:
        # In "go" (lane-follow) training the positive class is labeled 0.
        pos_label = 1.0
        if args.go:
            pos_label = 0.0
        best_valid_loss = float('+inf')
        for epoch in range(50):
            train_vanilla(X_train, y_train, model, optimizer,
                          epoch, balance=args.balance)
            valid_loss = validate_vanilla(
                X_valid, y_valid, model, balance=args.balance, pos_label=pos_label)
            scheduler.step(valid_loss)
            # Checkpoint only on improvement.
            if valid_loss < best_valid_loss:
                best_valid_loss = valid_loss
                torch.save(model.state_dict(), args.save_path + 'cruise_model{}_epoch{}_valloss{:.6f}.pt'
                           .format(args.network_structure, epoch+1, valid_loss))
    else:
        # ---- Streaming path: dataset larger than memory, use DataLoader. ----
        train_dir = args.train_file
        valid_dir = args.valid_file
        # Data preprocessing (training data balancing).
        list_of_training_files = getListOfFiles(train_dir)
        list_of_validation_files = getListOfFiles(valid_dir)
        classes_train = []
        for file in list_of_training_files:
            with h5py.File(file, 'r') as h5_file:
                data = h5_file[list(h5_file.keys())[0]][:, -2]
                classes_train.append(data.tolist())
        # "Flattening" the list of lists
        classes_train = [item for sublist in classes_train for item in sublist]
        classes_train = np.asarray(classes_train)
        print('Total number of training samples: {}'.format(len(classes_train)))
        print('Training set distribution:')
        print_dist(classes_train)
        classes_valid = []
        for file in list_of_validation_files:
            with h5py.File(file, 'r') as h5_file:
                data = h5_file[list(h5_file.keys())[0]][:, -2]
                classes_valid.append(data.tolist())
        # "Flattening" the list of lists
        classes_valid = [item for sublist in classes_valid for item in sublist]
        classes_valid = np.asarray(classes_valid)
        print('Total number of validation samples: {}'.format(len(classes_valid)))
        print('Validation set distribution:')
        print_dist(classes_valid)
        model = FCNN_CNN1D()
        learning_rate = 6.561e-4
        optimizer = optim.Adam(model.parameters(), lr=learning_rate)
        scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            optimizer, factor=0.3, patience=2, min_lr=1e-8, verbose=1, mode='min')
        if (cuda_is_available):
            print('Using CUDA to speed up training.')
            model.cuda()
        train_dataset = TrainValidDataset(list_of_training_files)
        valid_dataset = TrainValidDataset(list_of_validation_files)
        train_loader = DataLoader(train_dataset, batch_size=1024, num_workers=8,
                                  pin_memory=True, shuffle=True)
        valid_loader = DataLoader(
            valid_dataset, batch_size=1024, num_workers=8, pin_memory=True)
        best_valid_loss = float('+inf')
        for epoch in range(100):
            # Forward args.balance (it was silently ignored on this path).
            train_dataloader(train_loader, model, optimizer, epoch,
                             balance=args.balance)
            valid_loss = validate_dataloader(valid_loader, model,
                                             balance=args.balance)
            scheduler.step(valid_loss)
            # Persist the best model so a 100-epoch run is not thrown away
            # (the original dataloader path never saved any checkpoint).
            if valid_loss < best_valid_loss:
                best_valid_loss = valid_loss
                torch.save(model.state_dict(),
                           args.save_path + 'cruise_model{}_epoch{}_valloss{:.6f}.pt'
                           .format(args.network_structure, epoch + 1, valid_loss))
# ========================================================================
| 0
|
apollo_public_repos/apollo/modules/tools/prediction/data_pipelines
|
apollo_public_repos/apollo/modules/tools/prediction/data_pipelines/proto/fnn_model.proto
|
// Serialized fully-connected (MLP) network: per-layer weights, biases and
// activation functions, plus input sample statistics.
syntax = "proto2";
// 1-D vector of doubles.
message Vector {
  repeated double columns = 1;
}
// 2-D matrix stored row-major as a list of Vectors.
message Matrix {
  repeated Vector rows = 1;
}
// One fully-connected layer: output = activation(input * W + b).
message Layer {
  enum ActivationFunc {
    RELU = 0;
    TANH = 1;
    SIGMOID = 2;
    SOFTMAX = 3;
  }
  optional int32 layer_input_dim = 1;
  optional int32 layer_output_dim = 2;
  optional Matrix layer_input_weight =
      3;  // weight matrix of |input_dim| x |output_dim|
  optional Vector layer_bias = 4;  // vector of bias, size of |output_dim|
  optional ActivationFunc layer_activation_func = 5;
}
// Whole model: layers are applied in order.
message FnnModel {
  optional int32 dim_input = 1;
  // samples_mean / samples_std: presumably per-feature statistics used to
  // normalize the input — confirm against the model loader.
  optional Vector samples_mean = 2;
  optional Vector samples_std = 3;
  optional int32 num_layer = 4;
  // NOTE(review): the original comment below looks off — num_layer
  // presumably equals the number of repeated `layer` entries; confirm.
  repeated Layer layer =
      5;  // num_layer must equal to first layer layer_input_dim
  optional int32 dim_output =
      6;  // dim_output must equal to last layer layer_output_dim
}
| 0
|
apollo_public_repos/apollo/modules/tools/prediction/data_pipelines
|
apollo_public_repos/apollo/modules/tools/prediction/data_pipelines/proto/BUILD
|
## Auto generated by `proto_build_generator.py`
# Bazel rules exposing fnn_model.proto and cruise_model.proto to both
# C++ (cc_proto_library) and Python (py_proto_library) consumers.
load("@rules_proto//proto:defs.bzl", "proto_library")
load("@rules_cc//cc:defs.bzl", "cc_proto_library")
load("//tools:python_rules.bzl", "py_proto_library")
package(default_visibility = ["//visibility:public"])
# --- fnn_model.proto targets ---
cc_proto_library(
    name = "fnn_model_cc_proto",
    deps = [
        ":fnn_model_proto",
    ],
)
proto_library(
    name = "fnn_model_proto",
    srcs = ["fnn_model.proto"],
)
py_proto_library(
    name = "fnn_model_py_pb2",
    deps = [
        ":fnn_model_proto",
    ],
)
# --- cruise_model.proto targets ---
cc_proto_library(
    name = "cruise_model_cc_proto",
    deps = [
        ":cruise_model_proto",
    ],
)
proto_library(
    name = "cruise_model_proto",
    srcs = ["cruise_model.proto"],
)
py_proto_library(
    name = "cruise_model_py_pb2",
    deps = [
        ":cruise_model_proto",
    ],
)
| 0
|
apollo_public_repos/apollo/modules/tools/prediction/data_pipelines
|
apollo_public_repos/apollo/modules/tools/prediction/data_pipelines/proto/cruise_model.proto
|
// Serialized parameters of the cruise (lane-follow / cut-in) prediction
// model: a 1D-CNN lane-feature extractor plus MLP heads.
syntax = "proto2";
// Helpers:
// N-dimensional tensor: flat data buffer plus its shape.
message TensorParameter {
  repeated float data = 1 [packed = true];
  repeated int32 shape = 2;
}
message InputParameter {
  repeated int32 input_shape = 1;
  optional string dtype = 2;  // data type of the input
  optional bool sparse = 3;
}
// Basic layers
message Conv1dParameter {
  repeated int32 shape = 1;
  optional bool use_bias = 2;
  optional TensorParameter kernel = 3;
  optional TensorParameter bias = 4;
  optional int32 stride = 5;
}
message DenseParameter {
  optional int32 units = 1;
  optional string activation = 2;
  optional bool use_bias = 3;
  optional TensorParameter weights = 4;
  optional TensorParameter bias = 5;
}
message ActivationParameter {
  optional string activation = 1;
}
message MaxPool1dParameter {
  optional int32 kernel_size = 1;
  optional int32 stride = 2;
}
message AvgPool1dParameter {
  optional int32 kernel_size = 1;
  optional int32 stride = 2;
}
// Intermediate building blocks:
// Stacked conv/activation layers for extracting lane-sequence features.
message LaneFeatureConvParameter {
  optional Conv1dParameter conv1d_0 = 1;
  optional ActivationParameter activation_1 = 2;
  optional Conv1dParameter conv1d_2 = 3;
  optional ActivationParameter activation_3 = 4;
  optional Conv1dParameter conv1d_4 = 5;
}
// Fully-connected stack for obstacle features.
message ObsFeatureFCParameter {
  optional DenseParameter linear_0 = 1;
  optional ActivationParameter activation_1 = 2;
  optional DenseParameter linear_3 = 3;
  optional ActivationParameter activation_4 = 4;
}
// Classification head.
message ClassifyParameter {
  optional DenseParameter linear_0 = 1;
  optional ActivationParameter activation_1 = 2;
  optional DenseParameter linear_3 = 3;
  optional ActivationParameter activation_4 = 4;
  optional DenseParameter linear_6 = 5;
  optional ActivationParameter activation_7 = 6;
  optional DenseParameter linear_9 = 7;
  optional ActivationParameter activation_10 = 8;
}
// Regression head (same layout as the classification head).
message RegressParameter {
  optional DenseParameter linear_0 = 1;
  optional ActivationParameter activation_1 = 2;
  optional DenseParameter linear_3 = 3;
  optional ActivationParameter activation_4 = 4;
  optional DenseParameter linear_6 = 5;
  optional ActivationParameter activation_7 = 6;
  optional DenseParameter linear_9 = 7;
  optional ActivationParameter activation_10 = 8;
}
// Final model
// next id = 8
message CruiseModelParameter {
  optional LaneFeatureConvParameter lane_feature_conv = 1;
  optional MaxPool1dParameter lane_feature_maxpool = 2;
  optional AvgPool1dParameter lane_feature_avgpool = 3;
  // NOTE(review): field number 4 is skipped — presumably a removed field;
  // confirm before reusing the number (consider `reserved 4;`).
  optional ObsFeatureFCParameter obs_feature_fc = 5;
  optional ClassifyParameter classify = 6;
  optional RegressParameter regress = 7;
}
| 0
|
apollo_public_repos/apollo/modules/tools/prediction/data_pipelines
|
apollo_public_repos/apollo/modules/tools/prediction/data_pipelines/common/bounding_rectangle.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from math import cos, sin

from modules.tools.prediction.data_pipelines.common.vector2d import Vector2
from modules.tools.prediction.data_pipelines.common.rotation2d import rotate_fast
from modules.tools.prediction.data_pipelines.common.util import segment_overlap
class BoundingRectangle:
    """Oriented 2-D rectangle supporting overlap tests via the
    separating-axis approach (project both rectangles onto each edge
    direction and check the 1-D ranges intersect).

    Fix: `cos`/`sin` were used without being imported anywhere in this
    module, so constructing a BoundingRectangle raised NameError; they
    are now imported from `math` at the top of the file.
    """

    def __init__(self, x, y, theta, length, width):
        """Build a rectangle centered at (x, y), heading `theta` (radians),
        with the given length (along heading) and width (perpendicular)."""
        self.vertices = [None] * 4
        dx = 0.5 * length
        dy = 0.5 * width
        cos_theta = cos(theta)
        sin_theta = sin(theta)
        # Corners in body frame, rotated into the world frame.
        self.vertices[0] = rotate_fast(Vector2(dx, -dy), cos_theta, sin_theta)
        self.vertices[1] = rotate_fast(Vector2(dx, dy), cos_theta, sin_theta)
        self.vertices[2] = rotate_fast(Vector2(-dx, dy), cos_theta, sin_theta)
        self.vertices[3] = rotate_fast(Vector2(-dx, -dy), cos_theta, sin_theta)
        # Translate to the world-frame center.
        for i in range(4):
            self.vertices[i].x += x
            self.vertices[i].y += y

    def overlap(self, rect):
        """Return True iff this rectangle and `rect` intersect.

        Projects both rectangles onto every edge of both; if any axis
        separates the projections, there is no overlap.
        """
        for i in range(4):
            v0 = self.vertices[i]
            v1 = self.vertices[(i + 1) % 4]
            range_self = self.project(v0, v1)
            range_other = rect.project(v0, v1)
            if segment_overlap(range_self[0], range_self[1], range_other[0], range_other[1]) == False:
                return False
        for i in range(4):
            v0 = rect.vertices[i]
            v1 = rect.vertices[(i + 1) % 4]
            range_self = self.project(v0, v1)
            range_other = rect.project(v0, v1)
            if segment_overlap(range_self[0], range_self[1], range_other[0], range_other[1]) == False:
                return False
        return True

    def project(self, p0, p1):
        """Project all four vertices onto the line through p0->p1 and
        return [min, max] of the (norm-scaled) projection parameters."""
        v = p1.subtract(p0)
        n = v.norm()
        rmin = float("inf")
        rmax = float("-inf")
        for i in range(4):
            t = self.vertices[i].subtract(p0)
            r = t.dot(v) / n
            if r < rmin:
                rmin = r
            if r > rmax:
                rmax = r
        return [rmin, rmax]

    def print_vertices(self):
        """Debug helper: print each vertex as 'x<TAB>y'."""
        for i in range(4):
            print(str(self.vertices[i].x) + "\t" +
                  str(self.vertices[i].y) + "\n")
| 0
|
apollo_public_repos/apollo/modules/tools/prediction/data_pipelines
|
apollo_public_repos/apollo/modules/tools/prediction/data_pipelines/common/configure.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# Shared configuration for the prediction data pipelines and the
# MLP-family models (vanilla MLP, cruise MLP, junction MLP).
parameters = {
    'config': {
        'need_to_label': True,
        'maximum_observation_time': 8.0  # seconds
    },
    # Vanilla lane-sequence MLP.
    'mlp': {
        'train_data_rate': 0.8,  # fraction of data used for training
        'size_obstacle_feature': 22,
        'size_lane_sequence_feature': 40,
        'dim_input': 22 + 40,  # obstacle + lane-sequence features
        'dim_hidden_1': 30,
        'dim_hidden_2': 15,
        'dim_output': 1
    },
    # Cruise (lane-follow / cut-in) model.
    'cruise_mlp': {
        'dim_input': 23 + 5 * 9 + 20 * 4 + 8,
        'dim_hidden_1': 50,
        'dim_hidden_2': 18,
        'dim_output': 1
    },
    # Junction exit-direction model (12 direction bins).
    'junction_mlp': {
        'dim_input': 3 + 60,
        'dim_hidden_1': 30,
        'dim_hidden_2': 15,
        'dim_output': 12
    },
    'feature': {
        'threshold_label_time_delta': 1.0,
        'prediction_label_timeframe': 3.0,
        'maximum_maneuver_finish_time': 3.0,
        # Lane change is defined to be finished if the ratio of deviation
        # from center-line to the lane width is within this: (must be < 0.5)
        'lane_change_finish_condition': 0.1,
        'maximum_observation_time': 6.0
    }
}
# Integer class labels used by the lane-sequence labeling code.
labels = {'go_false': 0, 'go_true': 1, 'cutin_false': -1, 'cutin_true': 2}
| 0
|
apollo_public_repos/apollo/modules/tools/prediction/data_pipelines
|
apollo_public_repos/apollo/modules/tools/prediction/data_pipelines/common/log.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import os
import logging
import logging.handlers
def init_log(
        log_path,
        level=logging.INFO,
        when="D",
        backup=7,
        format="%(levelname)s: %(asctime)s: %(filename)s:%(lineno)d * %(thread)d %(message)s",
        datefmt="%m-%d %H:%M:%S"):
    """Attach two timed-rotating file handlers to the root logger.

    Writes `<log_path>.log` at `level` and `<log_path>.log.wf` at
    WARNING and above.

    Args:
        log_path: path prefix for the log files (directories are created
            as needed).
        level: threshold for the main log file and the root logger.
        when: rotation interval unit (see TimedRotatingFileHandler).
        backup: number of rotated files to keep.
        format: log record format string. (Parameter name kept for
            backward compatibility even though it shadows the builtin.)
        datefmt: timestamp format.
    """
    formatter = logging.Formatter(format, datefmt)
    logger = logging.getLogger()
    logger.setLevel(level)
    log_dir = os.path.dirname(log_path)
    # Guard against a bare filename: os.makedirs('') raises. exist_ok
    # also removes the isdir/makedirs race of the original code.
    if log_dir:
        os.makedirs(log_dir, exist_ok=True)
    handler = logging.handlers.TimedRotatingFileHandler(
        log_path + ".log", when=when, backupCount=backup)
    handler.setLevel(level)
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    # Separate warning-and-above file (".wf" = warning/fatal).
    handler = logging.handlers.TimedRotatingFileHandler(
        log_path + ".log.wf", when=when, backupCount=backup)
    handler.setLevel(logging.WARNING)
    handler.setFormatter(formatter)
    logger.addHandler(handler)
| 0
|
apollo_public_repos/apollo/modules/tools/prediction/data_pipelines
|
apollo_public_repos/apollo/modules/tools/prediction/data_pipelines/common/util.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
def segment_overlap(a, b, x, y):
    """Return True iff the closed intervals [a, b] and [x, y] intersect."""
    disjoint = b < x or a > y
    return not disjoint
def vector_projection_overlap(p0, p1, p2, p3):
    """Project segment (p2, p3) onto the axis of segment (p0, p1) and
    report whether the projected range intersects [p0, p1].

    Points must provide subtract(), dot() and norm_square().
    """
    axis = p1.subtract(p0)
    axis_len_sq = axis.norm_square()
    t0 = p2.subtract(p0).dot(axis)
    t1 = p3.subtract(p0).dot(axis)
    lo, hi = (t0, t1) if t0 <= t1 else (t1, t0)
    # Inlined interval test (same as segment_overlap(lo, hi, 0, axis_len_sq)).
    return not (hi < 0.0 or lo > axis_len_sq)
| 0
|
apollo_public_repos/apollo/modules/tools/prediction/data_pipelines
|
apollo_public_repos/apollo/modules/tools/prediction/data_pipelines/common/data_preprocess.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import os
import h5py
import numpy as np
from random import choice
from random import randint
from random import shuffle
from time import time
def load_h5(filename):
    """Load the 'data' dataset from an HDF5 file into a numpy array.

    Exits the process (os._exit(1)) on a missing file or a non-.h5
    extension — the original CLI behavior is kept deliberately.
    """
    # Local import: this module never imported logging at top level, so the
    # error branches below previously raised NameError instead of logging.
    import logging
    if not os.path.exists(filename):
        logging.error("file: {}, does not exist".format(filename))
        os._exit(1)
    if os.path.splitext(filename)[1] != '.h5':
        logging.error("file: {} is not an hdf5 file".format(filename))
        os._exit(1)
    # Context manager closes the handle; the original leaked the open file.
    with h5py.File(filename, 'r') as h5_file:
        samples = {key: h5_file[key][:] for key in h5_file.keys()}
    return samples['data']
def down_sample(data):
    """Randomly drop rows per label category to rebalance the dataset.

    The label is read from the last column. A row with label L is kept
    when a uniform draw exceeds the drop rate configured for L; rows with
    other labels are always dropped.
    """
    # label -> drop rate (probability of discarding a row of that label).
    drop_rate_by_label = {
        -1: 0.5,   # cutin_false
        0: 0.8,    # go_false
        1: 0.9,    # go_true
        2: 0.0,    # cutin_true: effectively always kept
    }
    label = data[:, -1]
    size = np.shape(label)[0]
    # Single draw per row, identical to the original sampling scheme.
    rand = np.random.random((size))
    keep = np.zeros(size, dtype=bool)
    for label_value, drop_rate in drop_rate_by_label.items():
        keep = np.logical_or(
            keep, np.logical_and(label == label_value, rand > drop_rate))
    return data[keep, :]
def train_test_split(data, train_rate):
    """Split rows into a leading train part and trailing test part.

    Returns (train, test) where train holds the first
    int(num_rows * train_rate) rows and test holds the remainder.
    """
    cut = int(np.shape(data)[0] * train_rate)
    return data[:cut, ], data[cut:, ]
| 0
|
apollo_public_repos/apollo/modules/tools/prediction/data_pipelines
|
apollo_public_repos/apollo/modules/tools/prediction/data_pipelines/common/rotation2d.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from math import cos, sin
from modules.tools.prediction.data_pipelines.common.vector2d import Vector2
def rotate(v, theta):
    """Rotate vector `v` counter-clockwise by `theta` radians."""
    return rotate_fast(v, cos(theta), sin(theta))
def rotate_fast(v, cos_theta, sin_theta):
    """Rotate `v` using precomputed cosine/sine of the rotation angle.

    Standard 2-D rotation: [x'; y'] = [[c, -s], [s, c]] [x; y].
    """
    rotated_x = cos_theta * v.x - sin_theta * v.y
    rotated_y = sin_theta * v.x + cos_theta * v.y
    return Vector2(rotated_x, rotated_y)
| 0
|
apollo_public_repos/apollo/modules/tools/prediction/data_pipelines
|
apollo_public_repos/apollo/modules/tools/prediction/data_pipelines/common/trajectory.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import abc
import logging
import math
import numpy as np
from modules.tools.prediction.data_pipelines.common.bounding_rectangle import BoundingRectangle
from modules.tools.prediction.data_pipelines.common.configure import parameters
param_fea = parameters['feature']
class TrajectoryToSample(object, metaclass=abc.ABCMeta):
def __call__(self, trajectory):
self.clean(trajectory)
self.label(trajectory)
data = self.pack(trajectory)
return data
@staticmethod
def clean(trajectory):
'''
Clean up the feature points when lane_id changing abruptly,
meaning that if the lane_id of current timestamp is different
from that of the previous one and that of the next one, remove
this contaminated data.
'''
results = []
traj_len = len(trajectory)
for i in range(traj_len - 2, 0, -1):
if not trajectory[i].HasField('lane') or \
not trajectory[i].lane.HasField('lane_feature'):
continue
lane_seq_sz = len(trajectory[i].lane.lane_graph.lane_sequence)
if lane_seq_sz == 0:
continue
elif lane_seq_sz > 10:
print("Too many lane sequences:", lane_seq_sz)
fea_prev = trajectory[i - 1]
fea_curr = trajectory[i]
fea_post = trajectory[i + 1]
if fea_prev.HasField('lane') and \
fea_prev.lane.HasField('lane_feature'):
lane_id_prev = fea_prev.lane.lane_feature.lane_id
else:
continue
if fea_curr.HasField('lane') and \
fea_curr.lane.HasField('lane_feature'):
lane_id_curr = fea_curr.lane.lane_feature.lane_id
else:
continue
if fea_post.HasField('lane') and \
fea_post.lane.HasField('lane_feature'):
lane_id_post = fea_post.lane.lane_feature.lane_id
else:
continue
if lane_id_curr == lane_id_prev or lane_id_curr == lane_id_post:
results.append(trajectory[i])
results.reverse()
return results
@staticmethod
def cmp_lane_seq(real_seq, predict_seq):
'''
-1: False Cutin
0: False Go
1: True Go
2: True Cutin
'''
if real_seq[0] == predict_seq[0]:
for i in range(1, len(real_seq)):
if len(real_seq) > len(predict_seq):
return 0
if real_seq[i] != predict_seq[i]:
return 0
return 1
else:
if len(real_seq) == 1:
return -1
for i in range(1, len(real_seq)):
if len(real_seq) - 1 > len(predict_seq):
return -1
if real_seq[i] != predict_seq[i - 1]:
return -1
return 2
def is_successor_lane(self, feature, lane_id):
'''
return True if lane_id is the successor lane of feature
'''
if feature.HasField('lane') and \
feature.lane.HasField('lane_graph') and \
len(feature.lane.lane_graph.lane_sequence) > 0:
for lane_seq in feature.lane.lane_graph.lane_sequence:
seq_lane_ids = []
for lan_seq in lane_seq.lane_segment:
seq_lane_ids.append(lane_seg.lane_id)
if feature.lane.lane_feature.lane_id in seq_lane_ids and \
lane_id in seq_lane_ids:
return True
return False
else:
return True
@classmethod
def label(cls, trajectory):
'''
label feature trajectory according to real future lane sequence in 3s
'''
traj_len = len(trajectory)
for i, fea in enumerate(trajectory):
if not fea.HasField('lane') or \
not fea.lane.HasField('lane_feature'):
print("No lane feature, cancel labeling")
continue
future_lane_ids = []
for j in range(i, traj_len):
time_span = trajectory[j].timestamp - fea.timestamp
if time_span > param_fea['prediction_label_timeframe']:
break
if not trajectory[j].HasField('lane') or \
not trajectory[j].lane.HasField('lane_feature'):
continue
lane_id_j = trajectory[j].lane.lane_feature.lane_id
trajectory[i].label_update_time_delta = time_span
if lane_id_j in future_lane_ids:
continue
else:
future_lane_ids.append(lane_id_j)
if len(future_lane_ids) < 1:
print("No lane id")
continue
seq_size = len(fea.lane.lane_graph.lane_sequence)
for j in range(seq_size):
seq = fea.lane.lane_graph.lane_sequence[j]
if len(seq.lane_segment) == 0:
continue
predict_lane_ids = []
for k in range(len(seq.lane_segment)):
if seq.lane_segment[k].HasField('lane_id'):
predict_lane_ids.append(seq.lane_segment[k].lane_id)
seq.label = cls.cmp_lane_seq(future_lane_ids, predict_lane_ids)
return trajectory
@classmethod
def label_cruise(cls, feature_sequence):
'''
Label feature trajectory according to real future lane sequence
in 6sec
'''
feature_seq_len = len(feature_sequence)
for i, fea in enumerate(feature_sequence):
# Sanity check.
if not fea.HasField('lane') or \
not fea.lane.HasField('lane_feature'):
print("No lane feature, cancel labeling")
continue
# Find the lane_sequence at which the obstacle is located,
# and put all the lane_segment ids into a set.
curr_lane_seq = set()
for lane_sequence in fea.lane.lane_graph.lane_sequence:
if lane_sequence.vehicle_on_lane:
for lane_segment in lane_sequence.lane_segment:
curr_lane_seq.add(lane_segment.lane_id)
if len(curr_lane_seq) == 0:
print("Obstacle is not on any lane.")
continue
new_lane_id = None
has_started_lane_change = False
has_finished_lane_change = False
lane_change_start_time = None
lane_change_finish_time = None
is_jittering = False
# This goes through all the subsequent features in this sequence
# of features (this is the GROUND_TRUTH!)
obs_actual_lane_ids = []
for j in range(i, feature_seq_len):
# If timespan exceeds max. maneuver finish time, then break.
time_span = feature_sequence[j].timestamp - fea.timestamp
if time_span > param_fea['maximum_maneuver_finish_time']:
break
# Sanity check.
if not feature_sequence[j].HasField('lane') or \
not feature_sequence[j].lane.HasField('lane_feature'):
continue
fea.label_update_time_delta = time_span
# If step into another lane, label lane change to be started.
lane_id_j = feature_sequence[j].lane.lane_feature.lane_id
if lane_id_j not in obs_actual_lane_ids:
obs_actual_lane_ids.append(lane_id_j)
if lane_id_j not in curr_lane_seq:
# If it's the first time, log new_lane_id
if has_started_lane_change == False:
has_started_lane_change = True
lane_change_start_time = time_span
new_lane_id = lane_id_j
else:
# If it stepped into other lanes and now comes back, it's jittering!
if has_started_lane_change:
is_jittering = True
# This is to let such data not be eliminated by label_file function
fea.label_update_time_delta = param_fea['maximum_maneuver_finish_time']
break
# If roughly get to the center of another lane, label lane change to be finished.
left_bound = feature_sequence[j].lane.lane_feature.dist_to_left_boundary
right_bound = feature_sequence[j].lane.lane_feature.dist_to_right_boundary
if left_bound / (left_bound + right_bound) > (0.5 - param_fea['lane_change_finish_condition']) and \
left_bound / (left_bound + right_bound) < (0.5 + param_fea['lane_change_finish_condition']):
if has_started_lane_change:
has_finished_lane_change = True
lane_change_finish_time = time_span
# new_lane_id = lane_id_j
# This is to let such data not be eliminated by label_file function
fea.label_update_time_delta = param_fea['maximum_maneuver_finish_time']
break
else:
# This means that the obstacle moves back to the center
# of the original lane for the first time.
if lane_change_finish_time is None:
lane_change_finish_time = time_span
if len(obs_actual_lane_ids) < 1:
print("No lane id")
continue
'''
For every lane_sequence in the lane_graph,
assign a label and a finish_time.
-10: Lane jittering
-1: False Cut-in
1: True Cut-in but not to lane_center
3: True Cut-in and reached lane_center
0: Fales Go
2: True Go but not to lane_center
4: True Go and reached lane_center
'''
# This is to label each saved lane_sequence.
for lane_sequence in fea.lane.lane_graph.lane_sequence:
# Sanity check.
if len(lane_sequence.lane_segment) == 0:
continue
if is_jittering:
lane_sequence.label = -10
lane_sequence.time_to_lane_center = -1.0
lane_sequence.time_to_lane_edge = -1.0
continue
# The current lane is obstacle's original lane.
if lane_sequence.vehicle_on_lane:
# Obs is following ONE OF its original lanes:
if not has_started_lane_change:
# Record this lane_sequence's lane_ids
current_lane_ids = []
for k in range(len(lane_sequence.lane_segment)):
if lane_sequence.lane_segment[k].HasField('lane_id'):
current_lane_ids.append(
lane_sequence.lane_segment[k].lane_id)
is_following_this_lane = True
for l_id in range(1, min(len(current_lane_ids), len(obs_actual_lane_ids))):
if current_lane_ids[l_id] != obs_actual_lane_ids[l_id]:
is_following_this_lane = False
break
# Obs is following this original lane:
if is_following_this_lane:
# Obstacle is following this original lane and moved to lane-center
if lane_change_finish_time is not None:
lane_sequence.label = 4
lane_sequence.time_to_lane_edge = -1.0
lane_sequence.time_to_lane_center = lane_change_finish_time
# Obstacle is following this original lane but is never at lane-center:
else:
lane_sequence.label = 2
lane_sequence.time_to_lane_edge = -1.0
lane_sequence.time_to_lane_center = -1.0
# Obs is following another original lane:
else:
lane_sequence.label = 0
lane_sequence.time_to_lane_edge = -1.0
lane_sequence.time_to_lane_center = -1.0
# Obs has stepped out of this lane within 6sec.
else:
lane_sequence.label = 0
lane_sequence.time_to_lane_edge = -1.0
lane_sequence.time_to_lane_center = -1.0
# The current lane is NOT obstacle's original lane.
else:
# Obstacle is following the original lane.
if not has_started_lane_change:
lane_sequence.label = -1
lane_sequence.time_to_lane_edge = -1.0
lane_sequence.time_to_lane_center = -1.0
else:
new_lane_id_is_in_this_lane_seq = False
for lane_segment in lane_sequence.lane_segment:
if lane_segment.lane_id == new_lane_id:
new_lane_id_is_in_this_lane_seq = True
break
# Obstacle has changed to this lane.
if new_lane_id_is_in_this_lane_seq:
# Obstacle has finished lane changing within 6 sec.
if has_finished_lane_change:
lane_sequence.label = 3
lane_sequence.time_to_lane_edge = lane_change_start_time
lane_sequence.time_to_lane_center = lane_change_finish_time
# Obstacle started lane changing but haven't finished yet.
else:
lane_sequence.label = 1
lane_sequence.time_to_lane_edge = lane_change_start_time
lane_sequence.time_to_lane_center = -1.0
# Obstacle has changed to some other lane.
else:
lane_sequence.label = -1
lane_sequence.time_to_lane_edge = -1.0
lane_sequence.time_to_lane_center = -1.0
return feature_sequence
    @classmethod
    def label_junction(cls, trajectory):
        '''
        Label each frame of an obstacle's feature trajectory with the
        junction exit it actually drove through.

        For every frame that carries a junction_feature, the future frames
        are scanned; when the obstacle's bounding box overlaps one of the
        junction exits, the direction from the current position to that
        exit (relative to current heading) is discretized into one of 12
        angular bins and written as a 12-element one-hot
        junction_mlp_label on the frame.

        Args:
            trajectory: iterable of Feature protos, ordered by time.

        Returns:
            The same trajectory, with junction_mlp_label filled in on the
            frames that could be labeled.

        NOTE(review): the original docstring said "in 7s" but the scan
        below covers up to 100 frames (~10 s at 10 Hz) — confirm intended
        horizon.
        '''
        traj_len = len(trajectory)
        for i, fea in enumerate(trajectory):
            # Sanity check: skip frames with no junction info to label against.
            if not fea.HasField('junction_feature') or \
                    not len(fea.junction_feature.junction_exit) or \
                    not len(fea.junction_feature.junction_mlp_feature):
                # print("No junction_feature, junction_exit, or junction_mlp_feature, not labeling this frame.")
                continue
            curr_pos = np.array([fea.position.x, fea.position.y])
            # Only keep speed > 1
            # TODO(all) consider recovery
            # if fea.speed <= 1:
            #     continue
            # Heading derived from raw velocity, not from the pose field.
            heading = math.atan2(fea.raw_velocity.y, fea.raw_velocity.x)
            # Construct dictionary of all exit with dict[exit_lane_id] = np.array(exit_position)
            # exit_dict holds a thin bounding rectangle (0.01 deep) across each
            # exit; exit_pos_dict holds the exit center point for angle math.
            exit_dict = dict()
            exit_pos_dict = dict()
            for junction_exit in fea.junction_feature.junction_exit:
                if junction_exit.HasField('exit_lane_id'):
                    exit_dict[junction_exit.exit_lane_id] = \
                        BoundingRectangle(junction_exit.exit_position.x,
                                          junction_exit.exit_position.y,
                                          junction_exit.exit_heading,
                                          0.01,
                                          junction_exit.exit_width)
                    exit_pos_dict[junction_exit.exit_lane_id] = np.array(
                        [junction_exit.exit_position.x, junction_exit.exit_position.y])
            # Searching for up to 100 frames (10 seconds)
            for j in range(i, min(i + 100, traj_len)):
                # Bounding rectangle of the obstacle at future frame j,
                # oriented along its raw-velocity heading.
                car_bounding = BoundingRectangle(trajectory[j].position.x,
                                                 trajectory[j].position.y,
                                                 math.atan2(trajectory[j].raw_velocity.y,
                                                            trajectory[j].raw_velocity.x),
                                                 trajectory[j].length,
                                                 trajectory[j].width)
                for key, value in exit_dict.items():
                    if car_bounding.overlap(value):
                        # Found the exit the obstacle passes through:
                        # bin the bearing (relative to current heading)
                        # into 12 sectors of 30 degrees each.
                        exit_pos = exit_pos_dict[key]
                        delta_pos = exit_pos - curr_pos
                        angle = math.atan2(
                            delta_pos[1], delta_pos[0]) - heading
                        # Float % 12 keeps the index in [0, 12) even for
                        # negative angles.
                        d_idx = int((angle / (2.0 * np.pi)) * 12 % 12)
                        label = [0 for idx in range(12)]
                        label[d_idx] = 1
                        fea.junction_feature.junction_mlp_label.extend(label)
                        break  # actually break two level
                else:
                    # Inner loop found no overlap: keep scanning future frames.
                    continue
                # Inner loop broke (exit found): stop scanning for this frame.
                break
        return trajectory
@abc.abstractmethod
def pack(self, trajectory):
""" abstractmethod"""
raise NotImplementedError
| 0
|
apollo_public_repos/apollo/modules/tools/prediction/data_pipelines
|
apollo_public_repos/apollo/modules/tools/prediction/data_pipelines/common/feature_io.py
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import copy
import logging
import sys
from google.protobuf.internal import decoder
from google.protobuf.internal import encoder
import google.protobuf.text_format as text_format
from modules.common_msgs.prediction_msgs import feature_pb2
from modules.prediction.proto import offline_features_pb2
def readVarint32(stream):
    """
    Read the raw bytes of one base-128 varint from a binary stream.

    Args:
        stream: a file-like object opened in binary mode ('rb').

    Returns:
        A list of single-byte ``bytes`` objects making up the varint
        (every byte but the last has the continuation bit set). Returns
        an empty list when the stream is already at EOF.
    """
    mask = 0x80  # continuation bit: (1 << 7)
    raw_varint32 = []
    while True:
        b = stream.read(1)
        # BUGFIX: a binary stream returns b"" at EOF, so the old
        # `b == ""` (str comparison) was never true in Python 3 and
        # `ord(b)` then raised TypeError on the empty read.
        if not b:
            break
        raw_varint32.append(b)
        if not (ord(b) & mask):
            break
    return raw_varint32
def load_protobuf(filename):
    """
    Read offline features serialized as a single protobuf binary file.

    Args:
        filename: path to a file holding one serialized
            offline_features_pb2.Features message.

    Returns:
        The repeated ``feature`` field of the parsed message.
    """
    with open(filename, 'rb') as file_in:
        raw_bytes = file_in.read()
    offline_features = offline_features_pb2.Features()
    offline_features.ParseFromString(raw_bytes)
    return offline_features.feature
def load_label_feature(filename):
    """
    Read a sequence of length-delimited Feature protos from a binary file.

    Each record is a varint32 byte-length prefix followed by that many
    bytes of a serialized feature_pb2.Feature (the format written by
    save_protobuf).

    Args:
        filename: path to the delimited protobuf file.

    Returns:
        list of parsed feature_pb2.Feature messages (possibly truncated
        if the file ends mid-record).
    """
    features = []
    with open(filename, 'rb') as f:
        size = readVarint32(f)
        while size:
            # BUGFIX: readVarint32 returns a *list* of single-byte objects.
            # The pure-Python _DecodeVarint32 indexes its buffer and masks
            # each element with `& 0x7f`, which requires ints (i.e. a bytes
            # buffer) in Python 3 — join the pieces into one bytes object.
            read_bytes, _ = decoder._DecodeVarint32(b''.join(size), 0)
            data = f.read(read_bytes)
            if len(data) < read_bytes:
                # Truncated record: stop and return what was parsed so far.
                print("Failed to load protobuf")
                break
            fea = feature_pb2.Feature()
            fea.ParseFromString(data)
            features.append(fea)
            size = readVarint32(f)
    return features
def save_protobuf(filename, feature_trajectories):
    """
    Write trajectories of features to a length-delimited protobuf file.

    Each feature is serialized and prefixed with a varint32 of its byte
    length, the format read back by load_label_feature.

    Args:
        filename: output path (overwritten).
        feature_trajectories: iterable of feature lists, one per obstacle.
    """
    with open(filename, 'wb') as f:
        for features in feature_trajectories:
            for fea in features:
                payload = fea.SerializeToString()
                size_prefix = encoder._VarintBytes(len(payload))
                f.write(size_prefix + payload)
def build_trajectory(features):
    """
    Group features by obstacle id and build time-ordered trajectories.

    Obstacles with fewer than 2 frames are dropped; the remaining lists
    are sorted in place by timestamp.

    Args:
        features: iterable of feature objects exposing ``id`` and
            ``timestamp`` attributes.

    Returns:
        dict mapping obstacle id -> list of its features sorted by
        timestamp (each list has length >= 2).
    """
    fea_dict = dict()
    for fea in features:
        # setdefault avoids the separate membership check per feature.
        fea_dict.setdefault(fea.id, []).append(fea)
    # BUGFIX: iterate over a snapshot of the keys — deleting entries while
    # iterating the dict directly raises RuntimeError in Python 3.
    for k in list(fea_dict.keys()):
        if len(fea_dict[k]) < 2:
            del fea_dict[k]
            continue
        fea_dict[k].sort(key=lambda x: x.timestamp)
    return fea_dict
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.