# Example script: stereo pair + LiDAR loading (see format notes below)
# This example loads a stereo pair together with LiDAR points associated with the stereo pair at that time instance
# The LiDAR data is returned in the following form
# 'lidar_points' is a list of 'lidar_point' dictionaries
# where 'lidar_point' is a dictionary containing:
# 'backscatter': the backscatter profile scanned by the LiDAR
# 'azi': the azimuthal angle of the scanned ray
# 'elev': the elevation angle of the scanned ray
# In addition, if a cloud is detected from the backscatter, then 'lidar_point' also contains:
# 'lidar_depth': the depth from the LiDAR to the cloud
# 'right_cam_xy': the xy coordinates of the cloud in the right image
# 'right_cam_depth': the depth of the cloud to the right camera
from PIL import Image
import numpy as np
import cv2
import json
import glob
from collections import defaultdict
def main():
    """Load one stereo pair and the LiDAR points associated with that frame.

    Returns:
        (left_image, right_image, lidar_points) where the images are HxWx3
        uint8 RGB arrays resized to the calibrated (w, h), and lidar_points
        is the per-frame list of dicts described at the top of this file.
    """
    # Choose input images to load
    date = '2021-10-22'
    hour = 12
    frame_idx = 50
    # Load calibration metadata
    with open('calib.json', 'r') as fp:
        meta = json.load(fp)
    h, w = meta['h'], meta['w']
    right_intrinsic = np.array(meta['right_intrinsic'])
    lidar_to_right = np.array(meta['lidar_to_right_cam'])
    # Unused metadata in this example
    # right_cam2world = np.array(meta['right_cam2world'])
    # left_intrinsic = np.array(meta['left_intrinsic'])
    # left_cam2world = np.array(meta['left_cam2world'])
    # left_to_right = np.array(meta['left_to_right_pose'])
    # Load LiDAR data associated with this hour of video
    lidar_data = load_lidar_data(date, hour, right_intrinsic, lidar_to_right)
    # ---------- Load Data --------- #
    left_video = cv2.VideoCapture(f'left_images/{date}/{date}_{hour:0>2}.mp4')
    right_video = cv2.VideoCapture(f'right_images/{date}/{date}_{hour:0>2}.mp4')
    # Skip ahead to the requested frame by discarding earlier frames
    for _ in range(frame_idx):
        left_video.read()
        right_video.read()
    _, left_image = left_video.read()
    _, right_image = right_video.read()
    # BGR to RGB
    left_image = cv2.cvtColor(left_image, cv2.COLOR_BGR2RGB).astype('uint8')
    right_image = cv2.cvtColor(right_image, cv2.COLOR_BGR2RGB).astype('uint8')
    # Ensure images match the expected size.
    # BUG FIX: the original resized left_image into right_image, so both
    # returned images were the left view.
    left_image = cv2.resize(left_image, (w, h))
    right_image = cv2.resize(right_image, (w, h))
    # See top of file for the format of 'lidar_points'
    lidar_points = lidar_data[frame_idx]
    return left_image, right_image, lidar_points
################ Define some helper functions ################
def load_lidar_data(date, hour, right_intrinsic, lidar_to_right):
    """Load all LiDAR rays for one hour and bucket them by video frame index.

    Args:
        date: date string, e.g. '2021-10-22'.
        hour: integer hour of day; selects the .hpl files and anchors the
            decimal-hour camera timestamps.
        right_intrinsic: 3x3 intrinsic matrix of the right camera.
        lidar_to_right: LiDAR-to-right-camera extrinsic matrix.

    Returns:
        defaultdict mapping frame_idx -> list of 'lidar_point' dicts
        (see the format description at the top of this file).
    """
    decimal_time = []
    azi = []
    elev = []
    distance = []
    backscatter = []
    for lidar_file in glob.glob(f'lidar/{date}/{date}_{hour:0>2}*.hpl'):
        ld = LidarData.fromfile(lidar_file)
        data_locs = ld.data_locs
        # extend() with array rows replaces the append-then-flatten dance
        decimal_time.extend(data_locs[:, 0])
        azi.extend(data_locs[:, 1])
        elev.extend(data_locs[:, 2])
        distance.extend(ld.getDistance())
        backscatter.extend(ld.getBackscatter())
    decimal_time = np.array(decimal_time, ndmin=1)
    azi = np.array(azi, ndmin=1)
    elev = np.array(elev, ndmin=1)
    distance = np.array(distance, ndmin=1)
    backscatter = np.array(backscatter, ndmin=1)
    # Go through all frames now
    camera_time = hour + 10 / 3600  # First frame starts 10 seconds into the hour
    lidar_output = defaultdict(list)
    for frame_idx in range(717):  # 717 frames per video
        # Associate each LiDAR ray with the closest frame (1 frame per 5 seconds).
        # BUG FIX: the original recovered i via list(decimal_time).index(time),
        # which rebuilt and scanned a list on every hit (O(n^2) overall) and
        # always returned the *first* ray carrying a duplicated timestamp;
        # enumerate is linear and keeps each ray paired with its own data.
        for i, time in enumerate(decimal_time):
            if np.abs(time - camera_time) < 2.5 / 3600:
                # Check if there is a cloud in this ray's backscatter profile
                _, cloud_depth = findCloud_in_backscatter(backscatter[i], int(500 / 3))
                point = {'azi': azi[i], 'elev': elev[i], 'backscatter': backscatter[i]}
                if cloud_depth is not None:
                    # Project the detected cloud onto the right camera
                    lidar_right_xy, lidar_right_depth = project_lidar_to_right(
                        lidar_to_right, right_intrinsic, azi[i], elev[i], cloud_depth)
                    point['lidar_depth'] = cloud_depth
                    point['right_cam_xy'] = lidar_right_xy
                    point['right_cam_depth'] = lidar_right_depth
                lidar_output[frame_idx].append(point)
        camera_time += 5 / 3600
    return lidar_output
class LidarData:
    """One parsed LiDAR scan file (looks like a Halo Photonics .hpl file —
    TODO confirm against the instrument documentation).

    Attributes:
        data: array of shape (n_rays, num_gates, n_cols); per-gate columns
            are (range gate, doppler, intensity/backscatter, beta).
        data_locs: one row per ray, starting with
            (decimal time in hours, azimuth deg, elevation deg).
    """

    def __init__(self,
                 fname=None,
                 system_id=None,
                 num_gates=0,
                 gate_length=0,
                 gate_pulse_length=0,
                 pulses_per_ray=0,
                 start_time=None,
                 data=None,
                 data_locs=None):
        self.fname = fname
        self.system_id = system_id
        self.num_gates = num_gates
        self.gate_length = gate_length
        self.gate_pulse_length = gate_pulse_length
        self.pulses_per_ray = pulses_per_ray
        self.start_time = start_time
        self.data = data
        self.data_locs = data_locs

    @classmethod
    def fromfile(cls, filename):
        """Parse a .hpl file: a 17-line 'Key: value' header followed by
        repeated records of one ray line then num_gates gate lines."""
        with open(filename) as f:
            header = [f.readline().split(':', maxsplit=1) for i in range(17)]
            fname = header[0][1].strip()
            system_id = header[1][1].strip()
            num_gates = int(header[2][1].strip())
            gate_length = header[3][1].strip()
            gate_pulse_length = header[4][1].strip()
            pulses_per_ray = header[5][1].strip()
            start_time = header[9][1].strip()
            data_locs_format = header[13][0].split(' ')[0].strip()
            data_format = header[15][0].split(' ')[0].strip()
            data = []
            data_locs = []
            while True:
                try:
                    data_locs_in = np.array(f.readline().split()).astype('float')
                    if len(data_locs_in) == 0:
                        break  # EOF: readline() returned an empty string
                    data_locs.append(data_locs_in)
                    data.append(np.array(
                        [f.readline().split() for i in range(num_gates)]).astype('float'))
                except (ValueError, IndexError):
                    # BUG FIX: was a bare 'except:' which also swallowed
                    # KeyboardInterrupt/SystemExit. Only parse failures on a
                    # truncated/malformed trailing record should stop the loop;
                    # everything read so far is kept.
                    break
            data = np.array(data)
            data_locs = np.array(data_locs)
        return cls(
            fname=fname,
            system_id=system_id,
            num_gates=num_gates,
            gate_length=gate_length,
            gate_pulse_length=gate_pulse_length,
            pulses_per_ray=pulses_per_ray,
            start_time=start_time,
            data=data,
            data_locs=data_locs)

    # Starting all these slices at gate 20 skips the peak at zero distance.
    def getDistance(self):
        """Range per gate in metres (gate index * 3 m per gate)."""
        return self.data[:, 20:, 0] * 3

    def getDoppler(self):
        """Doppler column for gates 20 onward."""
        return self.data[:, 20:, 1]

    def getBackscatter(self):
        """Intensity/backscatter column for gates 20 onward."""
        return self.data[:, 20:, 2]

    def getBeta(self):
        """Beta (attenuated backscatter coefficient) column for gates 20 onward."""
        return self.data[:, 20:, 3]
def project_lidar_to_right(lidar_to_right_cam, right_intrinsic, azi, elev, depth):
    """Project a LiDAR return (azimuth, elevation, depth) into the right camera.

    Args:
        lidar_to_right_cam: LiDAR-to-right-camera extrinsic, applied to a
            homogeneous point (presumably 3x4 — confirm against calib.json).
        right_intrinsic: 3x3 camera intrinsic matrix.
        azi, elev: scanned ray angles in degrees.
        depth: range along the ray, in metres.

    Returns:
        (xy pixel coordinates in the right image, Euclidean distance of the
        point from the right camera).
    """
    # Convert the LiDAR azimuth convention into the math convention used below
    azi_rad = np.deg2rad(-azi + 270)
    elev_rad = np.deg2rad(elev)
    horiz = depth * np.cos(elev_rad)  # horizontal component of the range
    lidar_xyz = np.stack((horiz * np.cos(azi_rad),
                          -depth * np.sin(elev_rad),
                          horiz * np.sin(azi_rad)), axis=0)
    # Homogenize, then move into the right camera's coordinate system
    ones_shape = [1] + list(lidar_xyz.shape)[1:]
    cam_xyz = lidar_to_right_cam @ np.concatenate(
        (lidar_xyz, np.ones(ones_shape)), axis=0)
    # Note: you can also use left_to_right_pose to project onto the left camera.
    # Perspective projection onto the right image plane
    projected_lidar = right_intrinsic @ (cam_xyz[:3] / cam_xyz[2])
    # Euclidean distance from the camera to the point
    right_cam_depth = np.sqrt(np.sum(cam_xyz ** 2, axis=0))
    return projected_lidar[:2], right_cam_depth
def findCloud_in_backscatter(backscatter, dist_thresh=300):
    """Search a backscatter profile for a cloud return.

    Args:
        backscatter: 1-D backscatter profile, one value per range gate.
        dist_thresh: first gate index to consider; nearer gates are skipped.

    Returns:
        (cloud gate index, cloud distance in metres) when a cloud is found,
        otherwise (None, None).
    """
    # A very strong return anywhere in the profile is a solid obstacle
    # (building reflection), not a cloud
    if np.max(backscatter) > 10:
        return (None, None)
    far_field = backscatter[dist_thresh:]
    peak = np.argmax(far_field) + dist_thresh
    # A cloud must stand out clearly above the background (median) level
    if backscatter[peak] - np.median(far_field) <= 0.03:
        # No cloud found
        return (None, None)
    # Found cloud: +20 compensates for the 20 gates LidarData skips when
    # slicing; *3 converts gate index to metres (3 m per gate)
    return peak, (peak + 20) * 3
##############################################################
# Script entry point: load one stereo pair and its associated LiDAR points
if __name__ == "__main__":
    left_image, right_image, lidar_points = main()