# NOTE: the lines below are residue from the HuggingFace dataset page this
# file was copied from; kept as comments so the module remains valid Python.
# Datasets:
# DOI:
# License:
# kanayamaHideaki — "Add semantics, instances, layout_eval, preprocessing
# and modifying README.md." (commit 324d4da)
# -*- coding: utf-8 -*-
import glob
import math
import os

import cv2
import numpy as np

from utils_from_Depth2HHA_python.rgbd_util import *
from utils_from_Depth2HHA_python.getCameraParam import *
def config_setup():
    """Build the run configuration.

    Returns:
        dict: configuration holding the scene directory. ``"<scene>/"`` is a
        placeholder the user replaces with the actual scene folder path
        (must end with a slash, since paths are built by concatenation).
    """
    config = {}
    config["home_param"] = "<scene>/"
    # BUG FIX: main() reads config["scene_path"], but only "home_param" was
    # defined, which raised KeyError. Provide both keys (same value) so
    # existing readers of either key keep working.
    config["scene_path"] = config["home_param"]
    return config
def getImage(root='demo'):
    """Load the demo depth image pair and convert it to meters.

    Args:
        root: directory containing ``0.png`` (inpainted depth) and
            ``0_raw.png`` (raw depth), both stored as depth * 10000.

    Returns:
        tuple: ``(D, RD)`` — depth and raw depth arrays in meters.

    Note:
        The original code passed ``cv2.COLOR_BGR2GRAY`` (value 6) as the
        ``imread`` *flag*, which is a color-conversion code, not a read mode.
        It only worked because 6 == ``IMREAD_ANYDEPTH | IMREAD_ANYCOLOR``,
        which preserves the 16-bit depth values. The original author noted
        this combination is required to match MATLAB's gray values; we keep
        the exact same numeric flag, spelled with its real meaning.
    """
    flags = cv2.IMREAD_ANYDEPTH | cv2.IMREAD_ANYCOLOR  # == 6, same value as before
    D = cv2.imread(os.path.join(root, '0.png'), flags) / 10000
    RD = cv2.imread(os.path.join(root, '0_raw.png'), flags) / 10000
    return D, RD
def getHHA(C, D, RD):
    """Encode a depth image as a 3-channel HHA image (Gupta et al.).

    Args:
        C: 3x3 camera intrinsics matrix.
        D: depth image in meters, missing values already filled in.
        RD: raw depth image in meters; zeros mark missing measurements.

    Returns:
        uint8 HHA image in BGR channel order (see comment below):
        channel 2 = horizontal disparity, channel 1 = height above ground,
        channel 0 = angle between surface normal and gravity direction.
    """
    missingMask = (RD == 0)
    # processDepthImage works in centimeters, hence D * 100.
    pc, N, yDir, h, pcRot, NRot = processDepthImage(D * 100, missingMask, C)

    # Angle (degrees) between each surface normal and the gravity direction.
    # Vectorized replacement for the original per-pixel math.acos loop:
    # clip the dot product into [-1, 1] before arccos; NaNs propagate the
    # same way as before.
    cosAngle = np.minimum(1, np.maximum(-1, np.sum(np.multiply(N, yDir), axis=2)))
    angle = np.degrees(np.arccos(cosAngle)).reshape(h.shape)
    # Must convert nan to 180 as the MATLAB program actually does,
    # or the HHA image's border region differs from MATLAB's output.
    angle[np.isnan(angle)] = 180

    # Clamp depth (cm) from below so the disparity channel stays bounded.
    pc[:, :, 2] = np.maximum(pc[:, :, 2], 100)
    I = np.zeros(pc.shape)
    # opencv-python saves the picture in BGR order.
    I[:, :, 2] = 31000 / pc[:, :, 2]
    I[:, :, 1] = h
    I[:, :, 0] = (angle + 128 - 90)

    # np.uint8 truncates toward zero while MATLAB rounds, so round explicitly.
    I = np.rint(I)
    # np.uint8 wraps (256 -> 0) but MATLAB's uint8 saturates (256 -> 255).
    I[I > 255] = 255
    HHA = I.astype(np.uint8)
    return HHA
def main():
    """Generate an HHA image for every depth frame in the configured scene.

    Reads 16-bit depth PNGs from ``<scene_path>/depth/``, converts them to
    meters, downsamples 4x before the expensive HHA computation, then writes
    the result (upsampled back to the original size) to ``<scene_path>/HHA/``.
    """
    config = config_setup()
    # BUG FIX: the original indexed config["scene_path"] directly, which
    # raises KeyError when the config only defines "home_param" (as the
    # original config_setup() did). Fall back gracefully.
    scene_path = config.get("scene_path", config.get("home_param"))
    depth_paths = sorted(glob.glob(scene_path + "depth/*.png"))
    for i, depth_path in enumerate(depth_paths):
        # -1 == cv2.IMREAD_UNCHANGED: keep the raw 16-bit depth values.
        depth = cv2.imread(depth_path, -1)
        depth = depth / 1000  # stored millimeters -> meters
        H_ori, W_ori = depth.shape[0], depth.shape[1]
        # Approximate pinhole intrinsics: focal length max(H, W), principal
        # point at the image center. NOTE(review): assumed equirectangular /
        # uncalibrated input — confirm against the actual camera.
        camera_matrix = np.array([
            [max(H_ori, W_ori), 0, W_ori / 2],
            [0, max(H_ori, W_ori), H_ori / 2],
            [0, 0, 1],
        ])
        # Downsample 4x to keep getHHA tractable, then upsample the result.
        H, W = int(H_ori / 4), int(W_ori / 4)
        depth_resize = cv2.resize(depth, (W, H), interpolation=cv2.INTER_NEAREST)
        hha = getHHA(camera_matrix, depth_resize, depth_resize)
        cv2.imwrite(
            scene_path + f'HHA/{i:03d}_equi_hha.png',
            cv2.resize(hha, (W_ori, H_ori), interpolation=cv2.INTER_NEAREST),
        )


if __name__ == "__main__":
    main()
''' multi-processing example '''
'''
from multiprocessing import Pool
def generate_hha(i):
# generate hha for the i-th image
return
processNum = 16
pool = Pool(processNum)
for i in range(img_num):
print(i)
pool.apply_async(generate_hha, args=(i,))
pool.close()
pool.join()
'''