Datasets:

DOI:
License:
ToF-360 / assets /preprocessing /align_manhattan.py
kanayamaHideaki's picture
Add semantics, instances, layout_eval, preprocessing and modifying README.md.
324d4da
from .utils_from_LGT_Net import *
import sys
import numpy as np
import cv2
import os
from PIL import Image
import glob
import json
from natsort import natsorted
from tqdm import tqdm
def config_setup():
    """Build the runtime configuration for the alignment script.

    Returns:
        dict: configuration holding ``home_param`` — the scene root
        folder. ``"<scene>/"`` is a placeholder to be replaced with the
        actual dataset scene path before running.
    """
    return {"home_param": "<scene>/"}
def main():
    """Manhattan-align every equirectangular RGB panorama in a scene folder.

    Reads ``<scene>/RGB/*_rgb.png``, detects vanishing points with
    ``panoEdgeDetection`` (LGT-Net utilities), rotates each panorama so its
    walls align with the Manhattan-world axes, saves the aligned images to
    ``<scene>/RGB_mh_aligned``, and dumps every per-image rotation matrix
    into ``rotation_matrix.json`` inside the output folder.
    """
    config = config_setup()
    scene = config["home_param"]
    # Single-quoted keys: reusing double quotes inside a double-quoted
    # f-string is a SyntaxError on Python < 3.12 (PEP 701).
    print(f"Now Processing: {scene}...")

    input_folder = f"{scene}/RGB"
    output_folder = f"{scene}/RGB_mh_aligned"
    os.makedirs(output_folder, exist_ok=True)

    input_files = natsorted(glob.glob(f"{input_folder}/*_rgb.png"))

    # Disable OpenCV's non-thread-safe OpenCL option once, up front
    # (loop-invariant; no need to repeat it per image).
    cv2.ocl.setUseOpenCL(False)

    mat_dict = {"data": []}
    for input_file in tqdm(input_files):
        img_ori = np.array(Image.open(input_file))
        # Vanishing-point / edge detection from the LGT-Net helpers.
        olines, vp, views, edges, panoEdge, score, angle = panoEdgeDetection(
            img_ori,
            qError=0.7,
            refineIter=3)
        # Rotate the panorama so the detected vanishing points (reversed
        # order, vp[2::-1]) become the Manhattan world axes; R is the
        # applied rotation matrix.
        img, R = rotatePanorama(img_ori / 255.0, vp[2::-1])

        # os.path handles both "/" and "\\" separators (the original
        # split("/") silently failed on Windows paths).
        file_name = os.path.splitext(os.path.basename(input_file))[0]
        file_path = f"{output_folder}/{file_name}_aligned.png"
        Image.fromarray((img * 255).astype(np.uint8)).save(file_path)

        mat_dict["data"].append({
            "input_file": input_file,
            "output_file": file_path,
            "rotation_matrix": R.tolist(),
        })

    # Persist every rotation matrix alongside the aligned images so the
    # alignment can be reproduced or inverted later.
    with open(f"{output_folder}/rotation_matrix.json", "w") as f:
        json.dump(mat_dict, f, indent=2)
if __name__ == "__main__":
main()