|
|
import argparse
import logging
import os
import shutil
import struct
import sys
from collections import namedtuple

import numpy as np
|
|
|
|
|
|
|
|
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') |
|
|
|
|
|
|
|
|
Camera = namedtuple("Camera", ["id", "model", "width", "height", "params"]) |
|
|
Image = namedtuple("Image", ["id", "qvec", "tvec", "camera_id", "name", "xys", "point3D_ids"]) |
|
|
Point3D = namedtuple("Point3D", ["id", "xyz", "rgb", "error", "image_ids", "point2D_idxs"]) |
|
|
|
|
|
def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"):
    """Read the next *num_bytes* from binary stream *fid* and unpack them.

    *format_char_sequence* is a ``struct`` format string (without the byte
    order prefix); *endian_character* defaults to little-endian.  Returns
    the tuple produced by unpacking.
    """
    layout = struct.Struct(endian_character + format_char_sequence)
    return layout.unpack(fid.read(num_bytes))
|
|
|
|
|
def read_images_binary(path):
    """Parse a COLMAP ``images.bin`` file into a dict of ``Image`` records.

    Only the per-image header (id, pose, camera id, name) is retained; the
    2D keypoint records are skipped wholesale, so every returned ``Image``
    has empty ``xys`` / ``point3D_ids`` arrays.

    :param path: path to an ``images.bin`` produced by COLMAP.
    :return: dict mapping ``image_id`` -> ``Image``.
    """
    images = {}
    with open(path, "rb") as fid:
        num_reg_images = read_next_bytes(fid, 8, "Q")[0]
        for _ in range(num_reg_images):
            # Fixed header: image_id (i), qvec (4d), tvec (3d), camera_id (i)
            # -> 4 + 32 + 24 + 4 = 64 bytes.
            binary_image_properties = read_next_bytes(fid, 64, "i4d3di")
            image_id = binary_image_properties[0]
            qvec = np.array(binary_image_properties[1:5])
            tvec = np.array(binary_image_properties[5:8])
            camera_id = binary_image_properties[8]

            # The name is a NUL-terminated byte string. Accumulate the raw
            # bytes and decode ONCE at the end: the previous byte-by-byte
            # decode raised UnicodeDecodeError for any multi-byte UTF-8
            # character in a file name.
            name_bytes = bytearray()
            current_char = read_next_bytes(fid, 1, "c")[0]
            while current_char != b"\x00":
                name_bytes += current_char
                current_char = read_next_bytes(fid, 1, "c")[0]
            image_name = name_bytes.decode("utf-8")

            # Skip the 2D points: each record is x (d), y (d), point3D_id (q)
            # = 24 bytes; seek relative to the current position (whence=1).
            num_points2D = read_next_bytes(fid, 8, "Q")[0]
            fid.seek(24 * num_points2D, 1)

            images[image_id] = Image(
                id=image_id, qvec=qvec, tvec=tvec,
                camera_id=camera_id, name=image_name,
                xys=np.empty((0, 2)), point3D_ids=np.empty(0))
    return images
|
|
|
|
|
def write_images_binary(path, images):
    """Serialize *images* to *path* in COLMAP's ``images.bin`` layout.

    Each value of *images* only needs the ``Image`` attributes
    (``id``, ``qvec``, ``tvec``, ``camera_id``, ``name``, ``xys``,
    ``point3D_ids``).

    :param path: destination file path (overwritten).
    :param images: dict mapping image id -> Image-like record.
    """
    with open(path, "wb") as fid:
        fid.write(struct.pack("<Q", len(images)))
        for _, image in images.items():
            fid.write(struct.pack("<i", image.id))
            fid.write(struct.pack("<4d", *image.qvec))
            fid.write(struct.pack("<3d", *image.tvec))
            fid.write(struct.pack("<i", image.camera_id))
            # NUL-terminated UTF-8 name, matching read_images_binary.
            fid.write(image.name.encode("utf-8") + b"\x00")
            fid.write(struct.pack("<Q", len(image.xys)))
            # Write the point2D records themselves. The previous version
            # wrote only the count above, which produced a corrupt file
            # whenever xys was non-empty (the count promised records that
            # were never written). For the empty xys this pipeline produces,
            # the output is byte-identical to before.
            for xy, point3D_id in zip(image.xys, image.point3D_ids):
                fid.write(struct.pack("<2dq", float(xy[0]), float(xy[1]), int(point3D_id)))
|
|
|
|
|
def run_command(cmd):
    """Run shell command *cmd*, terminating the whole script on failure.

    :param cmd: complete shell command line (executed via ``os.system``).
    :raises SystemExit: with code 1 when the command returns non-zero.
    """
    logging.info(f"執行命令: {cmd}")
    exit_code = os.system(cmd)
    if exit_code != 0:
        logging.error(f"命令執行失敗,返回碼: {exit_code}。腳本終止。")
        # os.system() returns a raw wait status on POSIX (e.g. 256 for a
        # child exit code of 1). Passing that status to exit() wraps modulo
        # 256 and could make a failure exit with status 0, so always exit
        # with an explicit non-zero code. sys.exit is also preferred over
        # the site-injected exit() builtin in scripts.
        sys.exit(1)
    logging.info("命令執行成功。")
|
|
|
|
|
def run_full_colmap_and_split(data_path, colmap_arg=""):
    """Run one COLMAP sparse reconstruction over train+test images combined,
    then split the resulting model back into per-split sparse/0 folders.

    :param data_path: root directory expected to contain ``train/images``
        and ``test/images``; the final ``sparse/0`` folders are written under
        ``<data_path>/train`` and ``<data_path>/test``.
    :param colmap_arg: optional path to the COLMAP executable; when empty,
        ``colmap`` from PATH is used.
    """
    logging.info(f"===== 開始對 {data_path} 執行全自動 COLMAP 重建與分割 =====")
    # Quote an explicit executable path so spaces survive the shell.
    colmap_command = f'"{colmap_arg}"' if len(colmap_arg) > 0 else "colmap"

    # --- Step 1: build one shared working directory holding every image. ---
    logging.info("步驟 1: 準備統一的工作目錄...")
    work_dir = os.path.join(data_path, "colmap_work_dir")
    image_dir = os.path.join(work_dir, "images")

    # Start from a clean slate; ignore_errors covers the first-run case.
    shutil.rmtree(work_dir, ignore_errors=True)
    os.makedirs(image_dir)

    train_image_dir = os.path.join(data_path, "train", "images")
    test_image_dir = os.path.join(data_path, "test", "images")

    # Normalize an alternative layout: <split>_data/input -> <split>_data/images.
    # NOTE(review): these renamed folders are never read below — the copies
    # use train/images and test/images instead. Confirm whether
    # "train_data"/"test_data" here is intentional or should match the
    # directories defined just above.
    for d in [os.path.join(data_path, "train_data"), os.path.join(data_path, "test_data")]:
        if os.path.exists(os.path.join(d, "input")):
            os.rename(os.path.join(d, "input"), os.path.join(d, "images"))

    # The file-name sets double as the membership registry used to split the
    # reconstructed model in step 3.
    train_files = set(os.listdir(train_image_dir))
    test_files = set(os.listdir(test_image_dir))

    for f in train_files: shutil.copy(os.path.join(train_image_dir, f), image_dir)
    for f in test_files: shutil.copy(os.path.join(test_image_dir, f), image_dir)
    logging.info(f"已將 {len(train_files)} 個訓練影像和 {len(test_files)} 個測試影像複製到工作目錄。")

    # --- Step 2: full COLMAP pipeline (features -> matching -> mapping). ---
    logging.info("步驟 2: 執行完整的 COLMAP 流程...")
    db_path = os.path.join(work_dir, "database.db")

    # Single shared PINHOLE camera for all images; feature extraction.
    cmd_feature = (f'{colmap_command} feature_extractor '
                   f'--database_path "{db_path}" '
                   f'--image_path "{image_dir}" '
                   f'--ImageReader.single_camera 1 '
                   f'--ImageReader.camera_model PINHOLE '
                   f'--SiftExtraction.max_num_features 8192 '
                   f'--SiftExtraction.upright 0')
    run_command(cmd_feature)

    cmd_matcher = f'{colmap_command} exhaustive_matcher --database_path "{db_path}"'
    run_command(cmd_matcher)

    sparse_dir = os.path.join(work_dir, "sparse")
    os.makedirs(sparse_dir)
    # Mapper with intrinsics refinement disabled and fairly permissive
    # registration thresholds; multiple_models 0 forces a single model.
    cmd_mapper = (f'{colmap_command} mapper '
                  f'--database_path "{db_path}" '
                  f'--image_path "{image_dir}" '
                  f'--output_path "{sparse_dir}" '
                  f'--Mapper.ba_refine_focal_length 0 '
                  f'--Mapper.ba_refine_principal_point 0 '
                  f'--Mapper.ba_refine_extra_params 0 '
                  f'--Mapper.min_num_matches 4 '
                  f'--Mapper.init_min_num_inliers 4 '
                  f'--Mapper.abs_pose_max_error 12.0 '
                  f'--Mapper.abs_pose_min_num_inliers 4 '
                  f'--Mapper.init_max_forward_motion 0.95 '
                  f'--Mapper.init_min_tri_angle 4.0 '
                  f'--Mapper.multiple_models 0')

    # Deliberately os.system here instead of run_command: a non-zero status
    # should trigger the relaxed fallback below rather than abort the script.
    exit_code = os.system(cmd_mapper)
    if exit_code != 0:
        logging.warning("标准重建失败,尝试更宽松的参数...")

        # Same mapper invocation with even looser thresholds; this one goes
        # through run_command, so a second failure terminates the script.
        cmd_mapper_fallback = (f'{colmap_command} mapper '
                               f'--database_path "{db_path}" '
                               f'--image_path "{image_dir}" '
                               f'--output_path "{sparse_dir}" '
                               f'--Mapper.ba_refine_focal_length 0 '
                               f'--Mapper.ba_refine_principal_point 0 '
                               f'--Mapper.ba_refine_extra_params 0 '
                               f'--Mapper.min_num_matches 2 '
                               f'--Mapper.init_min_num_inliers 2 '
                               f'--Mapper.abs_pose_max_error 20.0 '
                               f'--Mapper.abs_pose_min_num_inliers 2 '
                               f'--Mapper.init_max_forward_motion 0.99 '
                               f'--Mapper.init_min_tri_angle 2.0 '
                               f'--Mapper.multiple_models 0')
        run_command(cmd_mapper_fallback)

    # --- Step 3: locate the reconstructed model and split it by file name. ---
    logging.info("步驟 3: 分割 COLMAP 模型...")
    unified_model_path = os.path.join(sparse_dir, "0")
    if not os.path.exists(unified_model_path):
        # The mapper may number its output folder differently; accept a
        # single digit-named subfolder, otherwise give up.
        subdirs = [d for d in os.listdir(sparse_dir) if os.path.isdir(os.path.join(sparse_dir, d)) and d.isdigit()]
        if len(subdirs) == 1:
            unified_model_path = os.path.join(sparse_dir, subdirs[0])
            logging.info(f"找到了 COLMAP 輸出模型於: {unified_model_path}")
        else:
            logging.error("COLMAP mapper 未能成功生成唯一的 sparse 模型文件夾,腳本終止。")
            return

    images_data = read_images_binary(os.path.join(unified_model_path, "images.bin"))

    # Assign each registered image to its split via the name sets from step 1.
    # Images COLMAP failed to register simply never appear in images_data.
    train_images = {}
    test_images = {}
    for img_id, img in images_data.items():
        if img.name in train_files:
            train_images[img_id] = img
        elif img.name in test_files:
            test_images[img_id] = img

    logging.info(f"分割完成: {len(train_images)} 個訓練影像,{len(test_images)} 個測試影像。")
    if len(train_images) == 0 or len(test_images) == 0:
        logging.warning("警告:訓練集或測試集中的影像未能全部成功註冊,分割後可能為空。")

    # --- Step 4: write per-split sparse/0 folders. ---
    logging.info("步驟 4: 創建最終的輸出目錄...")
    for split, split_images in [("train", train_images), ("test", test_images)]:
        if not split_images:
            logging.warning(f"{split} 中沒有成功註冊的影像,跳過生成 sparse 文件。")
            continue
        output_dir = os.path.join(data_path, split)
        output_sparse_dir = os.path.join(output_dir, "sparse", "0")
        shutil.rmtree(output_sparse_dir, ignore_errors=True)
        os.makedirs(output_sparse_dir, exist_ok=True)

        # Only images.bin is split per set...
        write_images_binary(os.path.join(output_sparse_dir, "images.bin"), split_images)

        # ...cameras and 3D points are shared: copy them verbatim.
        shutil.copy(os.path.join(unified_model_path, "cameras.bin"), output_sparse_dir)
        shutil.copy(os.path.join(unified_model_path, "points3D.bin"), output_sparse_dir)

        # NOTE(review): the PLY is exported from the *unified* model, so both
        # splits receive the identical full point cloud — confirm intended.
        ply_path = os.path.join(output_sparse_dir, "points3D.ply")
        cmd_converter = f'{colmap_command} model_converter --input_path "{unified_model_path}" --output_path "{ply_path}" --output_type PLY'
        run_command(cmd_converter)

        logging.info(f"已為 {split} 生成最終的 sparse 文件。")

    # Remove the shared working directory (database, images copy, model).
    shutil.rmtree(work_dir)
    logging.info("臨時工作目錄已清理。")
    logging.info(f"===== 所有處理完成! =====")
|
|
|
|
|
if __name__ == '__main__':
    # Command-line entry point: parse the two options and hand off to the
    # pipeline function.
    cli = argparse.ArgumentParser(description="全自動執行 COLMAP 重建並分割訓練/測試集。")
    cli.add_argument('--data_path', type=str, required=True, help='包含 train 和 test 的根目錄路徑。')
    cli.add_argument('--colmap_executable', type=str, default="", help='(可選) COLMAP 可執行文件的路徑。')
    options = cli.parse_args()
    run_full_colmap_and_split(options.data_path, options.colmap_executable)