vslamlab commited on
Commit
e8ace62
·
verified ·
1 Parent(s): bed028c

Upload folder using huggingface_hub

Browse files
.gitignore ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ vocab_tree_flickr100K_words1M.bin
2
+ vocab_tree_flickr100K_words256K.bin
3
+ vocab_tree_flickr100K_words32K.bin
4
+
5
+ LightGlue/
6
+ __pycache__/
colmap_mapper.sh CHANGED
@@ -7,73 +7,23 @@ exp_id="$3"
7
  settings_yaml="$4"
8
  calibration_yaml="$5"
9
  rgb_csv="$6"
 
10
 
11
  exp_folder_colmap="${exp_folder}/colmap_${exp_id}"
12
- rgb_dir=$(awk -F, 'NR==2 { split($2,a,"/"); print a[1]; exit }' "$rgb_csv")
13
  rgb_path="${sequence_path}/${rgb_dir}"
14
 
15
- calibration_model=$(grep -oP '(?<=Camera0\.model:\s)[\w]+' "$calibration_yaml")
16
  echo " camera model : $calibration_model"
17
-
18
- # Reading settings from yaml file
19
- mapper_Mapper_min_num_matches=$(yq '.mapper.Mapper_min_num_matches // 15' $settings_yaml)
20
- mapper_Mapper_ignore_watermarks=$(yq '.mapper.Mapper_ignore_watermarks // 0' $settings_yaml)
21
- mapper_Mapper_multiple_models=$(yq '.mapper.Mapper_multiple_models // 1' $settings_yaml)
22
- mapper_Mapper_max_num_models=$(yq '.mapper.Mapper_max_num_models // 50' $settings_yaml)
23
- mapper_Mapper_max_model_overlap=$(yq '.mapper.Mapper_max_model_overlap // 20' $settings_yaml)
24
- mapper_Mapper_min_model_size=$(yq '.mapper.Mapper_min_model_size // 10' $settings_yaml)
25
- mapper_Mapper_init_image_id1=$(yq '.mapper.Mapper_init_image_id1 // -1' $settings_yaml)
26
- mapper_Mapper_init_image_id2=$(yq '.mapper.Mapper_init_image_id2 // -1' $settings_yaml)
27
- mapper_Mapper_init_num_trials=$(yq '.mapper.Mapper_init_num_trials // 200' $settings_yaml)
28
- mapper_Mapper_extract_colors=$(yq '.mapper.Mapper_extract_colors // 1' $settings_yaml)
29
- mapper_Mapper_num_threads=$(yq '.mapper.Mapper_num_threads // -1' $settings_yaml)
30
- mapper_Mapper_min_focal_length_ratio=$(yq '.mapper.Mapper_min_focal_length_ratio // 0.1' $settings_yaml)
31
- mapper_Mapper_max_focal_length_ratio=$(yq '.mapper.Mapper_max_focal_length_ratio // 10' $settings_yaml)
32
- mapper_Mapper_max_extra_param=$(yq '.mapper.Mapper_max_extra_param // 1' $settings_yaml)
33
- mapper_Mapper_ba_refine_focal_length=$(yq '.mapper.Mapper_ba_refine_focal_length // 1' $settings_yaml)
34
- mapper_Mapper_ba_refine_principal_point=$(yq '.mapper.Mapper_ba_refine_principal_point // 0' $settings_yaml)
35
- mapper_Mapper_ba_refine_extra_params=$(yq '.mapper.Mapper_ba_refine_extra_params // 1' $settings_yaml)
36
- mapper_Mapper_ba_local_num_images=$(yq '.mapper.Mapper_ba_local_num_images // 6' $settings_yaml)
37
- mapper_Mapper_ba_local_function_tolerance=$(yq '.mapper.Mapper_ba_local_function_tolerance // 0' $settings_yaml)
38
- mapper_Mapper_ba_local_max_num_iterations=$(yq '.mapper.Mapper_ba_local_max_num_iterations // 25' $settings_yaml)
39
- mapper_Mapper_ba_global_images_ratio=$(yq '.mapper.Mapper_ba_global_images_ratio // 1.1' $settings_yaml)
40
- mapper_Mapper_ba_global_points_ratio=$(yq '.mapper.Mapper_ba_global_points_ratio // 1.1' $settings_yaml)
41
- mapper_Mapper_ba_global_images_freq=$(yq '.mapper.Mapper_ba_global_images_freq // 500' $settings_yaml)
42
- mapper_Mapper_ba_global_points_freq=$(yq '.mapper.Mapper_ba_global_points_freq // 250000' $settings_yaml)
43
- mapper_Mapper_ba_global_function_tolerance=$(yq '.mapper.Mapper_ba_global_function_tolerance // 0' $settings_yaml)
44
- mapper_Mapper_ba_global_max_num_iterations=$(yq '.mapper.Mapper_ba_global_max_num_iterations // 50' $settings_yaml)
45
- mapper_Mapper_ba_global_max_refinements=$(yq '.mapper.Mapper_ba_global_max_refinements // 5' $settings_yaml)
46
- mapper_Mapper_ba_global_max_refinement_change=$(yq '.mapper.Mapper_ba_global_max_refinement_change // 0.0005' $settings_yaml)
47
- mapper_Mapper_ba_local_max_refinements=$(yq '.mapper.Mapper_ba_local_max_refinements // 2' $settings_yaml)
48
- mapper_Mapper_ba_local_max_refinement_change=$(yq '.mapper.Mapper_ba_local_max_refinement_change // 0.001' $settings_yaml)
49
- mapper_Mapper_ba_use_gpu=$(yq '.mapper.Mapper_ba_use_gpu // 0' $settings_yaml)
50
- mapper_Mapper_ba_gpu_index=$(yq '.mapper.Mapper_ba_gpu_index // -1' $settings_yaml)
51
- mapper_Mapper_ba_min_num_residuals_for_cpu_multi_threading=$(yq '.mapper.Mapper_ba_min_num_residuals_for_cpu_multi_threading // 50000' $settings_yaml)
52
- mapper_Mapper_snapshot_images_freq=$(yq '.mapper.Mapper_snapshot_images_freq // 0' $settings_yaml)
53
- mapper_Mapper_fix_existing_images=$(yq '.mapper.Mapper_fix_existing_images // 0' $settings_yaml)
54
- mapper_Mapper_init_min_num_inliers=$(yq '.mapper.Mapper_init_min_num_inliers // 100' $settings_yaml)
55
- mapper_Mapper_init_max_error=$(yq '.mapper.Mapper_init_max_error // 4' $settings_yaml)
56
- mapper_Mapper_init_max_forward_motion=$(yq '.mapper.Mapper_init_max_forward_motion // 0.95' $settings_yaml)
57
- mapper_Mapper_init_min_tri_angle=$(yq '.mapper.Mapper_init_min_tri_angle // 16' $settings_yaml)
58
- mapper_Mapper_init_max_reg_trials=$(yq '.mapper.Mapper_init_max_reg_trials // 2' $settings_yaml)
59
- mapper_Mapper_abs_pose_max_error=$(yq '.mapper.Mapper_abs_pose_max_error // 12' $settings_yaml)
60
- mapper_Mapper_abs_pose_min_num_inliers=$(yq '.mapper.Mapper_abs_pose_min_num_inliers // 30' $settings_yaml)
61
- mapper_Mapper_abs_pose_min_inlier_ratio=$(yq '.mapper.Mapper_abs_pose_min_inlier_ratio // 0.25' $settings_yaml)
62
- mapper_Mapper_filter_max_reproj_error=$(yq '.mapper.Mapper_filter_max_reproj_error // 4' $settings_yaml)
63
- mapper_Mapper_filter_min_tri_angle=$(yq '.mapper.Mapper_filter_min_tri_angle // 1.5' $settings_yaml)
64
- mapper_Mapper_max_reg_trials=$(yq '.mapper.Mapper_max_reg_trials // 3' $settings_yaml)
65
- mapper_Mapper_local_ba_min_tri_angle=$(yq '.mapper.Mapper_local_ba_min_tri_angle // 6' $settings_yaml)
66
- mapper_Mapper_tri_max_transitivity=$(yq '.mapper.Mapper_tri_max_transitivity // 1' $settings_yaml)
67
- mapper_Mapper_tri_create_max_angle_error=$(yq '.mapper.Mapper_tri_create_max_angle_error // 2' $settings_yaml)
68
- mapper_Mapper_tri_continue_max_angle_error=$(yq '.mapper.Mapper_tri_continue_max_angle_error // 2' $settings_yaml)
69
- mapper_Mapper_tri_merge_max_reproj_error=$(yq '.mapper.Mapper_tri_merge_max_reproj_error // 4' $settings_yaml)
70
- mapper_Mapper_tri_complete_max_reproj_error=$(yq '.mapper.Mapper_tri_complete_max_reproj_error // 4' $settings_yaml)
71
- mapper_Mapper_tri_complete_max_transitivity=$(yq '.mapper.Mapper_tri_complete_max_transitivity // 5' $settings_yaml)
72
- mapper_Mapper_tri_re_max_angle_error=$(yq '.mapper.Mapper_tri_re_max_angle_error // 5' $settings_yaml)
73
- mapper_Mapper_tri_re_min_ratio=$(yq '.mapper.Mapper_tri_re_min_ratio // 0.2' $settings_yaml)
74
- mapper_Mapper_tri_re_max_trials=$(yq '.mapper.Mapper_tri_re_max_trials // 1' $settings_yaml)
75
- mapper_Mapper_tri_min_angle=$(yq '.mapper.Mapper_tri_min_angle // 1.5' $settings_yaml)
76
- mapper_Mapper_tri_ignore_two_view_tracks=$(yq '.mapper.Mapper_tri_ignore_two_view_tracks // 1' $settings_yaml)
77
 
78
  echo " colmap mapper ..."
79
  database="${exp_folder_colmap}/colmap_database.db"
@@ -81,66 +31,10 @@ database="${exp_folder_colmap}/colmap_database.db"
81
  colmap mapper \
82
  --database_path ${database} \
83
  --image_path ${rgb_path} \
84
- --output_path ${exp_folder_colmap} #\
85
- # --Mapper.min_num_matches ${mapper_Mapper_min_num_matches} \
86
- # --Mapper.ignore_watermarks ${mapper_Mapper_ignore_watermarks} \
87
- # --Mapper.multiple_models ${mapper_Mapper_multiple_models} \
88
- # --Mapper.max_num_models ${mapper_Mapper_max_num_models} \
89
- # --Mapper.max_model_overlap ${mapper_Mapper_max_model_overlap} \
90
- # --Mapper.min_model_size ${mapper_Mapper_min_model_size} \
91
- # --Mapper.init_image_id1 ${mapper_Mapper_init_image_id1} \
92
- # --Mapper.init_image_id2 ${mapper_Mapper_init_image_id2} \
93
- # --Mapper.init_num_trials ${mapper_Mapper_init_num_trials} \
94
- # --Mapper.extract_colors ${mapper_Mapper_extract_colors} \
95
- # --Mapper.num_threads ${mapper_Mapper_num_threads} \
96
- # --Mapper.min_focal_length_ratio ${mapper_Mapper_min_focal_length_ratio} \
97
- # --Mapper.max_focal_length_ratio ${mapper_Mapper_max_focal_length_ratio} \
98
- # --Mapper.max_extra_param ${mapper_Mapper_max_extra_param} \
99
- # --Mapper.ba_refine_focal_length ${mapper_Mapper_ba_refine_focal_length} \
100
- # --Mapper.ba_refine_principal_point ${mapper_Mapper_ba_refine_principal_point} \
101
- # --Mapper.ba_refine_extra_params ${mapper_Mapper_ba_refine_extra_params} \
102
- # --Mapper.ba_local_num_images ${mapper_Mapper_ba_local_num_images} \
103
- # --Mapper.ba_local_function_tolerance ${mapper_Mapper_ba_local_function_tolerance} \
104
- # --Mapper.ba_local_max_num_iterations ${mapper_Mapper_ba_local_max_num_iterations} \
105
- # --Mapper.ba_global_images_ratio ${mapper_Mapper_ba_global_images_ratio} \
106
- # --Mapper.ba_global_points_ratio ${mapper_Mapper_ba_global_points_ratio} \
107
- # --Mapper.ba_global_images_freq ${mapper_Mapper_ba_global_images_freq} \
108
- # --Mapper.ba_global_points_freq ${mapper_Mapper_ba_global_points_freq} \
109
- # --Mapper.ba_global_function_tolerance ${mapper_Mapper_ba_global_function_tolerance} \
110
- # --Mapper.ba_global_max_num_iterations ${mapper_Mapper_ba_global_max_num_iterations} \
111
- # --Mapper.ba_global_max_refinements ${mapper_Mapper_ba_global_max_refinements} \
112
- # --Mapper.ba_global_max_refinement_change ${mapper_Mapper_ba_global_max_refinement_change} \
113
- # --Mapper.ba_local_max_refinements ${mapper_Mapper_ba_local_max_refinements} \
114
- # --Mapper.ba_local_max_refinement_change ${mapper_Mapper_ba_local_max_refinement_change} \
115
- # --Mapper.ba_use_gpu ${mapper_Mapper_ba_use_gpu} \
116
- # --Mapper.ba_gpu_index ${mapper_Mapper_ba_gpu_index} \
117
- # --Mapper.ba_min_num_residuals_for_cpu_multi_threading ${mapper_Mapper_ba_min_num_residuals_for_cpu_multi_threading} \
118
- # --Mapper.snapshot_images_freq ${mapper_Mapper_snapshot_images_freq} \
119
- # --Mapper.fix_existing_images ${mapper_Mapper_fix_existing_images} \
120
- # --Mapper.init_min_num_inliers ${mapper_Mapper_init_min_num_inliers} \
121
- # --Mapper.init_max_error ${mapper_Mapper_init_max_error} \
122
- # --Mapper.init_max_forward_motion ${mapper_Mapper_init_max_forward_motion} \
123
- # --Mapper.init_min_tri_angle ${mapper_Mapper_init_min_tri_angle} \
124
- # --Mapper.init_max_reg_trials ${mapper_Mapper_init_max_reg_trials} \
125
- # --Mapper.abs_pose_max_error ${mapper_Mapper_abs_pose_max_error} \
126
- # --Mapper.abs_pose_min_num_inliers ${mapper_Mapper_abs_pose_min_num_inliers} \
127
- # --Mapper.abs_pose_min_inlier_ratio ${mapper_Mapper_abs_pose_min_inlier_ratio} \
128
- # --Mapper.filter_max_reproj_error ${mapper_Mapper_filter_max_reproj_error} \
129
- # --Mapper.filter_min_tri_angle ${mapper_Mapper_filter_min_tri_angle} \
130
- # --Mapper.max_reg_trials ${mapper_Mapper_max_reg_trials} \
131
- # --Mapper.local_ba_min_tri_angle ${mapper_Mapper_local_ba_min_tri_angle} \
132
- # --Mapper.tri_max_transitivity ${mapper_Mapper_tri_max_transitivity} \
133
- # --Mapper.tri_create_max_angle_error ${mapper_Mapper_tri_create_max_angle_error} \
134
- # --Mapper.tri_continue_max_angle_error ${mapper_Mapper_tri_continue_max_angle_error} \
135
- # --Mapper.tri_merge_max_reproj_error ${mapper_Mapper_tri_merge_max_reproj_error} \
136
- # --Mapper.tri_complete_max_reproj_error ${mapper_Mapper_tri_complete_max_reproj_error} \
137
- # --Mapper.tri_complete_max_transitivity ${mapper_Mapper_tri_complete_max_transitivity} \
138
- # --Mapper.tri_re_max_angle_error ${mapper_Mapper_tri_re_max_angle_error} \
139
- # --Mapper.tri_re_min_ratio ${mapper_Mapper_tri_re_min_ratio} \
140
- # --Mapper.tri_re_max_trials ${mapper_Mapper_tri_re_max_trials} \
141
- # --Mapper.tri_min_angle ${mapper_Mapper_tri_min_angle} \
142
- # --Mapper.tri_ignore_two_view_tracks ${mapper_Mapper_tri_ignore_two_view_tracks}
143
-
144
 
145
  echo " colmap model_converter ..."
146
  colmap model_converter \
 
7
  settings_yaml="$4"
8
  calibration_yaml="$5"
9
  rgb_csv="$6"
10
+ camera_name="$7"
11
 
12
  exp_folder_colmap="${exp_folder}/colmap_${exp_id}"
13
+ rgb_dir="${camera_name}"
14
  rgb_path="${sequence_path}/${rgb_dir}"
15
 
16
+ read -r calibration_model more_ <<< $(python3 Baselines/colmap/get_calibration.py "$calibration_yaml" "$camera_name")
17
  echo " camera model : $calibration_model"
18
+ ba_refine_focal_length="0"
19
+ ba_refine_principal_point="0"
20
+ ba_refine_extra_params="0"
21
+ if [ "${calibration_model}" == "unknown" ]
22
+ then
23
+ ba_refine_focal_length="1"
24
+ ba_refine_principal_point="1"
25
+ ba_refine_extra_params="1"
26
+ fi
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
 
28
  echo " colmap mapper ..."
29
  database="${exp_folder_colmap}/colmap_database.db"
 
31
  colmap mapper \
32
  --database_path ${database} \
33
  --image_path ${rgb_path} \
34
+ --output_path ${exp_folder_colmap} \
35
+ --Mapper.ba_refine_focal_length ${ba_refine_focal_length} \
36
+ --Mapper.ba_refine_principal_point ${ba_refine_principal_point} \
37
+ --Mapper.ba_refine_extra_params ${ba_refine_extra_params}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
38
 
39
  echo " colmap model_converter ..."
40
  colmap model_converter \
colmap_matcher.sh CHANGED
@@ -10,50 +10,28 @@ calibration_yaml="$5"
10
  rgb_csv="$6"
11
  matcher_type="$7"
12
  use_gpu="$8"
 
13
 
14
  exp_folder_colmap="${exp_folder}/colmap_${exp_id}"
15
  rgb_dir=$(awk -F, 'NR==2 { split($2,a,"/"); print a[1]; exit }' "$rgb_csv")
16
  rgb_path="${sequence_path}/${rgb_dir}"
17
 
18
- calibration_model=$(grep -oP '(?<=Camera0\.model:\s)[\w]+' "$calibration_yaml")
19
-
20
- fx=$(grep -oP '(?<=Camera0\.fx:\s)-?\d+\.\d+' "$calibration_yaml")
21
- fy=$(grep -oP '(?<=Camera0\.fy:\s)-?\d+\.\d+' "$calibration_yaml")
22
- cx=$(grep -oP '(?<=Camera0\.cx:\s)-?\d+\.\d+' "$calibration_yaml")
23
- cy=$(grep -oP '(?<=Camera0\.cy:\s)-?\d+\.\d+' "$calibration_yaml")
24
-
25
- # Reading settings from yaml file
26
- feature_extractor_SiftExtraction_num_octaves=$(yq '.feature_extractor.SiftExtraction_num_octaves // 4.0' $settings_yaml)
27
- feature_extractor_SiftExtraction_octave_resolution=$(yq '.feature_extractor.SiftExtraction_octave_resolution // 3.0' $settings_yaml)
28
- feature_extractor_SiftExtraction_peak_threshold=$(yq '.feature_extractor.SiftExtraction_peak_threshold // 0.0066666666666666671' $settings_yaml)
29
- feature_extractor_SiftExtraction_edge_threshold=$(yq '.feature_extractor.SiftExtraction_edge_threshold // 10.0' $settings_yaml)
30
- feature_extractor_SiftExtraction_dsp_min_scale=$(yq '.feature_extractor.SiftExtraction_dsp_min_scale // 0.1666666666666666' $settings_yaml)
31
- feature_extractor_SiftExtraction_dsp_max_scale=$(yq '.feature_extractor.SiftExtraction_dsp_max_scale // 3.0' $settings_yaml)
32
- feature_extractor_SiftExtraction_dsp_num_scales=$(yq '.feature_extractor.SiftExtraction_dsp_num_scales // 10.0' $settings_yaml)
33
-
34
- matcher_SiftMatching_max_ratio=$(yq '.matcher.SiftMatching_max_ratio // 0.80000000000000004' $settings_yaml)
35
- matcher_SiftMatching_max_distance=$(yq '.matcher.SiftMatching_max_distance // 0.69999999999999996' $settings_yaml)
36
- matcher_TwoViewGeometry_min_num_inliers=$(yq '.matcher.TwoViewGeometry_min_num_inliers // 15.0' $settings_yaml)
37
- matcher_TwoViewGeometry_max_error=$(yq '.matcher.TwoViewGeometry_max_error // 4.0' $settings_yaml)
38
- matcher_TwoViewGeometry_confidence=$(yq '.matcher.TwoViewGeometry_confidence // 0.999' $settings_yaml)
39
- matcher_TwoViewGeometry_min_inlier_ratio=$(yq '.matcher.TwoViewGeometry_min_inlier_ratio // 0.25' $settings_yaml)
40
- matcher_SequentialMatching_overlap=$(yq '.matcher.SequentialMatching_overlap // 10.0' $settings_yaml)
41
- matcher_SequentialMatching_quadratic_overlap=$(yq '.matcher.SequentialMatching_quadratic_overlap // 1.0' $settings_yaml)
42
- matcher_ExhaustiveMatching_block_size=$(yq '.matcher.ExhaustiveMatching_block_size // 50.0' $settings_yaml)
43
 
44
  # Create colmap image list
45
  colmap_image_list="${exp_folder_colmap}/colmap_image_list.txt"
46
- awk -F, 'NR>1 { split($2,a,"/"); print a[2] }' "$rgb_csv" > "$colmap_image_list"
47
 
48
  # Create Colmap Database
49
  database="${exp_folder_colmap}/colmap_database.db"
50
  rm -rf ${database}
51
- colmap database_creator --database_path ${database}
52
 
53
  # Feature extractor
54
  echo " colmap feature_extractor ..."
55
 
56
- if [ "${calibration_model}" == "UNKNOWN" ]
57
  then
58
  echo " camera model : $calibration_model"
59
  colmap feature_extractor \
@@ -64,18 +42,13 @@ colmap feature_extractor \
64
  --ImageReader.single_camera 1 \
65
  --ImageReader.single_camera_per_folder 1 \
66
  --FeatureExtraction.use_gpu ${use_gpu}
67
- # --SiftExtraction.num_octaves ${feature_extractor_SiftExtraction_num_octaves} \
68
- # --SiftExtraction.octave_resolution ${feature_extractor_SiftExtraction_octave_resolution} \
69
- # --SiftExtraction.peak_threshold ${feature_extractor_SiftExtraction_peak_threshold} \
70
- # --SiftExtraction.edge_threshold ${feature_extractor_SiftExtraction_edge_threshold} \
71
- # --SiftExtraction.dsp_min_scale ${feature_extractor_SiftExtraction_dsp_min_scale} \
72
- # --SiftExtraction.dsp_max_scale ${feature_extractor_SiftExtraction_dsp_max_scale} \
73
- # --SiftExtraction.dsp_num_scales ${feature_extractor_SiftExtraction_dsp_num_scales}
74
  fi
75
 
76
- if [ "${calibration_model}" == "Pinhole" ]
77
  then
 
78
  echo " camera model : $calibration_model"
 
79
  colmap feature_extractor \
80
  --database_path ${database} \
81
  --image_path ${rgb_path} \
@@ -84,69 +57,58 @@ then
84
  --ImageReader.single_camera 1 \
85
  --ImageReader.single_camera_per_folder 1 \
86
  --FeatureExtraction.use_gpu ${use_gpu} \
87
- --ImageReader.camera_params "${fx}, ${fy}, ${cx}, ${cy}" \
88
- --SiftExtraction.num_octaves ${feature_extractor_SiftExtraction_num_octaves} \
89
- --SiftExtraction.octave_resolution ${feature_extractor_SiftExtraction_octave_resolution} \
90
- --SiftExtraction.peak_threshold ${feature_extractor_SiftExtraction_peak_threshold} \
91
- --SiftExtraction.edge_threshold ${feature_extractor_SiftExtraction_edge_threshold} \
92
- --SiftExtraction.dsp_min_scale ${feature_extractor_SiftExtraction_dsp_min_scale} \
93
- --SiftExtraction.dsp_max_scale ${feature_extractor_SiftExtraction_dsp_max_scale} \
94
- --SiftExtraction.dsp_num_scales ${feature_extractor_SiftExtraction_dsp_num_scales}
95
  fi
96
 
97
- if [ "${calibration_model}" == "OPENCV" ]
98
  then
99
-
100
- k1=$(grep -oP '(?<=Camera0\.k1:\s)-?\d+\.\d+' "$calibration_yaml")
101
- k2=$(grep -oP '(?<=Camera0\.k2:\s)-?\d+\.\d+' "$calibration_yaml")
102
- p1=$(grep -oP '(?<=Camera0\.p1:\s)-?\d+\.\d+' "$calibration_yaml")
103
- p2=$(grep -oP '(?<=Camera0\.p2:\s)-?\d+\.\d+' "$calibration_yaml")
104
- k3=$(grep -oP '(?<=Camera0\.k3:\s)-?\d+\.\d+' "$calibration_yaml")
105
-
106
  echo " camera model : $calibration_model"
 
 
107
  colmap feature_extractor \
108
  --database_path ${database} \
109
  --image_path ${rgb_path} \
110
  --image_list_path ${colmap_image_list} \
111
- --ImageReader.camera_model ${calibration_model} \
112
  --ImageReader.single_camera 1 \
113
  --ImageReader.single_camera_per_folder 1 \
114
  --FeatureExtraction.use_gpu ${use_gpu} \
115
- --ImageReader.camera_params "${fx}, ${fy}, ${cx}, ${cy}, ${k1}, ${k2}, ${p1}, ${p2}" \
116
- --SiftExtraction.num_octaves ${feature_extractor_SiftExtraction_num_octaves} \
117
- --SiftExtraction.octave_resolution ${feature_extractor_SiftExtraction_octave_resolution} \
118
- --SiftExtraction.peak_threshold ${feature_extractor_SiftExtraction_peak_threshold} \
119
- --SiftExtraction.edge_threshold ${feature_extractor_SiftExtraction_edge_threshold} \
120
- --SiftExtraction.dsp_min_scale ${feature_extractor_SiftExtraction_dsp_min_scale} \
121
- --SiftExtraction.dsp_max_scale ${feature_extractor_SiftExtraction_dsp_max_scale} \
122
- --SiftExtraction.dsp_num_scales ${feature_extractor_SiftExtraction_dsp_num_scales}
123
  fi
124
 
125
- if [ "${calibration_model}" == "OPENCV_FISHEYE" ]
126
  then
 
 
 
 
 
 
 
 
 
 
 
 
 
 
127
 
128
- k1=$(grep -oP '(?<=Camera0\.k1:\s)-?\d+\.\d+' "$calibration_yaml")
129
- k2=$(grep -oP '(?<=Camera0\.k2:\s)-?\d+\.\d+' "$calibration_yaml")
130
- k3=$(grep -oP '(?<=Camera0\.k3:\s)-?\d+\.\d+' "$calibration_yaml")
131
- k4=$(grep -oP '(?<=Camera0\.k4:\s)-?\d+\.\d+' "$calibration_yaml")
132
-
133
  echo " camera model : $calibration_model"
 
 
134
  colmap feature_extractor \
135
  --database_path ${database} \
136
  --image_path ${rgb_path} \
137
  --image_list_path ${colmap_image_list} \
138
- --ImageReader.camera_model ${calibration_model} \
139
  --ImageReader.single_camera 1 \
140
  --ImageReader.single_camera_per_folder 1 \
141
  --FeatureExtraction.use_gpu ${use_gpu} \
142
- --ImageReader.camera_params "${fx}, ${fy}, ${cx}, ${cy}, ${k1}, ${k2}, ${k3}, ${k4}" \
143
- --SiftExtraction.num_octaves ${feature_extractor_SiftExtraction_num_octaves} \
144
- --SiftExtraction.octave_resolution ${feature_extractor_SiftExtraction_octave_resolution} \
145
- --SiftExtraction.peak_threshold ${feature_extractor_SiftExtraction_peak_threshold} \
146
- --SiftExtraction.edge_threshold ${feature_extractor_SiftExtraction_edge_threshold} \
147
- --SiftExtraction.dsp_min_scale ${feature_extractor_SiftExtraction_dsp_min_scale} \
148
- --SiftExtraction.dsp_max_scale ${feature_extractor_SiftExtraction_dsp_max_scale} \
149
- --SiftExtraction.dsp_num_scales ${feature_extractor_SiftExtraction_dsp_num_scales}
150
  fi
151
 
152
  # Exhaustive Feature Matcher
@@ -155,15 +117,7 @@ then
155
  echo " colmap exhaustive_matcher ..."
156
  colmap exhaustive_matcher \
157
  --database_path ${database} \
158
- --FeatureMatching.use_gpu ${use_gpu} \
159
- --SiftMatching.max_ratio "${matcher_SiftMatching_max_ratio}" \
160
- --SiftMatching.max_distance "${matcher_SiftMatching_max_distance}" \
161
- --TwoViewGeometry.min_num_inliers "${matcher_TwoViewGeometry_min_num_inliers}" \
162
- --TwoViewGeometry.max_error "${matcher_TwoViewGeometry_max_error}" \
163
- --TwoViewGeometry.confidence "${matcher_TwoViewGeometry_confidence}" \
164
- --TwoViewGeometry.min_inlier_ratio "${matcher_TwoViewGeometry_min_inlier_ratio}" \
165
- --ExhaustiveMatching.block_size "${matcher_ExhaustiveMatching_block_size}"
166
-
167
  fi
168
 
169
  # Sequential Feature Matcher
@@ -186,13 +140,16 @@ then
186
  --database_path "${database}" \
187
  --SequentialMatching.loop_detection 1 \
188
  --SequentialMatching.vocab_tree_path ${vocabulary_tree} \
189
- --FeatureMatching.use_gpu "${use_gpu}" \
190
- --SiftMatching.max_ratio "${matcher_SiftMatching_max_ratio}" \
191
- --SiftMatching.max_distance "${matcher_SiftMatching_max_distance}" \
192
- --TwoViewGeometry.min_num_inliers "${matcher_TwoViewGeometry_min_num_inliers}" \
193
- --TwoViewGeometry.max_error "${matcher_TwoViewGeometry_max_error}" \
194
- --TwoViewGeometry.confidence "${matcher_TwoViewGeometry_confidence}" \
195
- --TwoViewGeometry.min_inlier_ratio "${matcher_TwoViewGeometry_min_inlier_ratio}" \
196
- --SequentialMatching.overlap "${matcher_SequentialMatching_overlap}" \
197
- --SequentialMatching.quadratic_overlap "${matcher_SequentialMatching_quadratic_overlap}"
198
- fi
 
 
 
 
10
  rgb_csv="$6"
11
  matcher_type="$7"
12
  use_gpu="$8"
13
+ camera_name="$9"
14
 
15
  exp_folder_colmap="${exp_folder}/colmap_${exp_id}"
16
  rgb_dir=$(awk -F, 'NR==2 { split($2,a,"/"); print a[1]; exit }' "$rgb_csv")
17
  rgb_path="${sequence_path}/${rgb_dir}"
18
 
19
+ # Get calibration model
20
+ read -r calibration_model more_ <<< $(python3 Baselines/colmap/get_calibration.py "$calibration_yaml" "$camera_name")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
 
22
  # Create colmap image list
23
  colmap_image_list="${exp_folder_colmap}/colmap_image_list.txt"
24
+ python3 Baselines/colmap/create_colmap_image_list.py "$rgb_csv" "$colmap_image_list" "$camera_name"
25
 
26
  # Create Colmap Database
27
  database="${exp_folder_colmap}/colmap_database.db"
28
  rm -rf ${database}
29
+ colmap database_creator --database_path ${database}
30
 
31
  # Feature extractor
32
  echo " colmap feature_extractor ..."
33
 
34
+ if [ "${calibration_model}" == "unknown" ]
35
  then
36
  echo " camera model : $calibration_model"
37
  colmap feature_extractor \
 
42
  --ImageReader.single_camera 1 \
43
  --ImageReader.single_camera_per_folder 1 \
44
  --FeatureExtraction.use_gpu ${use_gpu}
 
 
 
 
 
 
 
45
  fi
46
 
47
+ if [ "${calibration_model}" == "pinhole" ]
48
  then
49
+ read -r calibration_model fx fy cx cy <<< $(python3 Baselines/colmap/get_calibration.py "$calibration_yaml" "$camera_name")
50
  echo " camera model : $calibration_model"
51
+ echo " fx: $fx , fy: $fy , cx: $cx , cy: $cy"
52
  colmap feature_extractor \
53
  --database_path ${database} \
54
  --image_path ${rgb_path} \
 
57
  --ImageReader.single_camera 1 \
58
  --ImageReader.single_camera_per_folder 1 \
59
  --FeatureExtraction.use_gpu ${use_gpu} \
60
+ --ImageReader.camera_params "${fx},${fy},${cx},${cy}"
 
 
 
 
 
 
 
61
  fi
62
 
63
+ if [ "${calibration_model}" == "radtan4" ]
64
  then
65
+ read -r calibration_model fx fy cx cy k1 k2 p1 p2 <<< $(python3 Baselines/colmap/get_calibration.py "$calibration_yaml" "$camera_name")
 
 
 
 
 
 
66
  echo " camera model : $calibration_model"
67
+ echo " fx: $fx , fy: $fy , cx: $cx , cy: $cy"
68
+ echo " k1: $k1 , k2: $k2 , p1: $p1 , p2: $p2"
69
  colmap feature_extractor \
70
  --database_path ${database} \
71
  --image_path ${rgb_path} \
72
  --image_list_path ${colmap_image_list} \
73
+ --ImageReader.camera_model "OPENCV" \
74
  --ImageReader.single_camera 1 \
75
  --ImageReader.single_camera_per_folder 1 \
76
  --FeatureExtraction.use_gpu ${use_gpu} \
77
+ --ImageReader.camera_params "${fx},${fy},${cx},${cy},${k1},${k2},${p1},${p2}"
 
 
 
 
 
 
 
78
  fi
79
 
80
+ if [ "${calibration_model}" == "radtan5" ]
81
  then
82
+ read -r calibration_model fx fy cx cy k1 k2 p1 p2 k3 <<< $(python3 Baselines/colmap/get_calibration.py "$calibration_yaml" "$camera_name")
83
+ echo " camera model : $calibration_model"
84
+ echo " fx: $fx , fy: $fy , cx: $cx , cy: $cy"
85
+ echo " k1: $k1 , k2: $k2 , p1: $p1 , p2: $p2, k3: $k3"
86
+ colmap feature_extractor \
87
+ --database_path ${database} \
88
+ --image_path ${rgb_path} \
89
+ --image_list_path ${colmap_image_list} \
90
+ --ImageReader.camera_model "FULL_OPENCV" \
91
+ --ImageReader.single_camera 1 \
92
+ --ImageReader.single_camera_per_folder 1 \
93
+ --FeatureExtraction.use_gpu ${use_gpu} \
94
+ --ImageReader.camera_params "${fx},${fy},${cx},${cy},${k1},${k2},${p1},${p2},${k3},0,0,0"
95
+ fi
96
 
97
+ if [ "${calibration_model}" == "equid4" ]
98
+ then
99
+ read -r calibration_model fx fy cx cy k1 k2 k3 k4 <<< $(python3 Baselines/colmap/get_calibration.py "$calibration_yaml" "$camera_name")
 
 
100
  echo " camera model : $calibration_model"
101
+ echo " fx: $fx , fy: $fy , cx: $cx , cy: $cy"
102
+ echo " k1: $k1 , k2: $k2 , k3: $k3 , k4: $k4"
103
  colmap feature_extractor \
104
  --database_path ${database} \
105
  --image_path ${rgb_path} \
106
  --image_list_path ${colmap_image_list} \
107
+ --ImageReader.camera_model "OPENCV_FISHEYE"\
108
  --ImageReader.single_camera 1 \
109
  --ImageReader.single_camera_per_folder 1 \
110
  --FeatureExtraction.use_gpu ${use_gpu} \
111
+ --ImageReader.camera_params "${fx},${fy},${cx},${cy},${k1},${k2},${k3},${k4}"
 
 
 
 
 
 
 
112
  fi
113
 
114
  # Exhaustive Feature Matcher
 
117
  echo " colmap exhaustive_matcher ..."
118
  colmap exhaustive_matcher \
119
  --database_path ${database} \
120
+ --FeatureMatching.use_gpu ${use_gpu}
 
 
 
 
 
 
 
 
121
  fi
122
 
123
  # Sequential Feature Matcher
 
140
  --database_path "${database}" \
141
  --SequentialMatching.loop_detection 1 \
142
  --SequentialMatching.vocab_tree_path ${vocabulary_tree} \
143
+ --FeatureMatching.use_gpu "${use_gpu}"
144
+ fi
145
+
146
+ # LightGlue Feature Matcher
147
+ if [ "${matcher_type}" == "custom" ]
148
+ then
149
+ colmap exhaustive_matcher \
150
+ --database_path ${database} \
151
+ --FeatureMatching.use_gpu ${use_gpu}
152
+
153
+ pixi run -e lightglue python3 Baselines/colmap/feature_matcher.py --database ${database} --rgb_path ${rgb_path} --rgb_csv ${rgb_csv}
154
+ fi
155
+
colmap_reconstruction.sh CHANGED
@@ -10,6 +10,7 @@ exp_folder=""
10
  exp_id=""
11
  calibration_yaml=""
12
  rgb_csv=""
 
13
 
14
  # Function to split key-value pairs and assign them to variables
15
  split_and_assign() {
@@ -36,6 +37,7 @@ echo " Use GPU : $use_gpu"
36
  echo " Settings YAML : $settings_yaml"
37
  echo " Calibration YAML : $calibration_yaml"
38
  echo " RGB CSV : $rgb_csv"
 
39
  echo "============================================================"
40
 
41
  # Create folder to save colmap files
@@ -46,11 +48,11 @@ mkdir "$exp_folder_colmap"
46
  # Run COLMAP scripts for matching and mapping
47
  export QT_QPA_PLATFORM_PLUGIN_PATH="$CONDA_PREFIX/plugins/platforms"
48
  colmap_args="$sequence_path $exp_folder $exp_id $settings_yaml $calibration_yaml $rgb_csv"
49
- ./Baselines/colmap/colmap_matcher.sh $colmap_args $matcher_type $use_gpu
50
- ./Baselines/colmap/colmap_mapper.sh $colmap_args
51
 
52
  # Convert COLMAP outputs to a format suitable for VSLAM-LAB
53
- python Baselines/colmap/colmap_to_vslamlab.py $sequence_path $exp_folder $exp_id $verbose $rgb_csv
54
 
55
  # Visualization with colmap gui
56
  if [ "$verbose" -eq 1 ]; then
 
10
  exp_id=""
11
  calibration_yaml=""
12
  rgb_csv=""
13
+ camera_name="rgb_0"
14
 
15
  # Function to split key-value pairs and assign them to variables
16
  split_and_assign() {
 
37
  echo " Settings YAML : $settings_yaml"
38
  echo " Calibration YAML : $calibration_yaml"
39
  echo " RGB CSV : $rgb_csv"
40
+ echo " Camera Name : $camera_name"
41
  echo "============================================================"
42
 
43
  # Create folder to save colmap files
 
48
  # Run COLMAP scripts for matching and mapping
49
  export QT_QPA_PLATFORM_PLUGIN_PATH="$CONDA_PREFIX/plugins/platforms"
50
  colmap_args="$sequence_path $exp_folder $exp_id $settings_yaml $calibration_yaml $rgb_csv"
51
+ ./Baselines/colmap/colmap_matcher.sh $colmap_args $matcher_type $use_gpu $camera_name
52
+ ./Baselines/colmap/colmap_mapper.sh $colmap_args $camera_name
53
 
54
  # Convert COLMAP outputs to a format suitable for VSLAM-LAB
55
+ python Baselines/colmap/colmap_to_vslamlab.py $sequence_path $exp_folder $exp_id $verbose $rgb_csv $camera_name
56
 
57
  # Visualization with colmap gui
58
  if [ "$verbose" -eq 1 ]; then
colmap_to_vslamlab.py CHANGED
@@ -67,14 +67,14 @@ def write_trajectory_tum_format(file_name, image_ts, t_wc, q_wc_xyzw):
67
  data = data[data[:, 0].argsort()]
68
 
69
  with open(file_name, 'w', newline='') as file:
70
- file.write('timestamp,tx,ty,tz,qx,qy,qz,qw\n')
71
  for row in data:
72
  file.write(','.join(f'{x:.15f}' for x in row) + '\n')
73
 
74
- def get_timestamps(files_path, rgb_file):
75
  print(f"getTimestamps: {os.path.join(files_path, rgb_file)}")
76
  df = pd.read_csv(rgb_file)
77
- ts = df['ts_rgb0 (s)'].to_list()
78
  return ts
79
 
80
  if __name__ == "__main__":
@@ -84,13 +84,14 @@ if __name__ == "__main__":
84
  exp_id = sys.argv[3]
85
  verbose = bool(int(sys.argv[4]))
86
  rgb_file = sys.argv[5]
 
87
 
88
  images_file = os.path.join(exp_folder, f'colmap_{exp_id}', 'images.txt')
89
 
90
  number_of_header_lines = 4
91
  image_id, t_wc, q_wc_xyzw = get_colmap_keyframes(images_file, number_of_header_lines, verbose)
92
 
93
- image_ts = np.array(get_timestamps(sequence_path, rgb_file))
94
  timestamps = []
95
  for id in image_id:
96
  timestamps.append(float(image_ts[id-1]))
 
67
  data = data[data[:, 0].argsort()]
68
 
69
  with open(file_name, 'w', newline='') as file:
70
+ file.write('ts (ns),tx (m),ty (m),tz (m),qx,qy,qz,qw\n')
71
  for row in data:
72
  file.write(','.join(f'{x:.15f}' for x in row) + '\n')
73
 
74
def get_timestamps(files_path, rgb_file, camera_name):
    """Return the per-frame timestamps (ns) listed in the rgb CSV for *camera_name*.

    Reads the ``ts_<camera_name> (ns)`` column of *rgb_file* and returns it
    as a plain Python list.
    """
    print(f"getTimestamps: {os.path.join(files_path, rgb_file)}")
    frame_table = pd.read_csv(rgb_file)
    return frame_table[f'ts_{camera_name} (ns)'].to_list()
79
 
80
  if __name__ == "__main__":
 
84
  exp_id = sys.argv[3]
85
  verbose = bool(int(sys.argv[4]))
86
  rgb_file = sys.argv[5]
87
+ camera_name = sys.argv[6]
88
 
89
  images_file = os.path.join(exp_folder, f'colmap_{exp_id}', 'images.txt')
90
 
91
  number_of_header_lines = 4
92
  image_id, t_wc, q_wc_xyzw = get_colmap_keyframes(images_file, number_of_header_lines, verbose)
93
 
94
+ image_ts = np.array(get_timestamps(sequence_path, rgb_file, camera_name))
95
  timestamps = []
96
  for id in image_id:
97
  timestamps.append(float(image_ts[id-1]))
create_colmap_image_list.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import pandas as pd
3
+ import os
4
def create_colmap_image_list(rgb_csv, colmap_image_list_txt, cam_name):
    """Write a COLMAP image-list file: one image basename per line.

    Reads the ``path_<cam_name>`` column of the VSLAM-LAB rgb CSV and writes
    each entry's basename to *colmap_image_list_txt*.
    """
    frame_table = pd.read_csv(rgb_csv)
    basenames = [os.path.basename(p) for p in frame_table[f'path_{cam_name}']]
    with open(colmap_image_list_txt, 'w') as out:
        out.writelines(f"{name}\n" for name in basenames)
13
+
14
if __name__ == "__main__":
    # CLI entry point: rgb CSV -> COLMAP image list for one camera.
    cli = argparse.ArgumentParser()
    cli.add_argument("rgb_csv", help="Path to the rgb_csv")
    cli.add_argument("colmap_image_list", help="Path to the colmap_image_list")
    cli.add_argument("camera_name", help="camera_name")
    opts = cli.parse_args()
    create_colmap_image_list(opts.rgb_csv, opts.colmap_image_list, opts.camera_name)
feature_matcher.py ADDED
@@ -0,0 +1,457 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sqlite3
2
+ from feature_matcher_utilities import extract_keypoints, feature_matching, unrotate_kps_W
3
+ import os
4
+ import torch
5
+ import matplotlib.pyplot as plt
6
+ from tqdm import tqdm
7
+ import numpy as np
8
+ import cv2
9
+ import argparse
10
+ from pathlib import Path
11
+
12
+ from lightglue import LightGlue
13
+
14
+ # ==========================================
15
+ # ==========================================
16
+ # DATABASE UTILITIES
17
+ # ==========================================
18
def load_colmap_db(db_path):
    """Open an existing COLMAP SQLite database and return (connection, cursor).

    Raises FileNotFoundError if *db_path* does not exist — sqlite3 would
    otherwise silently create an empty database at that path.
    """
    if not os.path.exists(db_path):
        raise FileNotFoundError(f"Database file not found: {db_path}")
    connection = sqlite3.connect(db_path)
    return connection, connection.cursor()
24
+
25
def create_pair_id(image_id1, image_id2):
    """Return COLMAP's canonical pair id: min_id * 2147483647 + max_id."""
    low, high = sorted((image_id1, image_id2))
    return low * 2147483647 + high
29
+
30
def clean_database(cursor):
    """Removes existing features and matches to ensure a clean overwrite."""
    for table_name in ("keypoints", "descriptors", "matches"):
        cursor.execute(f"DELETE FROM {table_name};")
    print("Database cleaned (keypoints, descriptors, matches removed).")
36
+
37
def insert_keypoints(cursor, image_id, keypoints, descriptors):
    """
    keypoints: (N, 2) numpy array, float32
    descriptors: (N, D) numpy array, float32
    """
    kp_rows, kp_cols = keypoints.shape
    cursor.execute(
        "INSERT INTO keypoints(image_id, rows, cols, data) VALUES(?, ?, ?, ?)",
        (image_id, kp_rows, kp_cols, keypoints.tobytes()),
    )

    # Descriptors stored alongside (COLMAP keeps them in a separate table).
    d_rows, d_cols = descriptors.shape
    cursor.execute(
        "INSERT INTO descriptors(image_id, rows, cols, data) VALUES(?, ?, ?, ?)",
        (image_id, d_rows, d_cols, descriptors.tobytes()),
    )
56
+
57
def insert_matches(cursor, image_id1, image_id2, matches):
    """
    matches: (K, 2) numpy array, uint32.
    Col 0 is index in image1, Col 1 is index in image2
    """
    n_rows, n_cols = matches.shape
    cursor.execute(
        "INSERT INTO matches(pair_id, rows, cols, data) VALUES(?, ?, ?, ?)",
        (create_pair_id(image_id1, image_id2), n_rows, n_cols, matches.tobytes()),
    )
69
+
70
def verify_matches_visual(cursor, image_id1, image_id2, image_dir):
    """Plot the stored matches between two images straight from the COLMAP db.

    Args:
        cursor: SQLite cursor connected to the database.
        image_id1: ID of the first image.
        image_id2: ID of the second image.
        image_dir: Path to the directory containing the images.
    """
    # COLMAP stores each pair once with the smaller id first; remember whether
    # we swapped so the match columns can be re-aligned afterwards.
    swapped = image_id1 > image_id2
    if swapped:
        image_id1, image_id2 = image_id2, image_id1

    pair_id = image_id1 * 2147483647 + image_id2

    cursor.execute("SELECT data FROM matches WHERE pair_id = ?", (pair_id,))
    match_row = cursor.fetchone()
    if match_row is None:
        print(f"No matches found in DB for pair {image_id1}-{image_id2}")
        return

    # Matches are serialized as uint32 (N, 2) index pairs.
    matches = np.frombuffer(match_row[0], dtype=np.uint32).reshape(-1, 2)
    if swapped:
        matches = matches[:, [1, 0]]

    def fetch_name_and_kpts(img_id):
        # Image name, then its keypoints decoded as float32 (N, 2).
        cursor.execute("SELECT name FROM images WHERE image_id = ?", (img_id,))
        img_name = cursor.fetchone()[0]
        cursor.execute("SELECT data FROM keypoints WHERE image_id = ?", (img_id,))
        raw = cursor.fetchone()
        return img_name, np.frombuffer(raw[0], dtype=np.float32).reshape(-1, 2)

    name1, kpts1 = fetch_name_and_kpts(image_id1)
    name2, kpts2 = fetch_name_and_kpts(image_id2)

    # Keep only the matched keypoints of each image.
    matched1 = kpts1[matches[:, 0]]
    matched2 = kpts2[matches[:, 1]]

    # Load both images and convert BGR (OpenCV) to RGB (Matplotlib).
    img1 = cv2.cvtColor(cv2.imread(os.path.join(image_dir, name1)), cv2.COLOR_BGR2RGB)
    img2 = cv2.cvtColor(cv2.imread(os.path.join(image_dir, name2)), cv2.COLOR_BGR2RGB)

    # Side-by-side canvas; the second image is shifted right by w1.
    h1, w1, _ = img1.shape
    h2, w2, _ = img2.shape
    canvas = np.zeros((max(h1, h2), w1 + w2, 3), dtype=np.uint8)
    canvas[:h1, :w1, :] = img1
    canvas[:h2, w1:w1 + w2, :] = img2

    plt.figure(figsize=(15, 10))
    plt.imshow(canvas)
    for (x1, y1), (x2, y2) in zip(matched1, matched2):
        plt.plot([x1, x2 + w1], [y1, y2], 'c-', alpha=0.6, linewidth=0.5)
        plt.plot(x1, y1, 'r.', markersize=2)
        plt.plot(x2 + w1, y2, 'r.', markersize=2)

    plt.title(f"DB Verification: {name1} (ID:{image_id1}) <-> {name2} (ID:{image_id2}) | Matches: {len(matches)}")
    plt.axis('off')
    plt.tight_layout()
    plt.show()
166
+
167
+ import numpy as np
168
+ import matplotlib.pyplot as plt
169
+ import cv2
170
+ import os
171
+ import sqlite3
172
+
173
def plot_matches_from_db(cursor, image_id1, image_id2, image_dir):
    """
    Reads matches and keypoints for a specific pair from the COLMAP DB and plots them.

    Args:
        cursor: SQLite cursor.
        image_id1, image_id2: The IDs of the two images to plot.
        image_dir: Path to the directory containing the actual image files.
    """
    # COLMAP requires id1 < id2 when forming the unique pair id.
    swapped = image_id1 > image_id2
    id_a, id_b = (image_id2, image_id1) if swapped else (image_id1, image_id2)
    pair_id = id_a * 2147483647 + id_b

    print(f"Fetching matches for pair {image_id1}-{image_id2} (PairID: {pair_id})...")
    cursor.execute("SELECT data, rows, cols FROM matches WHERE pair_id = ?", (pair_id,))
    match_row = cursor.fetchone()
    if match_row is None:
        print(f"No matches found in database for Pair {image_id1}-{image_id2}")
        return

    # Matches are serialized as uint32 (N, 2) index pairs.
    matches = np.frombuffer(match_row[0], dtype=np.uint32).reshape(-1, 2)
    if swapped:
        # Re-orient columns so column 0 indexes image_id1's keypoints.
        matches = matches[:, [1, 0]]

    def fetch_image_record(img_id):
        cursor.execute("SELECT name FROM images WHERE image_id = ?", (img_id,))
        res = cursor.fetchone()
        if not res:
            raise ValueError(f"Image ID {img_id} not found in 'images' table.")
        cursor.execute("SELECT data FROM keypoints WHERE image_id = ?", (img_id,))
        kp_res = cursor.fetchone()
        if not kp_res:
            raise ValueError(f"No keypoints found for Image ID {img_id}.")
        # Keypoints decode as float32 (N, 2).
        return res[0], np.frombuffer(kp_res[0], dtype=np.float32).reshape(-1, 2)

    name1, kpts1 = fetch_image_record(image_id1)
    name2, kpts2 = fetch_image_record(image_id2)

    matched1 = kpts1[matches[:, 0]]
    matched2 = kpts2[matches[:, 1]]

    path1 = os.path.join(image_dir, name1)
    path2 = os.path.join(image_dir, name2)
    if not os.path.exists(path1) or not os.path.exists(path2):
        print(f"Error: Could not find image files at \n{path1}\n{path2}")
        return

    img1 = cv2.cvtColor(cv2.imread(path1), cv2.COLOR_BGR2RGB)
    img2 = cv2.cvtColor(cv2.imread(path2), cv2.COLOR_BGR2RGB)

    # Side-by-side canvas; x-coordinates of image2 are shifted by w1 below.
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    canvas = np.zeros((max(h1, h2), w1 + w2, 3), dtype=np.uint8)
    canvas[:h1, :w1] = img1
    canvas[:h2, w1:w1 + w2] = img2

    plt.figure(figsize=(20, 10))
    plt.imshow(canvas)
    for (x1, y1), (x2, y2) in zip(matched1, matched2):
        plt.plot([x1, x2 + w1], [y1, y2], 'g-', alpha=0.5, linewidth=1.5)
        plt.plot(x1, y1, 'r.', markersize=4)
        plt.plot(x2 + w1, y2, 'r.', markersize=4)

    plt.title(f"{name1} <-> {name2} | Total Matches: {len(matches)}")
    plt.axis('off')
    plt.tight_layout()
    plt.show()
272
+
273
def load_sift_keypoints(cursor):
    """Return {image_id: (rows, cols) float32 keypoint array} for every image in the db."""
    cursor.execute("""
        SELECT image_id, rows, cols, data
        FROM keypoints
    """)
    return {
        image_id: np.frombuffer(blob, dtype=np.float32).reshape((n_rows, n_cols))
        for image_id, n_rows, n_cols, blob in cursor.fetchall()
    }
287
+
288
def load_sift_matches(cursor):
    """Return {pair_id: (K, 2) uint32 match array, or None when the pair has no data}."""
    sift_matches = {}
    cursor.execute("SELECT pair_id, data FROM matches")
    for pair_id, blob in cursor.fetchall():
        if blob is None:
            # Pairs without matches are kept in the dict, flagged with None.
            sift_matches[pair_id] = None
        else:
            # COLMAP stores matches as uint32 pairs
            sift_matches[pair_id] = np.frombuffer(blob, dtype=np.uint32).reshape(-1, 2)
    return sift_matches
305
+
306
def insert_all_inlier_two_view_geometry(cursor, image_id1, image_id2, matches):
    """Insert *matches* verbatim as verified two-view geometry (all inliers).

    COLMAP's mapper reads inlier correspondences from the
    ``two_view_geometries`` table; inserting every match unchanged bypasses
    COLMAP's own geometric verification for this pair.

    Args:
        cursor: SQLite cursor on the COLMAP database.
        image_id1, image_id2: image ids of the pair (any order).
        matches: (K, 2) array of keypoint index pairs; column 0 indexes
            image_id1's keypoints, column 1 indexes image_id2's.

    Fix: the previous version built a ``dummy_F`` identity-matrix blob that
    was never inserted anywhere — dead code whose comment misleadingly
    implied dummy geometry was being stored. It has been removed; behavior
    of the actual INSERT is unchanged.
    """
    # COLMAP stores the pair with the smaller id first; swap the match
    # columns accordingly so they stay aligned with the stored order.
    if image_id1 > image_id2:
        image_id1, image_id2 = image_id2, image_id1
        matches = matches[:, [1, 0]]

    pair_id = image_id1 * 2147483647 + image_id2

    # COLMAP expects uint32 indices
    matches = matches.astype(np.uint32)

    cursor.execute("""
        INSERT OR REPLACE INTO two_view_geometries
        (pair_id, rows, cols, data, config)
        VALUES (?, ?, ?, ?, ?)
    """, (
        pair_id,
        matches.shape[0],
        matches.shape[1],
        matches.tobytes(),
        2  # config=2 → "calibrated / essential matrix"
    ))
333
+
334
if __name__ == "__main__":
    # Hybrid matching pipeline: augments the SIFT keypoints/matches already in
    # a COLMAP database with SuperPoint keypoints matched by LightGlue, then
    # re-inserts the combined sets (all treated as verified inliers).
    #
    # Fixes vs. previous version:
    #   * (h, w) are now remembered per image; before, the last extracted
    #     image's dimensions were reused for every image in the un-rotation
    #     loop, which is wrong when image sizes differ.
    #   * removed a dead `if j == i: continue` branch (j always starts at i+1)
    #     and an unused `descs` local in the extraction loop.

    FEATURE_TYPE = 'superpoint'
    MATCHER_TYPE = 'lightglue'
    # Minimum number of LightGlue matches before a pair's matches are kept.
    LG_MATCHES_THRESHOLD = 40

    parser = argparse.ArgumentParser()
    parser.add_argument("--database", type=Path, required=True)
    parser.add_argument("--rgb_path", type=Path, required=True)
    parser.add_argument("--rgb_csv", type=Path, required=True)
    args, _ = parser.parse_known_args()

    DB_PATH = args.database
    IMAGE_DIR = args.rgb_path
    DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'

    # Load colmap database
    conn, cursor = load_colmap_db(DB_PATH)
    cursor.execute("SELECT image_id, name FROM images")
    images_info = {row[0]: row[1] for row in cursor.fetchall()}
    image_ids = sorted(images_info.keys())

    # Load SIFT keypoints and matches from exhaustive matching
    sift_keypoints = load_sift_keypoints(cursor)
    sift_matches = load_sift_matches(cursor)

    # Clean colmap database
    clean_database(cursor)
    conn.commit()

    # Extract superpoint keypoints
    fts_sp = {}
    keypoints_sp = {}
    rotations_sp = {}
    image_dims = {}  # per-image (h, w) — BUGFIX: no longer reuse the last image's size
    for i in tqdm(range(len(image_ids)), desc="Feature Extraction"):
        id = image_ids[i]
        fname = images_info[id]
        path = os.path.join(IMAGE_DIR, fname)

        feats_dict, feats_norot, h, w = extract_keypoints(path, features=FEATURE_TYPE)
        fts_sp[id] = feats_norot
        image_dims[id] = (h, w)

        keypoints_sp[id] = feats_dict['keypoints'].squeeze(0).cpu().numpy().astype(np.float32)
        rotations_sp[id] = feats_dict['rotations'].squeeze(0).cpu().numpy().astype(np.float32)

    # Combine superpoint and SIFT keypoints, insert into database
    for i in tqdm(range(len(image_ids)), desc="Feature Extraction"):
        id = image_ids[i]
        kpts_sp = keypoints_sp[id]
        rots_sp = rotations_sp[id]
        h, w = image_dims[id]  # BUGFIX: use this image's own dimensions
        kpts_rot = unrotate_kps_W(kpts_sp, rots_sp, h, w)

        N = kpts_rot.shape[0]

        # Pad SuperPoint keypoints to COLMAP's 6-column layout
        # (x, y, scale, orientation, response, octave).
        scales = np.ones((N, 1), dtype=np.float32)
        oris = np.zeros((N, 1), dtype=np.float32)
        resp = np.ones((N, 1), dtype=np.float32)
        octave = np.zeros((N, 1), dtype=np.float32)

        kpts_mod = np.hstack([
            kpts_rot.astype(np.float32),  # (N, 2)
            scales,
            oris,
            resp,
            octave
        ])

        kpts_sift = sift_keypoints[id]

        # SIFT keypoints first so the stored SIFT match indices stay valid.
        kpts = np.vstack([kpts_sift, kpts_mod])
        # Placeholder descriptors — not used downstream.
        descs = np.zeros((kpts.shape[0], 128), dtype=np.float32)

        insert_keypoints(cursor, id, kpts, descs)

    conn.commit()

    # Feature Matching
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    matcher = LightGlue(features='superpoint', depth_confidence=-1, width_confidence=-1, flash=True).eval().to(device)

    for i in tqdm(range(len(image_ids)), desc="Feature Matching"):
        id1 = image_ids[i]
        fname1 = images_info[id1]
        path1 = os.path.join(IMAGE_DIR, fname1)

        for j in range(i + 1, len(image_ids)):
            id2 = image_ids[j]
            fname2 = images_info[id2]
            path2 = os.path.join(IMAGE_DIR, fname2)

            # Get SIFT matches
            pair_id = create_pair_id(id1, id2)
            matches_sift = sift_matches[pair_id]
            if matches_sift is None:
                matches_sift = np.zeros((0, 2), dtype=np.uint32)

            n_sift_kpts_1 = sift_keypoints[id1].shape[0]
            n_sift_kpts_2 = sift_keypoints[id2].shape[0]

            # Compute LightGlue matches
            matches_lg = feature_matching(fts_sp[id1], fts_sp[id2], matcher=matcher, exhaustive=True)

            if matches_lg is not None and len(matches_lg) > LG_MATCHES_THRESHOLD:
                # Offset SuperPoint indices past the SIFT keypoints stacked first.
                matches_lg[:, 0] += n_sift_kpts_1
                matches_lg[:, 1] += n_sift_kpts_2
            else:
                matches_lg = np.zeros((0, 2), dtype=np.uint32)

            # Combine superpoint and SIFT matches, insert into database
            matches = np.vstack([matches_sift, matches_lg])
            insert_matches(cursor, id1, id2, matches)
            insert_all_inlier_two_view_geometry(cursor, id1, id2, matches)

    conn.commit()
    conn.close()
    print("Database overwrite complete.")
feature_matcher_batch.py ADDED
@@ -0,0 +1,465 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sqlite3
2
+ import time
3
+ from feature_matcher_utilities import extract_keypoints, feature_matching, unrotate_kps_W
4
+ import os
5
+ import torch
6
+ import matplotlib.pyplot as plt
7
+ from tqdm import tqdm
8
+ import numpy as np
9
+ import cv2
10
+ import argparse
11
+ from pathlib import Path
12
+
13
+ from PIL import Image
14
+ import torchvision.transforms.functional as TF
15
+
16
+ from lightglue import LightGlue
17
+ from lightglue.utils import rbd
18
+ from lightglue import SuperPoint, SIFT
19
+ from lightglue.utils import load_image
20
+
21
+ # ==========================================
22
+ # ==========================================
23
+ # DATABASE UTILITIES
24
+ # ==========================================
25
def load_colmap_db(db_path):
    """Connect to an existing COLMAP database; return (connection, cursor).

    Raises FileNotFoundError when *db_path* is missing, since sqlite3 would
    otherwise create an empty file in its place.
    """
    if os.path.exists(db_path):
        db_connection = sqlite3.connect(db_path)
        return db_connection, db_connection.cursor()
    raise FileNotFoundError(f"Database file not found: {db_path}")
31
+
32
def create_pair_id(image_id1, image_id2):
    """COLMAP pair id: smaller image id scaled by 2147483647, plus the larger."""
    if image_id2 < image_id1:
        image_id1, image_id2 = image_id2, image_id1
    return image_id1 * 2147483647 + image_id2
36
+
37
def clean_database(cursor):
    """Remove existing keypoints and descriptors to ensure a clean overwrite.

    This batch variant does NOT delete the ``matches`` table (its entry is
    commented out in the table list — presumably matches from the previous
    run are meant to be reused; confirm with the pipeline author).

    Fix: the old log message claimed matches were removed, which was untrue
    and misleading when debugging the database state.
    """
    tables = ["keypoints", "descriptors"]  # 'matches' deliberately left in place
    for table in tables:
        cursor.execute(f"DELETE FROM {table};")
    print("Database cleaned (keypoints, descriptors removed).")
43
+
44
def insert_keypoints(cursor, image_id, keypoints, descriptors):
    """Store one image's keypoints and descriptors as COLMAP blobs.

    keypoints: (N, 2) float32 numpy array.
    descriptors: (N, D) float32 numpy array.
    """
    # Keypoints first, then descriptors — both use the same table layout.
    for table, array in (("keypoints", keypoints), ("descriptors", descriptors)):
        cursor.execute(
            f"INSERT INTO {table}(image_id, rows, cols, data) VALUES(?, ?, ?, ?)",
            (image_id, array.shape[0], array.shape[1], array.tobytes()),
        )
63
+
64
def insert_matches(cursor, image_id1, image_id2, matches):
    """Store a (K, 2) uint32 match array for the image pair.

    Column 0 indexes image1's keypoints; column 1 indexes image2's.
    """
    payload = (
        create_pair_id(image_id1, image_id2),
        matches.shape[0],
        matches.shape[1],
        matches.tobytes(),
    )
    cursor.execute(
        "INSERT INTO matches(pair_id, rows, cols, data) VALUES(?, ?, ?, ?)",
        payload,
    )
76
+
77
def verify_matches_visual(cursor, image_id1, image_id2, image_dir):
    """Plot the matches stored for an image pair directly from the COLMAP db.

    Args:
        cursor: SQLite cursor connected to the database.
        image_id1: ID of the first image.
        image_id2: ID of the second image.
        image_dir: Path to the directory containing the images.
    """
    # COLMAP keys each pair by (min_id, max_id); track whether we swapped so
    # match columns can be re-aligned to the caller's argument order.
    swapped = image_id1 > image_id2
    if swapped:
        image_id1, image_id2 = image_id2, image_id1

    pair_id = image_id1 * 2147483647 + image_id2

    cursor.execute("SELECT data FROM matches WHERE pair_id = ?", (pair_id,))
    match_row = cursor.fetchone()
    if match_row is None:
        print(f"No matches found in DB for pair {image_id1}-{image_id2}")
        return

    # Matches decode as uint32 (N, 2) index pairs.
    matches = np.frombuffer(match_row[0], dtype=np.uint32).reshape(-1, 2)
    if swapped:
        matches = matches[:, [1, 0]]

    def fetch_name_and_kpts(img_id):
        cursor.execute("SELECT name FROM images WHERE image_id = ?", (img_id,))
        img_name = cursor.fetchone()[0]
        cursor.execute("SELECT data FROM keypoints WHERE image_id = ?", (img_id,))
        raw = cursor.fetchone()
        # Keypoints decode as float32 (N, 2).
        return img_name, np.frombuffer(raw[0], dtype=np.float32).reshape(-1, 2)

    name1, kpts1 = fetch_name_and_kpts(image_id1)
    name2, kpts2 = fetch_name_and_kpts(image_id2)

    # Keep only the matched keypoints of each image.
    matched1 = kpts1[matches[:, 0]]
    matched2 = kpts2[matches[:, 1]]

    # Load both images; convert BGR (OpenCV) to RGB (Matplotlib).
    img1 = cv2.cvtColor(cv2.imread(os.path.join(image_dir, name1)), cv2.COLOR_BGR2RGB)
    img2 = cv2.cvtColor(cv2.imread(os.path.join(image_dir, name2)), cv2.COLOR_BGR2RGB)

    # Side-by-side canvas; the second image sits right of the first.
    h1, w1, _ = img1.shape
    h2, w2, _ = img2.shape
    canvas = np.zeros((max(h1, h2), w1 + w2, 3), dtype=np.uint8)
    canvas[:h1, :w1, :] = img1
    canvas[:h2, w1:w1 + w2, :] = img2

    plt.figure(figsize=(15, 10))
    plt.imshow(canvas)
    for (x1, y1), (x2, y2) in zip(matched1, matched2):
        plt.plot([x1, x2 + w1], [y1, y2], 'c-', alpha=0.6, linewidth=0.5)
        plt.plot(x1, y1, 'r.', markersize=2)
        plt.plot(x2 + w1, y2, 'r.', markersize=2)

    plt.title(f"DB Verification: {name1} (ID:{image_id1}) <-> {name2} (ID:{image_id2}) | Matches: {len(matches)}")
    plt.axis('off')
    plt.tight_layout()
    plt.show()
173
+
174
+ import numpy as np
175
+ import matplotlib.pyplot as plt
176
+ import cv2
177
+ import os
178
+ import sqlite3
179
+
180
def plot_matches_from_db(cursor, image_id1, image_id2, image_dir):
    """
    Reads matches and keypoints for a specific pair from the COLMAP DB and plots them.

    Args:
        cursor: SQLite cursor.
        image_id1, image_id2: The IDs of the two images to plot.
        image_dir: Path to the directory containing the actual image files.
    """
    # COLMAP requires id1 < id2 when forming the unique pair id.
    swapped = image_id1 > image_id2
    id_a, id_b = (image_id2, image_id1) if swapped else (image_id1, image_id2)
    pair_id = id_a * 2147483647 + id_b

    print(f"Fetching matches for pair {image_id1}-{image_id2} (PairID: {pair_id})...")
    cursor.execute("SELECT data, rows, cols FROM matches WHERE pair_id = ?", (pair_id,))
    match_row = cursor.fetchone()
    if match_row is None:
        print(f"No matches found in database for Pair {image_id1}-{image_id2}")
        return

    # Matches decode as uint32 (N, 2) index pairs.
    matches = np.frombuffer(match_row[0], dtype=np.uint32).reshape(-1, 2)
    if swapped:
        # Re-orient columns so column 0 indexes image_id1's keypoints.
        matches = matches[:, [1, 0]]

    def fetch_image_record(img_id):
        cursor.execute("SELECT name FROM images WHERE image_id = ?", (img_id,))
        res = cursor.fetchone()
        if not res:
            raise ValueError(f"Image ID {img_id} not found in 'images' table.")
        cursor.execute("SELECT data FROM keypoints WHERE image_id = ?", (img_id,))
        kp_res = cursor.fetchone()
        if not kp_res:
            raise ValueError(f"No keypoints found for Image ID {img_id}.")
        # Keypoints decode as float32 (N, 2).
        return res[0], np.frombuffer(kp_res[0], dtype=np.float32).reshape(-1, 2)

    name1, kpts1 = fetch_image_record(image_id1)
    name2, kpts2 = fetch_image_record(image_id2)

    matched1 = kpts1[matches[:, 0]]
    matched2 = kpts2[matches[:, 1]]

    path1 = os.path.join(image_dir, name1)
    path2 = os.path.join(image_dir, name2)
    if not os.path.exists(path1) or not os.path.exists(path2):
        print(f"Error: Could not find image files at \n{path1}\n{path2}")
        return

    img1 = cv2.cvtColor(cv2.imread(path1), cv2.COLOR_BGR2RGB)
    img2 = cv2.cvtColor(cv2.imread(path2), cv2.COLOR_BGR2RGB)

    # Side-by-side canvas; image2's x-coordinates are shifted by w1 below.
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    canvas = np.zeros((max(h1, h2), w1 + w2, 3), dtype=np.uint8)
    canvas[:h1, :w1] = img1
    canvas[:h2, w1:w1 + w2] = img2

    plt.figure(figsize=(20, 10))
    plt.imshow(canvas)
    for (x1, y1), (x2, y2) in zip(matched1, matched2):
        plt.plot([x1, x2 + w1], [y1, y2], 'g-', alpha=0.5, linewidth=1.5)
        plt.plot(x1, y1, 'r.', markersize=4)
        plt.plot(x2 + w1, y2, 'r.', markersize=4)

    plt.title(f"{name1} <-> {name2} | Total Matches: {len(matches)}")
    plt.axis('off')
    plt.tight_layout()
    plt.show()
279
+
280
+
281
+ if __name__ == "__main__":
282
+
283
+ parser = argparse.ArgumentParser()
284
+
285
+ parser.add_argument("--database", type=Path, required=True)
286
+ parser.add_argument("--rgb_path", type=Path, required=True)
287
+ parser.add_argument("--feature", type=str, required=True)
288
+ parser.add_argument("--matcher", type=str, required=True)
289
+
290
+ args, _ = parser.parse_known_args()
291
+
292
+ DB_PATH = args.database
293
+ IMAGE_DIR = args.rgb_path
294
+ FEATURE_TYPE = args.feature
295
+ MATCHER_TYPE = args.matcher
296
+ DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
297
+ matches_file_path = os.path.join(os.path.dirname(DB_PATH), "matches.txt")
298
+
299
+ conn, cursor = load_colmap_db(DB_PATH)
300
+ cursor.execute("SELECT image_id, name FROM images")
301
+ images_info = {row[0]: row[1] for row in cursor.fetchall()}
302
+ image_ids = sorted(images_info.keys())
303
+
304
+ clean_database(cursor)
305
+ conn.commit()
306
+
307
+ # Keypoint Extraction
308
+ extractor = SuperPoint(max_num_keypoints=128, detection_threshold=0.0).eval().cuda()
309
+ matcher = LightGlue(width_confidence=-1).eval().cuda()
310
+
311
+ total_time = 0.0
312
+ with open(matches_file_path, "w") as f_match:
313
+ for i, id_i in enumerate(tqdm(image_ids, desc="Outer Loop")):
314
+ fname_i = images_info[id_i]
315
+ path_i = os.path.join(IMAGE_DIR, fname_i)
316
+ img_i = Image.open(path_i).convert("RGB")
317
+ t_i = TF.to_tensor(img_i)
318
+ imgs_i = []
319
+ imgs_j = []
320
+ ids_j = []
321
+ for j, id_j in enumerate(tqdm(image_ids[i+1:], desc="Inner Loop", leave=False), start=i+1):
322
+ fname_j = images_info[id_j]
323
+ path_j = os.path.join(IMAGE_DIR, fname_j)
324
+ img_j = Image.open(path_j).convert("RGB")
325
+ t_j = TF.to_tensor(img_j)
326
+ imgs_j.append(t_j)
327
+ imgs_i.append(t_i)
328
+ ids_j.append(id_j)
329
+
330
+ if len(imgs_j) == 0:
331
+ continue
332
+ print(f"Processing batch: Image {fname_i} with {len(imgs_j)} images.")
333
+ batch_i = torch.stack(imgs_i, dim=0).to(DEVICE) # (B,3,H,W)
334
+ batch_j = torch.stack(imgs_j, dim=0).to(DEVICE) # (B,3,H,W)
335
+
336
+ with torch.no_grad():
337
+ feats_i = extractor({"image": batch_i})
338
+ feats_j = extractor({"image": batch_j})
339
+
340
+ kpts = feats_i['keypoints'][0].squeeze(0).cpu().numpy().astype(np.float32)
341
+ descs = feats_i['descriptors'][0].squeeze(0).cpu().numpy().astype(np.float32)
342
+ insert_keypoints(cursor, id_i, kpts, descs)
343
+
344
+ data = {}
345
+ data['image0'] = {}
346
+ data['image1'] = {}
347
+ data['image0']['keypoints'] = feats_i['keypoints']
348
+ data['image0']['descriptors'] = feats_i['descriptors']
349
+ data['image1']['keypoints'] = feats_j['keypoints']
350
+ data['image1']['descriptors'] = feats_j['descriptors']
351
+ # data['image0']['image'] = batch_i
352
+ # data['image1']['image'] = batch_j
353
+
354
+ t0 = time.perf_counter()
355
+ matches01 = matcher(data)
356
+ t1 = time.perf_counter()
357
+ elapsed = t1 - t0
358
+ print(f"Matching took {elapsed:.4f} seconds")
359
+ total_time += elapsed
360
+
361
+ for k in range(len(matches01["matches0"])):
362
+ m0 = matches01["matches0"][k]
363
+ valid = m0 > -1
364
+ if valid.any():
365
+ fname_j = images_info[ids_j[k]]
366
+ f_match.write(f"{fname_i} {fname_j}\n")
367
+ idx0 = torch.nonzero(valid, as_tuple=False).squeeze(1)
368
+ idx1 = m0[valid].long()
369
+ matches_np = torch.stack([idx0, idx1], dim=1).cpu().numpy().astype(int)
370
+ np.savetxt(f_match, matches_np, fmt="%d")
371
+ f_match.write("\n")
372
+
373
+ del batch_i, batch_j, feats_i, feats_j, data, matches01, imgs_i, imgs_j
374
+ torch.cuda.synchronize()
375
+ torch.cuda.empty_cache()
376
+ import gc
377
+ gc.collect()
378
+
379
+ conn.commit()
380
+
381
+ #plot_matches_from_db(cursor, image_ids[0], image_ids[1], IMAGE_DIR)
382
+
383
+ conn.close()
384
+ print("Database overwrite complete.")
385
+ print(f"Total matching time: {total_time:.2f} seconds.")
386
+
387
+ # B = len(image_ids)
388
+ # print("matches01 keys:", list(matches01.keys()))
389
+ # B0, N0 = matches01["matches0"].shape
390
+ # B1, N1 = matches01["matches1"].shape
391
+ # print(f"Batch size: {B0}, Num keypoints image0: {N0}")
392
+ # print(f"Batch size: {B1}, Num keypoints image1: {N1}")
393
+ # print(matches01["matches"][0].shape)
394
+ # print(matches01["matches"][0].shape)
395
+ # saved_images = set()
396
+
397
+ # with open(matches_file_path, "w") as f_match:
398
+ # for i in range(B):
399
+ # for j in range(i + 1, B):
400
+ # fname1 = images_info[image_ids[i]]
401
+ # fname2 = images_info[image_ids[j]]
402
+
403
+ # if "matches" in matches01 and matches01["matches"] is not None:
404
+ # m = matches01["matches"]
405
+ # # Handle (1, M, 2) or (M, 2)
406
+ # if m.dim() == 3:
407
+ # m = m[0]
408
+ # matches_np = m.detach().cpu().numpy().astype(int)
409
+
410
+ # # Fallback: build pairs from matches0
411
+ # else:
412
+ # m0 = matches01["matches0"][0] # (N0,)
413
+ # valid = m0 > -1
414
+ # if valid.any():
415
+ # idx0 = torch.nonzero(valid, as_tuple=False).squeeze(1)
416
+ # idx1 = m0[valid].long()
417
+ # matches_np = torch.stack([idx0, idx1], dim=1).cpu().numpy().astype(int)
418
+ # else:
419
+ # matches_np = np.empty((0, 2), dtype=int)
420
+ # f_match.write(f"{fname1} {fname2}\n")
421
+ # np.savetxt(f_match, matches_np, fmt="%d")
422
+ # f_match.write("\n")
423
+
424
+ # with open(matches_file_path, "w") as f_match:
425
+ # for i in range(B):
426
+ # for j in range(i + 1, B):
427
+ # fname1 = ""
428
+ # fname2 = ""
429
+ # matches_np = np.array([])
430
+ # f_match.write(f"{fname1} {fname2}\n")
431
+ # np.savetxt(f_match, matches_np, fmt="%d")
432
+ # f_match.write("\n")
433
+
434
+ # with open(matches_file_path, "w") as f_match:
435
+ # for i in tqdm(range(len(image_ids)), desc="Feature Extraction"):
436
+ # id1 = image_ids[i]
437
+ # fname1 = images_info[id1]
438
+ # path1 = os.path.join(IMAGE_DIR, fname1)
439
+
440
+ # for j in range(i + 1, len(image_ids)):
441
+ # if j == i:
442
+ # continue
443
+ # id2 = image_ids[j]
444
+
445
+ # fname2 = images_info[id2]
446
+ # path2 = os.path.join(IMAGE_DIR, fname2)
447
+ # matches_tensor = feature_matching(fts[id1], fts[id2], matcher=matcher, features=FEATURE_TYPE, matcher_type=MATCHER_TYPE)
448
+
449
+ # if matches_tensor is not None and len(matches_tensor) > 0:
450
+ # matches_np = matches_tensor.cpu().numpy().astype(np.uint32)
451
+ # #insert_matches(cursor, id1, id2, matches_np)
452
+
453
+ # f_match.write(f"{fname1} {fname2}\n")
454
+ # np.savetxt(f_match, matches_np, fmt="%d")
455
+ # f_match.write("\n")
456
+
457
+ # #verify_matches_visual(cursor, image_ids[i], image_ids[j], IMAGE_DIR)
458
+ # #plt.show()
459
+
460
+ # conn.commit()
461
+
462
+ # #plot_matches_from_db(cursor, image_ids[0], image_ids[1], IMAGE_DIR)
463
+
464
+ # conn.close()
465
+ # print("Database overwrite complete.")
feature_matcher_utilities.py ADDED
@@ -0,0 +1,192 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import numpy as np
3
+ import cv2
4
+ from lightglue import LightGlue
5
+ from lightglue.utils import rbd
6
+ from lightglue import SuperPoint, SIFT
7
+ from lightglue.utils import load_image
8
+
9
+
10
def unrotate_kps_W(kps_rot, k, H, W):
    """Map keypoints detected on 90°-rotated copies back to original coords.

    Args:
        kps_rot: (N, 2) array of (x, y) keypoints in the rotated frame
            (numpy array or torch tensor).
        k: per-keypoint CCW quarter-turn count (0..3), length N.
        H, W: height and width of the original (un-rotated) image.

    Returns:
        (N, 2) numpy array of keypoints in original image coordinates.
    """
    # Accept torch tensors transparently.
    if hasattr(kps_rot, 'cpu'):
        kps_rot = kps_rot.cpu().numpy()
    if hasattr(k, 'cpu'):
        k = k.cpu().numpy()

    # Drop singleton batch dimensions if present.
    if k.ndim > 1:
        k = k.squeeze()
    if kps_rot.ndim > 2:
        kps_rot = kps_rot.squeeze()

    x_rot = kps_rot[:, 0]
    y_rot = kps_rot[:, 1]

    x_out = np.zeros_like(x_rot)
    y_out = np.zeros_like(y_rot)

    # Inverse mapping for each quarter-turn count.
    sel = (k == 0)
    x_out[sel], y_out[sel] = x_rot[sel], y_rot[sel]

    sel = (k == 1)
    x_out[sel], y_out[sel] = (W - 1) - y_rot[sel], x_rot[sel]

    sel = (k == 2)
    x_out[sel], y_out[sel] = (W - 1) - x_rot[sel], (H - 1) - y_rot[sel]

    sel = (k == 3)
    x_out[sel], y_out[sel] = y_rot[sel], (H - 1) - x_rot[sel]

    return np.stack([x_out, y_out], axis=-1)
38
+
39
def extract_keypoints(path_to_image0, features='superpoint', rotations=(0, 1, 2, 3)):
    """Extract local features for an image, optionally under 90° rotations.

    For ``features='sift'`` a single extraction on the un-rotated image is
    returned.  For ``features='superpoint'`` the image is rotated k*90° CCW
    for each k in *rotations*, features are extracted per rotation, and all
    sets are concatenated into one merged dict whose ``'rotations'`` entry
    records which rotation each keypoint came from.

    NOTE(review): the merged keypoints remain in ROTATED image coordinates;
    callers are expected to undo the rotation later (e.g. with
    ``unrotate_kps_W``) using the stored per-keypoint rotation index.

    Args:
        path_to_image0: path to the image file.
        features: 'superpoint' or 'sift'.
        rotations: iterable of quarter-turn counts to extract under
            (default now an immutable tuple instead of a mutable list).

    Returns:
        For 'sift': ``(feats, h, w)``.
        For 'superpoint': ``(feats_merged, feats, h, w)`` where *feats* is
        the per-rotation dict and h, w are the original image height/width.

    Raises:
        ValueError: if *features* is neither 'sift' nor 'superpoint'
            (previously this fell through to a confusing NameError).
    """
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # Load the image as a (3, H, W) tensor in [0, 1].
    timg = load_image(path_to_image0).to(device)
    _, h, w = timg.shape

    if features == 'sift':
        extractor = SIFT(max_num_keypoints=2048).eval().to(device)
        feats = extractor.extract(timg)
        return feats, h, w

    if features != 'superpoint':
        raise ValueError(f"Unsupported feature type: {features!r}")
    extractor = SuperPoint(max_num_keypoints=2048).eval().to(device)

    # --- Extract local features for every requested quarter-turn rotation ---
    feats = {}
    for k in rotations:
        timg_rotated = torch.rot90(timg, k, dims=(1, 2))
        feats[k] = extractor.extract(timg_rotated)

    # --- Merge the per-rotation feature sets ---
    # Keypoints stay in rotated coordinates; 'rotations' records each
    # keypoint's k so the rotation can be undone downstream.
    all_keypoints = []
    all_scores = []
    all_descriptors = []
    all_rotations = []
    for k, feat in feats.items():
        num_kpts = feat['keypoints'].shape[1]  # keypoints are (1, N, 2)
        rot_indices = torch.full((1, num_kpts), k, dtype=torch.long, device=device)
        all_keypoints.append(feat['keypoints'])
        all_scores.append(feat['keypoint_scores'])
        all_descriptors.append(feat['descriptors'])
        all_rotations.append(rot_indices)

    # Concatenate all features along the keypoint dimension (dim=1).
    feats_merged = {
        'keypoints': torch.cat(all_keypoints, dim=1),
        'keypoint_scores': torch.cat(all_scores, dim=1),
        'descriptors': torch.cat(all_descriptors, dim=1),
        'rotations': torch.cat(all_rotations, dim=1),
    }

    return feats_merged, feats, h, w
117
+
118
def lightglue_matching(feats0, feats1, matcher=None):
    """Match two feature sets with LightGlue and return (K, 2) match indices.

    If *matcher* is None, a SuperPoint-flavoured LightGlue instance is
    created on the best available device.
    """
    if matcher is None:
        dev = 'cuda' if torch.cuda.is_available() else 'cpu'
        matcher = LightGlue(features='superpoint').eval().to(dev)

    raw_out = matcher({'image0': feats0, 'image1': feats1})
    # rbd strips the leading batch dimension; only the matcher output is kept.
    _, _, out = [rbd(t) for t in (feats0, feats1, raw_out)]
    return out['matches']
126
+
127
def feature_matching(feats0, feats1, matcher = None, exhaustive = True):
    """Match two multi-rotation feature sets, returning stacked match indices.

    *feats0* and *feats1* are dicts keyed by rotation k (0..3), each value a
    per-rotation feature dict with a (1, N, 2) 'keypoints' tensor.  The best
    rotation of image1 against un-rotated image0 is found first; if
    *exhaustive*, the remaining three rotation pairings (same relative
    rotation) are matched as well.  Returned indices address the
    CONCATENATED keypoint arrays (rotation blocks stacked in k order).

    Returns:
        (K, 2) uint32 numpy array, or None if no rotation produced matches.
    """
    best_rot = 0
    best_num_matches = 0
    matches_tensor = None

    # Find the best rotation alignment
    for rot in [0,1,2,3]:
        matches_tensor_rot = lightglue_matching(feats0[0], feats1[rot], matcher = matcher)
        # Strictly greater: ties keep the earliest rotation.
        if (len(matches_tensor_rot) > best_num_matches):
            best_num_matches = len(matches_tensor_rot)
            best_rot = rot
            matches_tensor = matches_tensor_rot

    if matches_tensor is not None and len(matches_tensor) > 0:
        matches_np = matches_tensor.cpu().numpy().astype(np.uint32)
    else:
        # No rotation yielded any match at all.
        return None

    # Adjust matches to account for rotations: image1 indices are shifted past
    # the keypoint blocks of every rotation preceding best_rot in the
    # concatenated array.
    for k in range(best_rot):
        matches_np[:,1] += feats1[k]['keypoints'].shape[1]
    all_matches = [matches_np]

    if not exhaustive:
        return matches_np

    # Find the other rotation combinations: pair feats0 rotation i with
    # feats1 rotation (best_rot + i) mod 4 so the relative rotation is kept.
    rots = []
    for rot in [1, 2, 3]:
        rot_i = best_rot + rot
        if rot_i >=4:
            rot_i = rot_i -4
        rots.append(rot_i)

    # Compute matches for the other rotation combinations
    for rot_i in [1,2,3]:
        rot_j = rots[rot_i-1]

        matches_tensor_rot = lightglue_matching(feats0[rot_i], feats1[rot_j], matcher = matcher)
        matches_np_i = matches_tensor_rot.cpu().numpy().astype(np.uint32)
        # Offset both columns into the concatenated keypoint arrays.
        # (rot_i is always > 0 in this loop, so that guard is redundant but harmless.)
        if rot_i > 0:
            for k in range(rot_i):
                matches_np_i[:,0] += feats0[k]['keypoints'].shape[1]
        if rot_j > 0:
            for k in range(rot_j):
                matches_np_i[:,1] += feats1[k]['keypoints'].shape[1]

        all_matches.append(matches_np_i)
        print(f"Rotation {rot_i} vs {rot_j}: {len(matches_tensor_rot)} matches")

    # Stack all matches together
    matches_stacked = (
        np.vstack(all_matches) if len(all_matches) and all_matches[0].size else
        np.empty((0, 2), dtype=np.uint32)
    )

    # if best_rot > 0:
    #     for k in range(best_rot):
    #         print(f"Adjusting for rotation {k}")
    #         matches_np[:,1] += feats1[k]['keypoints'].shape[1]

    # return matches_np
    return matches_stacked
190
+
191
+
192
+
get_calibration.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import yaml
2
+ import sys
3
+ import argparse
4
+ import numpy as np
5
+
6
def get_camera_intrinsics(calibration_yaml, cam_name):
    """Print a one-line intrinsics description for camera *cam_name*.

    Reads the 'cameras' list from *calibration_yaml* and prints either
    "<distortion_type> fx fy cx cy d0 d1 ..." when distortion data is
    present, or "<cam_model> fx fy cx cy" otherwise (consumed by the
    calling shell pipeline).

    Args:
        calibration_yaml: path to the calibration YAML file.
        cam_name: value of the camera's 'cam_name' field to look up.

    Raises:
        ValueError: if no camera named *cam_name* exists in the file
            (previously this crashed with an unbound-variable NameError).
    """
    with open(calibration_yaml, 'r') as file:
        data = yaml.safe_load(file)

    cameras = data.get('cameras', [])
    cam = next((c for c in cameras if c['cam_name'] == cam_name), None)
    if cam is None:
        raise ValueError(f"Camera '{cam_name}' not found in {calibration_yaml}")

    K = np.array([[cam['focal_length'][0], 0, cam['principal_point'][0]],
                  [0, cam['focal_length'][1], cam['principal_point'][1]],
                  [0, 0, 1]], dtype=np.float32)

    has_dist = ('distortion_type' in cam) and ('distortion_coefficients' in cam)
    if has_dist:
        dist = " ".join(map(str, cam['distortion_coefficients']))
        print(f"{cam['distortion_type']} {K[0,0]} {K[1,1]} {K[0,2]} {K[1,2]} {dist}")
    else:
        print(f"{cam['cam_model']} {K[0,0]} {K[1,1]} {K[0,2]} {K[1,2]}")
25
+
26
+
27
if __name__ == "__main__":
    # Command-line entry point: print intrinsics for the requested camera.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("calibration_yaml", help="Path to the calibration YAML")
    arg_parser.add_argument("camera_name", help="camera_name")
    cli_args = arg_parser.parse_args()

    get_camera_intrinsics(cli_args.calibration_yaml, cli_args.camera_name)
lightglue_matcher.py ADDED
@@ -0,0 +1,351 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sqlite3
2
+ from lightglue_matcher_utilities import lightglue_keypoints, lightglue_matching, unrotate_kps_W
3
+ import os
4
+ import torch
5
+ import matplotlib.pyplot as plt
6
+ from tqdm import tqdm
7
+ import numpy as np
8
+ import cv2
9
+ import argparse
10
+ from pathlib import Path
11
+
12
+ # ==========================================
13
+ # ==========================================
14
+ # DATABASE UTILITIES
15
+ # ==========================================
16
def load_colmap_db(db_path):
    """Open an existing COLMAP SQLite database.

    Args:
        db_path: path to the database file (str or Path).

    Returns:
        (connection, cursor) pair for the opened database.

    Raises:
        FileNotFoundError: if *db_path* does not exist.
    """
    if not os.path.exists(db_path):
        raise FileNotFoundError(f"Database file not found: {db_path}")
    connection = sqlite3.connect(db_path)
    return connection, connection.cursor()
22
+
23
def create_pair_id(image_id1, image_id2):
    """Return COLMAP's canonical pair id for two image ids.

    Order-independent: the smaller id always multiplies the constant, matching
    COLMAP's pair_id = min_id * 2147483647 + max_id convention.
    """
    low, high = min(image_id1, image_id2), max(image_id1, image_id2)
    return low * 2147483647 + high
27
+
28
def clean_database(cursor):
    """Delete existing keypoints and descriptors so they can be overwritten.

    Matches and two-view geometries are intentionally left untouched (see the
    commented-out table names).

    Args:
        cursor: SQLite cursor on an open COLMAP database.
    """
    tables = ["keypoints", "descriptors"]  # , "matches", "two_view_geometry"]
    for table in tables:
        cursor.execute(f"DELETE FROM {table};")
    # Status message fixed: the old text also claimed matches were removed,
    # which this function no longer does.
    print("Database cleaned (keypoints and descriptors removed).")
34
+
35
def insert_keypoints(cursor, image_id, keypoints, descriptors):
    """Store one image's keypoints and descriptors in the COLMAP database.

    Args:
        cursor: SQLite cursor on an open COLMAP database.
        image_id: id of the image the features belong to.
        keypoints: (N, 2) float32 numpy array.
        descriptors: (N, D) float32 numpy array.
    """
    # Both tables store the raw float32 buffer plus its 2-D shape.
    kp_rows, kp_cols = keypoints.shape
    cursor.execute(
        "INSERT INTO keypoints(image_id, rows, cols, data) VALUES(?, ?, ?, ?)",
        (image_id, kp_rows, kp_cols, keypoints.tobytes()),
    )

    desc_rows, desc_cols = descriptors.shape
    cursor.execute(
        "INSERT INTO descriptors(image_id, rows, cols, data) VALUES(?, ?, ?, ?)",
        (image_id, desc_rows, desc_cols, descriptors.tobytes()),
    )
54
+
55
def insert_matches(cursor, image_id1, image_id2, matches):
    """Store a match list for one image pair in the COLMAP database.

    Args:
        cursor: SQLite cursor on an open COLMAP database.
        image_id1, image_id2: the two image ids of the pair.
        matches: (K, 2) uint32 numpy array; column 0 indexes keypoints of
            image1, column 1 indexes keypoints of image2.
    """
    row = (
        create_pair_id(image_id1, image_id2),  # canonical order-independent key
        matches.shape[0],
        matches.shape[1],
        matches.tobytes(),
    )
    cursor.execute(
        "INSERT INTO matches(pair_id, rows, cols, data) VALUES(?, ?, ?, ?)",
        row,
    )
67
+
68
def verify_matches_visual(cursor, image_id1, image_id2, image_dir):
    """
    Reads matches and keypoints from the COLMAP db and plots them.

    Args:
        cursor: SQLite cursor connected to the database.
        image_id1: ID of the first image.
        image_id2: ID of the second image.
        image_dir: Path to the directory containing the images.
    """

    # 1. Helper to ensure image_id1 < image_id2 for pair_id calculation
    if image_id1 > image_id2:
        image_id1, image_id2 = image_id2, image_id1
        swapped = True
    else:
        swapped = False

    # COLMAP's canonical pair id: smaller_id * 2147483647 + larger_id.
    pair_id = image_id1 * 2147483647 + image_id2

    # 2. Fetch Matches
    cursor.execute("SELECT data FROM matches WHERE pair_id = ?", (pair_id,))
    match_row = cursor.fetchone()

    if match_row is None:
        print(f"No matches found in DB for pair {image_id1}-{image_id2}")
        return

    # Decode Matches: UINT32 (N, 2)
    matches = np.frombuffer(match_row[0], dtype=np.uint32).reshape(-1, 2)

    # If we swapped inputs to generate pair_id, we must swap columns in matches
    # so matches[:,0] corresponds to the requested image_id1
    if swapped:
        matches = matches[:, [1, 0]]

    # 3. Fetch Keypoints for both images
    def get_keypoints_and_name(img_id):
        # Get Name
        cursor.execute("SELECT name FROM images WHERE image_id = ?", (img_id,))
        name = cursor.fetchone()[0]

        # Get Keypoints
        cursor.execute("SELECT data FROM keypoints WHERE image_id = ?", (img_id,))
        kp_row = cursor.fetchone()
        # Decode Keypoints: FLOAT32 (N, 2)
        kpts = np.frombuffer(kp_row[0], dtype=np.float32).reshape(-1, 2)
        return name, kpts

    name1, kpts1 = get_keypoints_and_name(image_id1)
    name2, kpts2 = get_keypoints_and_name(image_id2)

    # 4. Filter Keypoints using the Matches indices
    # matches[:, 0] are indices into kpts1
    # matches[:, 1] are indices into kpts2
    valid_kpts1 = kpts1[matches[:, 0]]
    valid_kpts2 = kpts2[matches[:, 1]]

    # 5. Load Images
    path1 = os.path.join(image_dir, name1)
    path2 = os.path.join(image_dir, name2)

    # NOTE(review): cv2.imread returns None for a missing/unreadable file;
    # cvtColor below would then raise — consider an existence check.
    img1 = cv2.imread(path1)
    img2 = cv2.imread(path2)

    # Convert BGR (OpenCV) to RGB (Matplotlib)
    img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
    img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)

    # 6. Plotting
    # Concatenate images side-by-side
    h1, w1, _ = img1.shape
    h2, w2, _ = img2.shape

    # Create a canvas large enough for both
    height = max(h1, h2)
    width = w1 + w2
    canvas = np.zeros((height, width, 3), dtype=np.uint8)

    canvas[:h1, :w1, :] = img1
    canvas[:h2, w1:w1+w2, :] = img2

    plt.figure(figsize=(15, 10))
    plt.imshow(canvas)

    # Plot lines
    # Shift x-coordinates of image2 by w1
    for (x1, y1), (x2, y2) in zip(valid_kpts1, valid_kpts2):
        plt.plot([x1, x2 + w1], [y1, y2], 'c-', alpha=0.6, linewidth=0.5)
        plt.plot(x1, y1, 'r.', markersize=2)
        plt.plot(x2 + w1, y2, 'r.', markersize=2)

    plt.title(f"DB Verification: {name1} (ID:{image_id1}) <-> {name2} (ID:{image_id2}) | Matches: {len(matches)}")
    plt.axis('off')
    plt.tight_layout()
    plt.show()
164
+
165
+ import numpy as np
166
+ import matplotlib.pyplot as plt
167
+ import cv2
168
+ import os
169
+ import sqlite3
170
+
171
def plot_matches_from_db(cursor, image_id1, image_id2, image_dir):
    """
    Reads matches and keypoints for a specific pair from the COLMAP DB and plots them.

    Args:
        cursor: SQLite cursor.
        image_id1, image_id2: The IDs of the two images to plot.
        image_dir: Path to the directory containing the actual image files.
    """

    # 1. Resolve Pair ID (Colmap requires id1 < id2 for unique pair_id)
    if image_id1 > image_id2:
        id_a, id_b = image_id2, image_id1
        swapped = True
    else:
        id_a, id_b = image_id1, image_id2
        swapped = False

    # COLMAP's canonical pair id: smaller_id * 2147483647 + larger_id.
    pair_id = id_a * 2147483647 + id_b

    # 2. Fetch Matches
    print(f"Fetching matches for pair {image_id1}-{image_id2} (PairID: {pair_id})...")
    cursor.execute("SELECT data, rows, cols FROM matches WHERE pair_id = ?", (pair_id,))
    match_row = cursor.fetchone()

    if match_row is None:
        print(f"No matches found in database for Pair {image_id1}-{image_id2}")
        return

    # Decode Matches (UINT32)
    # Blob is match_row[0], rows is [1], cols is [2]
    matches_blob = match_row[0]
    matches = np.frombuffer(matches_blob, dtype=np.uint32).reshape(-1, 2)

    # If inputs were swapped relative to how COLMAP stores them, swap the columns
    # so matches[:,0] refers to image_id1 and matches[:,1] refers to image_id2
    if swapped:
        matches = matches[:, [1, 0]]

    # 3. Fetch Keypoints & Image Names
    def get_image_data(img_id):
        cursor.execute("SELECT name FROM images WHERE image_id = ?", (img_id,))
        res = cursor.fetchone()
        if not res:
            raise ValueError(f"Image ID {img_id} not found in 'images' table.")
        name = res[0]

        cursor.execute("SELECT data FROM keypoints WHERE image_id = ?", (img_id,))
        kp_res = cursor.fetchone()
        if not kp_res:
            raise ValueError(f"No keypoints found for Image ID {img_id}.")

        # Decode Keypoints (FLOAT32)
        kpts = np.frombuffer(kp_res[0], dtype=np.float32).reshape(-1, 2)
        return name, kpts

    name1, kpts1 = get_image_data(image_id1)
    name2, kpts2 = get_image_data(image_id2)

    # 4. Filter Keypoints using Match Indices
    valid_kpts1 = kpts1[matches[:, 0]]
    valid_kpts2 = kpts2[matches[:, 1]]

    # 5. Visualization
    path1 = os.path.join(image_dir, name1)
    path2 = os.path.join(image_dir, name2)

    if not os.path.exists(path1) or not os.path.exists(path2):
        print(f"Error: Could not find image files at \n{path1}\n{path2}")
        return

    img1 = cv2.imread(path1)
    img2 = cv2.imread(path2)
    # OpenCV loads BGR; matplotlib expects RGB.
    img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
    img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)

    # Create canvas: both images side by side, image2 shifted right by w1.
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    height = max(h1, h2)
    width = w1 + w2
    canvas = np.zeros((height, width, 3), dtype=np.uint8)
    canvas[:h1, :w1] = img1
    canvas[:h2, w1:w1+w2] = img2

    plt.figure(figsize=(20, 10))
    plt.imshow(canvas)

    # Plot matches
    # x2 coordinates need to be shifted by w1
    for (x1, y1), (x2, y2) in zip(valid_kpts1, valid_kpts2):
        plt.plot([x1, x2 + w1], [y1, y2], 'g-', alpha=0.5, linewidth=1.5)
        plt.plot(x1, y1, 'r.', markersize=4)
        plt.plot(x2 + w1, y2, 'r.', markersize=4)

    plt.title(f"{name1} <-> {name2} | Total Matches: {len(matches)}")
    plt.axis('off')
    plt.tight_layout()
    plt.show()
270
+
271
if __name__ == "__main__":

    # Pipeline entry point: overwrite the keypoints/descriptors of an existing
    # COLMAP database with LightGlue/SuperPoint features and write exhaustive
    # pairwise matches to a matches.txt next to the database.
    parser = argparse.ArgumentParser()

    #DB_PATH = "/home/alejandro/VSLAM-LAB-NEXT-ITERATION/VSLAM-LAB-Evaluation/demo/SESOKO/sskall-s01/colmap_00000/colmap_database.db"
    #IMAGE_DIR = "/home/alejandro/VSLAM-LAB-NEXT-ITERATION/VSLAM-LAB-Benchmark/SESOKO/sskall-s01/rgb_0"
    #FEATURE_TYPE = 'superpoint'
    #DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
    #matches_file_path = os.path.join(os.path.dirname(DB_PATH), "matches.txt")

    parser.add_argument("--database", type=Path, required=True)
    parser.add_argument("--rgb_path", type=Path, required=True)
    parser.add_argument("--feature", type=str, required=True)

    # parse_known_args: tolerate extra flags passed by the calling pipeline.
    args, _ = parser.parse_known_args()

    DB_PATH = args.database
    IMAGE_DIR = args.rgb_path
    FEATURE_TYPE = args.feature
    DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
    matches_file_path = os.path.join(os.path.dirname(DB_PATH), "matches.txt")

    # Read the image list from the existing COLMAP database.
    conn, cursor = load_colmap_db(DB_PATH)
    cursor.execute("SELECT image_id, name FROM images")
    images_info = {row[0]: row[1] for row in cursor.fetchall()}
    image_ids = sorted(images_info.keys())

    # Drop old keypoints/descriptors so they can be overwritten.
    clean_database(cursor)
    conn.commit()

    # Per-image feature extraction: multi-rotation SuperPoint features are
    # extracted, un-rotated back to original coordinates, and stored.
    fts = {}
    for i in tqdm(range(len(image_ids)), desc="Feature Extraction"):
        id = image_ids[i]  # NOTE(review): shadows the builtin id()
        fname = images_info[id]
        path = os.path.join(IMAGE_DIR, fname)

        feats_dict, h, w = lightglue_keypoints(path, features='superpoint')

        fts[id] = feats_dict

        kpts = feats_dict['keypoints'].squeeze(0).cpu().numpy().astype(np.float32)
        descs = feats_dict['descriptors'].squeeze(0).cpu().numpy().astype(np.float32)

        # Keypoints were detected on rotated copies; map back to original coords.
        kpts_rot = unrotate_kps_W(kpts, feats_dict['rotations'].squeeze(0).cpu().numpy().astype(np.float32), h, w)
        insert_keypoints(cursor, id, kpts_rot, descs)

    conn.commit()
    # Exhaustive pairwise matching; matches.txt blocks are
    # "name1 name2\n<idx idx lines>\n".
    with open(matches_file_path, "w") as f_match:
        # NOTE(review): desc says "Feature Extraction" but this is the matching loop.
        for i in tqdm(range(len(image_ids)), desc="Feature Extraction"):
            id1 = image_ids[i]
            fname1 = images_info[id1]
            path1 = os.path.join(IMAGE_DIR, fname1)

            for j in range(i + 1, len(image_ids)):
                if j == i:  # never true: j starts at i + 1
                    continue
                id2 = image_ids[j]

                fname2 = images_info[id2]
                path2 = os.path.join(IMAGE_DIR, fname2)
                matches_tensor = lightglue_matching(fts[id1], fts[id2], plot=False, features='superpoint', path_to_image0=path1, path_to_image1=path2)

                # Pairs with zero matches are skipped (no block is written).
                if matches_tensor is not None and len(matches_tensor) > 0:
                    matches_np = matches_tensor.cpu().numpy().astype(np.uint32)
                    #insert_matches(cursor, id1, id2, matches_np)

                    f_match.write(f"{fname1} {fname2}\n")
                    np.savetxt(f_match, matches_np, fmt="%d")
                    f_match.write("\n")

                #verify_matches_visual(cursor, image_ids[i], image_ids[j], IMAGE_DIR)
                #tqdm.write(f"Processed matches for Image ID {id1} in {duration:.2f} seconds.")

        #plt.show()

    conn.commit()

    #plot_matches_from_db(cursor, image_ids[0], image_ids[1], IMAGE_DIR)

    conn.close()
    print("Database overwrite complete.")
lightglue_matcher_utilities.py ADDED
@@ -0,0 +1,266 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import numpy as np
3
+ import cv2
4
+ from lightglue import LightGlue
5
+ from lightglue.utils import rbd
6
+
7
def unrotate_kps_W(kps_rot, k, H, W):
    """Vectorised inverse of per-keypoint 90° CCW rotations.

    Maps keypoints detected on rotated image copies back to original image
    coordinates.  (The redundant function-local ``import numpy as np`` was
    removed; numpy is already imported at module level.)

    Args:
        kps_rot: (N, 2) array of (x, y) keypoints in the rotated frame
            (numpy array or torch tensor).
        k: per-keypoint CCW quarter-turn count (0..3), length N.
        H, W: height and width of the original (un-rotated) image.

    Returns:
        (N, 2) numpy array of keypoints in original image coordinates.
    """
    # Ensure inputs are Numpy (torch tensors are converted transparently).
    if hasattr(kps_rot, 'cpu'): kps_rot = kps_rot.cpu().numpy()
    if hasattr(k, 'cpu'): k = k.cpu().numpy()

    # Squeeze if necessary (drop singleton batch dimensions).
    if k.ndim > 1: k = k.squeeze()
    if kps_rot.ndim > 2: kps_rot = kps_rot.squeeze()

    x_r = kps_rot[:, 0]
    y_r = kps_rot[:, 1]

    x = np.zeros_like(x_r)
    y = np.zeros_like(y_r)

    # Inverse mapping per quarter-turn count.
    mask0 = (k == 0)
    x[mask0], y[mask0] = x_r[mask0], y_r[mask0]

    mask1 = (k == 1)
    x[mask1], y[mask1] = (W - 1) - y_r[mask1], x_r[mask1]

    mask2 = (k == 2)
    x[mask2], y[mask2] = (W - 1) - x_r[mask2], (H - 1) - y_r[mask2]

    mask3 = (k == 3)
    x[mask3], y[mask3] = y_r[mask3], (H - 1) - x_r[mask3]

    return np.stack([x, y], axis=-1)
37
+
38
def unrotate_kps(kps_rot, k, H, W):
    """Map keypoints from an image rotated k×90° CCW back to original coords.

    Args:
        kps_rot: (N, 2) tensor of (x, y) in the rotated image.
        k: how many times the image was rotated CCW by 90° (0..3).
        H, W: original image height and width.

    Returns:
        (N, 2) tensor in original image coordinates.

    Raises:
        ValueError: if k is outside 0..3.
    """
    import torch
    x_rot = kps_rot[:, 0].clone()
    y_rot = kps_rot[:, 1].clone()

    if k == 0:
        x_orig, y_orig = x_rot, y_rot
    elif k == 1:
        # undo a 90° CCW rotation
        x_orig, y_orig = (W - 1) - y_rot, x_rot
    elif k == 2:
        # undo a 180° rotation
        x_orig, y_orig = (W - 1) - x_rot, (H - 1) - y_rot
    elif k == 3:
        # undo a 270° CCW rotation
        x_orig, y_orig = y_rot, (H - 1) - x_rot
    else:
        raise ValueError("k must be 0..3")

    return torch.stack([x_orig, y_orig], dim=-1)
56
+
57
+ # def lightglue_matching(path_to_image0, path_to_image1, plot=False, features='superpoint'):
58
+ # from lightglue import LightGlue, SuperPoint, SIFT
59
+ # from lightglue.utils import load_image, rbd
60
+ # from lightglue import viz2d
61
+ # import torch
62
+
63
+ # # --- Models on GPU ---
64
+ # device = 'cuda' if torch.cuda.is_available() else 'cpu'
65
+
66
+ # if features == 'superpoint':
67
+ # extractor = SuperPoint(max_num_keypoints=2048).eval().to(device)
68
+ # if features == 'sift':
69
+ # extractor = SIFT(max_num_keypoints=2048).eval().to(device)
70
+
71
+ # matcher = LightGlue(features=features).eval().to(device)
72
+
73
+ # # --- Load images as Torch tensors (3,H,W) in [0,1] ---
74
+ # timg0 = load_image(path_to_image0).to(device)
75
+ # timg1 = load_image(path_to_image1).to(device)
76
+
77
+ # # --- Extract local features ---
78
+ # feats0 = extractor.extract(timg0) # auto-resize inside
79
+
80
+ # max_num_matches = -1
81
+ # best_k = 0
82
+ # best_feats0 = None
83
+ # best_feats1 = None
84
+ # for k in range(4):
85
+ # timg1_rotated = torch.rot90(timg1, k, dims=(1, 2))
86
+ # feats1_k = extractor.extract(timg1_rotated)
87
+ # out_k = matcher({'image0': feats0, 'image1': feats1_k})
88
+ # feats0_k, feats1_k, out_k = [rbd(x) for x in [feats0, feats1_k, out_k]] # remove batch dim
89
+ # matches_k = out_k['matches'] # (K,2) long
90
+ # num_k = len(matches_k)
91
+ # if num_k > max_num_matches:
92
+ # max_num_matches = num_k
93
+ # matches = matches_k
94
+ # best_feats0 = feats0_k
95
+ # best_feats1 = feats1_k
96
+ # best_k = k
97
+
98
+ # # --- Keypoints in matched order (Torch tensors on CPU) ---
99
+ # H1, W1 = timg1.shape[-2], timg1.shape[-1]
100
+
101
+ # kpts0 = best_feats0['keypoints'][matches[:, 0]]
102
+ # kpts1 = best_feats1['keypoints'][matches[:, 1]]
103
+ # kpts1 = unrotate_kps(kpts1, best_k, H1, W1) # (K,2) mapped to original image1 coords
104
+
105
+ # desc0 = best_feats0['descriptors'][matches[:, 0]]
106
+ # desc1 = best_feats1['descriptors'][matches[:, 1]]
107
+
108
+ # if plot:
109
+ # if len(kpts0) == 0 or len(kpts1) == 0:
110
+ # print("No matches found.")
111
+ # return None, None
112
+ # ax = viz2d.plot_images([timg0.cpu(), timg1.cpu()])
113
+ # viz2d.plot_matches(kpts0.cpu(), kpts1.cpu(), color=None, lw=0.8, axes=ax)
114
+ # #ax0 = ax[0] if isinstance(ax, (list, tuple, np.ndarray)) else ax
115
+ # #fig = ax0.figure
116
+
117
+ # #return kpts0, kpts1 #, fig, ax
118
+
119
+
120
+ # return kpts0, kpts1, desc0, desc1
121
+
122
def lightglue_keypoints(path_to_image0, features='superpoint', rotations = [0,1,2,3]):
    """Extract SuperPoint/SIFT features for an image under several 90° rotations.

    NOTE(review): *rotations* uses a mutable default list; it is never mutated
    here, but a tuple default would be safer.

    Returns:
        (feats_merged, h, w): merged feature dict — keypoints are still in
        ROTATED image coordinates, with a per-keypoint 'rotations' entry
        recording k so callers can undo the rotation (see unrotate_kps_W) —
        plus the original image height and width.
    """
    from lightglue import LightGlue, SuperPoint, SIFT
    from lightglue.utils import load_image, rbd
    from lightglue import viz2d
    import torch

    # --- Models on GPU ---
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    if features == 'superpoint':
        extractor = SuperPoint(max_num_keypoints=2048).eval().to(device)
    if features == 'sift':
        extractor = SIFT(max_num_keypoints=2048).eval().to(device)
    # NOTE(review): any other *features* value leaves `extractor` unbound.

    # --- Load images as Torch tensors (3,H,W) in [0,1] ---
    timg = load_image(path_to_image0).to(device)
    _, h, w = timg.shape


    # --- Extract local features (one pass per quarter-turn rotation) ---
    feats = {}
    for k in (rotations):
        timg_rotated = torch.rot90(timg, k, dims=(1, 2))
        feats[k] = extractor.extract(timg_rotated)
        print(f"Extracted {feats[k]['keypoints'].shape[1]} keypoints for rotation {k}")

    # --- Merge features back to original coordinate system ---
    all_keypoints = []
    all_scores = []
    all_descriptors = []
    all_rotations = []
    for k, feat in feats.items():
        kpts = feat['keypoints']  # Shape (1, N, 2)
        num_kpts = kpts.shape[1]
        # NOTE(review): kpts_corrected is computed below but never used — the
        # UN-corrected keypoints are appended; the stored 'rotations' indices
        # let callers undo the rotation later instead.
        if k == 0:
            kpts_corrected = kpts
        elif k == 1:
            kpts_corrected = torch.stack(
                [w - 1 - kpts[..., 1], kpts[..., 0]], dim=-1
            )
        elif k == 2:
            kpts_corrected = torch.stack(
                [w - 1 - kpts[..., 0], h - 1 - kpts[..., 1]], dim=-1
            )
        elif k == 3:
            kpts_corrected = torch.stack(
                [kpts[..., 1], h - 1 - kpts[..., 0]], dim=-1
            )

        rot_indices = torch.full((1, num_kpts), k, dtype=torch.long, device=device)
        all_keypoints.append(feat['keypoints'])
        all_scores.append(feat['keypoint_scores'])
        all_descriptors.append(feat['descriptors'])
        all_rotations.append(rot_indices)

    # Concatenate all features along the keypoint dimension (dim=1)
    feats_merged = {
        'keypoints': torch.cat(all_keypoints, dim=1),
        'keypoint_scores': torch.cat(all_scores, dim=1),
        'descriptors': torch.cat(all_descriptors, dim=1),
        'rotations': torch.cat(all_rotations, dim=1)
    }

    num_kpts = feats_merged['keypoints'].shape[1]
    # perm = torch.randperm(num_kpts, device=device)

    # feats_merged['keypoints'] = feats_merged['keypoints'][:, perm, :]
    # feats_merged['keypoint_scores'] = feats_merged['keypoint_scores'][:, perm]
    # feats_merged['descriptors'] = feats_merged['descriptors'][:, perm, :]

    # Optional: If you want to retain other keys like 'shape' or 'image_size'
    feats_merged['image_size'] = torch.tensor([w, h], device=device).unsqueeze(0)
    return feats_merged , h, w
195
+
196
def lightglue_matching(feats0, feats1, plot=False, features='superpoint', path_to_image0=None, path_to_image1=None):
    """Match two pre-extracted feature sets with LightGlue.

    The original implementation wrapped the single matcher call in a
    one-iteration ``for k in range(1)`` loop with dead best-of-k bookkeeping
    (``max_num_matches``, ``best_k``, ``best_feats0/1`` were never used
    afterwards); that dead code is removed here with no behavior change.

    Parameters
    ----------
    feats0, feats1 : dict
        Batched feature dicts (keys 'keypoints', 'descriptors', ...) as
        produced by ``lightglue_keypoints`` / ``extractor.extract``.
    plot : bool
        Kept for interface compatibility. When True the two images are
        loaded, but the visualization itself is currently disabled
        (commented out in the original).
    features : str
        Feature type the matcher weights correspond to ('superpoint'/'sift').
    path_to_image0, path_to_image1 : str or None
        Image paths; only read when plot=True.

    Returns
    -------
    torch.Tensor
        (K, 2) long tensor of matched index pairs into the keypoints of
        feats0 (column 0) and feats1 (column 1).
    """
    from lightglue import LightGlue
    from lightglue.utils import load_image, rbd
    import torch

    # --- Models on GPU when available ---
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    matcher = LightGlue(features=features).eval().to(device)

    if plot:
        # Loaded only for the (currently disabled) visualization path.
        timg0 = load_image(path_to_image0).to(device)
        timg1 = load_image(path_to_image1).to(device)

    # --- Match the two feature sets ---
    out = matcher({'image0': feats0, 'image1': feats1})
    # rbd() removes the leading batch dimension from every entry.
    out = rbd(out)
    matches = out['matches']  # (K, 2) long
    print(f"LightGlue found {len(matches)} matches.")
    return matches
test.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
# Smoke-test script: extract rotation-augmented keypoints from two sample
# images and match them with an exhaustive LightGlue matcher.
import torch
from feature_matcher_utilities import extract_keypoints, feature_matching
from lightglue import LightGlue

path_0 = "/media/alejandro/E45B-3EBD/FOR ALEX/8312297.jpg"
path_1 = "/media/alejandro/E45B-3EBD/FOR ALEX/8315088.jpg"

# First image extracted unrotated, second with rotations=[1].
feats0, _h0, _w0 = extract_keypoints(path_0, features="superpoint", rotations=[0])
feats1, _h1, _w1 = extract_keypoints(path_1, features="superpoint", rotations=[1])

device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Confidence pruning disabled (-1) for exhaustive matching; flash attention on.
matcher = LightGlue(
    features='superpoint',
    depth_confidence=-1,
    width_confidence=-1,
    flash=True,
).eval().to(device)

matches = feature_matching(
    feats0,
    feats1,
    matcher=matcher,
    features="superpoint",
    matcher_type="lightglue",
)

print(len(matches))