Upload folder using huggingface_hub
Browse files- colmap_matcher.sh +4 -4
- feature_matcher.py +152 -54
- feature_matcher_utilities.py +76 -24
colmap_matcher.sh
CHANGED
|
@@ -146,10 +146,10 @@ fi
|
|
| 146 |
# LightGlue Feature Matcher
|
| 147 |
if [ "${matcher_type}" == "custom" ]
|
| 148 |
then
|
| 149 |
-
|
| 150 |
-
colmap matches_importer \
|
| 151 |
--database_path ${database} \
|
| 152 |
-
--
|
| 153 |
-
|
|
|
|
| 154 |
fi
|
| 155 |
|
|
|
|
| 146 |
# LightGlue Feature Matcher
# When matcher_type is "custom": first run COLMAP's built-in exhaustive
# (SIFT) matching to populate the database, then invoke the custom
# LightGlue script, which merges its matches into the same database.
if [ "${matcher_type}" == "custom" ]
then
    colmap exhaustive_matcher \
        --database_path ${database} \
        --FeatureMatching.use_gpu ${use_gpu}

    # NOTE(review): runs inside the pixi "lightglue" environment; assumes
    # feature_matcher.py is reachable from the current working directory.
    pixi run -e lightglue python3 Baselines/colmap/feature_matcher.py --database ${database} --rgb_path ${rgb_path} --rgb_csv ${rgb_csv}
fi
|
| 155 |
|
feature_matcher.py
CHANGED
|
@@ -29,7 +29,7 @@ def create_pair_id(image_id1, image_id2):
|
|
| 29 |
|
| 30 |
def clean_database(cursor):
|
| 31 |
"""Removes existing features and matches to ensure a clean overwrite."""
|
| 32 |
-
tables = ["keypoints", "descriptors"
|
| 33 |
for table in tables:
|
| 34 |
cursor.execute(f"DELETE FROM {table};")
|
| 35 |
print("Database cleaned (keypoints, descriptors, matches removed).")
|
|
@@ -270,90 +270,188 @@ def plot_matches_from_db(cursor, image_id1, image_id2, image_dir):
|
|
| 270 |
plt.tight_layout()
|
| 271 |
plt.show()
|
| 272 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 273 |
if __name__ == "__main__":
|
| 274 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 275 |
parser = argparse.ArgumentParser()
|
| 276 |
|
| 277 |
parser.add_argument("--database", type=Path, required=True)
|
| 278 |
parser.add_argument("--rgb_path", type=Path, required=True)
|
| 279 |
-
parser.add_argument("--
|
| 280 |
-
parser.add_argument("--matcher", type=str, required=True)
|
| 281 |
|
| 282 |
args, _ = parser.parse_known_args()
|
| 283 |
|
| 284 |
DB_PATH = args.database
|
| 285 |
IMAGE_DIR = args.rgb_path
|
| 286 |
-
FEATURE_TYPE = args.feature
|
| 287 |
-
MATCHER_TYPE = args.matcher
|
| 288 |
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
|
| 289 |
-
matches_file_path = os.path.join(os.path.dirname(DB_PATH), "matches.txt")
|
| 290 |
|
|
|
|
| 291 |
conn, cursor = load_colmap_db(DB_PATH)
|
| 292 |
cursor.execute("SELECT image_id, name FROM images")
|
| 293 |
images_info = {row[0]: row[1] for row in cursor.fetchall()}
|
| 294 |
image_ids = sorted(images_info.keys())
|
| 295 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 296 |
clean_database(cursor)
|
| 297 |
-
conn.commit()
|
| 298 |
|
| 299 |
-
#
|
| 300 |
-
|
|
|
|
|
|
|
| 301 |
for i in tqdm(range(len(image_ids)), desc="Feature Extraction"):
|
| 302 |
id = image_ids[i]
|
| 303 |
fname = images_info[id]
|
| 304 |
path = os.path.join(IMAGE_DIR, fname)
|
| 305 |
|
| 306 |
-
feats_dict, h, w = extract_keypoints(path, features=FEATURE_TYPE)
|
| 307 |
-
|
| 308 |
-
fts[id] = feats_dict
|
| 309 |
|
| 310 |
-
|
| 311 |
descs = feats_dict['descriptors'].squeeze(0).cpu().numpy().astype(np.float32)
|
| 312 |
|
| 313 |
-
|
| 314 |
-
|
| 315 |
-
else:
|
| 316 |
-
kpts_rot = kpts
|
| 317 |
-
insert_keypoints(cursor, id, kpts_rot, descs)
|
| 318 |
-
|
| 319 |
-
conn.commit()
|
| 320 |
|
| 321 |
-
#
|
| 322 |
-
|
| 323 |
-
|
| 324 |
-
|
| 325 |
-
|
| 326 |
-
|
| 327 |
-
|
| 328 |
-
|
| 329 |
-
|
| 330 |
-
|
| 331 |
-
|
| 332 |
-
|
| 333 |
-
|
| 334 |
-
|
| 335 |
-
|
| 336 |
-
|
| 337 |
-
|
| 338 |
-
|
| 339 |
-
|
| 340 |
-
|
| 341 |
-
|
| 342 |
-
|
| 343 |
-
|
| 344 |
-
|
| 345 |
-
|
|
|
|
| 346 |
|
| 347 |
-
|
| 348 |
-
np.savetxt(f_match, matches_np, fmt="%d")
|
| 349 |
-
f_match.write("\n")
|
| 350 |
-
|
| 351 |
-
#verify_matches_visual(cursor, image_ids[i], image_ids[j], IMAGE_DIR)
|
| 352 |
-
#plt.show()
|
| 353 |
-
|
| 354 |
-
conn.commit()
|
| 355 |
|
| 356 |
-
|
| 357 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 358 |
conn.close()
|
| 359 |
print("Database overwrite complete.")
|
|
|
|
| 29 |
|
| 30 |
def clean_database(cursor):
    """Removes existing features and matches to ensure a clean overwrite."""
    # two_view_geometries is deliberately left untouched here.
    for table in ("keypoints", "descriptors", "matches"):
        cursor.execute(f"DELETE FROM {table};")
    print("Database cleaned (keypoints, descriptors, matches removed).")
|
|
|
|
| 270 |
plt.tight_layout()
|
| 271 |
plt.show()
|
| 272 |
|
| 273 |
+
def load_sift_keypoints(cursor):
    """Load every keypoint blob from the COLMAP ``keypoints`` table.

    Args:
        cursor: sqlite3 cursor on an open COLMAP database.

    Returns:
        dict mapping image_id -> (rows, cols) float32 array of keypoints.
        Images whose blob is NULL (no detected features) map to an empty
        (0, cols) array instead of crashing — mirrors the NULL handling
        in load_sift_matches.
    """
    cursor.execute("""
        SELECT image_id, rows, cols, data
        FROM keypoints
    """)

    keypoints_dict = {}

    for image_id, rows, cols, data in cursor.fetchall():
        if data is None:
            # COLMAP stores NULL for images with zero keypoints.
            keypoints_dict[image_id] = np.zeros((0, cols), dtype=np.float32)
            continue
        # Keypoint blobs are raw little-endian float32, row-major.
        kpts = np.frombuffer(data, dtype=np.float32)
        kpts = kpts.reshape((rows, cols))
        keypoints_dict[image_id] = kpts

    return keypoints_dict
|
| 287 |
+
|
| 288 |
+
def load_sift_matches(cursor):
    """Load raw match blobs from the COLMAP ``matches`` table.

    Returns a dict mapping pair_id -> (N, 2) uint32 array of keypoint index
    pairs, or None for pairs stored without any matches.
    """
    cursor.execute("SELECT pair_id, data FROM matches")
    sift_matches = {}
    for pair_id, data in cursor.fetchall():
        if data is None:
            # Keep matchless pairs as explicit None entries.
            sift_matches[pair_id] = None
        else:
            # COLMAP stores matches as a flat blob of uint32 index pairs.
            sift_matches[pair_id] = np.frombuffer(data, dtype=np.uint32).reshape(-1, 2)
    return sift_matches
|
| 305 |
+
|
| 306 |
+
def insert_all_inlier_two_view_geometry(cursor, image_id1, image_id2, matches):
    """
    Treats all matches as inliers and inserts dummy two-view geometry.

    COLMAP keys each pair with image_id1 < image_id2; when the ids arrive
    swapped, the match columns are mirrored to stay consistent. The F/E/H
    model matrices are left NULL — only the raw match indices and a config
    flag are written. (Fix: the previous version built an unused dummy F
    matrix; that dead code is removed.)

    Args:
        cursor: sqlite3 cursor on an open COLMAP database.
        image_id1, image_id2: image ids of the pair, in any order.
        matches: (N, 2) array of keypoint index pairs.
    """
    if image_id1 > image_id2:
        # Normalize pair ordering and mirror the match columns to follow.
        image_id1, image_id2 = image_id2, image_id1
        matches = matches[:, [1, 0]]

    # COLMAP pair-id encoding: pair_id = id1 * 2147483647 + id2.
    pair_id = image_id1 * 2147483647 + image_id2

    # COLMAP expects uint32 indices
    matches = matches.astype(np.uint32)

    cursor.execute("""
        INSERT OR REPLACE INTO two_view_geometries
        (pair_id, rows, cols, data, config)
        VALUES (?, ?, ?, ?, ?)
    """, (
        pair_id,
        matches.shape[0],
        matches.shape[1],
        matches.tobytes(),
        2  # config=2 → "calibrated / essential matrix"
    ))
|
| 333 |
+
|
| 334 |
if __name__ == "__main__":

    # Hybrid SIFT + SuperPoint/LightGlue pipeline: read the SIFT features and
    # matches COLMAP already wrote, wipe the tables, then re-insert SIFT and
    # LightGlue keypoints/matches combined, marking every match as an inlier.
    FEATURE_TYPE = 'superpoint'
    MATCHER_TYPE = 'lightglue'  # NOTE(review): defined but never read below
    LG_MATCHES_THRESHOLD = 40   # minimum LightGlue matches required to keep a pair


    parser = argparse.ArgumentParser()

    parser.add_argument("--database", type=Path, required=True)
    parser.add_argument("--rgb_path", type=Path, required=True)
    parser.add_argument("--rgb_csv", type=Path, required=True)

    args, _ = parser.parse_known_args()

    DB_PATH = args.database
    IMAGE_DIR = args.rgb_path
    DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'  # NOTE(review): shadowed by `device` below

    # Load colmap database
    conn, cursor = load_colmap_db(DB_PATH)
    cursor.execute("SELECT image_id, name FROM images")
    images_info = {row[0]: row[1] for row in cursor.fetchall()}
    image_ids = sorted(images_info.keys())

    # Load SIFT keypoints and matches from exhaustive matching
    sift_keypoints = load_sift_keypoints(cursor)
    sift_matches = load_sift_matches(cursor)

    # Clean colmap database
    clean_database(cursor)
    conn.commit()

    # Extract superpoint keypoints
    fts_sp = {}        # per-image rotation feature dicts, consumed by feature_matching
    keypoints_sp = {}  # per-image merged keypoint arrays
    rotations_sp = {}  # per-keypoint rotation index, used to un-rotate coordinates
    for i in tqdm(range(len(image_ids)), desc="Feature Extraction"):
        id = image_ids[i]  # NOTE(review): shadows the builtin `id`
        fname = images_info[id]
        path = os.path.join(IMAGE_DIR, fname)

        feats_dict, feats_norot, h, w = extract_keypoints(path, features=FEATURE_TYPE)
        fts_sp[id] = feats_norot

        kpts_sp = feats_dict['keypoints'].squeeze(0).cpu().numpy().astype(np.float32)
        # NOTE(review): descs is overwritten in the next loop and never used here.
        descs = feats_dict['descriptors'].squeeze(0).cpu().numpy().astype(np.float32)

        keypoints_sp[id] = kpts_sp
        rotations_sp[id] = feats_dict['rotations'].squeeze(0).cpu().numpy().astype(np.float32)

    # Combine superpoint and SIFT keypoints, insert into database
    # NOTE(review): h and w below come from the LAST image extracted above;
    # this is only correct if all images share the same size — confirm.
    for i in tqdm(range(len(image_ids)), desc="Feature Extraction"):
        id = image_ids[i]
        kpts_sp = keypoints_sp[id]
        rots_sp = rotations_sp[id]
        kpts_rot = unrotate_kps_W(kpts_sp, rots_sp, h, w)

        N = kpts_rot.shape[0]

        # Pad SuperPoint keypoints with neutral scale/orientation/response/octave
        # columns so they match the COLMAP SIFT keypoint layout.
        scales = np.ones((N, 1), dtype=np.float32)
        oris = np.zeros((N, 1), dtype=np.float32)
        resp = np.ones((N, 1), dtype=np.float32)
        octave = np.zeros((N, 1), dtype=np.float32)

        kpts_mod = np.hstack([
            kpts_rot.astype(np.float32),  # (N, 2)
            scales,
            oris,
            resp,
            octave
        ])

        kpts_sift = sift_keypoints[id]

        # SIFT keypoints first, SuperPoint after — match indices below rely
        # on this ordering (LightGlue indices are offset by the SIFT count).
        kpts = np.vstack([kpts_sift, kpts_mod])
        # Descriptors are zero-filled dummies; matches are inserted directly.
        descs = np.zeros((kpts.shape[0], 128), dtype=np.float32)

        insert_keypoints(cursor, id, kpts, descs)

    conn.commit()

    # Feature Matching
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    matcher = LightGlue(features='superpoint', depth_confidence=-1, width_confidence=-1, flash=True).eval().to(device)

    for i in tqdm(range(len(image_ids)), desc="Feature Matching"):
        id1 = image_ids[i]
        fname1 = images_info[id1]
        path1 = os.path.join(IMAGE_DIR, fname1)  # NOTE(review): unused

        for j in range(i + 1, len(image_ids)):
            if j == i:  # NOTE(review): unreachable — j starts at i + 1
                continue
            id2 = image_ids[j]
            fname2 = images_info[id2]
            path2 = os.path.join(IMAGE_DIR, fname2)  # NOTE(review): unused

            # Get SIFT matches
            pair_id = create_pair_id(id1, id2)
            matches_sift = sift_matches[pair_id]
            if matches_sift is None:
                matches_sift = np.zeros((0, 2), dtype=np.uint32)

            n_sift_kpts_1 = sift_keypoints[id1].shape[0]
            n_sift_kpts_2 = sift_keypoints[id2].shape[0]

            # Compute LightGlue matches
            matches_lg = feature_matching(fts_sp[id1], fts_sp[id2], matcher=matcher, exhaustive=True)

            if matches_lg is not None and len(matches_lg) > LG_MATCHES_THRESHOLD:
                # Offset LightGlue indices past the SIFT keypoints, which were
                # inserted first for each image.
                matches_lg[:,0] += n_sift_kpts_1
                matches_lg[:,1] += n_sift_kpts_2
            else:
                # Too few (or no) LightGlue matches — keep SIFT matches only.
                matches_lg = np.zeros((0, 2), dtype=np.uint32)

            # Combine superpoint and SIFT matches, insert into database
            matches = np.vstack([matches_sift, matches_lg])
            insert_matches(cursor, id1, id2, matches)
            insert_all_inlier_two_view_geometry(cursor, id1, id2, matches)

    conn.commit()
    conn.close()
    print("Database overwrite complete.")
|
feature_matcher_utilities.py
CHANGED
|
@@ -113,28 +113,80 @@ def extract_keypoints(path_to_image0, features='superpoint', rotations = [0,1,2,
|
|
| 113 |
# if 'oris' not in f:
|
| 114 |
# f['oris'] = torch.zeros(all_keypoints.shape[:-1], device=device)
|
| 115 |
|
| 116 |
-
return feats_merged , h, w
|
| 117 |
-
|
| 118 |
-
def
|
| 119 |
-
|
| 120 |
-
|
| 121 |
-
|
| 122 |
-
device = 'cuda' if torch.cuda.is_available() else 'cpu'
|
| 123 |
-
matcher = LightGlue(features=features).eval().to(device)
|
| 124 |
|
| 125 |
-
|
| 126 |
-
|
| 127 |
-
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
|
| 132 |
-
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
|
| 139 |
-
|
| 140 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 113 |
# if 'oris' not in f:
|
| 114 |
# f['oris'] = torch.zeros(all_keypoints.shape[:-1], device=device)
|
| 115 |
|
| 116 |
+
return feats_merged , feats, h, w
|
| 117 |
+
|
| 118 |
+
def lightglue_matching(feats0, feats1, matcher = None):
    """Run LightGlue on a pair of feature dicts and return the match index tensor."""
    if matcher is None:
        # Lazily build a default SuperPoint-flavoured matcher when none is given.
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        matcher = LightGlue(features='superpoint').eval().to(device)

    prediction = matcher({'image0': feats0, 'image1': feats1})
    # Strip the batch dimension from inputs and output alike.
    feats0, feats1, prediction = (rbd(item) for item in (feats0, feats1, prediction))
    return prediction['matches']
|
| 126 |
+
|
| 127 |
+
def feature_matching(feats0, feats1, matcher = None, exhaustive = True):
    """Rotation-aware LightGlue matching between two images.

    Assumes feats0/feats1 are indexable collections of 4 per-rotation feature
    dicts (rotations 0..3), each with a batched 'keypoints' tensor whose
    shape[1] is the keypoint count — TODO confirm against extract_keypoints.
    Match indices are offset so they address the concatenation of all four
    rotations' keypoints per image.

    Returns an (N, 2) uint32 array of match index pairs, or None when the
    best rotation yields no matches at all.
    """
    best_rot = 0
    best_num_matches = 0
    matches_tensor = None

    # Find the best rotation alignment
    for rot in [0,1,2,3]:
        matches_tensor_rot = lightglue_matching(feats0[0], feats1[rot], matcher = matcher)
        if (len(matches_tensor_rot) > best_num_matches):
            best_num_matches = len(matches_tensor_rot)
            best_rot = rot
            matches_tensor = matches_tensor_rot

    if matches_tensor is not None and len(matches_tensor) > 0:
        matches_np = matches_tensor.cpu().numpy().astype(np.uint32)
    else:
        # No rotation produced any match — caller treats None as "no pair".
        return None

    # Adjust matches to account for rotations: offset image-1 indices past the
    # keypoints of the rotations that precede best_rot in the concatenation.
    for k in range(best_rot):
        matches_np[:,1] += feats1[k]['keypoints'].shape[1]
    all_matches = [matches_np]

    if not exhaustive:
        return matches_np

    # Find the other rotation combinations (relative rotation kept constant,
    # wrapping modulo 4).
    rots = []
    for rot in [1, 2, 3]:
        rot_i = best_rot + rot
        if rot_i >=4:
            rot_i = rot_i -4
        rots.append(rot_i)

    # Compute matches for the other rotation combinations
    for rot_i in [1,2,3]:
        rot_j = rots[rot_i-1]

        matches_tensor_rot = lightglue_matching(feats0[rot_i], feats1[rot_j], matcher = matcher)
        matches_np_i = matches_tensor_rot.cpu().numpy().astype(np.uint32)
        if rot_i > 0:  # NOTE(review): always true — rot_i iterates over [1,2,3]
            for k in range(rot_i):
                matches_np_i[:,0] += feats0[k]['keypoints'].shape[1]
        if rot_j > 0:
            for k in range(rot_j):
                matches_np_i[:,1] += feats1[k]['keypoints'].shape[1]

        all_matches.append(matches_np_i)
        print(f"Rotation {rot_i} vs {rot_j}: {len(matches_tensor_rot)} matches")

    # Stack all matches together
    # NOTE(review): the guard checks only all_matches[0].size; if the best
    # rotation had matches (guaranteed here) this is fine, but the condition
    # would drop later rotations' matches if the first array were empty.
    matches_stacked = (
        np.vstack(all_matches) if len(all_matches) and all_matches[0].size else
        np.empty((0, 2), dtype=np.uint32)
    )

    # if best_rot > 0:
    #     for k in range(best_rot):
    #         print(f"Adjusting for rotation {k}")
    #         matches_np[:,1] += feats1[k]['keypoints'].shape[1]

    # return matches_np
    return matches_stacked
|
| 190 |
+
|
| 191 |
+
|
| 192 |
+
|