# NOTE(review): the four lines below are dataset-extraction residue (a
# "text / stringlengths / 0 / 184" table header), not code — commented out
# so the file parses; kept for provenance.
# text
# stringlengths
# 0
# 184
# Drop matches that fall within a 3 px border of either image: keypoints
# that close to the edge tend to be unreliable.
H0, W0 = view1['true_shape'][0]
H1, W1 = view2['true_shape'][0]

def _away_from_border(pts, height, width, margin=3):
    """Boolean mask: True where (x, y) is at least `margin` px from every edge."""
    xs, ys = pts[:, 0], pts[:, 1]
    return ((xs >= margin) & (xs < int(width) - margin)
            & (ys >= margin) & (ys < int(height) - margin))

valid_matches_im0 = _away_from_border(matches_im0, H0, W0)
valid_matches_im1 = _away_from_border(matches_im1, H1, W1)
# A match is kept only if BOTH endpoints survive the border test.
valid_matches = valid_matches_im0 & valid_matches_im1
matches_im0 = matches_im0[valid_matches]
matches_im1 = matches_im1[valid_matches]
# ---- visualize a few matches side by side ----
# FIX: the original paste had the loop bodies de-indented (syntax error);
# restored proper structure.
import numpy as np
import torch
import torchvision.transforms.functional  # kept from original; unused here
from matplotlib import pyplot as pl

n_viz = 100
num_matches = matches_im0.shape[0]
# Pick n_viz indices evenly spread across all matches (duplicates possible
# when num_matches < n_viz — matches original behavior).
match_idx_to_viz = np.round(np.linspace(0, num_matches - 1, n_viz)).astype(int)
viz_matches_im0, viz_matches_im1 = matches_im0[match_idx_to_viz], matches_im1[match_idx_to_viz]

# Undo the (img - 0.5) / 0.5 normalization applied when the views were loaded.
image_mean = torch.as_tensor([0.5, 0.5, 0.5], device='cpu').reshape(1, 3, 1, 1)
image_std = torch.as_tensor([0.5, 0.5, 0.5], device='cpu').reshape(1, 3, 1, 1)
viz_imgs = []
for i, view in enumerate([view1, view2]):
    rgb_tensor = view['img'] * image_std + image_mean
    # (1, 3, H, W) tensor -> (H, W, 3) numpy image for matplotlib
    viz_imgs.append(rgb_tensor.squeeze(0).permute(1, 2, 0).cpu().numpy())

H0, W0, H1, W1 = *viz_imgs[0].shape[:2], *viz_imgs[1].shape[:2]
print(H0, W0, H1, W1)
# Bottom-pad the shorter image so both have equal height, then concatenate
# horizontally into one canvas.
img0 = np.pad(viz_imgs[0], ((0, max(H1 - H0, 0)), (0, 0), (0, 0)), 'constant', constant_values=0)
img1 = np.pad(viz_imgs[1], ((0, max(H0 - H1, 0)), (0, 0), (0, 0)), 'constant', constant_values=0)
img = np.concatenate((img0, img1), axis=1)
pl.figure()
pl.imshow(img)
cmap = pl.get_cmap('jet')
for i in range(n_viz):
    (x0, y0), (x1, y1) = viz_matches_im0[i].T, viz_matches_im1[i].T
    # Shift the second endpoint by W0: img1 sits to the right of img0.
    pl.plot([x0, x1 + W0], [y0, y1], '-+', color=cmap(i / (n_viz - 1)), scalex=False, scaley=False)
pl.show(block=True)
# ---- re-draw the selected matches on the original full-resolution images ----
# FIX: cv2 and os were used here without any prior import in this script.
import os
import cv2

img0 = cv2.imread(img_path1)
img1 = cv2.imread(img_path2)
# cv2.imread returns None (instead of raising) on a missing/unreadable file;
# fail fast with a clear message rather than an opaque AttributeError below.
if img0 is None:
    raise FileNotFoundError(f'Could not read image: {img_path1}')
if img1 is None:
    raise FileNotFoundError(f'Could not read image: {img_path2}')
H0, W0 = img0.shape[:2]
H1, W1 = img1.shape[:2]
# Rescale keypoints from the network's resized frame back to original resolution.
viz_matches_im0_org = transform_keypoints_to_original(viz_matches_im0, (H0, W0))
viz_matches_im1_org = transform_keypoints_to_original(viz_matches_im1, (H1, W1))
out_org_dir = os.path.join("/kaggle/working", "temp")
os.makedirs(out_org_dir, exist_ok=True)
draw_matches_on_original_images(img_path1, img_path2, viz_matches_im0_org,
                                viz_matches_im1_org, out_org_dir, n_viz=100)
def get_img_pairs_exhaustive(img_fnames):
    """Return every unordered index pair (i, j) with i < j over the image list.

    Builds the exhaustive matching graph: one (i, j) tuple per unique
    combination of two images, in lexicographic order.
    """
    count = len(img_fnames)
    return [(a, b) for a in range(count) for b in range(a + 1, count)]
import os
import torch
import PIL
import numpy as np # Ensure numpy is imported for checking np.ndarray
from PIL import Image
from mast3r.retrieval.processor import Retriever
from mast3r.image_pairs import make_pairs
from mast3r.model import AsymmetricMASt3R
def get_image_list(images_path):
    """
    Scan `images_path` recursively and return a sorted list of valid image files.

    Files that PIL cannot identify, or that fail integrity verification, are
    skipped with a message rather than raising.

    Args:
        images_path: Root directory to walk for image files.

    Returns:
        Sorted list of file paths relative to `images_path`, one per valid image.
    """
    file_list = [os.path.relpath(os.path.join(dirpath, filename), images_path)
                 for dirpath, _, filenames in os.walk(images_path)
                 for filename in filenames]
    file_list = sorted(file_list)
    image_list = []
    for filename in file_list:
        try:
            with Image.open(os.path.join(images_path, filename)) as im:
                im.verify()  # cheap integrity check; does not decode the full image
            image_list.append(filename)
        except (OSError, PIL.UnidentifiedImageError):
            # FIX: message previously printed a literal placeholder instead of
            # the offending filename.
            print(f'Skipping invalid image file: {filename}')
    return image_list
def make_pair_with_mast3r_return_pairs(
image_dir: str,
weights_path: str, # Path to the AsymmetricMASt3R model weights
retrieval_model_path: str, # Path to the retrieval model (e.g., "trainingfree.pth")
scene_graph: str = 'retrieval-20-25',
device: str = 'cuda'
):
"""
Generates image pairs using MASt3R + ASMK retrieval, returning a list of pairs.