# NOTE(review): dataset-viewer table residue, not code — original text:
#   "text stringlengths 0 184" / "|---|"
kpts_original = kpts_resized/ scale_factor |
return kpts_original |
import os |
import numpy as np |
import cv2 |
import matplotlib.pyplot as plt |
def draw_matches_on_original_images(img_path1, img_path2, matches_im0, matches_im1, save_path, n_viz=100):
    """Draw matched keypoints between two original images side by side and save the result.

    Args:
        img_path1: str, path to the first original image.
        img_path2: str, path to the second original image.
        matches_im0: (N, 2) numpy array of (x, y) match coordinates in image 1
            (original-image pixel space).
        matches_im1: (N, 2) numpy array of (x, y) match coordinates in image 2
            (original-image pixel space).
        save_path: str, output *directory*; the visualization is written to
            "<save_path>/<basename1>_<basename2>.jpg".
        n_viz: int, visualize at most this many matches (default 100).
    """
    # BUG FIX: save_path is used as a directory at write time below, so create
    # save_path itself — the old os.makedirs(os.path.dirname(save_path)) only
    # created its parent, and cv2.imwrite fails silently (returns False) when
    # the target directory is missing.
    os.makedirs(save_path, exist_ok=True)
    # Load both images; bail out with a message if either path is unreadable.
    img0 = cv2.imread(img_path1)
    img1 = cv2.imread(img_path2)
    key1 = os.path.basename(img_path1)
    key2 = os.path.basename(img_path2)
    if img0 is None or img1 is None:
        print(f"Error: Cannot load {img_path1} or {img_path2}")
        return
    # Work in RGB internally (matplotlib colormap convention); convert back to
    # BGR only at write time.
    img0 = cv2.cvtColor(img0, cv2.COLOR_BGR2RGB)
    img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
    H0, W0 = img0.shape[:2]
    H1, W1 = img1.shape[:2]
    # Side-by-side canvas, black-padded when the two heights differ.
    canvas_h = max(H0, H1)
    canvas = np.zeros((canvas_h, W0 + W1, 3), dtype=np.uint8)
    canvas[:H0, :W0] = img0
    canvas[:H1, W0:] = img1
    # Visualize an evenly spaced subset of at most n_viz matches.
    num_matches = min(len(matches_im0), n_viz)
    if num_matches > 0:
        idxs = np.round(np.linspace(0, len(matches_im0) - 1, num_matches)).astype(int)
        cmap = plt.get_cmap('rainbow')
        # Normalize by the actual subset size so the colors span the full
        # rainbow even when fewer than n_viz matches are available (the old
        # i / n_viz only used a prefix of the colormap in that case).
        denom = max(num_matches - 1, 1)
        for i, idx in enumerate(idxs):
            (x0, y0) = matches_im0[idx]
            (x1, y1) = matches_im1[idx]
            color = tuple((np.array(cmap(i / denom))[:3] * 255).astype(int).tolist())
            pt1 = (int(round(x0)), int(round(y0)))
            # Right image is offset by the left image's width on the canvas.
            pt2 = (int(round(x1 + W0)), int(round(y1)))
            cv2.line(canvas, pt1, pt2, color, thickness=1, lineType=cv2.LINE_AA)
            cv2.circle(canvas, pt1, 2, color, -1, lineType=cv2.LINE_AA)
            cv2.circle(canvas, pt2, 2, color, -1, lineType=cv2.LINE_AA)
    # Save (convert back to OpenCV's BGR channel order).
    cv2.imwrite(f'{save_path}/{key1}_{key2}.jpg', cv2.cvtColor(canvas, cv2.COLOR_RGB2BGR))
    # print(f"Saved match debug image to {save_path}")
# --- Demo: run the MASt3R/DUSt3R matching pipeline on one ETs image pair ---
# NOTE(review): load_images, inference, mast3r_model, device, CONFIG and
# extract_correspondences_nonsym are defined elsewhere in this notebook/file.
img_path1 = '/kaggle/input/image-matching-challenge-2025/train/ETs/et_et003.png'
img_path2 = '/kaggle/input/image-matching-challenge-2025/train/ETs/et_et006.png'
# Load the pair resized to 512 (so coordinates below are in resized space —
# presumably rescaled back to the originals elsewhere; see the kpts/scale_factor
# helper fragment near the top of this file).
images = load_images([img_path1, img_path2], size=512)
output = inference([tuple(images)], mast3r_model, device, batch_size=1, verbose=True)
# at this stage, you have the raw dust3r predictions
view1, pred1 = output['view1'], output['pred1']
view2, pred2 = output['view2'], output['pred2']
# Dense descriptors per view: drop the batch dim and detach from autograd.
desc1, desc2 = pred1['desc'].squeeze(0).detach(), pred2['desc'].squeeze(0).detach()
# find 2D-2D matches between the two images
# (alternative matcher, kept for reference):
# matches_im0, matches_im1 = fast_reciprocal_NNs(desc1, desc2, subsample_or_initxy1=8,
#                                                device=device, dist='dot', block_size=2**13)
# Descriptor-confidence maps, same treatment as the descriptors.
conf1, conf2 = pred1['desc_conf'].squeeze(0).detach(), pred2['desc_conf'].squeeze(0).detach()
print(f"desc1 shape: {desc1.shape}")
print(f"conf1 shape: {conf1.shape}")
# corres appears to be (pts_im0, pts_im1, score) — TODO confirm against the
# extract_correspondences_nonsym implementation.
corres = extract_correspondences_nonsym(desc1, desc2, conf1, conf2,
                                        device=device, subsample=8, pixel_tol=5)
print(f"corres[0].shape: {corres[0].shape}")
print(f"corres[2].shape: {corres[2].shape}")
# Inspect the match-score distribution to sanity-check the threshold below.
score = corres[2]
print("conf min:", score.min().item())
print("conf max:", score.max().item())
print("conf mean:", score.float().mean().item())
print("conf std:", score.float().std().item())
print("conf median:", score.float().median().item())
# Keep only correspondences whose score clears the configured threshold.
mask = score >= CONFIG.MATCH_CONF_TH
matches_im0 = corres[0][mask].cpu().numpy()
matches_im1 = corres[1][mask].cpu().numpy()
print(f"matches_im0.shape: {matches_im0.shape}")
# Unfiltered variant, kept for reference:
# matches_im0 = corres[0].cpu().numpy()
# matches_im1 = corres[1].cpu().numpy()
# NOTE(review): Hugging Face dataset-page residue, not code — original text:
#   "Subsets and Splits"
#   "No community queries yet"
#   "The top public SQL queries from the community will appear here once available."