# Voldemort108X's picture
# Add files using upload-large-folder tool
# d6145b2 verified
import torch
import torch.nn.functional as F
from torch import Tensor
import numpy as np
def resize_and_pad(image_tensor, output_size):
    """
    Resizes an image tensor to a square shape by scaling and padding.

    Args:
        image_tensor (torch.Tensor): Input image tensor of shape (H, W).
        output_size (int): The desired square output size.

    Returns:
        torch.Tensor: The resized and padded image tensor of shape
        (output_size, output_size).
    """
    original_h, original_w = image_tensor.shape
    # 1. Scale so the longest side fits output_size exactly.
    scale = output_size / max(original_h, original_w)
    # Round instead of truncating: with plain int(), float rounding could make
    # the longest side come out as output_size - 1 (e.g. 149 * (224/149) may
    # evaluate to 223.999...). Clamp to output_size so padding is never negative.
    new_h = min(output_size, int(round(original_h * scale)))
    new_w = min(output_size, int(round(original_w * scale)))
    # Add batch and channel dimensions for F.interpolate.
    image_tensor = image_tensor.unsqueeze(0).unsqueeze(0)
    # 2. Resize the image, preserving aspect ratio.
    resized_image = F.interpolate(image_tensor, size=(new_h, new_w), mode='bilinear', align_corners=False)
    # 3. Split the remaining space evenly between the two sides of each axis.
    pad_h = output_size - new_h
    pad_w = output_size - new_w
    # Padding format is (left, right, top, bottom).
    padding = (pad_w // 2, pad_w - (pad_w // 2), pad_h // 2, pad_h - (pad_h // 2))
    # 4. Pad with a constant value (0 for black).
    padded_image = F.pad(resized_image, padding, "constant", 0)
    return padded_image.squeeze()
def resize(img, target_res=224, resize=True, to_pil=True, edge=False):
    """Fit a PIL image into a target_res x target_res square, keeping aspect ratio.

    The longest side is scaled to target_res (LANCZOS) and the short side is
    centred. With edge=False the remainder is zero-filled (black); with
    edge=True the border rows/columns are replicated via np.pad(mode='edge').

    Args:
        img: Input PIL image.
        target_res: Side length of the square output.
        resize: If False, skip scaling and only place/pad the image as-is.
        to_pil: Return a PIL image instead of a numpy array.
        edge: Select edge-replication padding instead of zero padding.
    """
    orig_w, orig_h = img.size
    n_bands = len(img.getbands())
    if not edge:
        # Zero-filled canvas; single-band images get a 2-D canvas.
        if n_bands == 1:
            canvas = np.zeros([target_res, target_res], dtype=np.uint8)
        else:
            canvas = np.zeros([target_res, target_res, 3], dtype=np.uint8)
        if orig_h <= orig_w:
            # Landscape (or square): width -> target_res, centre vertically.
            if resize:
                scaled_h = int(np.around(target_res * orig_h / orig_w))
                img = img.resize((target_res, scaled_h), Image.Resampling.LANCZOS)
            width, height = img.size
            arr = np.asarray(img)
            canvas[(width - height) // 2: (width + height) // 2] = arr
        else:
            # Portrait: height -> target_res, centre horizontally.
            if resize:
                scaled_w = int(np.around(target_res * orig_w / orig_h))
                img = img.resize((scaled_w, target_res), Image.Resampling.LANCZOS)
            width, height = img.size
            arr = np.asarray(img)
            canvas[:, (height - width) // 2: (height + width) // 2] = arr
    else:
        if orig_h <= orig_w:
            if resize:
                scaled_h = int(np.around(target_res * orig_h / orig_w))
                img = img.resize((target_res, scaled_h), Image.Resampling.LANCZOS)
            width, height = img.size
            arr = np.asarray(img)
            # Replicate top/bottom border rows to fill the square.
            top_pad = (target_res - height) // 2
            bottom_pad = target_res - height - top_pad
            arr = np.pad(arr, pad_width=[(top_pad, bottom_pad), (0, 0), (0, 0)], mode='edge')
        else:
            if resize:
                scaled_w = int(np.around(target_res * orig_w / orig_h))
                img = img.resize((scaled_w, target_res), Image.Resampling.LANCZOS)
            width, height = img.size
            arr = np.asarray(img)
            # Replicate left/right border columns to fill the square.
            left_pad = (target_res - width) // 2
            right_pad = target_res - width - left_pad
            arr = np.pad(arr, pad_width=[(0, 0), (left_pad, right_pad), (0, 0)], mode='edge')
        canvas = arr
    if to_pil:
        canvas = Image.fromarray(canvas)
    return canvas
def scaled_shifted_sigmoid(
    x: Tensor,
    a: float = 1.0,  # vertical scale
    b: float = 1.0,  # slope (steepness)
    c: float = 0.0,  # horizontal shift (bias)
    d: float = 0.0,  # vertical shift (baseline)
) -> Tensor:
    """Apply the affine-sigmoid transform y = a * sigmoid(b * x + c) + d.

    Parameters
    ----------
    x : torch.Tensor
        Input tensor (any shape).
    a : float, default 1.0
        Output amplitude.
    b : float, default 1.0
        Input scale; controls the slope of the transition.
    c : float, default 0.0
        Horizontal translation of the input.
    d : float, default 0.0
        Vertical translation (baseline) of the output.

    Returns
    -------
    torch.Tensor
        Same shape as `x`, transformed elementwise.
    """
    activated = torch.sigmoid(x * b + c)
    return activated * a + d
############
# for 2D to 3D correspondence with cropping
from scipy.ndimage import distance_transform_edt as edt
from scipy.ndimage import gaussian_filter
# from skimage import img_as_ubyte
from PIL import Image
from pathlib import Path
import numpy as np
from typing import Tuple
# ✨ New helper to find the object's bounding box from transparency
def get_bbox_from_alpha(image_path: Path) -> Tuple[int, int, int, int]:
    """Calculate the tight bounding box of the opaque region of a PNG.

    Args:
        image_path: Path to an image with an alpha channel (converted to RGBA).

    Returns:
        (x_min, y_min, x_max, y_max) pixel coordinates of the non-transparent
        region (alpha > 0), inclusive.

    Raises:
        ValueError: If the image is fully transparent (no bounding box exists).
    """
    # Open via context manager on the *original* image so its file handle is
    # closed; the previous code closed only the converted copy, leaking the
    # handle of the opened file.
    with Image.open(image_path) as img:
        alpha = np.array(img.convert("RGBA"))[:, :, 3]
    non_transparent_pixels = np.argwhere(alpha > 0)
    if non_transparent_pixels.size == 0:
        # Previously this fell through to numpy's cryptic
        # "zero-size array to reduction operation" error.
        raise ValueError(f"Image '{image_path}' is fully transparent; no bounding box.")
    y_min, x_min = non_transparent_pixels.min(axis=0)
    y_max, x_max = non_transparent_pixels.max(axis=0)
    return x_min, y_min, x_max, y_max
# ... (rest of your imports and functions)
#####################
# dataset utils loading functions
#####################
import os
import json
import numpy as np
import pandas as pd
import torch
from glob import glob
# from scipy.io import loadmat as read_mat
import scipy.io as sio
def read_mat(path, obj_name):
    r"""Load a single named variable from a MATLAB data file (.mat).

    Args:
        path: Path to the ``.mat`` file.
        obj_name: Name of the variable to extract from the file.

    Returns:
        The stored object (as loaded by :func:`scipy.io.loadmat`).
    """
    contents = sio.loadmat(path)
    return contents[obj_name]
def process_kps_pascal(kps):
    """Convert raw PASCAL keypoints of shape (N, 2) into a (20, 3) float tensor.

    Rows are padded with NaN up to 20, a visibility column is appended
    (1 = valid, 0 = padded/NaN), and invalid rows are zeroed out.

    Args:
        kps: Array-like of shape (N, 2) with N <= 20; may contain NaNs.

    Returns:
        torch.FloatTensor of shape (20, 3): columns are (x, y, visibility).
    """
    # Work on a float copy: the original implementation zeroed rows of the
    # caller's array in place whenever no padding was needed (N == 20).
    kps = np.array(kps, dtype=np.float64, copy=True)
    # Step 1: pad with NaN rows to reach 20 keypoints.
    num_pad_rows = 20 - kps.shape[0]
    if num_pad_rows > 0:
        pad_values = np.full((num_pad_rows, 2), np.nan)
        kps = np.vstack((kps, pad_values))
    # Step 2: append a visibility column — 1 where the row has no NaN, else 0.
    last_col = np.isnan(kps).any(axis=1)
    last_col = np.where(last_col, 0, 1)
    kps = np.column_stack((kps, last_col))
    # Step 3: zero out any row that contained a NaN.
    mask = np.isnan(kps).any(axis=1)
    kps[mask] = 0
    return torch.tensor(kps).float()
def preprocess_kps_pad(kps, img_width, img_height, size):
    """Map keypoints from original image coordinates into the coordinates of
    the aspect-preserving, centre-padded square canvas of side `size`.

    Once an image has been pre-processed via border (or zero) padding, the
    key-point locations must be updated accordingly: scale by the resize
    factor, then shift along the padded axis to account for centring.

    Args:
        kps: (N, 3) tensor of (x, y, visibility) rows.
        img_width: Original image width.
        img_height: Original image height.
        size: Side of the square canvas.

    Returns:
        (kps, offset_x, offset_y, scale) — the transformed keypoints (with
        non-visible rows zeroed out) and the applied offsets/scale.
    """
    kps = kps.clone()
    scale = size / max(img_width, img_height)
    kps[:, [0, 1]] *= scale
    offset_x = offset_y = 0
    if img_height < img_width:
        # Landscape: the image is centred vertically on the canvas.
        resized_h = int(np.around(size * img_height / img_width))
        offset_y = int((size - resized_h) / 2)
        kps[:, 1] += offset_y
    elif img_width < img_height:
        # Portrait: the image is centred horizontally on the canvas.
        resized_w = int(np.around(size * img_width / img_height))
        offset_x = int((size - resized_w) / 2)
        kps[:, 0] += offset_x
    # Zero out rows whose visibility flag (column 2) is 0.
    kps *= kps[:, 2:3].clone()
    return kps, offset_x, offset_y, scale
def load_pascal_data(path="data/PF-dataset-PASCAL", size=256, category='cat', split='test', subsample=None):
    """Load every PF-PASCAL image pair and keypoint set for one category.

    Args:
        path: Root of the PF-dataset-PASCAL directory; images are resolved
            one level above it (f'{path}/../...').
        size: Side of the square canvas the keypoints are mapped onto.
        category: One of the 20 PASCAL VOC class names.
        split: CSV split prefix ('test', 'val', or anything starting with
            'train', which switches to .mat annotation files).
        subsample: Unused; kept for signature parity with load_spair_data.

    Returns:
        (files, kps, None, used_kps): interleaved [src, trg, src, trg, ...]
        image paths, a (2*N, K, 3) keypoint tensor restricted to keypoints
        visible in at least one image, a thresholds placeholder, and the
        indices of the kept keypoints.
    """
    def get_points(point_coords_list, idx):
        # Coordinates are stored as ';'-separated strings in the CSV columns.
        X = np.fromstring(point_coords_list.iloc[idx, 0], sep=";")
        Y = np.fromstring(point_coords_list.iloc[idx, 1], sep=";")
        # Pad to a fixed 20 slots; Zmask flags which slots hold real points.
        Xpad = -np.ones(20)
        Xpad[: len(X)] = X
        Ypad = -np.ones(20)
        Ypad[: len(X)] = Y
        Zmask = np.zeros(20)
        Zmask[: len(X)] = 1
        point_coords = np.concatenate(
            (Xpad.reshape(1, 20), Ypad.reshape(1, 20), Zmask.reshape(1,20)), axis=0
        )
        # make arrays float tensor for subsequent processing
        point_coords = torch.Tensor(point_coords.astype(np.float32))
        return point_coords
    np.random.seed(42)
    files = []
    kps = []
    test_data = pd.read_csv(f'{path}/{split}_pairs_pf_pascal.csv')
    cls = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
           'bus', 'car', 'cat', 'chair', 'cow',
           'diningtable', 'dog', 'horse', 'motorbike', 'person',
           'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
    # Column 2 of the CSV holds the 1-based class id; keep this category only.
    cls_ids = test_data.iloc[:,2].values.astype("int") - 1
    cat_id = cls.index(category)
    subset_id = np.where(cls_ids == cat_id)[0]
    # logger.info(f'Number of Pairs for {category} = {len(subset_id)}')
    subset_pairs = test_data.iloc[subset_id,:]
    src_img_names = np.array(subset_pairs.iloc[:,0])
    trg_img_names = np.array(subset_pairs.iloc[:,1])
    # print(src_img_names.shape, trg_img_names.shape)
    if not split.startswith('train'):
        # Non-train splits carry the annotated coordinates inline in the CSV.
        point_A_coords = subset_pairs.iloc[:,3:5]
        point_B_coords = subset_pairs.iloc[:,5:]
        # print(point_A_coords.shape, point_B_coords.shape)
    for i in range(len(src_img_names)):
        src_fn= f'{path}/../{src_img_names[i]}'
        trg_fn= f'{path}/../{trg_img_names[i]}'
        src_size=Image.open(src_fn).size
        trg_size=Image.open(trg_fn).size
        if not split.startswith('train'):
            # (3, 20) -> (20, 3) so rows are (x, y, visibility).
            point_coords_src = get_points(point_A_coords, i).transpose(1,0)
            point_coords_trg = get_points(point_B_coords, i).transpose(1,0)
        else:
            # Train split: read keypoints from per-image .mat annotation files.
            src_anns = os.path.join(path, 'Annotations', category,
                                    os.path.basename(src_fn))[:-4] + '.mat'
            trg_anns = os.path.join(path, 'Annotations', category,
                                    os.path.basename(trg_fn))[:-4] + '.mat'
            point_coords_src = process_kps_pascal(read_mat(src_anns, 'kps'))
            point_coords_trg = process_kps_pascal(read_mat(trg_anns, 'kps'))
        # print(src_size)
        # Map keypoints into the `size`-square padded canvas coordinates.
        source_kps, src_x, src_y, src_scale = preprocess_kps_pad(point_coords_src, src_size[0], src_size[1], size)
        target_kps, trg_x, trg_y, trg_scale = preprocess_kps_pad(point_coords_trg, trg_size[0], trg_size[1], size)
        kps.append(source_kps)
        kps.append(target_kps)
        files.append(src_fn)
        files.append(trg_fn)
    kps = torch.stack(kps)
    # Keep only keypoints visible in at least one image across the whole set.
    used_kps, = torch.where(kps[:, :, 2].any(dim=0))
    kps = kps[:, used_kps, :]
    # logger.info(f'Final number of used key points: {kps.size(1)}')
    return files, kps, None, used_kps
def load_spair_data(path="data/SPair-71k", size=256, category='cat', split='test', subsample=None):
    """Load SPair-71k pair annotations and keypoints for one category.

    Args:
        path: Root of the SPair-71k directory.
        size: Side of the square canvas keypoints are mapped onto.
        category: SPair category name (e.g. 'cat').
        split: 'test', 'val', or 'trn'.
        subsample: If a positive int, randomly pick that many pairs
            (seeded at 42 for reproducibility; may pick duplicates).

    Returns:
        (files, kps, thresholds, used_kps): interleaved [src, trg, ...] image
        paths, a (2*N, K, 3) keypoint tensor restricted to keypoints visible
        somewhere, per-evaluation-image PCK thresholds rescaled to the canvas,
        and the indices of the kept keypoints.
    """
    np.random.seed(42)
    pairs = sorted(glob(f'{path}/PairAnnotation/{split}/*:{category}.json'))
    if subsample is not None and subsample > 0:
        pairs = [pairs[ix] for ix in np.random.choice(len(pairs), subsample)]
    files = []
    thresholds = []
    kps = []
    # The number of keypoints is category-specific; read it once up front.
    category_anno = list(glob(f'{path}/ImageAnnotation/{category}/*.json'))[0]
    with open(category_anno) as f:
        num_kps = len(json.load(f)['kps'])
    for pair in pairs:
        source_kps = torch.zeros(num_kps, 3)
        target_kps = torch.zeros(num_kps, 3)
        with open(pair) as f:
            data = json.load(f)
        assert category == data["category"]
        source_fn = f'{path}/JPEGImages/{category}/{data["src_imname"]}'
        target_fn = f'{path}/JPEGImages/{category}/{data["trg_imname"]}'
        # Per-image annotation paths: swap the tree and the file extension.
        # Fixed: replace the '.jpg' suffix, not any 'jpg' substring, so file
        # stems containing 'jpg' cannot be mangled.
        source_json_name = source_fn.replace('JPEGImages', 'ImageAnnotation').replace('.jpg', '.json')
        target_json_name = target_fn.replace('JPEGImages', 'ImageAnnotation').replace('.jpg', '.json')
        source_bbox = np.asarray(data["src_bndbox"])  # (x1, y1, x2, y2)
        target_bbox = np.asarray(data["trg_bndbox"])
        with open(source_json_name) as f:
            kpts_src = json.load(f)['kps']
        with open(target_json_name) as f:
            kpts_trg = json.load(f)['kps']
        source_size = data["src_imsize"][:2]  # (W, H)
        target_size = data["trg_imsize"][:2]  # (W, H)
        # Fixed: iterate over num_kps instead of a hard-coded 30; categories
        # with fewer keypoints previously raised KeyError on the dict lookup.
        for i in range(num_kps):
            point = kpts_src[str(i)]
            if point is None:
                source_kps[i, :3] = 0
            else:
                source_kps[i, :2] = torch.Tensor(point).float()  # set x and y
                source_kps[i, 2] = 1  # mark visible
        source_kps, src_x, src_y, src_scale = preprocess_kps_pad(source_kps, source_size[0], source_size[1], size)
        for i in range(num_kps):
            point = kpts_trg[str(i)]
            if point is None:
                target_kps[i, :3] = 0
            else:
                target_kps[i, :2] = torch.Tensor(point).float()
                target_kps[i, 2] = 1
        target_kps, trg_x, trg_y, trg_scale = preprocess_kps_pad(target_kps, target_size[0], target_size[1], size)
        # PCK threshold: longest bbox side of the evaluation image, rescaled
        # to canvas coordinates ('trn' evaluates both directions).
        if split == 'test' or split == 'val':
            thresholds.append(max(target_bbox[3] - target_bbox[1], target_bbox[2] - target_bbox[0]) * trg_scale)
        elif split == 'trn':
            thresholds.append(max(source_bbox[3] - source_bbox[1], source_bbox[2] - source_bbox[0]) * src_scale)
            thresholds.append(max(target_bbox[3] - target_bbox[1], target_bbox[2] - target_bbox[0]) * trg_scale)
        kps.append(source_kps)
        kps.append(target_kps)
        files.append(source_fn)
        files.append(target_fn)
    kps = torch.stack(kps)
    # Keep only keypoints visible in at least one image across the whole set.
    used_kps, = torch.where(kps[:, :, 2].any(dim=0))
    kps = kps[:, used_kps, :]
    return files, kps, thresholds, used_kps
def load_specific_pascal_pair(
    source_image_id: str,
    target_image_id: str,
    path: str = "data/PF-dataset-PASCAL",
    size: int = 256,
    split: str = 'test'
):
    """
    Loads and processes a specific pair of source and target images from the PASCAL dataset.

    Args:
        source_image_id: The identifier of the source image (e.g., '2011_001407').
        target_image_id: The identifier of the target image (e.g., '2010_004184').
        path: The base path to the PF-PASCAL dataset directory.
        size: The target size for preprocessing images.
        split: The dataset split to use ('test', 'train', etc.).

    Returns:
        A tuple containing:
        - files (list): A list with the full paths to the source and target images.
        - kps (torch.Tensor): A tensor of processed keypoints for the image pair.
        - None: A placeholder to match the original function's return format.
        - used_kps_indices (torch.Tensor): A tensor of indices for keypoints present in either image.
        On any lookup failure (missing CSV, pair, or image file) the function
        prints an error and returns (None, None, None, None) instead of raising.
    """
    def get_points_from_strings(x_str: str, y_str: str) -> torch.Tensor:
        """Parses ';'-separated coordinate strings, pads them to 20 slots,
        and returns a (3, 20) tensor of (X, Y, valid-mask) rows."""
        X = np.fromstring(x_str, sep=";")
        Y = np.fromstring(y_str, sep=";")
        # Pad arrays to a fixed size of 20 (as in the original function)
        Xpad = -np.ones(20)
        Xpad[:len(X)] = X
        Ypad = -np.ones(20)
        Ypad[:len(Y)] = Y
        # Create a mask for valid keypoints
        Zmask = np.zeros(20)
        Zmask[:len(X)] = 1
        point_coords = np.stack((Xpad, Ypad, Zmask), axis=0)
        return torch.from_numpy(point_coords.astype(np.float32))
    # Construct the path to the CSV file and load it
    csv_path = os.path.join(path, f'{split}_pairs_pf_pascal.csv')
    try:
        pairs_df = pd.read_csv(csv_path)
    except FileNotFoundError:
        print(f"Error: CSV file not found at '{csv_path}'")
        return None, None, None, None
    # Find the specific row matching the source and target image IDs
    # (substring match on the CSV's image-path columns).
    pair_row = pairs_df[
        pairs_df['source_image'].str.contains(source_image_id) &
        pairs_df['target_image'].str.contains(target_image_id)
    ]
    if pair_row.empty:
        print(f"Error: Pair for source '{source_image_id}' and target '{target_image_id}' not found.")
        return None, None, None, None
    # Select the first match
    pair_data = pair_row.iloc[0]
    # Get full image paths (resolved one level above `path`) and dimensions
    src_fn = os.path.join(path, '..', pair_data['source_image'])
    trg_fn = os.path.join(path, '..', pair_data['target_image'])
    try:
        src_size = Image.open(src_fn).size
        trg_size = Image.open(trg_fn).size
    except FileNotFoundError as e:
        print(f"Error: Image file not found: {e.filename}")
        return None, None, None, None
    # Process keypoints based on the split type
    if not split.startswith('train'):
        # Non-train splits carry coordinates inline in the CSV; transpose to (20, 3).
        point_coords_src = get_points_from_strings(pair_data['XA'], pair_data['YA']).T
        point_coords_trg = get_points_from_strings(pair_data['XB'], pair_data['YB']).T
    else:
        # This logic for the 'train' split is preserved from the original function:
        # keypoints come from per-image .mat annotation files.
        cls_list = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',
                    'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',
                    'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
        category = cls_list[pair_data['class'] - 1]
        src_anns_path = os.path.join(path, 'Annotations', category, os.path.basename(src_fn).replace('.jpg', '.mat'))
        trg_anns_path = os.path.join(path, 'Annotations', category, os.path.basename(trg_fn).replace('.jpg', '.mat'))
        point_coords_src = process_kps_pascal(read_mat(src_anns_path, 'kps'))
        point_coords_trg = process_kps_pascal(read_mat(trg_anns_path, 'kps'))
    # Preprocess keypoints (e.g., padding and scaling) into canvas coordinates
    source_kps, _, _, _ = preprocess_kps_pad(point_coords_src, src_size[0], src_size[1], size)
    target_kps, _, _, _ = preprocess_kps_pad(point_coords_trg, trg_size[0], trg_size[1], size)
    # Stack keypoints and find the indices of keypoints present in at least one image
    kps = torch.stack([source_kps, target_kps])
    used_kps_indices, = torch.where(kps[:, :, 2].any(dim=0))
    # Filter the keypoints tensor to include only the used keypoints
    kps_final = kps[:, used_kps_indices, :]
    return [src_fn, trg_fn], kps_final, None, used_kps_indices
import matplotlib.pyplot as plt
def load_img_and_kps(idx, files, kps, img_size=224, edge=False, load_masked=False):
    """Load image `idx` from `files`, resize/pad it to a square, and return it
    together with its keypoints.

    Args:
        idx: Index into `files` / `kps`.
        files: List of image paths.
        kps: Keypoint tensor indexed alongside `files`.
        img_size: Side of the square canvas passed to `resize`.
        edge: Use edge-replication padding instead of zero padding.
        load_masked: Load the background-removed '*_bgd_rmv.png' RGBA variant
            and composite it onto a black background.

    Returns:
        (img, img_kps): the preprocessed PIL image and its keypoints row.
    """
    if load_masked:
        # Background-removed images live in a parallel 'JPEGImages_bgd_rmv' tree.
        img_rgba = Image.open(files[idx].replace('JPEGImages', 'JPEGImages_bgd_rmv').replace('.jpg', '_bgd_rmv.png')).convert('RGBA')
        # img_rgba = Image.open(path_image).convert("RGBA")
        # 2. composite onto a solid background (fill here is black (0, 0, 0))
        img = Image.new("RGB", img_rgba.size, (0, 0, 0))  # choose any colour here
        img.paste(img_rgba, mask=img_rgba.split()[3])  # mask = alpha channel
        # NOTE(review): debug artifact — writes a PNG into the CWD on every call.
        plt.imsave("img2_masked_before_resize.png", np.array(img))
        # print(np.array(img).shape)
    else:
        img = Image.open(files[idx]).convert('RGB')
    img = resize(img, img_size, resize=True, to_pil=True, edge=edge)
    if load_masked:
        # NOTE(review): debug artifact — see above.
        plt.imsave("img2_masked_after_resize.png", np.array(img))
    img_kps = kps[idx]
    return img, img_kps
import os
import json
from glob import glob
import numpy as np
import torch
# NOTE: The helper function preprocess_kps_pad(kps, width, height, size)
# is assumed to be defined elsewhere, as in your original code.
def load_specific_spair_pair(
    source_image_name: str,
    target_image_name: str,
    category: str,
    path: str = "data/SPair-71k",
    size: int = 256,
    split: str = 'test',
    unfiltered: bool = False
):
    """
    Loads and processes a specific pair of images from the SPair-71k dataset.

    Args:
        source_image_name (str): Filename of the source image (e.g., '2008_002719.jpg').
        target_image_name (str): Filename of the target image (e.g., '2008_004100.jpg').
        category (str): The object category (e.g., 'aeroplane').
        path (str): The base path to the SPair-71k dataset directory.
        size (int): The target size for preprocessing images.
        split (str): The dataset split to use ('test', 'trn', 'val').
        unfiltered (bool): If True, return the full keypoint tensor instead of
            one restricted to keypoints visible in either image.

    Returns:
        A tuple containing:
        - files (list): Full paths to the source and target images.
        - kps (torch.Tensor): Processed keypoints for the pair.
        - thresholds (list): Bounding-box based thresholds for the pair.
        - used_kps_indices (torch.Tensor): Indices of keypoints present in either image.
        On a failed lookup, prints an error and returns (None, None, None, None).
    """
    # Helper to create a keypoint tensor from the annotation dictionary
    def _get_kps_tensor(kps_dict, num_kps):
        kps_tensor = torch.zeros(num_kps, 3)
        for i in range(num_kps):
            point = kps_dict.get(str(i))  # Use .get() for safety
            if point is not None:
                kps_tensor[i, :2] = torch.tensor(point, dtype=torch.float)
                kps_tensor[i, 2] = 1.0  # Mark as visible
        return kps_tensor
    # --- 1. Find the correct pair annotation file ---
    # Pair annotations are scanned linearly until the requested pair matches.
    pair_annotation_path = os.path.join(path, 'PairAnnotation', split)
    candidate_files = glob(os.path.join(pair_annotation_path, f'*:{category}.json'))
    pair_data = None
    for file_path in candidate_files:
        with open(file_path) as f:
            data = json.load(f)
        if data['src_imname'] == source_image_name and data['trg_imname'] == target_image_name:
            pair_data = data
            break
    if pair_data is None:
        print(f"Error: Pair for '{source_image_name}' and '{target_image_name}' not found.")
        return None, None, None, None
    # --- 2. Process the found pair ---
    source_fn = os.path.join(path, 'JPEGImages', category, pair_data['src_imname'])
    target_fn = os.path.join(path, 'JPEGImages', category, pair_data['trg_imname'])
    files = [source_fn, target_fn]
    # Get total number of keypoints for the category
    try:
        category_anno_path = glob(os.path.join(path, 'ImageAnnotation', category, '*.json'))[0]
        with open(category_anno_path) as f:
            num_kps = len(json.load(f)['kps'])
    except IndexError:
        print(f"Error: No image annotations found for category '{category}'.")
        return None, None, None, None
    # Get keypoints from individual image annotation files
    # (swap the tree and the extension to locate them).
    source_json_path = source_fn.replace('JPEGImages', 'ImageAnnotation').replace('.jpg', '.json')
    target_json_path = target_fn.replace('JPEGImages', 'ImageAnnotation').replace('.jpg', '.json')
    with open(source_json_path) as f:
        kpts_src_dict = json.load(f)['kps']
    with open(target_json_path) as f:
        kpts_trg_dict = json.load(f)['kps']
    source_kps_raw = _get_kps_tensor(kpts_src_dict, num_kps)
    target_kps_raw = _get_kps_tensor(kpts_trg_dict, num_kps)
    # print(f"Source keypoints raw: {source_kps_raw.shape}, Target keypoints raw: {target_kps_raw.shape}")
    # Preprocess keypoints (padding, scaling, etc.) into canvas coordinates
    w_src, h_src = pair_data["src_imsize"][:2]
    w_trg, h_trg = pair_data["trg_imsize"][:2]
    source_kps, src_x, src_y, src_scale = preprocess_kps_pad(source_kps_raw, w_src, h_src, size)
    target_kps, trg_x, trg_y, trg_scale = preprocess_kps_pad(target_kps_raw, w_trg, h_trg, size)
    # Calculate thresholds from bounding boxes (longest side, rescaled)
    source_bbox = np.asarray(pair_data["src_bndbox"])
    target_bbox = np.asarray(pair_data["trg_bndbox"])
    thresholds = []
    if split == 'test' or split == 'val':
        thresholds.append(max(target_bbox[3] - target_bbox[1], target_bbox[2] - target_bbox[0]) * trg_scale)
    elif split == 'trn':
        thresholds.append(max(source_bbox[3] - source_bbox[1], source_bbox[2] - source_bbox[0]) * src_scale)
        thresholds.append(max(target_bbox[3] - target_bbox[1], target_bbox[2] - target_bbox[0]) * trg_scale)
    # --- 3. Format output ---
    kps = torch.stack([source_kps, target_kps])
    used_kps_indices, = torch.where(kps[:, :, 2].any(dim=0))
    kps_final = kps[:, used_kps_indices, :]
    if unfiltered:
        return files, kps, thresholds, used_kps_indices
    else:
        return files, kps_final, thresholds, used_kps_indices
######################################
# original loading function
######################################
def load_spair_data(path="data/SPair-71k", size=256, category='cat', split='test', subsample=None):
    """Load SPair-71k pair annotations and keypoints for one category.

    NOTE(review): this is a duplicate re-definition — an identical
    load_spair_data appears earlier in this file; this later definition is the
    one in effect at import time. Consider deleting one copy.

    Args:
        path: Root of the SPair-71k directory.
        size: Side of the square canvas keypoints are mapped onto.
        category: SPair category name (e.g. 'cat').
        split: 'test', 'val', or 'trn'.
        subsample: If a positive int, randomly pick that many pairs
            (seeded at 42 for reproducibility; may pick duplicates).

    Returns:
        (files, kps, thresholds, used_kps): interleaved [src, trg, ...] image
        paths, a (2*N, K, 3) keypoint tensor restricted to keypoints visible
        somewhere, per-evaluation-image PCK thresholds rescaled to the canvas,
        and the indices of the kept keypoints.
    """
    np.random.seed(42)
    pairs = sorted(glob(f'{path}/PairAnnotation/{split}/*:{category}.json'))
    if subsample is not None and subsample > 0:
        pairs = [pairs[ix] for ix in np.random.choice(len(pairs), subsample)]
    files = []
    thresholds = []
    kps = []
    # The number of keypoints is category-specific; read it once up front.
    category_anno = list(glob(f'{path}/ImageAnnotation/{category}/*.json'))[0]
    with open(category_anno) as f:
        num_kps = len(json.load(f)['kps'])
    for pair in pairs:
        source_kps = torch.zeros(num_kps, 3)
        target_kps = torch.zeros(num_kps, 3)
        with open(pair) as f:
            data = json.load(f)
        assert category == data["category"]
        source_fn = f'{path}/JPEGImages/{category}/{data["src_imname"]}'
        target_fn = f'{path}/JPEGImages/{category}/{data["trg_imname"]}'
        # Per-image annotation paths: swap the tree and the file extension.
        # Fixed: replace the '.jpg' suffix, not any 'jpg' substring.
        source_json_name = source_fn.replace('JPEGImages', 'ImageAnnotation').replace('.jpg', '.json')
        target_json_name = target_fn.replace('JPEGImages', 'ImageAnnotation').replace('.jpg', '.json')
        source_bbox = np.asarray(data["src_bndbox"])  # (x1, y1, x2, y2)
        target_bbox = np.asarray(data["trg_bndbox"])
        with open(source_json_name) as f:
            kpts_src = json.load(f)['kps']
        with open(target_json_name) as f:
            kpts_trg = json.load(f)['kps']
        source_size = data["src_imsize"][:2]  # (W, H)
        target_size = data["trg_imsize"][:2]  # (W, H)
        # Fixed: iterate over num_kps instead of a hard-coded 30; categories
        # with fewer keypoints previously raised KeyError on the dict lookup.
        for i in range(num_kps):
            point = kpts_src[str(i)]
            if point is None:
                source_kps[i, :3] = 0
            else:
                source_kps[i, :2] = torch.Tensor(point).float()  # set x and y
                source_kps[i, 2] = 1  # mark visible
        source_kps, src_x, src_y, src_scale = preprocess_kps_pad(source_kps, source_size[0], source_size[1], size)
        for i in range(num_kps):
            point = kpts_trg[str(i)]
            if point is None:
                target_kps[i, :3] = 0
            else:
                target_kps[i, :2] = torch.Tensor(point).float()
                target_kps[i, 2] = 1
        target_kps, trg_x, trg_y, trg_scale = preprocess_kps_pad(target_kps, target_size[0], target_size[1], size)
        # PCK threshold: longest bbox side of the evaluation image, rescaled
        # to canvas coordinates ('trn' evaluates both directions).
        if split == 'test' or split == 'val':
            thresholds.append(max(target_bbox[3] - target_bbox[1], target_bbox[2] - target_bbox[0]) * trg_scale)
        elif split == 'trn':
            thresholds.append(max(source_bbox[3] - source_bbox[1], source_bbox[2] - source_bbox[0]) * src_scale)
            thresholds.append(max(target_bbox[3] - target_bbox[1], target_bbox[2] - target_bbox[0]) * trg_scale)
        kps.append(source_kps)
        kps.append(target_kps)
        files.append(source_fn)
        files.append(target_fn)
    kps = torch.stack(kps)
    # Keep only keypoints visible in at least one image across the whole set.
    used_kps, = torch.where(kps[:, :, 2].any(dim=0))
    kps = kps[:, used_kps, :]
    return files, kps, thresholds, used_kps
def load_pascal_data(path="data/PF-dataset-PASCAL", size=256, category='cat', split='test', subsample=None):
    """Load every PF-PASCAL image pair and keypoint set for one category.

    NOTE(review): this is a duplicate re-definition — an identical
    load_pascal_data appears earlier in this file; this later definition is
    the one in effect at import time. Consider deleting one copy.

    Returns:
        (files, kps, None, used_kps): interleaved [src, trg, ...] image paths,
        a (2*N, K, 3) keypoint tensor restricted to keypoints visible in at
        least one image, a thresholds placeholder, and the kept indices.
    """
    def get_points(point_coords_list, idx):
        # Coordinates are stored as ';'-separated strings in the CSV columns.
        X = np.fromstring(point_coords_list.iloc[idx, 0], sep=";")
        Y = np.fromstring(point_coords_list.iloc[idx, 1], sep=";")
        # Pad to a fixed 20 slots; Zmask flags which slots hold real points.
        Xpad = -np.ones(20)
        Xpad[: len(X)] = X
        Ypad = -np.ones(20)
        Ypad[: len(X)] = Y
        Zmask = np.zeros(20)
        Zmask[: len(X)] = 1
        point_coords = np.concatenate(
            (Xpad.reshape(1, 20), Ypad.reshape(1, 20), Zmask.reshape(1,20)), axis=0
        )
        # make arrays float tensor for subsequent processing
        point_coords = torch.Tensor(point_coords.astype(np.float32))
        return point_coords
    np.random.seed(42)
    files = []
    kps = []
    test_data = pd.read_csv(f'{path}/{split}_pairs_pf_pascal.csv')
    cls = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
           'bus', 'car', 'cat', 'chair', 'cow',
           'diningtable', 'dog', 'horse', 'motorbike', 'person',
           'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
    # Column 2 of the CSV holds the 1-based class id; keep this category only.
    cls_ids = test_data.iloc[:,2].values.astype("int") - 1
    cat_id = cls.index(category)
    subset_id = np.where(cls_ids == cat_id)[0]
    # logger.info(f'Number of Pairs for {category} = {len(subset_id)}')
    subset_pairs = test_data.iloc[subset_id,:]
    src_img_names = np.array(subset_pairs.iloc[:,0])
    trg_img_names = np.array(subset_pairs.iloc[:,1])
    # print(src_img_names.shape, trg_img_names.shape)
    if not split.startswith('train'):
        # Non-train splits carry the annotated coordinates inline in the CSV.
        point_A_coords = subset_pairs.iloc[:,3:5]
        point_B_coords = subset_pairs.iloc[:,5:]
        # print(point_A_coords.shape, point_B_coords.shape)
    for i in range(len(src_img_names)):
        src_fn= f'{path}/../{src_img_names[i]}'
        trg_fn= f'{path}/../{trg_img_names[i]}'
        src_size=Image.open(src_fn).size
        trg_size=Image.open(trg_fn).size
        if not split.startswith('train'):
            # (3, 20) -> (20, 3) so rows are (x, y, visibility).
            point_coords_src = get_points(point_A_coords, i).transpose(1,0)
            point_coords_trg = get_points(point_B_coords, i).transpose(1,0)
        else:
            # Train split: read keypoints from per-image .mat annotation files.
            src_anns = os.path.join(path, 'Annotations', category,
                                    os.path.basename(src_fn))[:-4] + '.mat'
            trg_anns = os.path.join(path, 'Annotations', category,
                                    os.path.basename(trg_fn))[:-4] + '.mat'
            point_coords_src = process_kps_pascal(read_mat(src_anns, 'kps'))
            point_coords_trg = process_kps_pascal(read_mat(trg_anns, 'kps'))
        # print(src_size)
        # Map keypoints into the `size`-square padded canvas coordinates.
        source_kps, src_x, src_y, src_scale = preprocess_kps_pad(point_coords_src, src_size[0], src_size[1], size)
        target_kps, trg_x, trg_y, trg_scale = preprocess_kps_pad(point_coords_trg, trg_size[0], trg_size[1], size)
        kps.append(source_kps)
        kps.append(target_kps)
        files.append(src_fn)
        files.append(trg_fn)
    kps = torch.stack(kps)
    # Keep only keypoints visible in at least one image across the whole set.
    used_kps, = torch.where(kps[:, :, 2].any(dim=0))
    kps = kps[:, used_kps, :]
    # logger.info(f'Final number of used key points: {kps.size(1)}')
    return files, kps, None, used_kps
def load_eval_data(args, path, category, split):
    """Dispatch to the dataset-specific loader selected by args.EVAL_DATASET.

    Args:
        args: Config object with EVAL_DATASET ('pascal' or 'spair'),
            ANNO_SIZE, and TEST_SAMPLE attributes.
        path: Dataset root path forwarded to the loader.
        category: Category name forwarded to the loader.
        split: Split name forwarded to the loader.

    Returns:
        (files, kps, thresholds, used_kps) from the selected loader.

    Raises:
        ValueError: If args.EVAL_DATASET names an unsupported dataset
            (previously this fell through to an UnboundLocalError).
    """
    print(f"Loading evaluation data for dataset: {args.EVAL_DATASET}, category: {category}, split: {split}, test sample: {args.TEST_SAMPLE}")
    if args.EVAL_DATASET == 'pascal':
        files, kps, thresholds, used_kps = load_pascal_data(path, args.ANNO_SIZE, category, split, args.TEST_SAMPLE)
    elif args.EVAL_DATASET == 'spair':
        files, kps, thresholds, used_kps = load_spair_data(path, args.ANNO_SIZE, category, split, args.TEST_SAMPLE)
    else:
        raise ValueError(f"Unsupported EVAL_DATASET: {args.EVAL_DATASET!r} (expected 'pascal' or 'spair')")
    return files, kps, thresholds, used_kps
###### plot helper
from PIL import Image, ImageDraw, ImageFont
def draw_bbox_point_grid(
    image,
    bbox=None,
    point=None,
    box_color=(0, 255, 0),
    pt_color=(255, 0, 0),
    width=5,
    draw_grid=False,
    step=50,  # pixels between grid lines
    grid_color=(255, 255, 255),
    grid_width=1,
    add_text=True,
    dilation=28
):
    """Draw bbox, point, and optional grid on a PIL image (in place).

    Args
    ----
    image (PIL.Image): target image (modified in place).
    bbox (list | tuple): [x1, y1, x2, y2] or None.
    point (tuple): (x, y) or None.
    box_color (tuple): RGB for the bbox outline.
    pt_color (tuple): RGB for the point marker.
    width (int): line width for bbox.
    draw_grid (bool): enable/disable grid.
    step (int): grid spacing in pixels.
    grid_color (tuple): RGB for grid.
    grid_width (int): line width for grid.
    add_text (bool): label the point marker with "Ref".
    dilation (int): pixels to expand the bbox on every side before drawing.

    Returns the same PIL image for convenience.
    """
    draw = ImageDraw.Draw(image)
    if dilation > 0 and bbox is not None:
        # Dilation logic: expand bbox by `dilation` pixels on every side.
        x1, y1, x2, y2 = bbox
        bbox = (x1 - dilation, y1 - dilation, x2 + dilation, y2 + dilation)
    # ── draw grid ───────────────────────────────────────────
    if draw_grid and step > 0:
        w, h = image.size
        # vertical lines
        for x in range(0, w, step):
            draw.line([(x, 0), (x, h)], fill=grid_color, width=grid_width)
        # horizontal lines
        for y in range(0, h, step):
            draw.line([(0, y), (w, y)], fill=grid_color, width=grid_width)
    # ── draw bbox ──────────────────────────────────────────
    if bbox is not None:
        draw.rectangle(bbox, outline=box_color, width=width)
    # ── draw point ─────────────────────────────────────────
    if point is not None:
        radius = 20
        x, y = point
        draw.ellipse(
            (x - radius, y - radius, x + radius, y + radius),
            fill=pt_color
        )
        # add a white label at the center of the point marker
        if add_text:
            text = "Ref"
            # Fall back to Pillow's built-in bitmap font when DejaVuSans is
            # absent (the original let the IOError propagate because the
            # fallback was commented out).
            try:
                font = ImageFont.truetype("DejaVuSans.ttf", size=26)
            except IOError:
                font = ImageFont.load_default()
            # Get text bounding box for manual centering.
            bbox_text = draw.textbbox((0, 0), text, font=font)
            text_width = bbox_text[2] - bbox_text[0]
            text_height = bbox_text[3] - bbox_text[1]
            text_x = x - text_width // 2
            text_y = y - text_height // 2
            # Fixed: the original passed an invalid `text_anchor` keyword,
            # which raises TypeError in Pillow; the text is centred manually,
            # so no anchor argument is needed.
            draw.text((text_x, text_y), text, font=font, fill=(255, 255, 255))
    return image