File size: 34,414 Bytes
d6145b2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829

import torch
import torch.nn.functional as F
from torch import Tensor
import numpy as np

def resize_and_pad(image_tensor, output_size):
    """
    Resizes an image tensor to a square shape by scaling and padding.

    The longer side is scaled to exactly `output_size`; the shorter side is
    centered with constant (black) padding.

    Args:
        image_tensor (torch.Tensor): Input image tensor of shape (H, W).
        output_size (int): The desired square output size.

    Returns:
        torch.Tensor: The resized and padded image tensor of shape
        (output_size, output_size).
    """
    original_h, original_w = image_tensor.shape

    # 1. Scale so the longest side becomes output_size. Use round() instead of
    # int() truncation: floating-point error in the division could otherwise
    # leave the long side one pixel short (e.g. int(511.99999...) == 511).
    # Clamp so degenerate inputs can never produce a 0-sized or oversized dim.
    scale = output_size / max(original_h, original_w)
    new_h = min(output_size, max(1, round(original_h * scale)))
    new_w = min(output_size, max(1, round(original_w * scale)))

    # F.interpolate expects (N, C, H, W); add batch and channel dimensions.
    batched = image_tensor.unsqueeze(0).unsqueeze(0)

    # 2. Resize the image, preserving aspect ratio.
    resized_image = F.interpolate(batched, size=(new_h, new_w), mode='bilinear', align_corners=False)

    # 3. Split the leftover padding evenly between the two sides of each axis.
    pad_h = output_size - new_h
    pad_w = output_size - new_w
    # F.pad padding format is (left, right, top, bottom).
    padding = (pad_w // 2, pad_w - (pad_w // 2), pad_h // 2, pad_h - (pad_h // 2))

    # 4. Pad the image with a constant value (0 for black).
    padded_image = F.pad(resized_image, padding, "constant", 0)

    # Remove exactly the two dims we added; a bare .squeeze() would also
    # collapse genuine size-1 image dims (e.g. when output_size == 1).
    return padded_image.squeeze(0).squeeze(0)


def resize(img, target_res=224, resize=True, to_pil=True, edge=False):
    """Resize a PIL image so its longer side is `target_res`, then center it on a square canvas.

    Args:
        img: Input PIL image. NOTE(review): relies on `Image` (PIL) being
            imported further down in this module; resolved at call time,
            but confirm import order if this module is reorganized.
        target_res: Side length of the square output canvas.
        resize: If True, rescale so the longer side equals `target_res`
            (LANCZOS). If False, only the centering step is applied.
            NOTE: this parameter shadows the function name inside the body.
        to_pil: If True, return a PIL image; otherwise a numpy array.
        edge: If True, pad the shorter side by replicating edge pixels
            (`np.pad(..., mode='edge')`); otherwise paste onto a zero
            (black) canvas. The edge branch pads three axes, so it assumes
            a 3-channel (H, W, C) image — a grayscale input would fail
            there; TODO confirm intended inputs.

    Returns:
        The square image (PIL image or numpy array, depending on `to_pil`).
    """
    original_width, original_height = img.size
    original_channels = len(img.getbands())
    if not edge:
        # Zero (black) canvas; use a 2-D canvas for single-channel images.
        canvas = np.zeros([target_res, target_res, 3], dtype=np.uint8)
        if original_channels == 1:
            canvas = np.zeros([target_res, target_res], dtype=np.uint8)
        if original_height <= original_width:
            if resize:
                img = img.resize((target_res, int(np.around(target_res * original_height / original_width))), Image.Resampling.LANCZOS)
            width, height = img.size
            img = np.asarray(img)
            # Center vertically: rows [(W-H)//2, (W+H)//2) receive the image.
            canvas[(width - height) // 2: (width + height) // 2] = img
        else:
            if resize:
                img = img.resize((int(np.around(target_res * original_width / original_height)), target_res), Image.Resampling.LANCZOS)
            width, height = img.size
            img = np.asarray(img)
            # Center horizontally: columns [(H-W)//2, (H+W)//2) receive the image.
            canvas[:, (height - width) // 2: (height + width) // 2] = img
    else:
        if original_height <= original_width:
            if resize:
                img = img.resize((target_res, int(np.around(target_res * original_height / original_width))), Image.Resampling.LANCZOS)
            width, height = img.size
            img = np.asarray(img)
            # Replicate the top/bottom rows to fill the vertical padding.
            top_pad = (target_res - height) // 2
            bottom_pad = target_res - height - top_pad
            img = np.pad(img, pad_width=[(top_pad, bottom_pad), (0, 0), (0, 0)], mode='edge')
        else:
            if resize:
                img = img.resize((int(np.around(target_res * original_width / original_height)), target_res), Image.Resampling.LANCZOS)
            width, height = img.size
            img = np.asarray(img)
            # Replicate the left/right columns to fill the horizontal padding.
            left_pad = (target_res - width) // 2
            right_pad = target_res - width - left_pad
            img = np.pad(img, pad_width=[(0, 0), (left_pad, right_pad), (0, 0)], mode='edge')
        canvas = img
    if to_pil:
        canvas = Image.fromarray(canvas)
    return canvas

def scaled_shifted_sigmoid(
    x: Tensor,
    a: float = 1.0,   # vertical scale
    b: float = 1.0,   # slope (steepness)
    c: float = 0.0,   # horizontal shift (bias)
    d: float = 0.0,   # vertical shift (baseline)
) -> Tensor:
    """Elementwise scaled-and-shifted sigmoid: ``y = a * sigmoid(b * x + c) + d``.

    Args:
        x: Input tensor.
        a: Output scale (amplitude).
        b: Input scale; controls the slope/steepness.
        c: Input (horizontal) shift.
        d: Output (vertical) shift / baseline.

    Returns:
        A tensor of the same shape as ``x`` with the transform applied.
    """
    pre_activation = b * x + c
    activated = torch.sigmoid(pre_activation)
    return a * activated + d


############
# for 2D to 3D correspondence with cropping

from scipy.ndimage import distance_transform_edt as edt
from scipy.ndimage import gaussian_filter
# from skimage import img_as_ubyte
from PIL import Image
from pathlib import Path
import numpy as np
from typing import Tuple

# ✨ New helper to find the object's bounding box from transparency
def get_bbox_from_alpha(image_path: Path) -> Tuple[int, int, int, int]:
    """Calculates a bounding box (x_min, y_min, x_max, y_max) from the alpha channel of a PNG.

    Args:
        image_path: Path to an image with an alpha channel (converted to RGBA).

    Returns:
        (x_min, y_min, x_max, y_max) as plain Python ints, covering all
        pixels with alpha > 0.

    Raises:
        ValueError: if the image is fully transparent (no bounding box exists).
    """
    with Image.open(image_path).convert("RGBA") as img:
        alpha = np.array(img)[:, :, 3]
        non_transparent_pixels = np.argwhere(alpha > 0)
        if non_transparent_pixels.size == 0:
            # .min() on an empty array raises an opaque ValueError; fail with
            # an actionable message instead.
            raise ValueError(f"Image '{image_path}' is fully transparent; no bounding box found.")
        y_min, x_min = non_transparent_pixels.min(axis=0)
        y_max, x_max = non_transparent_pixels.max(axis=0)
        # Cast numpy integers to plain ints, as the annotation promises.
        return int(x_min), int(y_min), int(x_max), int(y_max)

# ... (rest of your imports and functions)



#####################
# dataset utils loading functions
#####################
import os
import json
import numpy as np
import pandas as pd
import torch
from glob import glob
# from scipy.io import loadmat as read_mat
import scipy.io as sio


def read_mat(path, obj_name):
    r"""Read the object named ``obj_name`` from a Matlab data file (.mat).

    Args:
        path: Path to the ``.mat`` file.
        obj_name: Key of the object to extract from the file contents.

    Returns:
        The loaded Matlab object.
    """
    return sio.loadmat(path)[obj_name]

def process_kps_pascal(kps):
    """Pad PASCAL keypoints to 20 rows and append a visibility flag.

    Rows containing NaN — whether present in the input or added as padding —
    are marked invisible (flag 0) and zeroed out entirely; fully-defined rows
    get flag 1.

    Args:
        kps: (N, 2) numpy array of (x, y) coordinates, N <= 20, possibly
            containing NaN entries.

    Returns:
        torch.Tensor: float tensor of shape (20, 3) — (x, y, visible).
    """
    # Pad with NaN rows up to the fixed 20-keypoint layout.
    missing_rows = 20 - kps.shape[0]
    if missing_rows > 0:
        kps = np.vstack((kps, np.full((missing_rows, 2), np.nan)))

    # Visibility flag: 1 where the row is fully defined, 0 where any NaN.
    invalid = np.isnan(kps).any(axis=1)
    kps = np.column_stack((kps, np.where(invalid, 0, 1)))

    # Zero the invalid rows entirely (coordinates and flag alike).
    kps[invalid] = 0

    return torch.tensor(kps).float()

def preprocess_kps_pad(kps, img_width, img_height, size):
    """Remap keypoints into the coordinate frame of a resized, center-padded image.

    Mirrors the image preprocessing (scale the longer side to `size`, then
    center-pad the shorter side): coordinates are scaled by the same factor
    and shifted by the padding offset. Non-visible keypoints (flag 0 in
    column 2) are zeroed out afterwards.

    Args:
        kps: (N, 3) tensor of (x, y, visible) keypoints.
        img_width: Original image width.
        img_height: Original image height.
        size: Side length of the square preprocessed image.

    Returns:
        Tuple of (remapped kps, offset_x, offset_y, scale).
    """
    kps = kps.clone()
    scale = size / max(img_width, img_height)
    kps[:, [0, 1]] *= scale

    offset_x = offset_y = 0
    if img_height < img_width:
        # Landscape: vertical padding centres the image, so shift y.
        padded_h = int(np.around(size * img_height / img_width))
        offset_y = int((size - padded_h) / 2)
        kps[:, 1] += offset_y
    elif img_width < img_height:
        # Portrait: horizontal padding centres the image, so shift x.
        padded_w = int(np.around(size * img_width / img_height))
        offset_x = int((size - padded_w) / 2)
        kps[:, 0] += offset_x

    # Zero-out any non-visible key points (visibility flag in column 2).
    kps *= kps[:, 2:3].clone()
    return kps, offset_x, offset_y, scale


def load_pascal_data(path="data/PF-dataset-PASCAL", size=256, category='cat', split='test', subsample=None):
    """Load PF-PASCAL image-pair paths and preprocessed keypoints for one category.

    Reads `{split}_pairs_pf_pascal.csv`, filters rows to `category`, and for
    each pair loads image sizes and keypoints (from the CSV for non-train
    splits, from per-image `.mat` annotations for train splits), remapping
    keypoints via `preprocess_kps_pad`.

    NOTE(review): `subsample` is accepted but never used in this function.
    NOTE(review): this function is re-defined later in this module; at import
    time the later definition shadows this one — confirm which is intended.

    Returns:
        (files, kps, None, used_kps) where `files` alternates source/target
        paths, `kps` is (2*num_pairs, K, 3) filtered to keypoints visible in
        at least one image, and `used_kps` holds the retained indices.
    """
    
    def get_points(point_coords_list, idx):
        # Parse ';'-separated X/Y coordinate strings from the CSV row,
        # pad to the fixed 20-keypoint layout with -1 sentinels, and build
        # a visibility mask for the real entries.
        # NOTE(review): `Ypad[: len(X)] = Y` assumes len(Y) == len(X) —
        # confirm the CSV always provides matched coordinate lists.
        X = np.fromstring(point_coords_list.iloc[idx, 0], sep=";")
        Y = np.fromstring(point_coords_list.iloc[idx, 1], sep=";")
        Xpad = -np.ones(20)
        Xpad[: len(X)] = X
        Ypad = -np.ones(20)
        Ypad[: len(X)] = Y
        Zmask = np.zeros(20)
        Zmask[: len(X)] = 1
        point_coords = np.concatenate(
            (Xpad.reshape(1, 20), Ypad.reshape(1, 20), Zmask.reshape(1,20)), axis=0
        )
        # make arrays float tensor for subsequent processing
        point_coords = torch.Tensor(point_coords.astype(np.float32))
        return point_coords
    
    np.random.seed(42)
    files = []
    kps = []
    test_data = pd.read_csv(f'{path}/{split}_pairs_pf_pascal.csv')
    # Fixed PASCAL VOC category order; the CSV stores 1-based class ids.
    cls = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
                    'bus', 'car', 'cat', 'chair', 'cow',
                    'diningtable', 'dog', 'horse', 'motorbike', 'person',
                    'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
    cls_ids = test_data.iloc[:,2].values.astype("int") - 1
    cat_id = cls.index(category)
    subset_id = np.where(cls_ids == cat_id)[0]
    # logger.info(f'Number of Pairs for {category} = {len(subset_id)}')
    subset_pairs = test_data.iloc[subset_id,:]
    src_img_names = np.array(subset_pairs.iloc[:,0])
    trg_img_names = np.array(subset_pairs.iloc[:,1])
    # print(src_img_names.shape, trg_img_names.shape)
    if not split.startswith('train'):
        # Non-train splits carry keypoint coordinates directly in the CSV.
        point_A_coords = subset_pairs.iloc[:,3:5]
        point_B_coords = subset_pairs.iloc[:,5:]
    # print(point_A_coords.shape, point_B_coords.shape)
    for i in range(len(src_img_names)):
        # Image paths in the CSV are relative to the dataset's parent dir.
        src_fn= f'{path}/../{src_img_names[i]}'
        trg_fn= f'{path}/../{trg_img_names[i]}'
        src_size=Image.open(src_fn).size
        trg_size=Image.open(trg_fn).size

        if not split.startswith('train'):
            point_coords_src = get_points(point_A_coords, i).transpose(1,0)
            point_coords_trg = get_points(point_B_coords, i).transpose(1,0)
        else:
            # Train split: keypoints come from per-image .mat annotations.
            src_anns = os.path.join(path, 'Annotations', category,
                                    os.path.basename(src_fn))[:-4] + '.mat'
            trg_anns = os.path.join(path, 'Annotations', category,
                                    os.path.basename(trg_fn))[:-4] + '.mat'
            point_coords_src = process_kps_pascal(read_mat(src_anns, 'kps'))
            point_coords_trg = process_kps_pascal(read_mat(trg_anns, 'kps'))

        # print(src_size)
        source_kps, src_x, src_y, src_scale = preprocess_kps_pad(point_coords_src, src_size[0], src_size[1], size)
        target_kps, trg_x, trg_y, trg_scale = preprocess_kps_pad(point_coords_trg, trg_size[0], trg_size[1], size)
        kps.append(source_kps)
        kps.append(target_kps)
        files.append(src_fn)
        files.append(trg_fn)
    
    kps = torch.stack(kps)
    # Keep only keypoint indices visible in at least one image of the subset.
    used_kps, = torch.where(kps[:, :, 2].any(dim=0))
    kps = kps[:, used_kps, :]
    # logger.info(f'Final number of used key points: {kps.size(1)}')
    return files, kps, None, used_kps


def load_spair_data(path="data/SPair-71k", size=256, category='cat', split='test', subsample=None):
    """Load image pairs, keypoints and PCK thresholds for one SPair-71k category.

    Args:
        path: Base path of the SPair-71k dataset.
        size: Side length of the square preprocessed images; keypoints are
            remapped into this frame via `preprocess_kps_pad`.
        category: Object category (e.g. 'cat').
        split: 'test', 'val' or 'trn'; controls which bbox thresholds are kept.
        subsample: If a positive int, randomly subsample that many pairs
            (seeded for reproducibility).

    Returns:
        (files, kps, thresholds, used_kps) where `files` alternates
        source/target paths, `kps` is (2*num_pairs, K, 3) filtered to
        keypoints visible in at least one image, `thresholds` holds the
        max-bbox-side PCK thresholds, and `used_kps` the retained indices.
    """
    np.random.seed(42)
    pairs = sorted(glob(f'{path}/PairAnnotation/{split}/*:{category}.json'))
    if subsample is not None and subsample > 0:
        pairs = [pairs[ix] for ix in np.random.choice(len(pairs), subsample)]
    files = []
    thresholds = []
    kps = []
    # Keypoint count is category-dependent; read it from any image annotation.
    category_anno = list(glob(f'{path}/ImageAnnotation/{category}/*.json'))[0]
    with open(category_anno) as f:
        num_kps = len(json.load(f)['kps'])
    for pair in pairs:
        source_kps = torch.zeros(num_kps, 3)
        target_kps = torch.zeros(num_kps, 3)
        with open(pair) as f:
            data = json.load(f)
        assert category == data["category"]
        source_fn = f'{path}/JPEGImages/{category}/{data["src_imname"]}'
        target_fn = f'{path}/JPEGImages/{category}/{data["trg_imname"]}'
        # Use '.jpg' (with the dot) so a stray 'jpg' substring elsewhere in
        # the path cannot be rewritten by accident.
        source_json_name = source_fn.replace('JPEGImages', 'ImageAnnotation').replace('.jpg', '.json')
        target_json_name = target_fn.replace('JPEGImages', 'ImageAnnotation').replace('.jpg', '.json')
        source_bbox = np.asarray(data["src_bndbox"])    # (x1, y1, x2, y2)
        target_bbox = np.asarray(data["trg_bndbox"])
        with open(source_json_name) as f:
            file = json.load(f)
            kpts_src = file['kps']
        with open(target_json_name) as f:
            file = json.load(f)
            kpts_trg = file['kps']

        source_size = data["src_imsize"][:2]  # (W, H)
        target_size = data["trg_imsize"][:2]  # (W, H)

        # BUG FIX: was range(30), but the tensors and annotation dicts hold
        # num_kps entries; categories with fewer than 30 keypoints raised
        # KeyError / IndexError here.
        for i in range(num_kps):
            point = kpts_src[str(i)]
            if point is None:
                source_kps[i, :3] = 0
            else:
                source_kps[i, :2] = torch.Tensor(point).float()  # set x and y
                source_kps[i, 2] = 1
        source_kps, src_x, src_y, src_scale = preprocess_kps_pad(source_kps, source_size[0], source_size[1], size)

        for i in range(num_kps):
            point = kpts_trg[str(i)]
            if point is None:
                target_kps[i, :3] = 0
            else:
                target_kps[i, :2] = torch.Tensor(point).float()
                target_kps[i, 2] = 1
        target_kps, trg_x, trg_y, trg_scale = preprocess_kps_pad(target_kps, target_size[0], target_size[1], size)
        # PCK threshold = longest side of the (scaled) object bounding box.
        if split == 'test' or split == 'val':
            thresholds.append(max(target_bbox[3] - target_bbox[1], target_bbox[2] - target_bbox[0])*trg_scale)
        elif split == 'trn':
            thresholds.append(max(source_bbox[3] - source_bbox[1], source_bbox[2] - source_bbox[0])*src_scale)
            thresholds.append(max(target_bbox[3] - target_bbox[1], target_bbox[2] - target_bbox[0])*trg_scale)

        kps.append(source_kps)
        kps.append(target_kps)
        files.append(source_fn)
        files.append(target_fn)
    kps = torch.stack(kps)
    # Keep only keypoint indices visible in at least one image of the subset.
    used_kps, = torch.where(kps[:, :, 2].any(dim=0))
    kps = kps[:, used_kps, :]

    return files, kps, thresholds, used_kps


def load_specific_pascal_pair(
    source_image_id: str,
    target_image_id: str,
    path: str = "data/PF-dataset-PASCAL",
    size: int = 256,
    split: str = 'test'
):
    """
    Loads and processes a specific pair of source and target images from the PASCAL dataset.

    Args:
        source_image_id: The identifier of the source image (e.g., '2011_001407').
        target_image_id: The identifier of the target image (e.g., '2010_004184').
        path: The base path to the PF-PASCAL dataset directory.
        size: The target size for preprocessing images.
        split: The dataset split to use ('test', 'train', etc.).

    Returns:
        A tuple containing:
        - files (list): A list with the full paths to the source and target images.
        - kps (torch.Tensor): A tensor of processed keypoints for the image pair.
        - None: A placeholder to match the original function's return format.
        - used_kps_indices (torch.Tensor): A tensor of indices for keypoints present in either image.

        On any lookup failure (missing CSV, pair, or image), returns
        (None, None, None, None) after printing an error message.
    """
    
    def get_points_from_strings(x_str: str, y_str: str) -> torch.Tensor:
        """Parses coordinate strings, pads them, and returns a (3, 20) float tensor."""
        X = np.fromstring(x_str, sep=";")
        Y = np.fromstring(y_str, sep=";")
        
        # Pad arrays to a fixed size of 20 (as in the original function);
        # -1 is the sentinel for "no keypoint at this slot".
        Xpad = -np.ones(20)
        Xpad[:len(X)] = X
        Ypad = -np.ones(20)
        Ypad[:len(Y)] = Y
        
        # Create a mask for valid keypoints
        Zmask = np.zeros(20)
        Zmask[:len(X)] = 1
        
        point_coords = np.stack((Xpad, Ypad, Zmask), axis=0)
        return torch.from_numpy(point_coords.astype(np.float32))

    # Construct the path to the CSV file and load it
    csv_path = os.path.join(path, f'{split}_pairs_pf_pascal.csv')
    try:
        pairs_df = pd.read_csv(csv_path)
    except FileNotFoundError:
        print(f"Error: CSV file not found at '{csv_path}'")
        return None, None, None, None

    # Find the specific row matching the source and target image IDs.
    # NOTE(review): assumes the CSV has 'source_image'/'target_image' columns;
    # substring matching may hit multiple rows — the first match is used.
    pair_row = pairs_df[
        pairs_df['source_image'].str.contains(source_image_id) &
        pairs_df['target_image'].str.contains(target_image_id)
    ]

    if pair_row.empty:
        print(f"Error: Pair for source '{source_image_id}' and target '{target_image_id}' not found.")
        return None, None, None, None
    
    # Select the first match
    pair_data = pair_row.iloc[0]

    # Get full image paths and dimensions (paths in the CSV are relative to
    # the dataset's parent directory)
    src_fn = os.path.join(path, '..', pair_data['source_image'])
    trg_fn = os.path.join(path, '..', pair_data['target_image'])
    
    try:
        src_size = Image.open(src_fn).size
        trg_size = Image.open(trg_fn).size
    except FileNotFoundError as e:
        print(f"Error: Image file not found: {e.filename}")
        return None, None, None, None

    # Process keypoints based on the split type
    if not split.startswith('train'):
        # Non-train splits carry the keypoint coordinates in the CSV itself.
        point_coords_src = get_points_from_strings(pair_data['XA'], pair_data['YA']).T
        point_coords_trg = get_points_from_strings(pair_data['XB'], pair_data['YB']).T
    else:
        # This logic for the 'train' split is preserved from the original function.
        # NOTE(review): assumes a 'class' column holding 1-based PASCAL ids —
        # confirm against the CSV schema.
        cls_list = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 
                    'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 
                    'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
        category = cls_list[pair_data['class'] - 1]
        
        src_anns_path = os.path.join(path, 'Annotations', category, os.path.basename(src_fn).replace('.jpg', '.mat'))
        trg_anns_path = os.path.join(path, 'Annotations', category, os.path.basename(trg_fn).replace('.jpg', '.mat'))
        
        point_coords_src = process_kps_pascal(read_mat(src_anns_path, 'kps'))
        point_coords_trg = process_kps_pascal(read_mat(trg_anns_path, 'kps'))

    # Preprocess keypoints (e.g., padding and scaling)
    source_kps, _, _, _ = preprocess_kps_pad(point_coords_src, src_size[0], src_size[1], size)
    target_kps, _, _, _ = preprocess_kps_pad(point_coords_trg, trg_size[0], trg_size[1], size)

    # Stack keypoints and find the indices of keypoints present in at least one image
    kps = torch.stack([source_kps, target_kps])
    used_kps_indices, = torch.where(kps[:, :, 2].any(dim=0))
    
    # Filter the keypoints tensor to include only the used keypoints
    kps_final = kps[:, used_kps_indices, :]

    return [src_fn, trg_fn], kps_final, None, used_kps_indices

import matplotlib.pyplot as plt
def load_img_and_kps(idx, files, kps, img_size=224, edge=False, load_masked=False):
    """Load image `idx` (optionally its background-removed variant) plus its keypoints.

    Args:
        idx: Index into `files` / `kps`.
        files: List of image file paths.
        kps: Keypoint tensor indexed by `idx`.
        img_size: Square size the image is resized/padded to (see `resize`).
        edge: If True, use edge padding instead of zero padding.
        load_masked: If True, load the '*_bgd_rmv.png' RGBA cut-out (from the
            parallel 'JPEGImages_bgd_rmv' directory) and composite it onto a
            black background.

    Returns:
        (img, img_kps): the preprocessed PIL image and its keypoints.
    """
    if load_masked:
        img_rgba = Image.open(files[idx].replace('JPEGImages', 'JPEGImages_bgd_rmv').replace('.jpg', '_bgd_rmv.png')).convert('RGBA')
        # Composite the RGBA cut-out onto a solid black background, using the
        # alpha channel as the paste mask.
        img = Image.new("RGB", img_rgba.size, (0, 0, 0))
        img.paste(img_rgba, mask=img_rgba.split()[3])
        # Removed leftover debug plt.imsave(...) calls that wrote
        # 'img2_masked_*.png' files into the working directory on every call.
    else:
        img = Image.open(files[idx]).convert('RGB')
    img = resize(img, img_size, resize=True, to_pil=True, edge=edge)
    img_kps = kps[idx]

    return img, img_kps


import os
import json
from glob import glob
import numpy as np
import torch

# NOTE: The helper function preprocess_kps_pad(kps, width, height, size) 
# is assumed to be defined elsewhere, as in your original code.

def load_specific_spair_pair(
    source_image_name: str,
    target_image_name: str,
    category: str,
    path: str = "data/SPair-71k",
    size: int = 256,
    split: str = 'test',
    unfiltered: bool = False
    
):
    """
    Loads and processes a specific pair of images from the SPair-71k dataset.

    Args:
        source_image_name (str): Filename of the source image (e.g., '2008_002719.jpg').
        target_image_name (str): Filename of the target image (e.g., '2008_004100.jpg').
        category (str): The object category (e.g., 'aeroplane').
        path (str): The base path to the SPair-71k dataset directory.
        size (int): The target size for preprocessing images.
        split (str): The dataset split to use ('test', 'trn', 'val').
        unfiltered (bool): If True, return keypoints for all indices instead
            of only those visible in at least one of the two images.

    Returns:
        A tuple containing:
        - files (list): Full paths to the source and target images.
        - kps (torch.Tensor): Processed keypoints for the pair.
        - thresholds (list): Bounding-box based thresholds for the pair.
        - used_kps_indices (torch.Tensor): Indices of keypoints present in either image.

        On lookup failure (pair or annotations not found), returns
        (None, None, None, None) after printing an error message.
    """
    
    # Helper to create a keypoint tensor from the annotation dictionary
    def _get_kps_tensor(kps_dict, num_kps):
        kps_tensor = torch.zeros(num_kps, 3)
        for i in range(num_kps):
            point = kps_dict.get(str(i)) # Use .get() for safety
            if point is not None:
                kps_tensor[i, :2] = torch.tensor(point, dtype=torch.float)
                kps_tensor[i, 2] = 1.0 # Mark as visible
        return kps_tensor

    # --- 1. Find the correct pair annotation file ---
    # Linear scan over every pair annotation of the category until the
    # requested (src, trg) filenames match.
    pair_annotation_path = os.path.join(path, 'PairAnnotation', split)
    candidate_files = glob(os.path.join(pair_annotation_path, f'*:{category}.json'))
    
    pair_data = None
    for file_path in candidate_files:
        with open(file_path) as f:
            data = json.load(f)
            if data['src_imname'] == source_image_name and data['trg_imname'] == target_image_name:
                pair_data = data
                break
    
    if pair_data is None:
        print(f"Error: Pair for '{source_image_name}' and '{target_image_name}' not found.")
        return None, None, None, None

    # --- 2. Process the found pair ---
    source_fn = os.path.join(path, 'JPEGImages', category, pair_data['src_imname'])
    target_fn = os.path.join(path, 'JPEGImages', category, pair_data['trg_imname'])
    files = [source_fn, target_fn]

    # Get total number of keypoints for the category (category-dependent;
    # read from any image annotation of that category)
    try:
        category_anno_path = glob(os.path.join(path, 'ImageAnnotation', category, '*.json'))[0]
        with open(category_anno_path) as f:
            num_kps = len(json.load(f)['kps'])
    except IndexError:
        print(f"Error: No image annotations found for category '{category}'.")
        return None, None, None, None

    # Get keypoints from individual image annotation files
    source_json_path = source_fn.replace('JPEGImages', 'ImageAnnotation').replace('.jpg', '.json')
    target_json_path = target_fn.replace('JPEGImages', 'ImageAnnotation').replace('.jpg', '.json')

    with open(source_json_path) as f:
        kpts_src_dict = json.load(f)['kps']
    with open(target_json_path) as f:
        kpts_trg_dict = json.load(f)['kps']
        
    source_kps_raw = _get_kps_tensor(kpts_src_dict, num_kps)
    target_kps_raw = _get_kps_tensor(kpts_trg_dict, num_kps)

    # print(f"Source keypoints raw: {source_kps_raw.shape}, Target keypoints raw: {target_kps_raw.shape}")

    # Preprocess keypoints (padding, scaling, etc.)
    w_src, h_src = pair_data["src_imsize"][:2]
    w_trg, h_trg = pair_data["trg_imsize"][:2]
    
    source_kps, src_x, src_y, src_scale = preprocess_kps_pad(source_kps_raw, w_src, h_src, size)
    target_kps, trg_x, trg_y, trg_scale = preprocess_kps_pad(target_kps_raw, w_trg, h_trg, size)
    
    # Calculate thresholds from bounding boxes (longest side of the scaled
    # object bbox, the usual PCK normalizer)
    source_bbox = np.asarray(pair_data["src_bndbox"])
    target_bbox = np.asarray(pair_data["trg_bndbox"])
    thresholds = []
    if split == 'test' or split == 'val':
        thresholds.append(max(target_bbox[3] - target_bbox[1], target_bbox[2] - target_bbox[0]) * trg_scale)
    elif split == 'trn':
        thresholds.append(max(source_bbox[3] - source_bbox[1], source_bbox[2] - source_bbox[0]) * src_scale)
        thresholds.append(max(target_bbox[3] - target_bbox[1], target_bbox[2] - target_bbox[0]) * trg_scale)

    # --- 3. Format output ---
    kps = torch.stack([source_kps, target_kps])
    used_kps_indices, = torch.where(kps[:, :, 2].any(dim=0))
    kps_final = kps[:, used_kps_indices, :]

    if unfiltered:
        return files, kps, thresholds, used_kps_indices
    else:
        return files, kps_final, thresholds, used_kps_indices




######################################
# original loading function
######################################

def load_spair_data(path="data/SPair-71k", size=256, category='cat', split='test', subsample=None):
    """Load image pairs, keypoints and PCK thresholds for one SPair-71k category.

    NOTE(review): this is a duplicate of the `load_spair_data` defined earlier
    in this module; this later definition is the one in effect at import time.
    Consider removing one copy.

    Args:
        path: Base path of the SPair-71k dataset.
        size: Side length of the square preprocessed images.
        category: Object category (e.g. 'cat').
        split: 'test', 'val' or 'trn'; controls which bbox thresholds are kept.
        subsample: If a positive int, randomly subsample that many pairs
            (seeded for reproducibility).

    Returns:
        (files, kps, thresholds, used_kps) where `files` alternates
        source/target paths, `kps` is (2*num_pairs, K, 3) filtered to
        keypoints visible in at least one image, `thresholds` holds the
        max-bbox-side PCK thresholds, and `used_kps` the retained indices.
    """
    np.random.seed(42)
    pairs = sorted(glob(f'{path}/PairAnnotation/{split}/*:{category}.json'))
    if subsample is not None and subsample > 0:
        pairs = [pairs[ix] for ix in np.random.choice(len(pairs), subsample)]
    files = []
    thresholds = []
    kps = []
    # Keypoint count is category-dependent; read it from any image annotation.
    category_anno = list(glob(f'{path}/ImageAnnotation/{category}/*.json'))[0]
    with open(category_anno) as f:
        num_kps = len(json.load(f)['kps'])
    for pair in pairs:
        source_kps = torch.zeros(num_kps, 3)
        target_kps = torch.zeros(num_kps, 3)
        with open(pair) as f:
            data = json.load(f)
        assert category == data["category"]
        source_fn = f'{path}/JPEGImages/{category}/{data["src_imname"]}'
        target_fn = f'{path}/JPEGImages/{category}/{data["trg_imname"]}'
        # Use '.jpg' (with the dot) so a stray 'jpg' substring elsewhere in
        # the path cannot be rewritten by accident.
        source_json_name = source_fn.replace('JPEGImages', 'ImageAnnotation').replace('.jpg', '.json')
        target_json_name = target_fn.replace('JPEGImages', 'ImageAnnotation').replace('.jpg', '.json')
        source_bbox = np.asarray(data["src_bndbox"])    # (x1, y1, x2, y2)
        target_bbox = np.asarray(data["trg_bndbox"])
        with open(source_json_name) as f:
            file = json.load(f)
            kpts_src = file['kps']
        with open(target_json_name) as f:
            file = json.load(f)
            kpts_trg = file['kps']

        source_size = data["src_imsize"][:2]  # (W, H)
        target_size = data["trg_imsize"][:2]  # (W, H)

        # BUG FIX: was range(30), but the tensors and annotation dicts hold
        # num_kps entries; categories with fewer than 30 keypoints raised
        # KeyError / IndexError here.
        for i in range(num_kps):
            point = kpts_src[str(i)]
            if point is None:
                source_kps[i, :3] = 0
            else:
                source_kps[i, :2] = torch.Tensor(point).float()  # set x and y
                source_kps[i, 2] = 1
        source_kps, src_x, src_y, src_scale = preprocess_kps_pad(source_kps, source_size[0], source_size[1], size)

        for i in range(num_kps):
            point = kpts_trg[str(i)]
            if point is None:
                target_kps[i, :3] = 0
            else:
                target_kps[i, :2] = torch.Tensor(point).float()
                target_kps[i, 2] = 1
        target_kps, trg_x, trg_y, trg_scale = preprocess_kps_pad(target_kps, target_size[0], target_size[1], size)
        # PCK threshold = longest side of the (scaled) object bounding box.
        if split == 'test' or split == 'val':
            thresholds.append(max(target_bbox[3] - target_bbox[1], target_bbox[2] - target_bbox[0])*trg_scale)
        elif split == 'trn':
            thresholds.append(max(source_bbox[3] - source_bbox[1], source_bbox[2] - source_bbox[0])*src_scale)
            thresholds.append(max(target_bbox[3] - target_bbox[1], target_bbox[2] - target_bbox[0])*trg_scale)

        kps.append(source_kps)
        kps.append(target_kps)
        files.append(source_fn)
        files.append(target_fn)
    kps = torch.stack(kps)
    # Keep only keypoint indices visible in at least one image of the subset.
    used_kps, = torch.where(kps[:, :, 2].any(dim=0))
    kps = kps[:, used_kps, :]

    return files, kps, thresholds, used_kps


def load_pascal_data(path="data/PF-dataset-PASCAL", size=256, category='cat', split='test', subsample=None):
    """Load image-pair paths and padded keypoints for one PF-PASCAL category.

    Args:
        path: root of the PF-PASCAL dataset (the `{split}_pairs_pf_pascal.csv`
            files live directly under it).
        size: target square size passed to `preprocess_kps_pad`.
        category: one of the 20 PASCAL VOC class names listed below.
        split: split prefix of the pairs CSV; splits starting with 'train'
            read keypoints from per-image .mat annotations instead of the CSV.
        subsample: if a positive int, randomly keep only that many pairs
            (reproducible via the fixed seed below).

    Returns:
        (files, kps, None, used_kps) where `files` alternates source/target
        image paths, `kps` is a (2 * n_pairs, n_used, 3) tensor of
        (x, y, visible) keypoints, the third slot is None because PF-PASCAL
        has no per-pair thresholds, and `used_kps` holds the indices of
        keypoint slots visible in at least one image.
    """

    def get_points(point_coords_list, idx):
        """Parse ';'-separated x/y keypoint columns of row `idx` into a (3, 20) float tensor."""
        X = np.fromstring(point_coords_list.iloc[idx, 0], sep=";")
        Y = np.fromstring(point_coords_list.iloc[idx, 1], sep=";")
        # Pad to a fixed 20 slots: -1 marks unused coordinates, Zmask marks valid ones.
        Xpad = -np.ones(20)
        Xpad[: len(X)] = X
        Ypad = -np.ones(20)
        Ypad[: len(Y)] = Y  # was len(X): pad Y with its own length to survive ragged rows
        Zmask = np.zeros(20)
        Zmask[: len(X)] = 1
        point_coords = np.concatenate(
            (Xpad.reshape(1, 20), Ypad.reshape(1, 20), Zmask.reshape(1, 20)), axis=0
        )
        # make arrays float tensor for subsequent processing
        return torch.Tensor(point_coords.astype(np.float32))

    np.random.seed(42)  # fixed seed so subsampling is reproducible across runs
    files = []
    kps = []
    test_data = pd.read_csv(f'{path}/{split}_pairs_pf_pascal.csv')
    cls = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
                    'bus', 'car', 'cat', 'chair', 'cow',
                    'diningtable', 'dog', 'horse', 'motorbike', 'person',
                    'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
    # CSV stores 1-based class ids in column 2
    cls_ids = test_data.iloc[:, 2].values.astype("int") - 1
    cat_id = cls.index(category)
    subset_id = np.where(cls_ids == cat_id)[0]
    if subsample is not None and subsample > 0:
        # Honor the subsample argument (it was previously accepted but ignored,
        # even though load_eval_data passes args.TEST_SAMPLE here).
        subset_id = np.random.choice(subset_id, min(int(subsample), len(subset_id)), replace=False)
    subset_pairs = test_data.iloc[subset_id, :]
    src_img_names = np.array(subset_pairs.iloc[:, 0])
    trg_img_names = np.array(subset_pairs.iloc[:, 1])
    if not split.startswith('train'):
        # Non-training splits carry keypoints directly in the pairs CSV.
        point_A_coords = subset_pairs.iloc[:, 3:5]
        point_B_coords = subset_pairs.iloc[:, 5:]
    for i in range(len(src_img_names)):
        src_fn = f'{path}/../{src_img_names[i]}'
        trg_fn = f'{path}/../{trg_img_names[i]}'
        src_size = Image.open(src_fn).size  # (W, H)
        trg_size = Image.open(trg_fn).size  # (W, H)

        if not split.startswith('train'):
            # transpose (3, 20) -> (20, 3) rows of (x, y, mask)
            point_coords_src = get_points(point_A_coords, i).transpose(1, 0)
            point_coords_trg = get_points(point_B_coords, i).transpose(1, 0)
        else:
            # Training split: keypoints live in per-image PASCAL .mat annotations.
            src_anns = os.path.join(path, 'Annotations', category,
                                    os.path.basename(src_fn))[:-4] + '.mat'
            trg_anns = os.path.join(path, 'Annotations', category,
                                    os.path.basename(trg_fn))[:-4] + '.mat'
            point_coords_src = process_kps_pascal(read_mat(src_anns, 'kps'))
            point_coords_trg = process_kps_pascal(read_mat(trg_anns, 'kps'))

        source_kps, src_x, src_y, src_scale = preprocess_kps_pad(point_coords_src, src_size[0], src_size[1], size)
        target_kps, trg_x, trg_y, trg_scale = preprocess_kps_pad(point_coords_trg, trg_size[0], trg_size[1], size)
        kps.append(source_kps)
        kps.append(target_kps)
        files.append(src_fn)
        files.append(trg_fn)

    kps = torch.stack(kps)
    # Keep only keypoint slots that are visible in at least one image of the set.
    used_kps, = torch.where(kps[:, :, 2].any(dim=0))
    kps = kps[:, used_kps, :]
    # PF-PASCAL PCK is image-size-relative, so no per-pair thresholds are returned.
    return files, kps, None, used_kps


def load_eval_data(args, path, category, split):
    """Dispatch to the dataset-specific keypoint loader selected by `args.EVAL_DATASET`.

    Args:
        args: config namespace; reads EVAL_DATASET, ANNO_SIZE and TEST_SAMPLE.
        path: dataset root directory forwarded to the loader.
        category: object category to load.
        split: dataset split name.

    Returns:
        (files, kps, thresholds, used_kps) as produced by the underlying loader
        (thresholds is None for PF-PASCAL).

    Raises:
        ValueError: if `args.EVAL_DATASET` is neither 'pascal' nor 'spair'.
    """
    print(f"Loading evaluation data for dataset: {args.EVAL_DATASET}, category: {category}, split: {split}, test sample: {args.TEST_SAMPLE}")
    if args.EVAL_DATASET == 'pascal':
        files, kps, thresholds, used_kps = load_pascal_data(path, args.ANNO_SIZE, category, split, args.TEST_SAMPLE)
    elif args.EVAL_DATASET == 'spair':
        files, kps, thresholds, used_kps = load_spair_data(path, args.ANNO_SIZE, category, split, args.TEST_SAMPLE)
    else:
        # Previously an unknown dataset fell through to an opaque
        # UnboundLocalError on the return line; fail loudly instead.
        raise ValueError(f"Unsupported EVAL_DATASET: {args.EVAL_DATASET!r} (expected 'pascal' or 'spair')")

    return files, kps, thresholds, used_kps


###### plot helper
from PIL import Image, ImageDraw, ImageFont

def draw_bbox_point_grid(
    image,
    bbox=None,
    point=None,
    box_color=(0, 255, 0),
    pt_color=(255, 0, 0),
    width=5,
    draw_grid=False,
    step=50,                # pixels between grid lines
    grid_color=(255, 255, 255),
    grid_width=1,
    add_text=True,
    dilation=28
):
    """Draw bbox, point, and optional grid on a PIL image.

    Args
    ----
    image (PIL.Image): target image (modified in place).
    bbox  (list | tuple): [x1, y1, x2, y2] or None.
    point (tuple): (x, y) or None.
    box_color (tuple): RGB for the bbox outline.
    pt_color (tuple): RGB for the point marker.
    width (int): line width for bbox.
    draw_grid (bool): enable/disable grid.
    step (int): grid spacing in pixels.
    grid_color (tuple): RGB for grid.
    grid_width (int): line width for grid.
    add_text (bool): draw a white "Ref" label centered on the point.
    dilation (int): pixels to expand the bbox on every side before drawing.

    Returns
    -------
    PIL.Image: the same image object, annotated in place.
    """
    draw = ImageDraw.Draw(image)

    if dilation > 0 and bbox is not None:
        # Expand the bbox by `dilation` pixels on each side.
        x1, y1, x2, y2 = bbox
        bbox = (x1 - dilation, y1 - dilation, x2 + dilation, y2 + dilation)

    # ── draw grid ───────────────────────────────────────────
    if draw_grid and step > 0:
        w, h = image.size
        # vertical lines
        for x in range(0, w, step):
            draw.line([(x, 0), (x, h)], fill=grid_color, width=grid_width)
        # horizontal lines
        for y in range(0, h, step):
            draw.line([(0, y), (w, y)], fill=grid_color, width=grid_width)

    # ── draw bbox ──────────────────────────────────────────
    if bbox is not None:
        draw.rectangle(bbox, outline=box_color, width=width)

    # ── draw point ─────────────────────────────────────────
    if point is not None:
        radius = 20
        x, y = point
        draw.ellipse(
            (x - radius, y - radius, x + radius, y + radius),
            fill=pt_color
        )
        # add a white text at the center of the point
        if add_text:
            text = "Ref"
            # Prefer a real TTF; fall back to PIL's built-in bitmap font
            # instead of crashing when DejaVuSans.ttf is not installed.
            try:
                font = ImageFont.truetype("DejaVuSans.ttf", size=26)
            except OSError:
                font = ImageFont.load_default()

            # Center the label manually via its rendered bounding box.
            # (The previous `text_anchor="mm"` kwarg was bogus — PIL's
            # parameter is `anchor`, and the typo was silently swallowed.)
            bbox_text = draw.textbbox((0, 0), text, font=font)
            text_width = bbox_text[2] - bbox_text[0]
            text_height = bbox_text[3] - bbox_text[1]

            draw.text((x - text_width // 2, y - text_height // 2),
                      text, font=font, fill=(255, 255, 255))

    return image