import os
import numpy as np
from timeit import default_timer as timer
from tqdm.auto import tqdm 
import torch
import metrics
from torch import nn
import utilities
import csv
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold
## -----------------------------------------------------------------------------------------------------------------##
##                                        TRAINING with GRADIENT ACCUMULATION                                       ##
## -----------------------------------------------------------------------------------------------------------------##

def train_step(model: torch.nn.Module,
               device: torch.device,
               dataloader: torch.utils.data.DataLoader,
               loss_fn: torch.nn.Module,
               optimizer: torch.optim.Optimizer,
               useHeatmaps: bool = False,
               gradient_accumulation_steps: int = 1):
    # Put model in train mode
    model = model.to(device)
    model.train()

    # Setup train loss value
    train_loss = 0.0

    # Loop through data loader data batches
    for batch, data in enumerate(dataloader):

        img_name = data['name']
        images_tensor = data['image']
        landmarks_tensor = data['landmarks']
        heatmaps_tensor = data['heatmaps']

        # Send data to target device
        X = images_tensor.to(device)

        if useHeatmaps:
            y = heatmaps_tensor.to(device)
        else:
            y = landmarks_tensor.to(device)

        #print(f"Batch {batch} - image tensor:  {X.shape} - GT tensor: {y.shape}")

        # Forward pass
        y_pred = model(X)

        #print(f"y pred shape: {y_pred.shape} - y shape: {y.shape}")
        
        # Calculate  and accumulate loss
        loss = loss_fn(y_pred, y)

        # normalize loss to account for batch accumulation
        loss = loss / gradient_accumulation_steps
        train_loss += loss.item()

        # Loss backward
        loss.backward()
        
        # Check if it is time to update the weights
        if ((batch + 1) % gradient_accumulation_steps == 0) or (batch + 1 == len(dataloader)):
            # Optimizer step
            optimizer.step()
            # Reset gradients
            optimizer.zero_grad()

    # Adjust metric to get average loss per batch
    train_loss /= len(dataloader)
    
    return train_loss
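
# Illustrative usage sketch (not part of the pipeline; the toy dataset and
# stand-in model below are hypothetical and only mimic the dict layout the
# dataloader is expected to yield: 'name', 'image', 'landmarks', 'heatmaps').
# With gradient accumulation the optimizer steps every
# `gradient_accumulation_steps` batches, so the effective batch size is
# batch_size * gradient_accumulation_steps (here 2 * 4 = 8).
def _example_train_step_usage(num_landmarks: int = 6, image_size: int = 64):
    class _ToyLandmarkDataset(torch.utils.data.Dataset):
        def __len__(self):
            return 16

        def __getitem__(self, idx):
            return {
                'name': f"toy_{idx}.png",
                'image': torch.rand(3, image_size, image_size),
                'landmarks': torch.rand(num_landmarks, 2),
                'heatmaps': torch.rand(num_landmarks, image_size, image_size),
            }

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    loader = torch.utils.data.DataLoader(_ToyLandmarkDataset(), batch_size=2)
    model = torch.nn.Conv2d(3, num_landmarks, kernel_size=3, padding=1)  # stand-in heatmap head
    loss_fn = torch.nn.MSELoss()
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
    return train_step(model, device, loader, loss_fn, optimizer,
                      useHeatmaps=True, gradient_accumulation_steps=4)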

## -----------------------------------------------------------------------------------------------------------------##
##                                               VALIDATION PART                                                    ##
## -----------------------------------------------------------------------------------------------------------------##

def validate_step(model: torch.nn.Module,
                  device: torch.device,
                  dataloader: torch.utils.data.DataLoader,
                  loss_fn: torch.nn.Module,
                  useHeatmaps: bool = False):
    # Put model in eval mode
    model = model.to(device)
    model.eval()

    # Setup validation loss value
    val_loss = 0.0

    with torch.no_grad():
        # Loop through DataLoader batches
        for batch, data in enumerate(dataloader):
            images_tensor = data['image']
            landmarks_tensor = data['landmarks']
            heatmaps_tensor = data['heatmaps']

            # Send data to target device
            X = images_tensor.to(device)

            if useHeatmaps:
                y = heatmaps_tensor.to(device)
            else:
                y = landmarks_tensor.to(device)

            # Forward pass
            val_pred_logits = model(X)

            # Calculate and accumulate loss
            loss = loss_fn(val_pred_logits, y)
            val_loss += loss.item()

    # Adjust metrics to get average loss per batch
    val_loss = val_loss / len(dataloader)

    return val_loss


## -----------------------------------------------------------------------------------------------------------------##
##                                               EARLY STOPPING                                                     ##
## -----------------------------------------------------------------------------------------------------------------##

class EarlyStopping:
    """Early stops the training if validation loss doesn't improve after a given patience."""
    def __init__(self, patience=10, delta=0, save_path=None, counter=0, best_val_loss=None):
        self.patience = patience
        self.counter = counter
        self.best_val_loss = best_val_loss
        self.early_stop = False
        self.val_loss_min = np.inf
        self.delta = delta
        self.path = save_path

    def call(self, val_loss, model, optimizer, scheduler, loss_fn, results, epochs_without_improvement, epoch):

        if self.best_val_loss is None:
            self.best_val_loss = val_loss
            save_model(self.path, model, optimizer, scheduler, loss_fn, results, epochs_without_improvement, self.best_val_loss, epoch, called_by_early_stopping=True)

        elif val_loss >= self.best_val_loss + self.delta:
            self.counter += 1
            print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            self.best_val_loss = val_loss
            save_model(self.path, model, optimizer, scheduler, loss_fn, results, epochs_without_improvement, self.best_val_loss, epoch, called_by_early_stopping=True)
            self.counter = 0
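
# Usage note: `train_and_validate` and `fine_tune` below show the intended
# pattern: call `early_stopping.call(val_loss, ...)` once per epoch. The call
# saves "best_checkpoint.pt" via save_model whenever the new val_loss is below
# best_val_loss + delta; otherwise it increments the counter and sets
# `early_stop = True` after `patience` consecutive non-improving epochs.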


## -----------------------------------------------------------------------------------------------------------------##
##                                           SAVE AND LOAD A MODEL                                            ##
## -----------------------------------------------------------------------------------------------------------------##
def save_model(save_path, model, optimizer, scheduler, loss_fn, results, epochs_without_improvement, best_val_loss, epoch, called_by_early_stopping=False):
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    if called_by_early_stopping:
        checkpoint_path = os.path.join(save_path, "best_checkpoint.pt")
    else:
        checkpoint_path = os.path.join(save_path, f"checkpoint_epoch{epoch}.pt")

    torch.save({
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'scheduler_state_dict': scheduler.state_dict(),
        'loss_fn': loss_fn.state_dict(),
        'results': results,
        'epochs_without_improvement': epochs_without_improvement,
        'best_val_loss': best_val_loss,
        'epoch': epoch
    }, checkpoint_path)
    #print(f"Model saved to {checkpoint_path}")

    
def load_model(load_path, model, optimizer, scheduler, loss_fn, device):
    checkpoint = torch.load(load_path, map_location=torch.device(device))

    # Load the state_dict into the model only if it exists in the checkpoint
    if 'model_state_dict' in checkpoint:
        model.load_state_dict(checkpoint['model_state_dict'])
    model = model.to(device)  # Move the model to the specified device

    # Load the optimizer state_dict only if it exists in the checkpoint
    if 'optimizer_state_dict' in checkpoint:
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])

    # Load the scheduler state_dict only if it exists in the checkpoint
    if 'scheduler_state_dict' in checkpoint:
        scheduler.load_state_dict(checkpoint['scheduler_state_dict'])

    # Load the loss_fn state_dict only if it exists in the checkpoint
    if 'loss_fn' in checkpoint:
        loss_fn.load_state_dict(checkpoint['loss_fn'])

    # Load other values only if they exist in the checkpoint
    start_epoch = checkpoint.get('epoch', 0) + 1
    results = checkpoint.get('results', None)
    epochs_without_improvement = checkpoint.get('epochs_without_improvement', 0)
    best_val_loss = checkpoint.get('best_val_loss', None)
    print(f"Model loaded from {load_path} | Starting from epoch {start_epoch} | Best validation loss: {best_val_loss} | Epochs without improvement: {epochs_without_improvement}")
    return model, optimizer, scheduler, loss_fn, start_epoch, results, epochs_without_improvement, best_val_loss
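
# Resume sketch (hypothetical checkpoint path): load_model restores model,
# optimizer, scheduler and loss_fn state in place and returns the bookkeeping
# needed to continue the training loop; train_and_validate(...,
# continue_training=True) wraps exactly this call.
#
#   model, optimizer, scheduler, loss_fn, start_epoch, results, \
#       epochs_without_improvement, best_val_loss = load_model(
#           "runs/exp1/best_checkpoint.pt", model, optimizer, scheduler, loss_fn, device)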


## -----------------------------------------------------------------------------------------------------------------##
##                                           TRAINING + VALIDATION PART                                             ##
## -----------------------------------------------------------------------------------------------------------------##

def train_and_validate(model: torch.nn.Module,
                       device: torch.device,
                       train_dataloader: torch.utils.data.DataLoader,
                       val_dataloader: torch.utils.data.DataLoader,
                       optimizer: torch.optim.Optimizer,
                       scheduler: torch.optim.lr_scheduler,
                       loss_fn: torch.nn.Module,
                       epochs: int = 10,
                       save_path: str = None,
                       useHeatmaps: bool = True,
                       patience: int = 10,
                       save_all_epochs: bool = False,
                       useGradAcc: int = 1,
                       continue_training: bool = False):
    
    if continue_training:
        assert save_path is not None, "If you want to continue training, you must provide a save_path to load the checkpoint from."
        model_path = os.path.join(save_path, "best_checkpoint.pt")

        # Load the model from the path
        model, optimizer, scheduler, loss_fn, start_epoch, results, epochs_without_improvement, best_val_loss = load_model(model_path, model, optimizer, scheduler, loss_fn, device)
    else:
        # Create empty results dictionary and initialize epoch
        results = {"train_loss": [], "val_loss": []}
        start_epoch = 1
        best_val_loss = float("inf")
        epochs_without_improvement = 0

    # Start the timer
    start_time = timer()

    # Create EarlyStopping instance
    early_stopping = EarlyStopping(patience=patience, save_path=save_path, counter=epochs_without_improvement, best_val_loss=best_val_loss)

    # Loop through training and validating steps for a number of epochs
    for epoch in tqdm(range(start_epoch, epochs + 1)):

        assert useGradAcc >= 1, "Gradient accumulation steps must be at least 1"

        train_loss = train_step(model, device, train_dataloader, loss_fn, optimizer, useHeatmaps, gradient_accumulation_steps=useGradAcc)

        val_loss = validate_step(model, device, val_dataloader, loss_fn, useHeatmaps)

        scheduler_type = scheduler.__class__.__name__
        if scheduler_type == "ReduceLROnPlateau":
            scheduler.step(val_loss)
        else:
            # Update the learning rate using the scheduler
            scheduler.step()

        # Print out what's happening
        print(f"Epoch {epoch} | Train Loss: {train_loss:.7f} | Validation Loss: {val_loss:.7f}")

        # Update results dictionary
        results["train_loss"].append(train_loss)
        results["val_loss"].append(val_loss)

        # Save the trained model
        if save_all_epochs is True:
            save_model(save_path, model, optimizer, scheduler, loss_fn, results, epochs_without_improvement, best_val_loss, epoch)

        # Check for early stopping
        early_stopping.call(val_loss, model, optimizer, scheduler, loss_fn, results, epochs_without_improvement, epoch)
        if early_stopping.early_stop:
            print("Early stopping triggered.")
            break
        
    # End the timer and print out how long it took
    end_time = timer()
    print(f"Total training time: {end_time - start_time:.3f} seconds")

    # Return the filled results at the end of the epochs
    return results
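
# End-to-end usage sketch (illustrative; the stand-in model, learning rate and
# scheduler settings are assumptions, and the dataloaders must yield the same
# dict layout as in train_step/validate_step above). The best checkpoint is
# written to f"{save_path}/best_checkpoint.pt" through EarlyStopping.
def _example_train_and_validate_usage(train_loader: torch.utils.data.DataLoader,
                                      val_loader: torch.utils.data.DataLoader,
                                      save_path: str = "runs/example"):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = torch.nn.Conv2d(3, 6, kernel_size=3, padding=1)  # stand-in heatmap model
    loss_fn = torch.nn.MSELoss()
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.5, patience=5)
    return train_and_validate(model, device, train_loader, val_loader, optimizer,
                              scheduler, loss_fn, epochs=5, save_path=save_path,
                              useHeatmaps=True, patience=3, useGradAcc=2)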

## -----------------------------------------------------------------------------------------------------------------##
##                                                  FINE-TUNING IN-DOMAIN                                           ##
## -----------------------------------------------------------------------------------------------------------------##
def fine_tune(model: torch.nn.Module,
              device: torch.device,
              train_dataloader: torch.utils.data.DataLoader,
              val_dataloader: torch.utils.data.DataLoader,
              optimizer: torch.optim.Optimizer,
              scheduler: torch.optim.lr_scheduler,
              loss_fn: torch.nn.Module,
              epochs: int = 10,
              load_path: str = None,
              save_path: str = None,
              useHeatmaps: bool = True,
              patience: int = 10,
              useGradAcc: int = 1):
    
    assert load_path is not None, "You must provide a path to load the model from."
    
    # Load the model from the path
    # Checkpoints written by save_model wrap the weights under 'model_state_dict';
    # otherwise treat the file as a plain state_dict.
    checkpoint = torch.load(load_path, map_location=torch.device(device))
    model.load_state_dict(checkpoint.get('model_state_dict', checkpoint), strict=False)
    model = model.to(device)  # Move the model to the specified device
    
    # Create empty results dictionary and initialize epoch
    results = {"train_loss": [], "val_loss": []}
    start_epoch = 1
    best_val_loss = float("inf")
    epochs_without_improvement = 0
    
    # Start the timer
    start_time = timer()

    # Create EarlyStopping instance
    early_stopping = EarlyStopping(patience=patience, save_path=save_path, counter=epochs_without_improvement, best_val_loss=best_val_loss)

    # Loop through training and validating steps for a number of epochs
    for epoch in tqdm(range(start_epoch, epochs + 1)):

        assert useGradAcc >= 1, "Gradient accumulation steps must be at least 1"

        train_loss = train_step(model, device, train_dataloader, loss_fn, optimizer, useHeatmaps, gradient_accumulation_steps=useGradAcc)

        val_loss = validate_step(model, device, val_dataloader, loss_fn, useHeatmaps)

        scheduler_type = scheduler.__class__.__name__
        if scheduler_type == "ReduceLROnPlateau":
            scheduler.step(val_loss)
        else:
            # Update the learning rate using the scheduler
            scheduler.step()

        # Print out what's happening
        print(f"Epoch {epoch} | Train Loss: {train_loss:.7f} | Validation Loss: {val_loss:.7f}")

        # Update results dictionary
        results["train_loss"].append(train_loss)
        results["val_loss"].append(val_loss)

        # Check for early stopping
        early_stopping.call(val_loss, model, optimizer, scheduler, loss_fn, results, epochs_without_improvement, epoch)
        if early_stopping.early_stop:
            print("Early stopping triggered.")
            break
        
    # End the timer and print out how long it took
    end_time = timer()
    print(f"Total training time: {end_time - start_time:.3f} seconds")

    # Return the filled results at the end of the epochs
    return results   
                 


## -----------------------------------------------------------------------------------------------------------------##
##                                                  EVALUATION PART                                                 ##
## -----------------------------------------------------------------------------------------------------------------##
def test_step(model: torch.nn.Module,
              device: torch.device,
              dataloader: torch.utils.data.DataLoader,
              loss_fn: torch.nn.Module,
              num_landmarks: int,
              useHeatmaps: bool = False,
              sigma: float = 1.5,
              load_path: str = None):
    
    # Take the baseline of the path
    if load_path is not None:
        model_dir = os.path.dirname(load_path)
        
    # Put model in eval mode
    model = model.to(device)
    model.eval()
    model_name = model.__class__.__name__
    model_encoder = model.encoder.__class__.__name__ if hasattr(model, 'encoder') else ""

    # Setup test loss and test accuracy values
    test_loss = 0.0
    results = {}
    distances = [] 

    with torch.no_grad():
        # Loop through DataLoader batches
        for batch, data in enumerate(dataloader):
            images_name = data['name']
            images_tensor = data['image']
            #image_size = images_tensor.numpy().shape[2:]
            landmarks_tensor = data['landmarks']
            heatmaps_tensor = data['heatmaps']
            original_size = data['original_size']
            resized_size = data['resized_size']

            # Send data to target device
            X = images_tensor.to(device)

            if useHeatmaps:
                y = heatmaps_tensor.to(device)
            else:
                y = landmarks_tensor.to(device)

            # Forward pass
            y_pred = model(X)

            # Calculate and accumulate loss
            loss = loss_fn(y_pred, y)
            test_loss += loss.item()

            # Move the prediction and the GT to the CPU
            y_pred = y_pred.cpu()

            # Save the prediction heatmaps as images in the model directory 
            #os.makedirs(f"{model_dir}/predictions", exist_ok=True)
            #utilities.save_heatmaps(X, y_pred, images_name, f"{model_dir}/predictions")                

            # Compute the MSE and mAP between the original landmarks and the predicted landmarks
            mse_list, mAP_list_heatmaps, mAP_list_keypoints, iou_list, distance_list = metrics.compute_batch_metrics(landmarks_tensor, heatmaps_tensor, y_pred, resized_size, num_landmarks, useHeatmaps, sigma)
            # Append to full list in order to compute the MRE and SDR for all the images
            distances.extend(distance_list)
            
            # Store image names as keys and their corresponding predictions as values.
            for i, name in enumerate(images_name):  # Since they are in batch I loop them
                # Storing prediction and metrics values
                results[name] = { 
                    'prediction': y_pred[i],
                    'mse': mse_list[i],
                    'map1': mAP_list_heatmaps[i],
                    'map2': mAP_list_keypoints[i],
                    'iou': iou_list[i]
                }

            del batch, data, images_name, images_tensor, landmarks_tensor, heatmaps_tensor, original_size, resized_size, X, y, y_pred, loss, mse_list, mAP_list_heatmaps, mAP_list_keypoints, iou_list, distance_list   # Free memory
                                       

    # Adjust metric to get average loss per batch
    test_loss = test_loss / len(dataloader)

    # Compute metrics on full list
    #print("Dist shape:", len(distances))
    #print("Mean distance:", np.mean(distances))
    #print("Std distance:", np.std(distances))
    #print("Distances under 3px:", len([i for i in distances if i < 3]))
    #print("Distances above 15px:", len([i for i in distances if i > 15]))
    
    mre = metrics.compute_mre(distances)
    sdr = metrics.compute_sdr(distances)

    return test_loss, results, mre, sdr
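
# Note on test_step outputs: `results` is keyed by image name and stores the
# prediction plus per-image MSE, mAP (heatmaps and keypoints) and IoU from
# metrics.compute_batch_metrics, while MRE and SDR are computed once over the
# pooled distances collected across the whole dataloader.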


def evaluate(model: torch.nn.Module,
          device: torch.device,
          test_dataloader: torch.utils.data.DataLoader,
          loss_fn: torch.nn.Module,
          load_path: str,
          num_landmarks: int = 6,
          useHeatmaps: bool = True,
          sigma: float = 1.5,
          currentKfold: int = 1,
          res_file_path: str = "results/readable_res.csv"):
    
    checkpoint = torch.load(load_path, map_location=torch.device(device))
    #model.load_state_dict(checkpoint['model'])

    model.load_state_dict(checkpoint['model_state_dict'])
    print(f"\nModel loaded from {load_path}")
    epoch = checkpoint.get('epoch', "Undefined")

    # Get the loss and the predictions dictionary
    test_loss, results, mre, sdr = test_step(model, device, test_dataloader, loss_fn, num_landmarks, useHeatmaps, sigma, load_path)

    total_mse_list = []
    total_mAP_heatmaps_list = []
    total_mAP_keypoints_list = []
    total_iou_list = []

    # Create a list with all metrics of all images
    for value in results.values():
        total_mse_list.append(value['mse'])
        total_mAP_heatmaps_list.append(value['map1'])
        total_mAP_keypoints_list.append(value['map2'])
        total_iou_list.append(value['iou'])

    # Compute the mean between all samples
    total_mse_mean = np.mean(total_mse_list)
    total_mAP_heatmaps_mean = np.mean(total_mAP_heatmaps_list)
    total_mAP_keypoints_mean = np.mean(total_mAP_keypoints_list)
    total_iou_mean = np.mean(total_iou_list)

    # Compute the standard deviation between all samples
    total_mse_std = np.std(total_mse_list)
    total_mAP_heatmaps_std = np.std(total_mAP_heatmaps_list)
    total_mAP_keypoints_std = np.std(total_mAP_keypoints_list)
    total_iou_std = np.std(total_iou_list)

    # Create a string representation of the sdr dictionary
    sdr_str = '\n'.join(f'\tThreshold {k}: {v*100:.2f}' for k, v in sorted(sdr.items()))

    # Print and Save results
    res_file = open(res_file_path, 'a')
    print(f"\n{load_path}", file=res_file)
    print(f"Fold {currentKfold} - Epoch: {epoch} | MSE: {total_mse_mean:.2f} ± {total_mse_std:.2f} | mAP heat: {total_mAP_heatmaps_mean:.2f} ± {total_mAP_heatmaps_std:.2f} | mAP key: {total_mAP_keypoints_mean:.2f} ± {total_mAP_keypoints_std:.2f} | IoU: {total_iou_mean:.2f} ± {total_iou_std:.2f} \nMRE: {mre:.2f} \nSDR: \n{sdr_str}", file=res_file)
    res_file.close()

    print(f"Fold {currentKfold} - Epoch: {epoch} | \nMSE: {total_mse_mean:.2f} ± {total_mse_std:.2f} | \nmAP heat: {total_mAP_heatmaps_mean:.2f} ± {total_mAP_heatmaps_std:.2f} | mAP key: {total_mAP_keypoints_mean:.2f} ± {total_mAP_keypoints_std:.2f} | \nIoU: {total_iou_mean:.2f} ± {total_iou_std:.2f} | \nMRE: {mre:.2f} | \nSDR: \n{sdr_str}")
    del total_mse_list, total_mAP_heatmaps_list, total_mAP_keypoints_list, total_iou_list

    return test_loss, results, mre, sdr, total_mse_mean, total_mAP_heatmaps_mean, total_mAP_keypoints_mean, total_iou_mean, epoch




## -----------------------------------------------------------------------------------------------------------------##
##                                              REINSTANTIATE MODEL                                                 ##
## -----------------------------------------------------------------------------------------------------------------##

def reset_all_weights(model: nn.Module) -> None:
    """
    refs:
        - https://discuss.pytorch.org/t/how-to-re-set-alll-parameters-in-a-network/20819/6
        - https://stackoverflow.com/questions/63627997/reset-parameters-of-a-neural-network-in-pytorch
        - https://pytorch.org/docs/stable/generated/torch.nn.Module.html
    """

    @torch.no_grad()
    def weight_reset(m: nn.Module):
        # Check if the current module has reset_parameters and, if it's callable, call it on m
        reset_parameters = getattr(m, "reset_parameters", None)
        if callable(reset_parameters):
            m.reset_parameters()

    # Applies fn recursively to every submodule see: https://pytorch.org/docs/stable/generated/torch.nn.Module.html
    model.apply(fn=weight_reset)
     
def reinstantiate_model(model, optimizer, scheduler):
    model_type = model.__class__.__name__
    scheduler_type = scheduler.__class__.__name__
    optimizer_type = optimizer.__class__.__name__
    #print(scheduler_params)
    
    reset_all_weights(model)

    if optimizer_type == 'AdamW':
        optimizer = torch.optim.AdamW(params=model.parameters(), lr=optimizer.param_groups[0]['lr'])
    else:    
        raise ValueError(f"Unsupported optimizer type: {optimizer_type}")

    if scheduler_type == 'ReduceLROnPlateau':
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=scheduler.factor, patience=scheduler.patience, verbose=True, mode=scheduler.mode)
    else:
        raise ValueError(f"Unsupported scheduler type: {scheduler_type}")
    
    return model, optimizer, scheduler
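
# Note: k_fold_train_and_validate below calls reinstantiate_model before
# training each fold so that every fold starts from freshly reset weights and
# a fresh optimizer/scheduler state instead of inheriting the previous fold's
# training. Only AdamW and ReduceLROnPlateau are currently supported here.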


## -----------------------------------------------------------------------------------------------------------------##
##                                                            K-FOLD                                             ##
## -----------------------------------------------------------------------------------------------------------------##

        
def k_fold_train_and_validate(model: torch.nn.Module,
                                device: torch.device,
                                train_dataset: torch.utils.data.Dataset,
                                optimizer: torch.optim.Optimizer,
                                scheduler: torch.optim.lr_scheduler,
                                loss_fn: torch.nn.Module,
                                epochs: int,
                                early_stopping: int,
                                batch_size: int,
                                gradient_accumulation_steps: int,
                                num_landmarks: int,
                                sigma: float,
                                save_model_path: str,
                                log_file: str,
                                k_folds: int = 5,
                                onlyInference: bool = True
                                ):
    
    if onlyInference:
        k_train_losses = [0]
        k_val_losses = [0]
    else:
        k_train_losses = []
        k_val_losses = []

    k_test_losses = []
    k_mse = []
    k_iou = []
    k_map_heat = []
    k_map_key = []
    k_mre = []
    k_sdr = {}

    results_folds = []
        
    # Get the total number of samples
    total_size = len(train_dataset)

    # Divide by the number of folds to get the size of each fold
    fold_size = total_size // k_folds

    indices = list(range(total_size))
  

    for fold in range(k_folds):
        
        # Assign the fold as the val set
        val_ids = indices[fold*fold_size:(fold+1)*fold_size]

        # The remaining data will be used for training 
        train_ids = indices[:fold*fold_size] + indices[(fold+1)*fold_size:]

        # Create the subsets
        train_subsampler = torch.utils.data.SubsetRandomSampler(train_ids)
        val_subsampler = torch.utils.data.SubsetRandomSampler(val_ids)

        # Create the data loaders
        train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, sampler=train_subsampler, num_workers=4, pin_memory=True, drop_last=True)
        val_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, sampler=val_subsampler, num_workers=4, pin_memory=True)

        save_fold_path = f"{save_model_path}/fold_{fold}"
        print(f"Training fold {fold}...")
        print(f"Path: {save_fold_path}")
        
        if not onlyInference:
            
            model, optimizer, scheduler = reinstantiate_model(model, optimizer, scheduler)
            
            # Train on the current fold
            fold_train_results = train_and_validate(model, device, train_loader, val_loader, optimizer, scheduler, loss_fn, epochs, 
                                            save_fold_path, patience=early_stopping, useGradAcc=gradient_accumulation_steps, continue_training=False)
            
            last_train_loss = fold_train_results['train_loss'][-1]
            last_val_loss = fold_train_results['val_loss'][-1]

            k_train_losses.append(last_train_loss)
            k_val_losses.append(last_val_loss)

            print(f"FOLD {fold} | Train loss: {last_train_loss} | Val loss: {last_val_loss}")
            del fold_train_results, last_train_loss, last_val_loss, train_loader, train_subsampler, val_subsampler, train_ids, val_ids


        # ---------------------- Evaluate on the held-out validation fold (its images never update the weights; the fold is only used for model selection) -------------------------------
        load_fold_path = os.path.join(save_fold_path, "best_checkpoint.pt")
        # Get the loss and the predictions dictionary
        test_loss, results, mre, sdr, mse, mAP_heatmaps, mAP_keypoints, iou, epoch = evaluate(model, device, val_loader, loss_fn, load_fold_path,
                                        num_landmarks=num_landmarks, sigma=sigma, currentKfold=fold, res_file_path=log_file)

        k_test_losses.append(test_loss)
        
        k_mre.append(mre)
        
        # Update the sdr dictionary
        for threshold, value in sdr.items():
            if threshold not in k_sdr:
                k_sdr[threshold] = []
            k_sdr[threshold].append(value)

        # Create a list with all metrics of all images
        for value in results.values():
            k_mse.append(value['mse'])
            k_map_heat.append(value['map1'])
            k_map_key.append(value['map2'])
            k_iou.append(value['iou'])    

        del test_loss, results, mre, sdr, load_fold_path, val_loader
        
    # Compute the mean and SD for each threshold
    sdr_mean_std = {threshold: (np.mean(values), np.std(values)) for threshold, values in k_sdr.items()}

    # Compute the mean for the losses
    k_train_loss_mean = np.mean(k_train_losses)
    k_train_loss_std = np.std(k_train_losses)

    k_val_loss_mean = np.mean(k_val_losses)
    k_val_loss_std = np.std(k_val_losses)

    k_test_loss_mean = np.mean(k_test_losses)
    k_test_loss_std = np.std(k_test_losses)

    # Compute the mean between all samples
    k_mse_mean = np.mean(k_mse)
    k_map_heat_mean = np.mean(k_map_heat)
    k_map_key_mean = np.mean(k_map_key)
    k_iou_mean = np.mean(k_iou)

    # Compute the standard deviation between all samples
    k_mse_std = np.std(k_mse)
    k_map_heat_std = np.std(k_map_heat)
    k_map_key_std = np.std(k_map_key)
    k_iou_std = np.std(k_iou)

    # Compute the mean MRE and mean SDR
    k_mre_mean = np.mean(k_mre)
    k_mre_std = np.std(k_mre)

    res_file = open(log_file, 'a')
    print(f"----------------------------------------------------------------- GLOBAL RES for {k_folds} Folds \n",
        f"Train loss ---> Mean: {k_train_loss_mean} | Std: {k_train_loss_std} \n",
        f"Val loss ---> Mean: {k_val_loss_mean} | Std: {k_val_loss_std} \n",
        f"Test loss ---> Mean: {k_test_loss_mean} | Std: {k_test_loss_std} \n",
        f"MSE ---> Mean: {k_mse_mean:.2f} | Std: {k_mse_std:.2f} \n",
        f"mAp heat ---> Mean: {k_map_heat_mean:.2f} | Std: {k_map_heat_std:.2f} \n",
        f"mAp key ---> Mean: {k_map_key_mean:.2f} | Std: {k_map_key_std:.2f} \n",
        f"IOU ---> Mean: {k_iou_mean:.2f} | Std: {k_iou_std:.2f} \n",
        f"MRE ---> Mean: {k_mre_mean:.2f} | Std: {k_mre_std:.2f} \n",
        f"SDR:\n",
        *(f"Threshold {threshold}: Mean: {mean*100:.2f} | Std: {std*100:.2f}\n" for threshold, (mean, std) in sdr_mean_std.items()),
        file=res_file)
    res_file.close()


    print(f"----------------------------------------------------------------- GLOBAL RES for {k_folds} Folds \n",
        f"Train loss ---> Mean: {k_train_loss_mean} | Std: {k_train_loss_std} \n",
        f"Val loss ---> Mean: {k_val_loss_mean} | Std: {k_val_loss_std} \n",
        f"Test loss ---> Mean: {k_test_loss_mean} | Std: {k_test_loss_std} \n",
        f"MSE ---> Mean: {k_mse_mean:.2f} | Std: {k_mse_std:.2f} \n",
        f"mAp heat ---> Mean: {k_map_heat_mean:.2f} | Std: {k_map_heat_std:.2f} \n",
        f"mAp key ---> Mean: {k_map_key_mean:.2f} | Std: {k_map_key_std:.2f} \n",
        f"IOU ---> Mean: {k_iou_mean:.2f} | Std: {k_iou_std:.2f} \n",
        f"MRE ---> Mean: {k_mre_mean:.2f} | Std: {k_mre_std:.2f} \n",
        f"SDR:\n",
        *(f"Threshold {threshold}: Mean: {mean*100:.2f} | Std: {std*100:.2f}\n" for threshold, (mean, std) in sdr_mean_std.items()))
    del k_train_losses, k_val_losses, k_test_losses, k_mse, k_iou, k_map_heat, k_map_key, k_mre, k_sdr, results_folds, train_dataset, total_size, fold_size, indices
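
# Usage sketch (hypothetical dataset object and paths): each fold trains from
# freshly reinstantiated weights, saves its best checkpoint under
# f"{save_model_path}/fold_{fold}", is then evaluated on its held-out split,
# and the per-fold plus aggregated metrics are appended to `log_file`.
#
#   k_fold_train_and_validate(model, device, train_dataset, optimizer, scheduler,
#                             loss_fn, epochs=100, early_stopping=10, batch_size=4,
#                             gradient_accumulation_steps=4, num_landmarks=6,
#                             sigma=1.5, save_model_path="runs/kfold",
#                             log_file="results/readable_res.csv", k_folds=5,
#                             onlyInference=False)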