emad2001 committed
Commit 3757e50 · verified · 1 Parent(s): 20a49af

Upload folder using huggingface_hub

Files changed (46)
  1. DiffusionXray-FewShot-LandmarkDetection/LICENSE +21 -0
  2. DiffusionXray-FewShot-LandmarkDetection/README.md +92 -0
  3. DiffusionXray-FewShot-LandmarkDetection/datasets/cephalo/400_junior/001.txt +27 -0
  4. DiffusionXray-FewShot-LandmarkDetection/datasets/cephalo/400_senior/001.txt +27 -0
  5. DiffusionXray-FewShot-LandmarkDetection/datasets/cephalo/jpg/001.jpg +3 -0
  6. DiffusionXray-FewShot-LandmarkDetection/datasets/chest/labels/CHNCXR_0001_0.txt +7 -0
  7. DiffusionXray-FewShot-LandmarkDetection/datasets/chest/pngs/CHNCXR_0001_0.png +3 -0
  8. DiffusionXray-FewShot-LandmarkDetection/datasets/hand/jpg/3128.jpg +3 -0
  9. DiffusionXray-FewShot-LandmarkDetection/datasets/hand/labels/all.csv +1 -0
  10. DiffusionXray-FewShot-LandmarkDetection/ddpm_pretraining/config/aariz_fastddpm.json +46 -0
  11. DiffusionXray-FewShot-LandmarkDetection/ddpm_pretraining/config/config.json +46 -0
  12. DiffusionXray-FewShot-LandmarkDetection/ddpm_pretraining/ddpm_datasets.py +259 -0
  13. DiffusionXray-FewShot-LandmarkDetection/ddpm_pretraining/main.py +109 -0
  14. DiffusionXray-FewShot-LandmarkDetection/ddpm_pretraining/model/ddpm_model.py +213 -0
  15. DiffusionXray-FewShot-LandmarkDetection/ddpm_pretraining/model/nn_blocks.py +450 -0
  16. DiffusionXray-FewShot-LandmarkDetection/ddpm_pretraining/model/training_functions.py +327 -0
  17. DiffusionXray-FewShot-LandmarkDetection/ddpm_pretraining/utils.py +180 -0
  18. DiffusionXray-FewShot-LandmarkDetection/downstream_task/config/config.json +48 -0
  19. DiffusionXray-FewShot-LandmarkDetection/downstream_task/imagenet_backbones_comparative_study.py +187 -0
  20. DiffusionXray-FewShot-LandmarkDetection/downstream_task/landmarks_datasets.py +393 -0
  21. DiffusionXray-FewShot-LandmarkDetection/downstream_task/main.py +379 -0
  22. DiffusionXray-FewShot-LandmarkDetection/downstream_task/metrics.py +234 -0
  23. DiffusionXray-FewShot-LandmarkDetection/downstream_task/model/deep_learning.py +725 -0
  24. DiffusionXray-FewShot-LandmarkDetection/downstream_task/model/models.py +463 -0
  25. DiffusionXray-FewShot-LandmarkDetection/downstream_task/utilities.py +253 -0
  26. DiffusionXray-FewShot-LandmarkDetection/experiments/launch_imagenet_comparative_study.bash +5 -0
  27. DiffusionXray-FewShot-LandmarkDetection/experiments/launch_landmarks_experiments.bash +108 -0
  28. DiffusionXray-FewShot-LandmarkDetection/experiments/launch_pretraining.bash +57 -0
  29. DiffusionXray-FewShot-LandmarkDetection/launch_experiments.bash +29 -0
  30. DiffusionXray-FewShot-LandmarkDetection/requirements.txt +21 -0
  31. DiffusionXray-FewShot-LandmarkDetection/ssl_pretraining/builders/__init__.py +31 -0
  32. DiffusionXray-FewShot-LandmarkDetection/ssl_pretraining/builders/barlow_twins.py +105 -0
  33. DiffusionXray-FewShot-LandmarkDetection/ssl_pretraining/builders/byol.py +114 -0
  34. DiffusionXray-FewShot-LandmarkDetection/ssl_pretraining/builders/dino.py +172 -0
  35. DiffusionXray-FewShot-LandmarkDetection/ssl_pretraining/builders/moco.py +118 -0
  36. DiffusionXray-FewShot-LandmarkDetection/ssl_pretraining/builders/mocov2.py +132 -0
  37. DiffusionXray-FewShot-LandmarkDetection/ssl_pretraining/builders/mocov3.py +148 -0
  38. DiffusionXray-FewShot-LandmarkDetection/ssl_pretraining/builders/simclr.py +99 -0
  39. DiffusionXray-FewShot-LandmarkDetection/ssl_pretraining/builders/simclrv2.py +113 -0
  40. DiffusionXray-FewShot-LandmarkDetection/ssl_pretraining/builders/simsiam.py +114 -0
  41. DiffusionXray-FewShot-LandmarkDetection/ssl_pretraining/builders/supcon.py +124 -0
  42. DiffusionXray-FewShot-LandmarkDetection/ssl_pretraining/builders/swav.py +179 -0
  43. DiffusionXray-FewShot-LandmarkDetection/ssl_pretraining/config/config.json +22 -0
  44. DiffusionXray-FewShot-LandmarkDetection/ssl_pretraining/main.py +350 -0
  45. DiffusionXray-FewShot-LandmarkDetection/ssl_pretraining/ssl_datasets.py +205 -0
  46. DiffusionXray-FewShot-LandmarkDetection/ssl_pretraining/utils.py +174 -0
DiffusionXray-FewShot-LandmarkDetection/LICENSE ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2024 MaLGa Vision

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
DiffusionXray-FewShot-LandmarkDetection/README.md ADDED
@@ -0,0 +1,92 @@
# Self-supervised pre-training with diffusion model for few-shot landmark detection in x-ray images
Official PyTorch implementation of the paper: https://openaccess.thecvf.com/content/WACV2025/papers/Di_Via_Self-Supervised_Pre-Training_with_Diffusion_Model_for_Few-Shot_Landmark_Detection_in_WACV_2025_paper.pdf

# Abstract
Deep neural networks have been extensively applied in the medical domain for various tasks, including image classification, segmentation, and landmark detection. However, their application is often hindered by data scarcity, both in terms of available annotations and images. This study introduces a novel application of denoising diffusion probabilistic models (DDPMs) to the landmark detection task, specifically addressing the challenge of limited annotated data in x-ray imaging. Our key innovation lies in leveraging DDPMs for self-supervised pre-training in landmark detection, a previously unexplored approach in this domain. This method enables accurate landmark detection with minimal annotated training data (as few as 50 images), surpassing both ImageNet supervised pre-training and traditional self-supervised techniques across three popular x-ray benchmark datasets. To our knowledge, this work represents the first application of diffusion models for self-supervised learning in landmark detection, which may offer a valuable pre-training approach for mitigating data scarcity in few-shot regimes.

![ddpm_pipeline](https://github.com/user-attachments/assets/d58daec4-ed81-4b4e-aca0-4257e9149b5b)

# Getting Started
## Installation
Install the required Python packages:
```
pip install -r requirements.txt
```

## Preparing Datasets
Download the cephalometric ([link1](https://figshare.com/s/37ec464af8e81ae6ebbf), [link2](https://www.kaggle.com/datasets/c34a0ef0cd3cfd5c5afbdb30f8541e887171f19f196b1ad63790ca5b28c0ec93?select=cepha400)), hand ([link](https://ipilab.usc.edu/research/baaweb/)), and chest ([link](https://www.kaggle.com/datasets/nikhilpandey360/chest-xray-masks-and-labels)) datasets.

Prepare the datasets in the following directory structure:

- datasets
  - cephalo
    - 400_junior
      - *.txt
    - 400_senior
      - *.txt
    - jpg
      - *.jpg
  - hand
    - labels
      - all.csv  # [download here](https://github.com/christianpayer/MedicalDataAugmentationTool-HeatmapRegression/blob/master/hand_xray/hand_xray_dataset/setup/all.csv)
    - jpg
      - *.jpg
  - chest
    - pngs
      - CHNCXR_*.png
    - labels
      - CHNCXR_*.txt  # unzip [chest_labels.zip](https://github.com/MIRACLE-Center/YOLO_Universal_Anatomical_Landmark_Detection/blob/main/data/chest_labels.zip)

## Running Experiments
To run the experiments, follow these steps:
- Open a terminal.
- Navigate to the root directory of the repository.
- Make the launch_experiments.bash script executable:
```
chmod +x launch_experiments.bash
```
- Run the launch_experiments.bash script, which automates setting up and running the desired experiments:
```
./launch_experiments.bash
```

# Download Pre-Trained Models

All the pre-trained models used in the study are available at the following link:

[https://huggingface.co/Roberto98/X-rays_Self-Supervised_Landmark_Detection](https://huggingface.co/Roberto98/X-rays_Self-Supervised_Landmark_Detection)

In particular, it is possible to download:
- Our DDPM pre-trained models at 6k, 8k, and 8k iterations respectively for the Chest, Cephalometric, and Hand datasets
- MoCo v3 densenet161 models at 10k iterations for the Chest, Cephalometric, and Hand datasets
- SimCLRv2 densenet161 models at 10k iterations for the Chest, Cephalometric, and Hand datasets
- DINO densenet161 models at 10k iterations for the Chest, Cephalometric, and Hand datasets

# Citation

Accepted at WACV (Winter Conference on Applications of Computer Vision) 2025.

If you use this code or findings in your research, please cite:

### Bibtex
```
@InProceedings{Di_Via_2025_WACV,
    author    = {Di Via, Roberto and Odone, Francesca and Pastore, Vito Paolo},
    title     = {Self-Supervised Pre-Training with Diffusion Model for Few-Shot Landmark Detection in X-Ray Images},
    booktitle = {Proceedings of the Winter Conference on Applications of Computer Vision (WACV)},
    month     = {February},
    year      = {2025},
    pages     = {3886-3896}
}
```

### APA
```
Di Via, R., Odone, F., & Pastore, V. P. (2025). Self-supervised pre-training with diffusion model for few-shot landmark detection in x-ray images. IEEE/CVF Winter Conference on Applications of Computer Vision (WACV) 2025. https://openaccess.thecvf.com/content/WACV2025/papers/Di_Via_Self-Supervised_Pre-Training_with_Diffusion_Model_for_Few-Shot_Landmark_Detection_in_WACV_2025_paper.pdf
```
DiffusionXray-FewShot-LandmarkDetection/datasets/cephalo/400_junior/001.txt ADDED
@@ -0,0 +1,27 @@
829,991
1465,1035
1300,1275
588,1178
1365,1631
1342,2093
1325,2224
1209,2253
1282,2270
695,1822
1457,1868
1448,1864
1586,1751
1571,2015
1501,1600
1441,2210
935,1514
1368,1552
664,1332
3
3
2
3
3
1
2
3
DiffusionXray-FewShot-LandmarkDetection/datasets/cephalo/400_senior/001.txt ADDED
@@ -0,0 +1,27 @@
835,996
1473,1029
1289,1279
604,1228
1375,1654
1386,2019
1333,2200
1263,2272
1305,2252
694,1805
1460,1870
1450,1864
1588,1753
1569,2013
1514,1620
1382,2310
944,1506
1436,1569
664,1340
3
3
2
3
3
1
2
3
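
These annotation files appear to follow the ISBI 2015 cephalometric challenge layout: 19 landmark coordinates as pixel `x,y` pairs, followed by 8 anatomical classification values. A minimal parsing sketch under that assumption; the helper name is ours, not part of the repository:

```python
import numpy as np

def read_cephalo_annotation(path, num_landmarks=19):
    """Hypothetical helper: 19 'x,y' landmark lines, then 8 class labels."""
    with open(path) as f:
        lines = [ln.strip() for ln in f if ln.strip()]
    # First 19 lines: landmark coordinates in pixels
    landmarks = np.array([[float(v) for v in ln.split(',')] for ln in lines[:num_landmarks]])
    # Remaining lines: anatomical classification values
    classes = [int(ln) for ln in lines[num_landmarks:]]
    return landmarks, classes

# read_cephalo_annotation('datasets/cephalo/400_junior/001.txt')[0].shape -> (19, 2)
```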
DiffusionXray-FewShot-LandmarkDetection/datasets/cephalo/jpg/001.jpg ADDED

Git LFS Details

  • SHA256: d2a1ccba679f0a4eae7394bfee3244d4f3ab2dd33b15782e0d59080473bdbd26
  • Pointer size: 131 Bytes
  • Size of remote file: 335 kB
DiffusionXray-FewShot-LandmarkDetection/datasets/chest/labels/CHNCXR_0001_0.txt ADDED
@@ -0,0 +1,7 @@
6
0.41625 0.14375
0.148125 0.798125
0.4075 0.59875
0.63 0.12875
0.891875 0.7825
0.7625 0.685
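
The chest label files appear to store the landmark count on the first line, followed by normalized `x y` coordinates. A parsing sketch under that reading; the helper is illustrative, not from the repository:

```python
def read_chest_labels(path, image_w, image_h):
    """Hypothetical helper: line 1 = landmark count, then normalized 'x y' pairs."""
    with open(path) as f:
        lines = [ln.strip() for ln in f if ln.strip()]
    n = int(lines[0])
    points = [tuple(float(v) for v in ln.split()) for ln in lines[1:1 + n]]
    # Scale the normalized coordinates back to pixel space
    return [(x * image_w, y * image_h) for x, y in points]
```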
DiffusionXray-FewShot-LandmarkDetection/datasets/chest/pngs/CHNCXR_0001_0.png ADDED

Git LFS Details

  • SHA256: e464a828425fe77ecebfcae955db4f9b9f1cf608a3fcb69a30e33a99eabd9459
  • Pointer size: 132 Bytes
  • Size of remote file: 5.92 MB
DiffusionXray-FewShot-LandmarkDetection/datasets/hand/jpg/3128.jpg ADDED

Git LFS Details

  • SHA256: 31b21bd9c4a1e43228406a1c73437b3c585260e017d1d582e62d4039d81c6b42
  • Pointer size: 131 Bytes
  • Size of remote file: 372 kB
DiffusionXray-FewShot-LandmarkDetection/datasets/hand/labels/all.csv ADDED
@@ -0,0 +1 @@
3142,297,1913,324
DiffusionXray-FewShot-LandmarkDetection/ddpm_pretraining/config/aariz_fastddpm.json ADDED
@@ -0,0 +1,46 @@
{
    "gpu": 0,
    "experiment_path": "ddpm_pretraining/ddpm_pretraining_experiments",
    "model_for_inference": "",
    "model": {
        "unet": {
            "channel_mults": [1, 2, 2, 4],
            "attn_res": 32,
            "num_head_channels": 4,
            "res_blocks": 4,
            "self_condition": true
        },
        "beta_schedule": {
            "train": {
                "schedule": "linear",
                "n_timestep": 500,
                "linear_start": 1e-4,
                "linear_end": 0.02
            },
            "test": {
                "schedule": "linear",
                "n_timestep": 500,
                "linear_start": 1e-4,
                "linear_end": 0.02
            }
        },
        "lr": 0.00005,
        "optimizer": "adamw",
        "loss_type": "l2",
        "use_ema": true,
        "iterations": 5000,
        "freq_metrics": 500,
        "freq_checkpoint": 500,
        "continue_training": false
    },
    "dataset": {
        "name": "Aariz/Aariz",
        "path": "/teamspace/studios/this_studio",
        "image_size": 512,
        "image_channels": 3,
        "batch_size": 6,
        "grad_accumulation": 1,
        "num_workers": 4,
        "pin_memory": true
    }
}
DiffusionXray-FewShot-LandmarkDetection/ddpm_pretraining/config/config.json ADDED
@@ -0,0 +1,46 @@
{
    "gpu": 0,
    "experiment_path": "ddpm_pretraining/ddpm_pretraining_experiments",
    "model_for_inference": "",
    "model": {
        "unet": {
            "channel_mults": [1, 2, 4, 8],
            "attn_res": 32,
            "num_head_channels": 4,
            "res_blocks": 4,
            "self_condition": true
        },
        "beta_schedule": {
            "train": {
                "schedule": "linear",
                "n_timestep": 500,
                "linear_start": 1e-4,
                "linear_end": 0.02
            },
            "test": {
                "schedule": "linear",
                "n_timestep": 500,
                "linear_start": 1e-4,
                "linear_end": 0.02
            }
        },
        "lr": 1e-4,
        "optimizer": "adamw",
        "loss_type": "l2",
        "use_ema": true,
        "iterations": 30000,
        "freq_metrics": 2000,
        "freq_checkpoint": 2000,
        "continue_training": false
    },
    "dataset": {
        "name": "cephalo",
        "path": "datasets/",
        "image_size": 256,
        "image_channels": 1,
        "batch_size": 4,
        "grad_accumulation": 8,
        "num_workers": null,
        "pin_memory": true
    }
}
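
`ddpm_pretraining/main.py` loads these files with `json.load` (see below). As a quick sketch of what the dataset block implies: with `batch_size` 4 and `grad_accumulation` 8, the optimizer steps once every 8 batches, giving an effective batch size of 32:

```python
import json

# Load the default pretraining config exactly as main.py does
config = json.load(open("ddpm_pretraining/config/config.json"))

ds = config["dataset"]
effective_batch = ds["batch_size"] * ds["grad_accumulation"]
print(f"effective batch size: {effective_batch}")  # -> 32
```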
DiffusionXray-FewShot-LandmarkDetection/ddpm_pretraining/ddpm_datasets.py ADDED
@@ -0,0 +1,259 @@
# ------------------------------------------------------------------------
# Libraries
# ------------------------------------------------------------------------

import os
import numpy as np
from PIL import Image
from torch.utils.data import Dataset
import albumentations as A
import albumentations.pytorch

import torch
import utils
from skimage.transform import resize

# ------------------------------------------------------------------------
# Chest Dataset
# ------------------------------------------------------------------------

# Load the dataset from the train and test folders in the root directory
class ChestDiffusionDataset(Dataset):
    def __init__(self, root_dir, channels=1, transform=None, phase='train'):

        self.root_dir = root_dir
        self.transform = transform
        self.channels = channels

        self.pth_Image = os.path.join(root_dir, 'pngs')

        # file index
        files = [i[:-4] for i in sorted(os.listdir(self.pth_Image))]

        exclude_list = ['CHNCXR_0059_0', 'CHNCXR_0178_0', 'CHNCXR_0228_0', 'CHNCXR_0267_0', 'CHNCXR_0295_0', 'CHNCXR_0310_0', 'CHNCXR_0285_0', 'CHNCXR_0276_0', 'CHNCXR_0303_0']
        if exclude_list is not None:
            st = set(exclude_list)
            files = [f for f in files if f not in st]

        n = len(files)
        train_num = 195
        val_num = 34
        test_num = n - train_num - val_num
        if phase == 'train':
            self.image_files = files[:train_num + val_num]
        elif phase == 'test':
            self.image_files = files[-test_num:]
        elif phase == 'all':
            self.image_files = files
        else:
            raise Exception("Unknown phase: {phase}".format(phase=phase))

    def __len__(self):
        return len(self.image_files)

    def __getitem__(self, idx):
        image_name = self.image_files[idx]

        image = self.read_image(os.path.join(self.pth_Image, image_name + '.png'))

        data_dict = {'name': image_name, 'image': image}

        return data_dict

    def read_image(self, image_path):

        if self.channels == 3:
            image = Image.open(image_path).convert('RGB')
            image_np = np.array(image).astype(np.float32)

        elif self.channels == 1:
            image = Image.open(image_path).convert('L')
            image_np = np.array(image).astype(np.float32)
            image_np = np.expand_dims(image_np, axis=2)  # add channel dimension
        else:
            raise ValueError('Channels must be either 1 or 3')

        if self.transform:
            image = self.transform(image=image_np)['image']

        return image


# ------------------------------------------------------------------------
# HAND Dataset
# ------------------------------------------------------------------------

# Load the dataset from the train and test folders in the root directory
class HandDiffusionDataset(Dataset):
    def __init__(self, root_dir, channels=1, transform=None, phase='train'):

        self.root_dir = root_dir
        self.transform = transform
        self.channels = channels

        self.pth_Image = os.path.join(root_dir, 'jpg')

        # file index
        files = [i[:-4] for i in sorted(os.listdir(self.pth_Image))]

        n = len(files)
        train_num = 550
        val_num = 59
        test_num = n - train_num - val_num
        if phase == 'train':
            self.image_files = files[:train_num + val_num]
        elif phase == 'test':
            self.image_files = files[-test_num:]
        elif phase == 'all':
            self.image_files = files
        else:
            raise Exception("Unknown phase: {phase}".format(phase=phase))

    def __len__(self):
        return len(self.image_files)

    def __getitem__(self, idx):
        image_name = self.image_files[idx]

        image = self.read_image(os.path.join(self.pth_Image, image_name + '.jpg'))

        data_dict = {'name': image_name, 'image': image}

        return data_dict

    def read_image(self, image_path):

        if self.channels == 3:
            image = Image.open(image_path).convert('RGB')
            image_np = np.array(image).astype(np.float32)

        elif self.channels == 1:
            image = Image.open(image_path).convert('L')
            image_np = np.array(image).astype(np.float32)
            image_np = np.expand_dims(image_np, axis=2)
        else:
            raise ValueError('Channels must be either 1 or 3')

        if self.transform:
            image = self.transform(image=image_np)['image']

        return image


# ------------------------------------------------------------------------
# CEPH Dataset
# ------------------------------------------------------------------------

class CephaloDiffusionDataset(Dataset):
    def __init__(self, root_dir, channels=1, transform=None, phase='train'):
        self.root_dir = root_dir
        self.transform = transform
        self.channels = channels

        self.pth_Image = os.path.join(root_dir, 'jpg')

        # file index
        files = [i[:-4] for i in sorted(os.listdir(self.pth_Image))]

        n = len(files)
        train_num = 130
        val_num = 20
        test_num = n - train_num - val_num

        if phase == 'train':
            self.image_files = files[:train_num + val_num]
        elif phase == 'test':
            self.image_files = files[-test_num:]
        elif phase == 'all':
            self.image_files = files
        else:
            raise Exception("Unknown phase: {phase}".format(phase=phase))

    def __len__(self):
        return len(self.image_files)

    def __getitem__(self, idx):
        image_name = self.image_files[idx]

        image = self.read_image(os.path.join(self.pth_Image, image_name + '.jpg'))

        data_dict = {'name': image_name, 'image': image}

        return data_dict

    def read_image(self, image_path):

        if self.channels == 3:
            image = Image.open(image_path).convert('RGB')
            image_np = np.array(image).astype(np.float32)

        elif self.channels == 1:
            image = Image.open(image_path).convert('L')
            image_np = np.array(image).astype(np.float32)
            image_np = np.expand_dims(image_np, axis=2)
        else:
            raise ValueError('Channels must be either 1 or 3')

        if self.transform:
            image = self.transform(image=image_np)['image']

        return image


# ------------------------------------------------------------------------
# AARIZ Dataset
# ------------------------------------------------------------------------

class AarizDiffusionDataset(Dataset):
    def __init__(self, root_dir, channels=3, transform=None, phase='train'):
        """
        Lightweight wrapper around the Aariz dataset. Uses the provided split
        directories (`train`, `valid`, `test`) and consumes images from the
        `Cephalograms` subfolder.
        """
        self.root_dir = root_dir
        self.transform = transform
        self.channels = channels

        split_dir = 'train' if phase == 'train' else ('valid' if os.path.isdir(os.path.join(root_dir, 'valid')) else 'test')
        self.images_dir = os.path.join(root_dir, split_dir, 'Cephalograms')

        if not os.path.isdir(self.images_dir):
            raise RuntimeError(f'Expected images under {self.images_dir}')

        files = [i for i in sorted(os.listdir(self.images_dir)) if i.lower().endswith(('.png', '.jpg', '.jpeg'))]
        if len(files) == 0:
            raise RuntimeError(f'No images found in {self.images_dir}')

        self.image_files = files

    def __len__(self):
        return len(self.image_files)

    def __getitem__(self, idx):
        image_name = self.image_files[idx]
        image = self.read_image(os.path.join(self.images_dir, image_name))
        return {'name': os.path.splitext(image_name)[0], 'image': image}

    def read_image(self, image_path):
        if self.channels == 3:
            image = Image.open(image_path).convert('RGB')
            image_np = np.array(image).astype(np.float32)
        elif self.channels == 1:
            image = Image.open(image_path).convert('L')
            image_np = np.array(image).astype(np.float32)
            image_np = np.expand_dims(image_np, axis=2)
        else:
            raise ValueError('Channels must be either 1 or 3')

        if self.transform:
            image = self.transform(image=image_np)['image']

        return image
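
Each dataset class returns a `{'name', 'image'}` dict and expects an albumentations transform that emits a tensor. A minimal usage sketch; the resize and normalization values are illustrative, chosen to mirror `config.json` (`image_size` 256, 1 channel):

```python
import albumentations as A
from albumentations.pytorch import ToTensorV2
from torch.utils.data import DataLoader

transform = A.Compose([
    A.Resize(256, 256),
    A.Normalize(mean=(0.5,), std=(0.5,), max_pixel_value=255.0),
    ToTensorV2(),  # HWC float array -> CHW tensor
])

dataset = ChestDiffusionDataset('datasets/chest', channels=1,
                                transform=transform, phase='train')
loader = DataLoader(dataset, batch_size=4, shuffle=True)
batch = next(iter(loader))
print(batch['image'].shape)  # torch.Size([4, 1, 256, 256])
```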
DiffusionXray-FewShot-LandmarkDetection/ddpm_pretraining/main.py ADDED
@@ -0,0 +1,109 @@
# ------------------------------------------------------------------------
# Libraries
# ------------------------------------------------------------------------

# General libraries
import os
import argparse
import json
import sys
import time
import logging

import numpy as np

# Deep learning libraries
import torch
from tqdm import tqdm

# Custom libraries
from utils import *
from model.training_functions import *

# Set random seed
np.random.seed(42)
torch.manual_seed(42)
torch.cuda.manual_seed(42)

# ------------------------------------------------------------------------
# MAIN
# ------------------------------------------------------------------------

if __name__ == "__main__":
    # Parse arguments from command line
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-c",
        "--config",
        type=str,
        default="./ddpm_pretraining/config/config.json",
        help="Path to the JSON config file."
    )

    args = parser.parse_args()
    config = json.load(open(args.config))

    # Print system info
    print("----------------------------------------- SYSTEM INFO -----------------------------------------")
    print("Python version: {}".format(sys.version))
    print("Pytorch version: {}".format(torch.__version__))

    if "CUDA_VISIBLE_DEVICES" in os.environ:
        GPU = os.environ["CUDA_VISIBLE_DEVICES"]
    else:
        GPU = config["gpu"]
        os.environ["CUDA_VISIBLE_DEVICES"] = f"{GPU}"

    device = "cuda" if torch.cuda.is_available() else "cpu"
    print(f"Torch GPU Name: {torch.cuda.get_device_name(0)}... Using GPU {GPU}" if device == "cuda" else "Torch GPU not available... Using CPU")

    print("------------------------------------------------------------------------------------------------")

    # Config params for training and testing the model
    root_path = config["experiment_path"]
    DATASET_NAME = config["dataset"]["name"]
    DATASET_PATH = os.path.join(config["dataset"]["path"], DATASET_NAME)
    batch_size = config["dataset"]["batch_size"]
    image_size = config["dataset"]["image_size"]
    image_channels = config["dataset"]["image_channels"]
    pin_memory = config["dataset"]["pin_memory"]
    num_workers = 2 if config["dataset"]["num_workers"] is None else config["dataset"]["num_workers"]

    # Create train and test dataloaders
    train_dataloader, test_dataloader = load_data(DATASET_PATH, image_size, image_channels, batch_size, pin_memory=pin_memory, num_workers=num_workers)

    # Save model path and tensorboard writer and path for the experiment
    PREFIX_PATH = f"{root_path}/{DATASET_NAME}/{config['model']['beta_schedule']['train']['schedule']}_{config['model']['beta_schedule']['train']['n_timestep']}/size{image_size}_ch{image_channels}"

    # Create log file for the experiment
    if not os.path.exists(f'{PREFIX_PATH}/log_file.txt'):
        os.makedirs(PREFIX_PATH, exist_ok=True)

        with open(f'{PREFIX_PATH}/log_file.txt', 'w') as f:
            pass

    logging.basicConfig(format="%(message)s", level=logging.INFO, filename=f'{PREFIX_PATH}/log_file.txt', filemode='a')  # %(asctime)s

    # Save the original model checkpoint and the ema model checkpoint
    save_model_path = generate_path(f"{PREFIX_PATH}/models/")

    # Print config params
    print("----------------------------------------- CONFIG PARAMS -----------------------------------------")
    print(f"Dataset path: {DATASET_PATH}")
    print(f"Dataset size: {len(train_dataloader.dataset)}")
    print(f"Batch size: {batch_size} | Accumulation steps: {config['dataset']['grad_accumulation']}")
    print(f"Number of batches: {len(train_dataloader)}")
    print(f"Image shape: ({image_size}, {image_size}, {image_channels})")
    print(f"Save model path: {save_model_path}")
    print("------------------------------------------------------------------------------------------------")

    # Train diffusion model
    print("----------------------------------------- START TRAINING -----------------------------------------")
    print(f"Total epochs: {int(config['model']['iterations'] / (len(train_dataloader) / config['dataset']['grad_accumulation']))} | Total iterations: {config['model']['iterations']} | Iterations per epoch: {len(train_dataloader)/config['dataset']['grad_accumulation']}")
    train_diffusion_model(config, train_dataloader, save_model_path, PREFIX_PATH, device, continue_training=config["model"]["continue_training"])

    print("----------------------------------------- END TRAINING -----------------------------------------")
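
For concreteness, the epoch count printed above can be worked out from the default `config.json` and the cephalo split (130 + 20 = 150 pre-training images). A sketch of the arithmetic, assuming the dataloader keeps the last partial batch:

```python
import math

images, batch_size, grad_accum, iterations = 150, 4, 8, 30000

batches_per_epoch = math.ceil(images / batch_size)   # 38 batches
updates_per_epoch = batches_per_epoch / grad_accum   # 4.75 optimizer steps
total_epochs = int(iterations / updates_per_epoch)   # 6315
print(batches_per_epoch, updates_per_epoch, total_epochs)
```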
DiffusionXray-FewShot-LandmarkDetection/ddpm_pretraining/model/ddpm_model.py ADDED
@@ -0,0 +1,213 @@
"""
Diffusion model architecture modified from the original code
https://github.com/dome272/Diffusion-Models-pytorch/tree/main
and
https://huggingface.co/blog/annotated-diffusion
"""

import torch
import torch.nn as nn
from tqdm import tqdm
import logging
from model.nn_blocks import *
from torch.optim import Adam, AdamW, SGD
from torchmetrics.image import StructuralSimilarityIndexMeasure
from torchmetrics import MeanSquaredError
from torchvision.utils import make_grid
import matplotlib.pyplot as plt
import numpy as np
import copy

class DDPM:
    def __init__(
        self,
        image_size: int = 256,
        channels: int = 3,
        device="cuda",
        lr: float = 1e-4,
        optimizer: str = "adam",
        timesteps: int = 1000,
        beta_schedule: str = "linear",
        beta_start: float = 1e-4,
        beta_end: float = 0.02,
        unet_channels: list = [1, 2, 4, 8],
        unet_self_condition: bool = True,
        unet_att_res: int = 32,
        unet_att_heads: int = 4,
        unet_res_blocks: int = 4,
        use_ema: bool = False,
    ):

        self.timesteps = timesteps
        self.device = device
        self.image_size = image_size
        self.channels = channels
        self.lr = lr

        self.beta_start = beta_start
        self.beta_end = beta_end

        # define beta schedule
        self.beta = self.prepare_noise_schedule(beta_schedule=beta_schedule).to(device)

        # define alphas
        self.alpha = 1.0 - self.beta
        self.alpha_hat = torch.cumprod(self.alpha, dim=0)

        # define model
        self.model = Unet(
            dim=image_size,
            channels=channels,
            dim_mults=unet_channels,
            self_condition=unet_self_condition,
            resnet_block_groups=unet_res_blocks,
            att_heads=unet_att_heads,
            att_res=unet_att_res,
        )
        self.model.to(self.device)

        # define optimizer
        self.optimizer = self.prepare_optimizer(optimizer)

        # Initialize EMA
        if use_ema:
            self.ema = EMA(0.995)
            self.ema_model = copy.deepcopy(self.model).eval().requires_grad_(False).to(self.device)
        else:
            self.ema = None
            self.ema_model = None

    def update_ema(self):
        # Update the EMA model parameters
        self.ema.update_model_average(self.ema_model, self.model)

    def prepare_optimizer(self, optimizer):
        if optimizer == "adam":
            return Adam(self.model.parameters(), lr=self.lr)
        elif optimizer == "adamw":
            return AdamW(self.model.parameters(), lr=self.lr)
        elif optimizer == "sgd":
            return SGD(self.model.parameters(), lr=self.lr)
        else:
            raise NotImplementedError()

    def prepare_noise_schedule(self, beta_schedule):
        if beta_schedule == "linear":
            return linear_beta_schedule(
                timesteps=self.timesteps, beta_start=self.beta_start, beta_end=self.beta_end
            )
        elif beta_schedule == "cosine":
            return cosine_beta_schedule(timesteps=self.timesteps)
        elif beta_schedule == "quadratic":
            return quadratic_beta_schedule(
                timesteps=self.timesteps, beta_start=self.beta_start, beta_end=self.beta_end
            )
        elif beta_schedule == "sigmoid":
            return sigmoid_beta_schedule(
                timesteps=self.timesteps, beta_start=self.beta_start, beta_end=self.beta_end
            )
        else:
            raise NotImplementedError

    def noise_images(self, x, t):
        sqrt_alpha_hat = torch.sqrt(self.alpha_hat[t])[:, None, None, None]
        sqrt_one_minus_alpha_hat = torch.sqrt(1 - self.alpha_hat[t])[:, None, None, None]
        Ɛ = torch.randn_like(x)
        return sqrt_alpha_hat * x + sqrt_one_minus_alpha_hat * Ɛ, Ɛ

    def noise_images_conditioned(self, x, t):
        sqrt_alpha_hat = torch.sqrt(self.alpha_hat[t])
        sqrt_one_minus_alpha_hat = torch.sqrt(1 - self.alpha_hat[t])
        Ɛ = torch.randn_like(x)
        return sqrt_alpha_hat * x + sqrt_one_minus_alpha_hat * Ɛ, Ɛ

    def sample_timesteps(self, n):
        return torch.randint(low=1, high=self.timesteps, size=(n,)).to(self.device)

    def p_losses(self, x, t, loss_type="l1"):

        x_t, noise = self.noise_images(x, t)
        predicted_noise = self.model(x_t, t)

        if loss_type == "l1":
            # loss = nn.L1Loss()(noise, predicted_noise)
            loss = F.l1_loss(noise, predicted_noise)
        elif loss_type == "l2":
            # loss = nn.MSELoss()(noise, predicted_noise)
            loss = F.mse_loss(noise, predicted_noise)
        elif loss_type == "huber":
            # loss = nn.SmoothL1Loss()(noise, predicted_noise)
            loss = F.smooth_l1_loss(noise, predicted_noise)
        else:
            raise NotImplementedError()

        return loss

    def sample(self, model, batch_size, timesteps=None, x_cond=None):
        with torch.no_grad():
            if timesteps is None:
                timesteps = self.timesteps

            if x_cond is None:
                x = torch.randn((batch_size, self.channels, self.image_size, self.image_size)).to(self.device)
            else:
                x, _ = self.noise_images_conditioned(x_cond, timesteps - 1)

            for i in tqdm(reversed(range(1, timesteps)), position=0):
                t = (torch.ones(batch_size) * i).long().to(self.device)
                predicted_noise = model(x, t)
                alpha = self.alpha[t][:, None, None, None]
                alpha_hat = self.alpha_hat[t][:, None, None, None]

                beta = self.beta[t][:, None, None, None]
                if i > 1:
                    noise = torch.randn_like(x)
                else:
                    noise = torch.zeros_like(x)
                x = 1 / torch.sqrt(alpha) * (x - ((1 - alpha) / (torch.sqrt(1 - alpha_hat))) * predicted_noise) + torch.sqrt(beta) * noise

            return x

    @torch.no_grad()
    def save_noising_process_image(self, x_start, filename):

        images = [x_start[0:1]]  # Select the first image from the batch
        timesteps_to_visualize = [i for i in range(0, self.timesteps, self.timesteps // 10)]

        for t in timesteps_to_visualize:
            t_tensor = torch.tensor([t], device=self.device)
            noised_image, _ = self.noise_images(x_start, t_tensor)
            images.append(noised_image[0:1])  # Select the first image from the batch

        # Create a grid of images
        image_grid = make_grid(torch.cat(images), nrow=len(timesteps_to_visualize) + 1, normalize=False)
        image_grid = image_grid.clamp(0, 1)

        # Convert to numpy array and transpose axes to HWC format for plotting
        np_image_grid = image_grid.cpu().numpy().transpose((1, 2, 0))

        # Plot and save the image
        plt.figure(figsize=(len(images) * 2, 2))
        plt.imshow(np_image_grid)
        plt.axis('off')
        plt.savefig(filename, bbox_inches='tight')
        plt.close()
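
`noise_images` is the closed-form forward process x_t = sqrt(alpha_hat_t) * x_0 + sqrt(1 - alpha_hat_t) * eps, and the loop in `sample` inverts it one step at a time. A standalone sanity check of the forward formula with the repository's 500-step linear schedule (shapes and timesteps are illustrative):

```python
import torch

# Linear schedule as in nn_blocks.linear_beta_schedule with n_timestep = 500
timesteps = 500
beta = torch.linspace(1e-4, 0.02, timesteps)
alpha_hat = torch.cumprod(1.0 - beta, dim=0)

x0 = torch.randn(2, 1, 64, 64)    # stand-in unit-variance images
t = torch.tensor([10, 499])       # one early, one late timestep
eps = torch.randn_like(x0)

sa = torch.sqrt(alpha_hat[t])[:, None, None, None]
sb = torch.sqrt(1 - alpha_hat[t])[:, None, None, None]
x_t = sa * x0 + sb * eps

# Unit-variance inputs keep marginal variance ~1 at every t, and by t=499
# alpha_hat is ~0.006, so the late sample is almost pure noise.
print(x_t.var().item(), alpha_hat[-1].item())
```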
DiffusionXray-FewShot-LandmarkDetection/ddpm_pretraining/model/nn_blocks.py ADDED
@@ -0,0 +1,450 @@
# https://github.com/abarankab/DDPM/blob/main/ddpm/unet.py

# ------------------------------------------------------------------------
# Libraries
# ------------------------------------------------------------------------

import math
from inspect import isfunction
from functools import partial

from einops import rearrange, reduce
from einops.layers.torch import Rearrange

import torch
from torch import nn, einsum
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint

# ------------------------------------------------------------------------
# Utilities and Blocks
# ------------------------------------------------------------------------

def exists(x):
    return x is not None


def default(val, d):
    if exists(val):
        return val
    return d() if isfunction(d) else d


def num_to_groups(num, divisor):
    groups = num // divisor
    remainder = num % divisor
    arr = [divisor] * groups
    if remainder > 0:
        arr.append(remainder)
    return arr


class Residual(nn.Module):
    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x, *args, **kwargs):
        return self.fn(x, *args, **kwargs) + x


def Upsample(dim, dim_out=None):
    return nn.Sequential(
        nn.Upsample(scale_factor=2, mode="nearest"),
        nn.Conv2d(dim, default(dim_out, dim), 3, padding=1),
    )


def Downsample(dim, dim_out=None):
    # No More Strided Convolutions or Pooling
    return nn.Sequential(
        Rearrange("b c (h p1) (w p2) -> b (c p1 p2) h w", p1=2, p2=2),
        nn.Conv2d(dim * 4, default(dim_out, dim), 1),
    )


class SinusoidalPositionEmbeddings(nn.Module):
    def __init__(self, dim):
        super().__init__()
        self.dim = dim

    def forward(self, time):
        device = time.device
        half_dim = self.dim // 2
        embeddings = math.log(10000) / (half_dim - 1)
        embeddings = torch.exp(torch.arange(half_dim, device=device) * -embeddings)
        embeddings = time[:, None] * embeddings[None, :]
        embeddings = torch.cat((embeddings.sin(), embeddings.cos()), dim=-1)
        return embeddings


class WeightStandardizedConv2d(nn.Conv2d):
    """
    https://arxiv.org/abs/1903.10520
    weight standardization purportedly works synergistically with group normalization
    """

    def forward(self, x):
        eps = 1e-5 if x.dtype == torch.float32 else 1e-3

        weight = self.weight
        mean = reduce(weight, "o ... -> o 1 1 1", "mean")
        var = reduce(weight, "o ... -> o 1 1 1", partial(torch.var, unbiased=False))
        normalized_weight = (weight - mean) * (var + eps).rsqrt()

        return F.conv2d(
            x,
            normalized_weight,
            self.bias,
            self.stride,
            self.padding,
            self.dilation,
            self.groups,
        )


class Block(nn.Module):
    def __init__(self, dim, dim_out, groups=8):
        super().__init__()
        self.proj = WeightStandardizedConv2d(dim, dim_out, 3, padding=1)
        self.norm = nn.GroupNorm(groups, dim_out)
        self.act = nn.SiLU()

    def forward(self, x, scale_shift=None):
        x = self.proj(x)
        x = self.norm(x)

        if exists(scale_shift):
            scale, shift = scale_shift
            x = x * (scale + 1) + shift

        x = self.act(x)
        return x


class ResnetBlock(nn.Module):
    """https://arxiv.org/abs/1512.03385"""

    def __init__(self, dim, dim_out, *, time_emb_dim=None, groups=8):
        super().__init__()
        self.mlp = (
            nn.Sequential(nn.SiLU(), nn.Linear(time_emb_dim, dim_out * 2))
            if exists(time_emb_dim)
            else None
        )

        self.block1 = Block(dim, dim_out, groups=groups)
        self.block2 = Block(dim_out, dim_out, groups=groups)
        self.res_conv = nn.Conv2d(dim, dim_out, 1) if dim != dim_out else nn.Identity()

    def forward(self, x, time_emb=None):
        scale_shift = None
        if exists(self.mlp) and exists(time_emb):
            time_emb = self.mlp(time_emb)
            time_emb = rearrange(time_emb, "b c -> b c 1 1")
            scale_shift = time_emb.chunk(2, dim=1)

        h = self.block1(x, scale_shift=scale_shift)
        h = self.block2(h)
        return h + self.res_conv(x)


class Attention(nn.Module):
    def __init__(self, dim, heads=4, dim_head=32):
        super().__init__()
        self.scale = dim_head**-0.5
        self.heads = heads
        hidden_dim = dim_head * heads
        self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias=False)
        self.to_out = nn.Conv2d(hidden_dim, dim, 1)

    def forward(self, x):
        b, c, h, w = x.shape
        qkv = self.to_qkv(x).chunk(3, dim=1)
        q, k, v = map(
            lambda t: rearrange(t, "b (h c) x y -> b h c (x y)", h=self.heads), qkv
        )
        q = q * self.scale

        sim = einsum("b h d i, b h d j -> b h i j", q, k)
        sim = sim - sim.amax(dim=-1, keepdim=True).detach()
        attn = sim.softmax(dim=-1)

        out = einsum("b h i j, b h d j -> b h i d", attn, v)
        out = rearrange(out, "b h (x y) d -> b (h d) x y", x=h, y=w)
        return self.to_out(out)


class LinearAttention(nn.Module):
    def __init__(self, dim, heads=4, dim_head=32):
        super().__init__()
        self.scale = dim_head**-0.5
        self.heads = heads
        hidden_dim = dim_head * heads
        self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias=False)

        self.to_out = nn.Sequential(nn.Conv2d(hidden_dim, dim, 1), nn.GroupNorm(1, dim))

    def forward(self, x):
        b, c, h, w = x.shape
        qkv = self.to_qkv(x).chunk(3, dim=1)
        q, k, v = map(
            lambda t: rearrange(t, "b (h c) x y -> b h c (x y)", h=self.heads), qkv
        )

        q = q.softmax(dim=-2)
        k = k.softmax(dim=-1)

        q = q * self.scale
        context = torch.einsum("b h d n, b h e n -> b h d e", k, v)

        out = torch.einsum("b h d e, b h d n -> b h e n", context, q)
        out = rearrange(out, "b h c (x y) -> b (h c) x y", h=self.heads, x=h, y=w)
        return self.to_out(out)


class PreNorm(nn.Module):
    def __init__(self, dim, fn):
        super().__init__()
        self.fn = fn
        self.norm = nn.GroupNorm(1, dim)

    def forward(self, x):
        x = self.norm(x)
        return self.fn(x)


# ------------------------------------------------------------------------
# Unet Model with Time Embeddings
# ------------------------------------------------------------------------

class Unet(nn.Module):
    def __init__(
        self,
        dim,
        init_dim=None,
        out_dim=None,
        dim_mults=(1, 2, 4, 8),
        channels=3,
        self_condition=False,
        resnet_block_groups=4,
        att_res=32,
        att_heads=4,
    ):
        super().__init__()

        # determine dimensions
        self.channels = channels
        self.self_condition = self_condition
        # input_channels = channels * (2 if self_condition else 1)
        input_channels = channels if not self_condition else channels + 1

        init_dim = default(init_dim, dim)
        self.init_conv = nn.Conv2d(
            input_channels, init_dim, 1, padding=0
        )  # changed to 1 and 0 from 7,3

        dims = [init_dim, *map(lambda m: dim * m, dim_mults)]
        in_out = list(zip(dims[:-1], dims[1:]))

        block_klass = partial(ResnetBlock, groups=resnet_block_groups)

        # time embeddings
        time_dim = dim * 4

        self.time_mlp = nn.Sequential(
            SinusoidalPositionEmbeddings(dim),
            nn.Linear(dim, time_dim),
            nn.GELU(),
            nn.Linear(time_dim, time_dim),
        )

        # layers
        self.downs = nn.ModuleList([])
        self.ups = nn.ModuleList([])
        num_resolutions = len(in_out)

        for ind, (dim_in, dim_out) in enumerate(in_out):
            is_last = ind >= (num_resolutions - 1)

            self.downs.append(
                nn.ModuleList(
                    [
                        block_klass(dim_in, dim_in, time_emb_dim=time_dim),
                        block_klass(dim_in, dim_in, time_emb_dim=time_dim),
                        Residual(
                            PreNorm(dim_in, LinearAttention(dim_in, att_heads, att_res))
                        ),
                        Downsample(dim_in, dim_out)
                        if not is_last
                        else nn.Conv2d(dim_in, dim_out, 3, padding=1),
                    ]
                )
            )

        mid_dim = dims[-1]
        self.mid_block1 = block_klass(mid_dim, mid_dim, time_emb_dim=time_dim)
        self.mid_attn = Residual(
            PreNorm(mid_dim, Attention(mid_dim, att_heads, att_res))
        )
        self.mid_block2 = block_klass(mid_dim, mid_dim, time_emb_dim=time_dim)

        for ind, (dim_in, dim_out) in enumerate(reversed(in_out)):
            is_last = ind == (len(in_out) - 1)

            self.ups.append(
                nn.ModuleList(
                    [
                        block_klass(dim_out + dim_in, dim_out, time_emb_dim=time_dim),
                        block_klass(dim_out + dim_in, dim_out, time_emb_dim=time_dim),
                        Residual(
                            PreNorm(
                                dim_out, LinearAttention(dim_out, att_heads, att_res)
                            )
                        ),
                        Upsample(dim_out, dim_in)
                        if not is_last
                        else nn.Conv2d(dim_out, dim_in, 3, padding=1),
                    ]
                )
            )

        self.out_dim = default(out_dim, channels)

        self.final_res_block = block_klass(dim * 2, dim, time_emb_dim=time_dim)
        self.final_conv = nn.Conv2d(dim, self.out_dim, 1)

    def forward(self, x, time=None, x_self_cond=None, checkpointing=True):
        if self.self_condition:
            x_self_cond = default(x_self_cond, lambda: torch.zeros_like(x))
            x = torch.cat((x_self_cond, x), dim=1)

        x = self.init_conv(x)
        r = x.clone()

        # Only compute time embedding if time is provided
        if time is not None:
            t = self.time_mlp(time)
        else:
            t = None

        h = []

        # If checkpointing is enabled, run the model in a memory efficient way
        if checkpointing:
            for block1, block2, attn, downsample in self.downs:
                x = checkpoint(block1, x, t, use_reentrant=False)
                h.append(x)

                x = checkpoint(block2, x, t, use_reentrant=False)
                x = checkpoint(attn, x, use_reentrant=False)
                h.append(x)

                x = checkpoint(downsample, x, use_reentrant=False)

            x = checkpoint(self.mid_block1, x, t, use_reentrant=False)
            x = checkpoint(self.mid_attn, x, use_reentrant=False)
            x = checkpoint(self.mid_block2, x, t, use_reentrant=False)

            for block1, block2, attn, upsample in self.ups:
                x = torch.cat((x, h.pop()), dim=1)
                x = checkpoint(block1, x, t, use_reentrant=False)

                x = torch.cat((x, h.pop()), dim=1)
                x = checkpoint(block2, x, t, use_reentrant=False)
                x = checkpoint(attn, x, use_reentrant=False)

                x = checkpoint(upsample, x, use_reentrant=False)

        # If checkpointing is not enabled, run the model normally
        else:
            for block1, block2, attn, downsample in self.downs:
                x = block1(x, t)
                h.append(x)

                x = block2(x, t)
                x = attn(x)
                h.append(x)

                x = downsample(x)

            x = self.mid_block1(x, t)
            x = self.mid_attn(x)
            x = self.mid_block2(x, t)

            for block1, block2, attn, upsample in self.ups:
                x = torch.cat((x, h.pop()), dim=1)
                x = block1(x, t)

                x = torch.cat((x, h.pop()), dim=1)
                x = block2(x, t)
                x = attn(x)

                x = upsample(x)

        x = torch.cat((x, r), dim=1)

        x = self.final_res_block(x, t)
        return self.final_conv(x)

# ------------------------------------------------------------------------
# Noise Schedule
# ------------------------------------------------------------------------

def cosine_beta_schedule(timesteps, s=0.008):
    """
    cosine schedule as proposed in https://arxiv.org/abs/2102.09672
    """
    steps = timesteps + 1
    x = torch.linspace(0, timesteps, steps)
    alphas_cumprod = torch.cos(((x / timesteps) + s) / (1 + s) * torch.pi * 0.5) ** 2
    alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
    betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
    return torch.clamp(betas, 0.0001, 0.9999)

def linear_beta_schedule(timesteps, beta_start=0.0001, beta_end=0.02):
    return torch.linspace(beta_start, beta_end, timesteps)


def quadratic_beta_schedule(timesteps, beta_start=0.0001, beta_end=0.02):
    return torch.linspace(beta_start**0.5, beta_end**0.5, timesteps) ** 2


def sigmoid_beta_schedule(timesteps, beta_start=0.0001, beta_end=0.02):
    betas = torch.linspace(-6, 6, timesteps)
    return torch.sigmoid(betas) * (beta_end - beta_start) + beta_start

# ------------------------------------------------------------------------
# EMA (Exponential Moving Average)
# ------------------------------------------------------------------------

class EMA:
    def __init__(self, beta):
        super().__init__()
        self.beta = beta
        self.step = 0

    def update_model_average(self, ma_model, current_model):
        for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()):
            old_weight, up_weight = ma_params.data, current_params.data
            ma_params.data = self.update_average(old_weight, up_weight)

    def update_average(self, old, new):
        if old is None:
            return new
        return old * self.beta + (1 - self.beta) * new

    def step_ema(self, ema_model, model, step_start_ema=2000):
        if self.step < step_start_ema:
            self.reset_parameters(ema_model, model)
            self.step += 1
            return
        self.update_model_average(ema_model, model)
        self.step += 1

    def reset_parameters(self, ema_model, model):
        ema_model.load_state_dict(model.state_dict())
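
A quick shape check for the `Unet` above, with deliberately small illustrative hyperparameters (the repo's DDPM wrapper passes `dim=image_size`; `dim=64` here keeps it fast). Self-conditioning adds one input channel internally, and the output matches the input resolution:

```python
import torch

# Assumes this runs where Unet is importable, e.g.:
# from model.nn_blocks import Unet

net = Unet(dim=64, channels=1, dim_mults=(1, 2, 4), self_condition=True)

x = torch.randn(2, 1, 64, 64)          # grayscale batch
t = torch.randint(0, 500, (2,))        # random diffusion timesteps
out = net(x, t, checkpointing=False)   # checkpointing off for a plain forward

print(out.shape)  # torch.Size([2, 1, 64, 64]) -- predicted noise, same shape as x
```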
DiffusionXray-FewShot-LandmarkDetection/ddpm_pretraining/model/training_functions.py ADDED
@@ -0,0 +1,327 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+
3
+ # ------------------------------------------------------------------------
4
+ # Libraries
5
+ # ------------------------------------------------------------------------
6
+
7
+ # General libraries
8
+ import numpy as np
9
+ import logging
10
+ from typing import Dict, Any
11
+ import time
12
+ import os
13
+
14
+ # Deep learning libraries
15
+ #import tensorflow as tf
16
+ import torch
17
+ import torch.nn as nn
18
+
19
+ from tqdm import tqdm
20
+ from torchmetrics import MeanSquaredError
21
+ from torchmetrics.image import StructuralSimilarityIndexMeasure
22
+ from torchmetrics.image.fid import FrechetInceptionDistance
23
+
24
+ # Custom libraries
25
+ from utils import *
26
+ from model.nn_blocks import *
27
+
28
+ from model.ddpm_model import DDPM
29
+
30
+
31
+
32
+ def initialize_ddpm(config: Dict[str, Any], phase: str, device: torch.device) -> DDPM:
33
+
34
+ if phase == "train":
35
+ ddpm = DDPM(
36
+ image_size=config["dataset"]["image_size"],
37
+ channels=config["dataset"]["image_channels"],
38
+ device=device,
39
+ lr=config["model"]["lr"],
40
+ optimizer=config["model"]["optimizer"],
41
+ timesteps=config["model"]["beta_schedule"]["train"]["n_timestep"],
42
+ beta_schedule=config["model"]["beta_schedule"]["train"]["schedule"],
43
+ beta_start=config["model"]["beta_schedule"]["train"]["linear_start"],
44
+ beta_end=config["model"]["beta_schedule"]["train"]["linear_end"],
45
+ unet_self_condition=config["model"]["unet"]["self_condition"],
46
+ unet_channels=config["model"]["unet"]["channel_mults"],
47
+ unet_res_blocks=config["model"]["unet"]["res_blocks"],
48
+ unet_att_heads=config["model"]["unet"]["num_head_channels"],
49
+ unet_att_res=config["model"]["unet"]["attn_res"],
50
+ use_ema=config["model"]["use_ema"],
51
+ )
52
+
53
+ elif phase == "test":
54
+ ddpm = DDPM(
55
+ image_size=config["dataset"]["image_size"],
56
+ channels=config["dataset"]["image_channels"],
57
+ device=device,
58
+ lr=config["model"]["lr"],
59
+ optimizer=config["model"]["optimizer"],
60
+ timesteps=config["model"]["beta_schedule"]["test"]["n_timestep"],
61
+ beta_schedule=config["model"]["beta_schedule"]["test"]["schedule"],
62
+ beta_start=config["model"]["beta_schedule"]["test"]["linear_start"],
63
+ beta_end=config["model"]["beta_schedule"]["test"]["linear_end"],
64
+ unet_self_condition=config["model"]["unet"]["self_condition"],
65
+ unet_channels=config["model"]["unet"]["channel_mults"],
66
+ unet_res_blocks=config["model"]["unet"]["res_blocks"],
67
+ unet_att_heads=config["model"]["unet"]["num_head_channels"],
68
+ unet_att_res=config["model"]["unet"]["attn_res"],
69
+ )
70
+ else:
71
+ raise ValueError(f"Phase {phase} is not valid. Must be either 'train' or 'test'")
72
+
73
+ return ddpm
74
+
75
+ def save_model(model, optimizer, epoch, n_iter, loss, save_path, name="last_model"):
76
+ torch.save({
77
+ "epoch": epoch,
78
+ "n_iter": n_iter,
79
+ "model_state_dict": model.state_dict(),
80
+ "optimizer_state_dict": optimizer.state_dict(),
81
+ "loss": loss,
82
+ }, f"{save_path}/{name}.pt")
83
+
84
+ def save_ema_model(ema_model, epoch, n_iter, save_path, name="last_ema_model"):
85
+ torch.save({
86
+ "epoch": epoch,
87
+ "n_iter": n_iter,
88
+ "model_state_dict": ema_model.state_dict(),
89
+ }, f"{save_path}/{name}.pt")
90
+
91
+
92
+ def load_model(save_model_path, device, ddpm):
93
+ assert os.path.exists(f"{save_model_path}"), f"Model {save_model_path} does not exist"
94
+ checkpoint = torch.load(f"{save_model_path}", map_location=device)
95
+ ddpm.model.load_state_dict(checkpoint["model_state_dict"])
96
+ ddpm.optimizer.load_state_dict(checkpoint["optimizer_state_dict"]) if "optimizer_state_dict" in checkpoint else None
97
+ epoch = checkpoint.get('epoch', 'undefined')
98
+ n_iter = checkpoint.get('n_iter', 'undefined')
99
+ loss = checkpoint.get('loss', np.inf)
100
+ del checkpoint
101
+ print(f"Model loaded for epoch: {epoch} and iteration: {n_iter} with loss: {loss}")
102
+ return epoch, n_iter, loss
103
+
104
+ def load_ema_model(save_model_path, device, ddpm):
105
+ assert os.path.exists(f"{save_model_path}"), f"Model {save_model_path} does not exist"
106
+ ema_checkpoint = torch.load(f"{save_model_path}", map_location=device)
107
+ ddpm.ema_model.load_state_dict(ema_checkpoint["model_state_dict"])
108
+ ema_epoch = ema_checkpoint.get('epoch', 0)
109
+ ema_n_iter = ema_checkpoint.get('n_iter', 0)
110
+ del ema_checkpoint
111
+ print(f"EMA Model loaded for epoch: {ema_epoch} and iteration: {ema_n_iter}")
112
+ return ema_epoch, ema_n_iter
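These helpers define the checkpoint layout used throughout the pretraining code. A minimal, self-contained sketch of the same round trip, using a stand-in module and optimizer (the file name and values here are illustrative, not repo defaults):

```python
# Round-trip the checkpoint dictionary written by save_model / read by load_model
import torch
from torch import nn

model = nn.Linear(4, 2)                                    # stand-in for ddpm.model
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)

torch.save({
    "epoch": 3,
    "n_iter": 1200,
    "model_state_dict": model.state_dict(),
    "optimizer_state_dict": optimizer.state_dict(),
    "loss": 0.042,
}, "last_model.pt")

checkpoint = torch.load("last_model.pt", map_location="cpu")
model.load_state_dict(checkpoint["model_state_dict"])
optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
print(checkpoint.get("epoch"), checkpoint.get("n_iter"), checkpoint.get("loss"))  # 3 1200 0.042
```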
113
+
114
+
115
+ # ------------------------------------------------------------------------
116
+ # TRAINING DIFFUSION MODEL
117
+ # ------------------------------------------------------------------------
118
+
119
+
120
+ def train_diffusion_model(config, train_dataloader, save_model_path, root_path, device, continue_training=False):
121
+ # config params
122
+ image_size = config["dataset"]["image_size"]
123
+ channels = config["dataset"]["image_channels"]
124
+ batch_size = config["dataset"]["batch_size"]
125
+ grad_accumulation = config["dataset"]["grad_accumulation"]
126
+ iterations = config["model"]["iterations"]
127
+ # Compute the number of epochs from the target iteration count, the batches per epoch, and gradient accumulation
128
+ # Total epochs = iterations / iterations per epoch, where iterations per epoch = batches per epoch / grad_accumulation
129
+ epochs = int(iterations / (len(train_dataloader) / grad_accumulation)) + 1
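A quick worked example of this formula (the numbers are illustrative, not taken from the configs):

```python
iterations = 100_000       # target optimizer steps
batches_per_epoch = 400    # len(train_dataloader)
grad_accumulation = 8      # batches per optimizer step
steps_per_epoch = batches_per_epoch / grad_accumulation  # 50.0 optimizer steps per epoch
epochs = int(iterations / steps_per_epoch) + 1            # 2001
print(epochs)
```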
130
+
131
+ loss_type = config["model"]["loss_type"]
132
+ timesteps=config["model"]["beta_schedule"]["train"]["n_timestep"]
133
+ freq_metrics = config["model"]["freq_metrics"]
134
+ freq_checkpoint = config["model"]["freq_checkpoint"]
135
+ use_ema = config["model"]["use_ema"]
136
+
137
+ # Instantiate the diffusion model
138
+ ddpm = initialize_ddpm(config, phase="train", device=device)
139
+
140
+ # Print model count trainable parameters
141
+ table, total_params = count_parameters(ddpm.model)
142
+ logging.info(f"Total Trainable Params: {total_params}")
143
+
144
+ # If continue_training is True, load the model
145
+ if continue_training:
146
+ model_path = os.path.join(save_model_path, "last_model.pt")
147
+ start_epoch, n_iter, best_loss = load_model(model_path, device, ddpm)
148
+
149
+ if use_ema:
150
+ ema_model_path = os.path.join(save_model_path, "last_ema_model.pt")
151
+ ema_epoch, ema_n_iter = load_ema_model(ema_model_path, device, ddpm)
152
+
153
+ else:
154
+ start_epoch, n_iter = 0, 0
155
+ best_loss = np.inf
156
+
157
+ # Instantiate metrics
158
+ ssim = StructuralSimilarityIndexMeasure(data_range=None, reduction="elementwise_mean")
159
+ mse = MeanSquaredError()
160
+ fid = FrechetInceptionDistance(normalize=True)
161
+
162
+ # Start training
163
+ start_time = time.time()
164
+
165
+ try:
166
+ # Train diffusion model
167
+ for epoch in tqdm(range(start_epoch, epochs), initial=start_epoch, total=epochs, desc="Epoch"):
168
+ epoch_loss = 0.0
169
+ torch.cuda.empty_cache()
170
+
171
+ for batch_idx, data in enumerate(tqdm(train_dataloader, desc="Batch", leave=False)):
172
+
173
+ # Load data
174
+ x = data['image'].to(device)
175
+ x_names = data['name']
176
+ batch_size = x.shape[0]
177
+
178
+ # Save noising process image
179
+ if batch_idx == 0 and epoch == 0:
180
+ ddpm.save_noising_process_image(x, f'{root_path}/noising_process.png')
181
+
182
+ # Forward pass
183
+ t = ddpm.sample_timesteps(batch_size)
184
+ loss = ddpm.p_losses(x=x, t=t, loss_type=loss_type)
185
+
186
+ # Backward pass: normalize the loss before backward so the accumulated gradients average over the batches
187
+ loss = loss / grad_accumulation
188
+ loss.backward()
189
+ epoch_loss += loss.item()
190
+
191
+ # Update weights
192
+ if ((batch_idx + 1) % grad_accumulation == 0) or (batch_idx + 1 == len(train_dataloader)):
193
+
194
+ # Clip to solve the gradient exploding problem
195
+ nn.utils.clip_grad_value_(ddpm.model.parameters(), clip_value=1.0)
196
+
197
+ ddpm.optimizer.step()
198
+ ddpm.optimizer.zero_grad()
199
+
200
+ # Update EMA parameters after each training step
201
+ if use_ema: ddpm.update_ema()
202
+
203
+ # Update number of steps
204
+ n_iter += 1
205
+
206
+ # Save model checkpoint after each training step
207
+ save_model(ddpm.model, ddpm.optimizer, epoch, n_iter, loss, save_model_path, name="last_model")
208
+ if use_ema: save_ema_model(ddpm.ema_model, epoch, n_iter, save_model_path, name="last_ema_model")
209
+
210
+ # Compute Metrics
211
+ if n_iter % freq_metrics == 0 and batch_idx % grad_accumulation == 0 and n_iter != 0:
212
+ # Set model to eval mode
213
+ ddpm.model.eval()
214
+
215
+ # Generate images conditioned on the input images x
216
+ x_hat = ddpm.sample(model=ddpm.model, x_cond=x, batch_size=batch_size, timesteps=timesteps)
217
+ if use_ema: ema_x_hat = ddpm.sample(model=ddpm.ema_model, x_cond=x, batch_size=batch_size, timesteps=timesteps)
218
+
219
+ # Detach tensors from GPU
220
+ x = x.detach().cpu()
221
+ x_hat = x_hat.detach().cpu()
222
+
223
+ # Check pixel range of generated images
224
+ x_hat_min, x_hat_max = check_pixels_range_of_image(x_hat)
225
+
226
+ # Compute metrics
227
+ ssim_metric = ssim(x_hat, x)
228
+ mse_metric = mse(x_hat, x)
229
+
230
+ # Compute FID
231
+ real_images = x if channels == 3 else x.repeat(1, 3, 1, 1)
232
+ fake_images = x_hat if channels == 3 else x_hat.repeat(1, 3, 1, 1)
233
+
234
+ fid.update(real_images, real=True)
235
+ fid.update(fake_images.clamp(0, 1), real=False)
236
+ fid_score = fid.compute()
237
+
238
+ # Compute difference images
239
+ diff = compute_diff(real_images, fake_images)
240
+
241
+ # Save batch original images in the first row, generated images in the second row, and diff images in the third row
242
+ train_imgs_path = generate_path(f"{root_path}/images/train")
243
+ image_titles = [f"{x_names[i]}" for i in range(batch_size)]
244
+ save_images([real_images, fake_images, diff], f"{train_imgs_path}/train_epoch{epoch}_iteration{n_iter}_batch{batch_idx}.jpg", f"Epoch {epoch} - Iteration {n_iter} - Batch {batch_idx} - Timesteps {timesteps}", image_titles)
245
+
246
+ # Log metrics
247
+ logging.info(f"\nEpoch/Iteration {epoch}/{n_iter} \t Batch {batch_idx} \t Loss: {loss.item():.6f}")
248
+ logging.info(f"\t\t SSIM: {ssim_metric.item():.4f} \t MSE: {mse_metric.item():.6f} \t FID: {fid_score:.2f} \t Pixel range: [{x_hat_min:.2f}, {x_hat_max:.2f}]")
249
+
250
+
251
+ # Send message
252
+ message = (
253
+ f"<b>Epoch/Iteration {epoch}/{n_iter}</b> --> [{x_hat_min:.2f}, {x_hat_max:.2f}] \n"
254
+ f" • <b>Loss:</b> {loss.item():.4f} \n"
255
+ f" • <b>SSIM:</b> {ssim_metric.item():.4f} \n"
256
+ f" • <b>MSE:</b> {mse_metric.item():.4f} \n"
257
+ f" • <b>FID:</b> {fid_score:.4f}"
258
+ )
259
+
260
+ # Compute EMA metrics
261
+ if use_ema:
262
+
263
+ ema_x_hat = ema_x_hat.detach().cpu()
264
+
265
+ ema_ssim_metric = ssim(ema_x_hat, x)
266
+ ema_mse_metric = mse(ema_x_hat, x)
267
+
268
+ ema_x_hat_min, ema_x_hat_max = check_pixels_range_of_image(ema_x_hat)
269
+
270
+ ema_fake_images = ema_x_hat if channels == 3 else ema_x_hat.repeat(1, 3, 1, 1)
271
+
272
+ fid.update(ema_fake_images.clamp(0, 1), real=False)
273
+ ema_fid_score = fid.compute()
274
+
275
+ ema_diff = compute_diff(real_images, ema_fake_images)
276
+
277
+
278
+ # Set image titles for each image
279
+ ema_image_titles = [f"{x_names[i]}" for i in range(batch_size)]
280
+ save_images([real_images, ema_fake_images, ema_diff], f"{train_imgs_path}/train_epoch{epoch}_iteration{n_iter}_batch{batch_idx}_ema.jpg", f"Epoch {epoch} - Iteration {n_iter} - Batch {batch_idx} - Timesteps {timesteps}", ema_image_titles)
281
+
282
+ logging.info(f"\t\t SSIM: {ema_ssim_metric.item():.4f} \t MSE: {ema_mse_metric.item():.6f} \t FID: {ema_fid_score:.2f}\t Pixel range: [{ema_x_hat_min:.2f}, {ema_x_hat_max:.2f}]")
283
+
284
+
285
+ message += (
286
+ f"\n\n<b>EMA Epoch/Iteration {epoch}/{n_iter}</b> --> [{ema_x_hat_min:.2f}, {ema_x_hat_max:.2f}] \n"
287
+ f" • <b>Loss:</b> {loss.item():.4f} \n"
288
+ f" • <b>SSIM:</b> {ema_ssim_metric.item():.4f} \n"
289
+ f" • <b>MSE:</b> {ema_mse_metric.item():.4f} \n"
290
+ f" • <b>FID:</b> {ema_fid_score:.4f}"
291
+ )
292
+
293
+
294
+ # Delete tensors from GPU
295
+ del x, x_hat, real_images, fake_images, diff
+ if use_ema: del ema_x_hat, ema_fake_images, ema_diff  # EMA tensors only exist when use_ema is True
296
+
297
+ # Log and print message
298
+ print(message)
299
+ logging.info(message)
300
+
301
+ # Set model back to train mode
302
+ ddpm.model.train()
303
+
304
+ # Save model checkpoint
305
+ if n_iter % freq_checkpoint == 0 and n_iter != 0 and batch_idx % grad_accumulation == 0:
306
+ print(f"Saving model checkpoint at epoch {epoch} and iteration {n_iter} with loss: {loss.item():.4f}")
307
+ save_model(ddpm.model, ddpm.optimizer, epoch, n_iter, loss, save_model_path, name=f"model_epoch{epoch}_step{n_iter}")
308
+ if use_ema: save_ema_model(ddpm.ema_model, epoch, n_iter, save_model_path, name=f"ema_model_epoch{epoch}_step{n_iter}")
309
+
310
+ if n_iter >= iterations:
311
+ print(f"Reached {iterations} iterations. Exiting training...")
312
+ exit()  # raises SystemExit, which the handler below catches so the final state is saved before returning
313
+
314
+ epoch_loss /= len(train_dataloader)
315
+
316
+ except (KeyboardInterrupt, SystemExit, Exception) as e:
317
+ if isinstance(e, Exception):
318
+ print(f"Exception: {e}")
319
+ print("\nTraining interrupted. Saving final state...")
320
+ save_model(ddpm.model, ddpm.optimizer, epoch, n_iter, loss, save_model_path, name="last_model")
321
+ if use_ema: save_ema_model(ddpm.ema_model, epoch, n_iter, save_model_path, name="last_ema_model")
322
+
323
+ finally:
324
+ print(f"Training completed in {time.strftime('%H:%M:%S', time.gmtime(time.time() - start_time))}")
325
+ logging.info(f"Training completed in {time.strftime('%H:%M:%S', time.gmtime(time.time() - start_time))}")
326
+ del ddpm
327
+ torch.cuda.empty_cache()
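The training loop above combines loss normalization, gradient clipping, and one optimizer step every `grad_accumulation` batches. A stripped-down sketch of that pattern with a toy model (all names here are illustrative):

```python
# Gradient accumulation: average gradients over N batches before each optimizer step
import torch
from torch import nn

model = nn.Linear(8, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
grad_accumulation = 4
batches = [(torch.randn(2, 8), torch.randn(2, 1)) for _ in range(8)]

for batch_idx, (x, y) in enumerate(batches):
    loss = nn.functional.mse_loss(model(x), y) / grad_accumulation  # normalize before backward
    loss.backward()                                                  # gradients accumulate in .grad
    if (batch_idx + 1) % grad_accumulation == 0 or batch_idx + 1 == len(batches):
        nn.utils.clip_grad_value_(model.parameters(), clip_value=1.0)
        optimizer.step()
        optimizer.zero_grad()
```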
DiffusionXray-FewShot-LandmarkDetection/ddpm_pretraining/utils.py ADDED
@@ -0,0 +1,180 @@
1
+
2
+ # ------------------------------------------------------------------------
3
+ # Libraries
4
+ # ------------------------------------------------------------------------
5
+
6
+ # General libraries
7
+ import os
8
+ import cv2
9
+ from matplotlib import pyplot as plt
10
+ from prettytable import PrettyTable
11
+
12
+ # Deep learning libraries
13
+ import torch
14
+ import torchvision
15
+ from torch.utils.data import DataLoader
16
+ import albumentations as A
17
+
18
+ # Custom libraries
19
+ from ddpm_datasets import ChestDiffusionDataset, HandDiffusionDataset, CephaloDiffusionDataset, AarizDiffusionDataset
20
+
21
+ # ------------------------------------------------------------------------
22
+ # Logging and Utilities
23
+ # ------------------------------------------------------------------------
24
+
25
+ # Generate path if it does not exist
26
+ def generate_path(path):
27
+ if not os.path.exists(path):
28
+ os.makedirs(path)
29
+ return path
30
+
31
+ # Get the current GPU memory usage by tensors in megabytes for a given device
32
+ def gpu_memory_usage(device):
33
+ allocated = torch.cuda.memory_allocated(device)
34
+ reserved = torch.cuda.memory_reserved(device)
35
+ print(f'Allocated memory: {allocated / (1024 ** 2):.2f} MB')
36
+ print(f'Reserved memory: {reserved / (1024 ** 2):.2f} MB')
37
+
38
+ # Compute the number of trainable parameters in a model
39
+ def count_parameters(model):
40
+ table = PrettyTable(["Modules", "Parameters"])
41
+ total_params = 0
42
+ for name, parameter in model.named_parameters():
43
+ if not parameter.requires_grad:
44
+ continue
45
+ params = parameter.numel()
46
+ table.add_row([name, params])
47
+ total_params += params
48
+ #print(table)
49
+ print(f"Total Trainable Params: {total_params}")
50
+ return table, total_params
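Example usage on a toy module; the counts can be verified by hand:

```python
# count_parameters on a tiny model: 8*1*3*3 + 8 = 80 params and 1*8*3*3 + 1 = 73 params
from torch import nn
tiny = nn.Sequential(nn.Conv2d(1, 8, 3), nn.ReLU(), nn.Conv2d(8, 1, 3))
table, total = count_parameters(tiny)
assert total == 153
```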
51
+
52
+ # ------------------------------------------------------------------------
53
+ # Visualizations
54
+ # ------------------------------------------------------------------------
55
+
56
+ def plot_images(images):
57
+ plt.figure(figsize=(32, 32))
58
+ plt.imshow(torch.cat([
59
+ torch.cat([i for i in images.cpu()], dim=-1),
60
+ ], dim=-2).permute(1, 2, 0).cpu())
61
+ plt.show()
62
+
63
+
64
+ def save_images(images, path, title, image_names, **kwargs):
65
+ batch_size = images[0].shape[0]
66
+ fig, axs = plt.subplots(3, batch_size, figsize=(batch_size * 2, 6))
67
+
68
+ for i in range(batch_size):
69
+ axs[0, i].imshow(images[0][i].permute(1, 2, 0).to('cpu').numpy())
70
+ axs[0, i].set_title(f"{image_names[i]}")
71
+ axs[0, i].axis('off')
72
+
73
+ axs[1, i].imshow(images[1][i].permute(1, 2, 0).to('cpu').numpy())
74
+ axs[1, i].set_title("Reconstructed")
75
+ axs[1, i].axis('off')
76
+
77
+ axs[2, i].imshow(images[2][i].permute(1, 2, 0).to('cpu').numpy())
78
+ axs[2, i].set_title("Difference")
79
+ axs[2, i].axis('off')
80
+
81
+ fig.suptitle(f"{title}")
82
+ fig.tight_layout()
83
+ fig.savefig(path, bbox_inches='tight')
84
+ plt.close(fig)
85
+
86
+ def check_pixels_range_of_image(tensor):
87
+ # Ensure the input is a tensor
88
+ assert torch.is_tensor(tensor), "Input must be a tensor"
89
+
90
+ # Flatten the tensor to get all pixel values
91
+ pixel_values = tensor.view(-1)
92
+
93
+ # Compute min and max values
94
+ min_val = pixel_values.min().item()
95
+ max_val = pixel_values.max().item()
96
+
97
+ #print(f"The range of pixel values is: {min_val} to {max_val}")
98
+ return min_val, max_val
99
+
100
+
101
+ def compute_diff(x, x_hat):
102
+ # Ensure both tensors are on the same device
103
+ assert x.device == x_hat.device, "Tensors must be on the same device"
104
+
105
+ # Ensure both tensors have the same shape
106
+ assert x.shape == x_hat.shape, "Tensors must have the same shape"
107
+
108
+ x_min, x_max = check_pixels_range_of_image(x)
109
+ x_hat_min, x_hat_max = check_pixels_range_of_image(x_hat)
110
+
111
+ # Ensure both tensors have pixel values in the range [0, 1]
112
+ #assert x_min >= 0 and x_max <= 1, f"Pixel values of x must be in the range [0, 1]. Actual range: [{x_min}, {x_max}]"
113
+ #assert x_hat_min >= 0 and x_hat_max <= 1, f"Pixel values of x_hat must be in the range [0, 1]. Actual range: [{x_hat_min}, {x_hat_max}]"
114
+ #print(f"Pixel values of x are in the range [{x_min}, {x_max}]")
115
+ #print(f"Pixel values of x_hat are in the range [{x_hat_min}, {x_hat_max}]")
116
+ # Compute absolute difference
117
+ diff = torch.abs(x - x_hat)
118
+
119
+ # Normalize to the range [0, 1] and return the difference image
120
+ diff = (diff - diff.min()) / (diff.max() - diff.min() + 1e-8)  # epsilon avoids division by zero when x == x_hat everywhere
121
+
122
+ return diff
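A tiny worked example of the min-max rescaling performed by compute_diff:

```python
import torch
x = torch.tensor([[0.0, 0.5], [1.0, 0.2]])
x_hat = torch.tensor([[0.1, 0.5], [0.6, 0.2]])
d = compute_diff(x, x_hat)
# |x - x_hat| = [[0.1, 0.0], [0.4, 0.0]] -> rescaled to approx. [[0.25, 0.0], [1.0, 0.0]]
print(d)
```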
123
+
124
+
125
+ # ------------------------------------------------------------------------
126
+ # Data Loading and Preprocessing
127
+ # ------------------------------------------------------------------------
128
+
129
+ def get_transforms(image_size, phase='train'):
130
+ resize_image_size = int(image_size * 1.02)  # currently unused; only needed if the commented-out RandomCrop below is re-enabled
131
+ if phase == 'train':
132
+ return A.Compose([
133
+ A.ShiftScaleRotate(shift_limit=0.02, scale_limit=0, rotate_limit=2, border_mode=cv2.BORDER_REPLICATE, p=0.5),
134
+ #A.Perspective(scale=(0, 0.02), pad_mode=cv2.BORDER_REPLICATE, p=0.5),
135
+ A.Resize(image_size, image_size),
136
+ #A.RandomCrop(height=image_size, width=image_size),
137
+ #A.HorizontalFlip(p=1),
138
+ #A.RandomBrightnessContrast(brightness_limit=(-0.1, 0.1), contrast_limit=(-0.2, 0.2), p=0.5),
139
+ A.Normalize(normalization='min_max'),
140
+ A.pytorch.ToTensorV2()
141
+ ])
142
+
143
+ elif phase == 'test':
144
+ return A.Compose([
145
+ A.Resize(image_size, image_size),
146
+ A.Normalize(normalization='min_max'),
147
+ A.pytorch.transforms.ToTensorV2()
148
+ ])
149
+ else:
150
+ raise ValueError('phase must be either "train" or "test"')
151
+
152
+
153
+ def load_data(dataset_path, image_size, image_channels, batch_size, pin_memory=False, num_workers = os.cpu_count()):
154
+ dataset_name = os.path.basename(dataset_path).lower()
155
+
156
+ transforms_train = get_transforms(image_size, phase='train')
157
+ transforms_test = get_transforms(image_size, phase='test')
158
+
159
+ if dataset_name == 'chest':
160
+ train_dataset = ChestDiffusionDataset(dataset_path, channels=image_channels, transform=transforms_train, phase='train')
161
+ test_dataset = ChestDiffusionDataset(dataset_path, channels=image_channels, transform=transforms_test, phase='test')
162
+ elif dataset_name == 'hand':
163
+ train_dataset = HandDiffusionDataset(dataset_path, channels=image_channels, transform=transforms_train, phase='train')
164
+ test_dataset = HandDiffusionDataset(dataset_path, channels=image_channels, transform=transforms_test, phase='test')
165
+ elif dataset_name == 'cephalo':
166
+ train_dataset = CephaloDiffusionDataset(dataset_path, channels=image_channels, transform=transforms_train, phase='train')
167
+ test_dataset = CephaloDiffusionDataset(dataset_path, channels=image_channels, transform=transforms_test, phase='test')
168
+ elif dataset_name == 'aariz':
169
+ train_dataset = AarizDiffusionDataset(dataset_path, channels=image_channels, transform=transforms_train, phase='train')
170
+ test_dataset = AarizDiffusionDataset(dataset_path, channels=image_channels, transform=transforms_test, phase='test')
171
+ else:
172
+ raise ValueError('Dataset name must be either "chest" or "hand" or "cephalo" or "aariz"')
173
+
174
+ train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers, pin_memory=pin_memory, drop_last=True)
175
+ test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers, pin_memory=pin_memory, drop_last=False)
176
+
177
+ return train_dataloader, test_dataloader
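Illustrative usage, assuming the chest dataset has been downloaded to datasets/chest (the directory basename selects the dataset class):

```python
train_loader, test_loader = load_data(
    dataset_path="datasets/chest",
    image_size=256,
    image_channels=1,
    batch_size=4,
)
batch = next(iter(train_loader))
print(batch["image"].shape)  # expected: torch.Size([4, 1, 256, 256])
```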
178
+
179
+
180
+
DiffusionXray-FewShot-LandmarkDetection/downstream_task/config/config.json ADDED
@@ -0,0 +1,48 @@
1
+ {
2
+ "gpu": 0,
3
+ "experiment_path": "downstream_task/landmarks_experiments",
4
+ "model": {
5
+ "name": "ddpm",
6
+ "encoder": "",
7
+ "lr":1e-5,
8
+ "optimizer": "AdamW",
9
+ "scheduler": "ReduceLROnPlateau",
10
+ "loss_function": "CrossEntropyLoss",
11
+ "epochs": 200
12
+ },
13
+
14
+ "training_protocol":{
15
+ "apply": true,
16
+
17
+ "scratch": {
18
+ "apply": false,
19
+ "resume": false
20
+ },
21
+
22
+ "finetuning": {
23
+ "apply": true,
24
+ "resume": false,
25
+ "path": "",
26
+ "different_dataset": false
27
+ }
28
+ },
29
+
30
+ "inference_protocol": {
31
+ "apply": true,
32
+ "use_validation_set_for_inference": false
33
+ },
34
+
35
+ "dataset":{
36
+ "name": "chest",
37
+ "path": "datasets/",
38
+ "image_size": [256, 256],
39
+ "image_channels": 1,
40
+ "sigma": 5,
41
+ "batch_size": 2,
42
+ "grad_accumulation": 8,
43
+ "num_workers": null,
44
+ "pin_memory": true,
45
+ "training_samples": "all"
46
+ }
47
+ }
48
+
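main.py consumes this file with json.load; the keys map directly onto the training script's parameters:

```python
import json
config = json.load(open("downstream_task/config/config.json"))
SIZE = tuple(config["dataset"]["image_size"])                  # (256, 256)
LR = config["model"]["lr"]                                     # 1e-05
FINETUNE = config["training_protocol"]["finetuning"]["apply"]  # True
print(SIZE, LR, FINETUNE)
```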
DiffusionXray-FewShot-LandmarkDetection/downstream_task/imagenet_backbones_comparative_study.py ADDED
@@ -0,0 +1,187 @@
1
+ # ------------------------------------------------------------------------
2
+ # Libraries
3
+ # ------------------------------------------------------------------------
4
+
5
+ # General libraries
6
+ import os
7
+ import random
8
+ from datetime import datetime
9
+
10
+ # Deep learning libraries
11
+ import torch
12
+ from torch import nn
13
+ from torch.utils.data import DataLoader
14
+ from torch.optim.lr_scheduler import ReduceLROnPlateau
15
+
16
+ # Custom libraries
17
+ from utilities import *
18
+ from landmarks_datasets import *
19
+ from model.deep_learning import *
20
+ from model.models import *
21
+
22
+ # Set random seed
23
+ random.seed(42)
24
+ np.random.seed(42)
25
+ torch.manual_seed(42)
26
+ torch.cuda.manual_seed(42)
27
+
28
+
29
+ import ssl
30
+
31
+ ssl._DEFAULT_CIPHERS = 'HIGH:!DH:!aNULL'
32
+
33
+ def ignore_ssl_certificate_verification():
34
+ try:
35
+ # Python 3.4+
36
+ import ssl
37
+ ssl._create_default_https_context = ssl._create_unverified_context
38
+ except AttributeError:
39
+ # Python 2.x
40
+ import requests
41
+ from urllib3.exceptions import InsecureRequestWarning
42
+ requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
43
+
44
+ ignore_ssl_certificate_verification()
45
+
46
+
47
+ ## -----------------------------------------------------------------------------------------------------------------##
48
+ ## DATASETS ##
49
+ ## -----------------------------------------------------------------------------------------------------------------##
50
+
51
+ datasets_list = ["chest", "cephalo", "hand"]
52
+ backbone_list = ["vgg19", "densenet161", "resnext50_32x4d"] #"efficientnet-b5"]
53
+
54
+ EXPERIMENT_PATH = "downstream_task/landmarks_experiments/backbone_selection"
55
+
56
+ # Create folder for saving models
57
+ if not os.path.exists(EXPERIMENT_PATH):
58
+ os.makedirs(EXPERIMENT_PATH)
59
+
60
+ log_file = f"{EXPERIMENT_PATH}/experiments_results.txt"
61
+
62
+ NUM_EPOCHS = 200
63
+ K_FOLDS = 5
64
+ BATCH_SIZE = 2
65
+ GRAD_ACC = 8
66
+
67
+ LR = 1e-5
68
+ SIZE = (256, 256)
69
+ SIGMA = 5
70
+
71
+ PATIENCE = GRAD_ACC + 5
72
+ EARLY_STOPPING = PATIENCE * 2 + 1
73
+
74
+ NUM_CHANNELS = 1
75
+ ONLY_INFERENCE = False
76
+ PIN_MEMORY = True
77
+ NUM_WORKERS = 2
78
+ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
79
+
80
+
81
+ # ---------------------------------------------------------------- CHEST ---------
82
+ CHEST_DATASET_PATH = 'datasets/chest'
83
+ assert os.path.exists(CHEST_DATASET_PATH), f"Chest dataset path does not exist: {CHEST_DATASET_PATH}, current path: {os.getcwd()}"
84
+ CHEST_NUM_LANDMARKS = 6
85
+
86
+ CHEST_SIZE = SIZE
87
+ CHEST_SIGMA = SIGMA
88
+
89
+ chest_train_dataset = Chest(prefix=CHEST_DATASET_PATH, phase='train', size=SIZE, num_channels=NUM_CHANNELS, sigma=SIGMA)
90
+ chest_val_dataset = Chest(prefix=CHEST_DATASET_PATH, phase='validate', size=SIZE, num_channels=NUM_CHANNELS, sigma=SIGMA)
91
+ chest_test_dataset = Chest(prefix=CHEST_DATASET_PATH, phase='test', size=SIZE, num_channels=NUM_CHANNELS, sigma=SIGMA)
92
+
93
+ chest_train_val_dataset = torch.utils.data.ConcatDataset([chest_train_dataset, chest_val_dataset])
94
+ print(f"CHEST: {len(chest_train_dataset)} | {len(chest_val_dataset)} | {len(chest_test_dataset)}")
95
+
96
+
97
+ # ---------------------------------------------------------------- CEPHALOMETRIC ---------
98
+ CEPHALOMETRIC_DATASET_PATH = 'datasets/cephalo'
99
+ assert os.path.exists(CEPHALOMETRIC_DATASET_PATH), f"Cephalometric dataset path does not exist: {CEPHALOMETRIC_DATASET_PATH}, current path: {os.getcwd()}"
100
+ CEPHALOMETRIC_NUM_LANDMARKS = 19
101
+
102
+ CEPHALOMETRIC_SIZE = SIZE
103
+ CEPHALOMETRIC_SIGMA = SIGMA
104
+
105
+ cephalo_train_dataset = Cephalo(prefix=CEPHALOMETRIC_DATASET_PATH, phase='train', size=SIZE, num_channels=NUM_CHANNELS, sigma=SIGMA)
106
+ cephalo_val_dataset = Cephalo(prefix=CEPHALOMETRIC_DATASET_PATH, phase='validate', size=SIZE, num_channels=NUM_CHANNELS, sigma=SIGMA)
107
+ cephalo_test_dataset = Cephalo(prefix=CEPHALOMETRIC_DATASET_PATH, phase='test', size=SIZE, num_channels=NUM_CHANNELS, sigma=SIGMA)
108
+
109
+ cephalo_train_val_dataset = torch.utils.data.ConcatDataset([cephalo_train_dataset, cephalo_val_dataset])
110
+ print(f"CEPHALO: {len(cephalo_train_dataset)} | {len(cephalo_val_dataset)} | {len(cephalo_test_dataset)}")
111
+
112
+
113
+ # ---------------------------------------------------------------- HAND ---------
114
+ HAND_DATASET_PATH = 'datasets/hand'
115
+ assert os.path.exists(HAND_DATASET_PATH), f"Hand dataset path does not exist: {HAND_DATASET_PATH}, current path: {os.getcwd()}"
116
+ HAND_NUM_LANDMARKS = 37
117
+
118
+ HAND_SIZE = SIZE
119
+ HAND_SIGMA = SIGMA
120
+
121
+ hand_train_dataset = Hand(prefix=HAND_DATASET_PATH, phase='train', size=SIZE, num_channels=NUM_CHANNELS, sigma=SIGMA)
122
+ hand_val_dataset = Hand(prefix=HAND_DATASET_PATH, phase='validate', size=SIZE, num_channels=NUM_CHANNELS, sigma=SIGMA)
123
+ hand_test_dataset = Hand(prefix=HAND_DATASET_PATH, phase='test', size=SIZE, num_channels=NUM_CHANNELS, sigma=SIGMA)
124
+
125
+ hand_train_val_dataset = torch.utils.data.ConcatDataset([hand_train_dataset, hand_val_dataset])
126
+ print(f"HAND: {len(hand_train_dataset)} | {len(hand_val_dataset)} | {len(hand_test_dataset)}")
127
+
128
+ ## -----------------------------------------------------------------------------------------------------------------##
129
+ ## TRAINING ##
130
+ ## -----------------------------------------------------------------------------------------------------------------##
131
+
132
+
133
+
134
+ for i in datasets_list:
135
+ print(f"\n\n\n {datetime.now()} ---------------------- {i.upper()} -------------------------------------------")
136
+ print(f"SIZE: {SIZE} | BATCH: {BATCH_SIZE} | GRAD ACC: {GRAD_ACC} | SIGMA: {SIGMA} | LR: {LR} | CHANNELS: {NUM_CHANNELS}")
137
+
138
+ if i == "chest":
139
+ NUM_LANDMARKS = CHEST_NUM_LANDMARKS
140
+ dataset_name = i
141
+ training_dataset = chest_train_val_dataset
142
+
143
+ elif i == "hand":
144
+ NUM_LANDMARKS = HAND_NUM_LANDMARKS
145
+ dataset_name = i
146
+ training_dataset = hand_train_val_dataset
147
+
148
+ elif i == "cephalo":
149
+ NUM_LANDMARKS = CEPHALOMETRIC_NUM_LANDMARKS
150
+ dataset_name = i
151
+ training_dataset = cephalo_train_val_dataset
152
+
153
+ res_file = open(log_file, 'a')
154
+ print(f"\n\n ----------------------------------------- {dataset_name.upper()} DATASET ------------------------", file=res_file)
155
+ res_file.close()
156
+
157
+
158
+ # -------------------------------------------- SEGMENTATION MODELS -------------
159
+ useHEATMAPS = True
160
+ pretrained = "imagenet"
161
+
162
+ for backbone in backbone_list:
163
+
164
+ model = smpUnet(
165
+ encoder_name=backbone,
166
+ encoder_weights="imagenet",
167
+ in_channels=NUM_CHANNELS,
168
+ classes=NUM_LANDMARKS
169
+ ).to(device)
170
+
171
+ model_name = model.__class__.__name__
172
+
173
+ loss_fn = nn.CrossEntropyLoss()
174
+ optimizer = torch.optim.AdamW(params=model.parameters(), lr=LR)
175
+ scheduler = ReduceLROnPlateau(optimizer, patience=PATIENCE, factor=0.5)
176
+
177
+ res_file = open(log_file, 'a')
178
+ print(f"\n\n --------- Model: {model_name}_{backbone} | Dataset: {dataset_name} | Batch: {BATCH_SIZE} | Sigma: {SIGMA} | Size: {SIZE}", file=res_file)
179
+ res_file.close()
180
+
181
+ save_model_path = generate_save_model_path(EXPERIMENT_PATH, model_name, dataset_name, SIGMA, SIZE, pretrained, backbone)
182
+
183
+ k_fold_train_and_validate(model, device, training_dataset, optimizer, scheduler, loss_fn, NUM_EPOCHS, EARLY_STOPPING, BATCH_SIZE, GRAD_ACC,
184
+ NUM_LANDMARKS, SIGMA, save_model_path, log_file, K_FOLDS, onlyInference=ONLY_INFERENCE)
185
+
186
+ free_gpu_cache()
187
+ del model, loss_fn, optimizer, scheduler
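k_fold_train_and_validate is defined in model/deep_learning.py (not shown in this file). As an assumption about what such a routine does internally, a 5-fold split over the concatenated train+val dataset could look like this:

```python
# Sketch of a K-fold split over a ConcatDataset (assumed behavior, not the repo's code)
from sklearn.model_selection import KFold
from torch.utils.data import DataLoader, Subset

def iter_folds(dataset, k=5, batch_size=2, seed=42):
    kfold = KFold(n_splits=k, shuffle=True, random_state=seed)
    for fold, (train_ids, val_ids) in enumerate(kfold.split(range(len(dataset)))):
        train_loader = DataLoader(Subset(dataset, train_ids.tolist()), batch_size=batch_size, shuffle=True)
        val_loader = DataLoader(Subset(dataset, val_ids.tolist()), batch_size=batch_size, shuffle=False)
        yield fold, train_loader, val_loader
```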
DiffusionXray-FewShot-LandmarkDetection/downstream_task/landmarks_datasets.py ADDED
@@ -0,0 +1,393 @@
1
+ import numpy as np
2
+ import pandas as pd
3
+ from PIL import Image
4
+ import os
5
+
6
+ import torch
7
+ import utilities
8
+ from skimage.transform import resize
9
+
10
+ import albumentations as A
11
+ from albumentations.pytorch import ToTensorV2
12
+ import cv2
13
+ ## -----------------------------------------------------------------------------------------------------------------##
14
+ ## CHEST DATASET ##
15
+ ## -----------------------------------------------------------------------------------------------------------------##
16
+ """
17
+ LINK: https://www.kaggle.com/datasets/nikhilpandey360/chest-xray-masks-and-labels
18
+
19
+ X-ray images in this data set have been acquired from the tuberculosis control program of the Department of Health and Human Services of Montgomery County, MD, USA.
20
+ This set contains 138 posterior-anterior x-rays, of which 80 x-rays are normal and 58 x-rays are abnormal with manifestations of tuberculosis.
21
+ All images are de-identified and available in DICOM format. The set covers a wide range of abnormalities, including effusions and miliary patterns.
22
+ """
23
+ class Chest(torch.utils.data.Dataset):
24
+
25
+ def __init__(self, prefix, phase, size=(512, 512), num_channels=1, fuse_heatmap=False, sigma=8):
26
+ self.phase = phase
27
+ self.new_size = size
28
+ self.dataset_name = 'Chest'
29
+
30
+ self.transforms = self.get_transforms()
31
+ self.num_channels = num_channels
32
+ self.fuse_heatmap = fuse_heatmap
33
+ self.sigma = sigma
34
+ self.num_landmarks = 6
35
+ self.pth_Image = os.path.join(prefix, 'pngs')
36
+ self.pth_Label = os.path.join(prefix, 'labels')
37
+
38
+ # file index
39
+ files = [i[:-4] for i in sorted(os.listdir(self.pth_Image))]
40
+
41
+ exclude_list = ['CHNCXR_0059_0', 'CHNCXR_0178_0', 'CHNCXR_0228_0', 'CHNCXR_0267_0', 'CHNCXR_0295_0', 'CHNCXR_0310_0', 'CHNCXR_0285_0', 'CHNCXR_0276_0', 'CHNCXR_0303_0']
42
+ if exclude_list is not None:
43
+ st = set(exclude_list)
44
+ files = [f for f in files if f not in st]
45
+
46
+ n = len(files)
47
+ train_num = 195
48
+ val_num = 34
49
+ test_num = n - train_num - val_num
50
+ if self.phase == 'train':
51
+ self.indexes = files[:train_num]
52
+ elif self.phase == 'validate':
53
+ self.indexes = files[train_num:-test_num]
54
+ elif self.phase == 'test':
55
+ self.indexes = files[-test_num:]
56
+ elif self.phase == 'all':
57
+ self.indexes = files
58
+ else:
59
+ raise Exception("Unknown phase: {phase}".format(phase=phase))
60
+
61
+ def __getitem__(self, index):
62
+ name = self.indexes[index]
63
+ ret = {'name': name}
64
+
65
+ img, img_size = self.readImage(os.path.join(self.pth_Image, name + '.png'))
66
+ points = self.readLandmark(name)
67
+ heatmaps = utilities.points_to_heatmap(points, sigma=self.sigma, img_size=self.new_size, fuse=self.fuse_heatmap)
68
+
69
+ transformed = self.transforms(image=img, masks=heatmaps)
70
+
71
+ # img shape: CxHxW | heatmaps is a list of CxHxW: example: [CxHxW, CxHxW, CxHxW, CxHxW, CxHxW, CxHxW]
72
+ img, heatmaps = transformed['image'], transformed['masks']
73
+
74
+ # Image is a torch tensor [C, H, W]
75
+ ret['image'] = img
76
+ ret['landmarks'] = torch.FloatTensor(points)
77
+ # Convert heatmaps to torch tensor [C, H, W]. Stack to give new dimension and float32 type to avoid error in loss function
78
+ ret['heatmaps'] = torch.stack([hm.float() for hm in heatmaps])
79
+ ret['original_size'] = torch.FloatTensor(img_size)
80
+ ret['resized_size'] = torch.FloatTensor(self.new_size)
81
+
82
+ return ret
83
+
84
+ def __len__(self):
85
+ return len(self.indexes)
86
+
87
+ def readLandmark(self, name):
88
+ path = os.path.join(self.pth_Label, name + '.txt')
89
+ points = []
90
+ with open(path, 'r') as f:
91
+ n = int(f.readline())
92
+ for i in range(n):
93
+ ratios = [float(i) for i in f.readline().split()]
94
+ points.append(ratios)
95
+ return np.array(points)
96
+
97
+ def readImage(self, path):
98
+
99
+ if self.num_channels == 3:
100
+ img = Image.open(path).convert('RGB')
101
+ arr = np.array(img).astype(np.float32)
102
+
103
+ elif self.num_channels == 1:
104
+ img = Image.open(path).convert('L')
105
+ arr = np.array(img).astype(np.float32)
106
+ arr = np.expand_dims(arr, 2)
107
+ else:
108
+ raise ValueError('Channels must be either 1 or 3')
109
+
110
+ # Original size in (width, height)
111
+ origin_size = img.size
112
+ resized_image = resize(arr, (self.new_size[0], self.new_size[1], self.num_channels))
113
+
114
+ return resized_image, origin_size
115
+
116
+ def get_transforms(self):
117
+ if self.phase == 'train':
118
+ return A.Compose([
119
+ A.ShiftScaleRotate(shift_limit=0.02, scale_limit=0, rotate_limit=2, border_mode=cv2.BORDER_REPLICATE, p=0.5),
120
+ #A.Perspective(scale=(0, 0.02), pad_mode=cv2.BORDER_REPLICATE, p=0.5),
121
+ #A.RandomBrightnessContrast(brightness_limit=(-0.1, 0.1), contrast_limit=(-0.2, 0.2), p=0.5),
122
+ #A.Resize(self.new_size[0], self.new_size[1]),
123
+ A.Normalize(normalization='min_max'),
124
+ A.pytorch.ToTensorV2()
125
+ ])
126
+ elif self.phase == 'validate':
127
+ return A.Compose([
128
+ #A.Resize(self.new_size[0], self.new_size[1]),
129
+ A.Normalize(normalization='min_max'),
130
+ A.pytorch.ToTensorV2()
131
+ ])
132
+ elif self.phase == 'test':
133
+ return A.Compose([
134
+ #A.Resize(self.new_size[0], self.new_size[1]),
135
+ A.Normalize(normalization='min_max'),
136
+ A.pytorch.transforms.ToTensorV2()
137
+ ])
138
+ else:
139
+ raise ValueError('phase must be either "train" or "validate" or "test"')
140
+
141
+ ## -----------------------------------------------------------------------------------------------------------------##
142
+ ## HAND DATASET ##
143
+ ## -----------------------------------------------------------------------------------------------------------------##
144
+
145
+ """
146
+ LINK: https://ipilab.usc.edu/research/baaweb/
147
+ ASI: Asian; BLK: African American; CAU: Caucasian; HIS: Hispanic.
148
+ """
149
+
150
+ class Hand(torch.utils.data.Dataset):
151
+
152
+ def __init__(self, prefix, phase, size=(512, 368), num_channels=1, fuse_heatmap=False, sigma=5):
153
+
154
+ self.phase = phase
155
+ self.new_size = size
156
+ self.dataset_name = 'Hand'
157
+
158
+ self.transforms = self.get_transforms()
159
+ self.num_channels = num_channels
160
+ self.fuse_heatmap = fuse_heatmap
161
+ self.sigma = sigma
162
+ self.num_landmarks = 37
163
+
164
+ self.pth_Image = os.path.join(prefix, 'jpg')
165
+ self.labels = pd.read_csv(os.path.join(
166
+ prefix, 'labels/all.csv'), header=None, index_col=0)
167
+
168
+ # file index
169
+ index_set = set(self.labels.index) # Set of all the labels
170
+ files = [i[:-4] for i in sorted(os.listdir(self.pth_Image))] # -4 to cut ".jpg" # List of all the images
171
+ files = [i for i in files if int(i) in index_set] # Keep only the files that have a label
172
+
173
+ n = len(files)
174
+ train_num = 550
175
+ val_num = 59
176
+ test_num = n - train_num - val_num
177
+
178
+ if phase == 'train':
179
+ self.indexes = files[:train_num]
180
+ elif phase == 'validate':
181
+ self.indexes = files[train_num:-test_num]
182
+ elif phase == 'test':
183
+ self.indexes = files[-test_num:]
184
+ elif phase == 'all':
185
+ self.indexes = files
186
+ else:
187
+ raise Exception("Unknown phase: {phase}".format(phase=phase))
188
+
189
+ def __getitem__(self, index):
190
+ name = self.indexes[index]
191
+ ret = {'name': name}
192
+
193
+ img, img_size = self.readImage(
194
+ os.path.join(self.pth_Image, name + '.jpg'))
195
+
196
+ points = self.readLandmark(name, img_size)
197
+ heatmaps = utilities.points_to_heatmap(points, sigma=self.sigma, img_size=self.new_size, fuse=self.fuse_heatmap)
198
+
199
+ transformed = self.transforms(image=img, masks=heatmaps)
200
+ img, heatmaps = transformed['image'], transformed['masks']
201
+
202
+ ret['image'] = img
203
+ ret['landmarks'] = torch.FloatTensor(points)
204
+ ret['heatmaps'] = torch.stack([hm.float() for hm in heatmaps])
205
+ ret['original_size'] = torch.FloatTensor(img_size)
206
+ ret['resized_size'] = torch.FloatTensor(self.new_size)
207
+
208
+ return ret
209
+
210
+
211
+ def __len__(self):
212
+ return len(self.indexes)
213
+
214
+ def readLandmark(self, name, origin_size):
215
+ li = list(self.labels.loc[int(name), :])
216
+ points = []
217
+ for i in range(0, len(li), 2):
218
+ ratios = (li[i] / origin_size[0], li[i + 1] / origin_size[1])
219
+ points.append(ratios)
220
+ return np.array(points)
221
+
222
+ def readImage(self, path):
223
+
224
+ if self.num_channels == 3:
225
+ img = Image.open(path).convert('RGB')
226
+ arr = np.array(img).astype(np.float32)
227
+
228
+ elif self.num_channels == 1:
229
+ img = Image.open(path).convert('L')
230
+ arr = np.array(img).astype(np.float32)
231
+ arr = np.expand_dims(arr, 2)
232
+ else:
233
+ raise ValueError('Channels must be either 1 or 3')
234
+
235
+ # Original size in (width, height)
236
+ origin_size = img.size
237
+ resized_image = resize(arr, (self.new_size[0], self.new_size[1], self.num_channels))
238
+
239
+ return resized_image, origin_size
240
+
241
+ def get_transforms(self):
242
+ if self.phase == 'train':
243
+ return A.Compose([
244
+ A.ShiftScaleRotate(shift_limit=0.02, scale_limit=(-0.02, 0.02), rotate_limit=2, border_mode=cv2.BORDER_REPLICATE, p=0.5),
245
+ #A.Perspective(scale=(0, 0.02), pad_mode=cv2.BORDER_REPLICATE, p=0.5),
246
+ #A.RandomBrightnessContrast(brightness_limit=(-0.1, 0.1), contrast_limit=(-0.2, 0.2), p=0.5),
247
+ #A.Resize(self.new_size[0], self.new_size[1]),
248
+ A.Normalize(normalization='min_max'),
249
+ A.pytorch.ToTensorV2()
250
+ ])
251
+ elif self.phase == 'validate':
252
+ return A.Compose([
253
+ #A.Resize(self.new_size[0], self.new_size[1]),
254
+ A.Normalize(normalization='min_max'),
255
+ A.pytorch.ToTensorV2()
256
+ ])
257
+ elif self.phase == 'test':
258
+ return A.Compose([
259
+ #A.Resize(self.new_size[0], self.new_size[1]),
260
+ A.Normalize(normalization='min_max'),
261
+ A.pytorch.transforms.ToTensorV2()
262
+ ])
263
+ else:
264
+ raise ValueError('phase must be either "train" or "validate" or "test"')
265
+
266
+
267
+
268
+ ## -----------------------------------------------------------------------------------------------------------------##
269
+ ## CEPHALOMETRIC DATASET ##
270
+ ## -----------------------------------------------------------------------------------------------------------------##
271
+
272
+ """
273
+ LINK: https://www.kaggle.com/datasets/c34a0ef0cd3cfd5c5afbdb30f8541e887171f19f196b1ad63790ca5b28c0ec93
274
+ https://figshare.com/s/37ec464af8e81ae6ebbf?file=5466581
275
+ """
276
+
277
+
278
+ class Cephalo(torch.utils.data.Dataset):
279
+
280
+ def __init__(self, prefix, phase, size=(512, 416), num_channels=1, fuse_heatmap=False, sigma=5):
281
+ self.phase = phase
282
+ self.new_size = size
283
+ self.dataset_name = 'Cephalo'
284
+
285
+ self.transforms = self.get_transforms()
286
+ self.num_channels = num_channels
287
+ self.fuse_heatmap = fuse_heatmap
288
+ self.sigma = sigma
289
+
290
+ self.num_landmarks = 19
291
+
292
+
293
+ self.pth_Image = os.path.join(prefix, 'jpg')
294
+ self.pth_label_junior = os.path.join(prefix, '400_junior')
295
+ self.pth_label_senior = os.path.join(prefix, '400_senior')
296
+
297
+ # file index
298
+ files = [i[:-4] for i in sorted(os.listdir(self.pth_Image))]
299
+ n = len(files)
300
+
301
+ if phase == 'train':
302
+ self.indexes = files[:130]
303
+ elif phase == 'validate':
304
+ self.indexes = files[130:150]
305
+ elif phase == 'test':
306
+ self.indexes = files[150:400]
307
+ elif phase == 'all':
308
+ self.indexes = files
309
+ else:
310
+ raise Exception("Unknown phase: {phase}".format(phase=phase))
311
+
312
+
313
+ def __getitem__(self, index):
314
+ name = self.indexes[index]
315
+ ret = {'name': name}
316
+
317
+ img, img_size = self.readImage(os.path.join(self.pth_Image, name+'.jpg'))
318
+ points = self.readLandmark(name, img_size)
319
+ heatmaps = utilities.points_to_heatmap(points, sigma=self.sigma, img_size=self.new_size, fuse=self.fuse_heatmap)
320
+
321
+ transformed = self.transforms(image=img, masks=heatmaps)
322
+ img, heatmaps = transformed['image'], transformed['masks']
323
+
324
+ ret['image'] = img
325
+ ret['landmarks'] = torch.FloatTensor(points)
326
+ ret['heatmaps'] = torch.stack([hm.float() for hm in heatmaps])
327
+ ret['original_size'] = torch.FloatTensor(img_size)
328
+ ret['resized_size'] = torch.FloatTensor(self.new_size)
329
+
330
+ return ret
331
+
332
+ def __len__(self):
333
+ return len(self.indexes)
334
+
335
+ def readLandmark(self, name, origin_size):
336
+ points = []
337
+ with open(os.path.join(self.pth_label_junior, name + '.txt')) as f1:
338
+ with open(os.path.join(self.pth_label_senior, name + '.txt')) as f2:
339
+ for i in range(self.num_landmarks):
340
+ landmark1 = f1.readline().rstrip('\n').split(',')
341
+ landmark2 = f2.readline().rstrip('\n').split(',')
342
+ # Average of junior and senior landmarks
343
+ landmark = [(float(i) + float(j)) / 2 for i, j in zip(landmark1, landmark2)]
344
+ #landmark = [float(i) for i in landmark1]
345
+ ratios = (landmark[0] / origin_size[0], landmark[1] / origin_size[1])
346
+ points.append(ratios)
347
+ return np.array(points)
348
+
349
+ def readImage(self, path):
350
+
351
+ if self.num_channels == 3:
352
+ img = Image.open(path).convert('RGB')
353
+ arr = np.array(img).astype(np.float32)
354
+
355
+ elif self.num_channels == 1:
356
+ img = Image.open(path).convert('L')
357
+ arr = np.array(img).astype(np.float32)
358
+ arr = np.expand_dims(arr, 2)
359
+ else:
360
+ raise ValueError('Channels must be either 1 or 3')
361
+
362
+ # Original size in (width, height)
363
+ origin_size = img.size
364
+ resized_image = resize(arr, (self.new_size[0], self.new_size[1], self.num_channels))
365
+
366
+ return resized_image, origin_size
367
+
368
+ def get_transforms(self):
369
+ if self.phase == 'train':
370
+ return A.Compose([
371
+ A.ShiftScaleRotate(shift_limit=0.02, scale_limit=(-0.02, 0.02), rotate_limit=2, border_mode=cv2.BORDER_REPLICATE, p=0.5),
372
+ #A.Perspective(scale=(0, 0.02), pad_mode=cv2.BORDER_REPLICATE, p=0.5),
373
+ #A.RandomBrightnessContrast(brightness_limit=(-0.1, 0.1), contrast_limit=(-0.2, 0.2), p=0.5),
374
+ #A.Resize(self.new_size[0], self.new_size[1]),
375
+ A.Normalize(normalization='min_max'),
376
+ A.pytorch.ToTensorV2()
377
+ ])
378
+ elif self.phase == 'validate':
379
+ return A.Compose([
380
+ #A.Resize(self.new_size[0], self.new_size[1]),
381
+ A.Normalize(normalization='min_max'),
382
+ A.pytorch.ToTensorV2()
383
+ ])
384
+ elif self.phase == 'test':
385
+ return A.Compose([
386
+ #A.Resize(self.new_size[0], self.new_size[1]),
387
+ A.Normalize(normalization='min_max'),
388
+ A.pytorch.transforms.ToTensorV2()
389
+ ])
390
+ else:
391
+ raise ValueError('phase must be either "train" or "validate" or "test"')
392
+
393
+
DiffusionXray-FewShot-LandmarkDetection/downstream_task/main.py ADDED
@@ -0,0 +1,379 @@
1
+
2
+
3
+ # ------------------------------------------------------------------------
4
+ # Libraries
5
+ # ------------------------------------------------------------------------
6
+
7
+ # General libraries
8
+ import os
9
+ import sys
10
+ import random
11
+ from datetime import datetime
12
+ import time
13
+ import argparse
14
+ import json
15
+
16
+ # Deep learning libraries
17
+ import torch
18
+ from torch import nn
19
+ from torch.utils.data import DataLoader
20
+ from torch.optim.lr_scheduler import ReduceLROnPlateau
21
+
22
+ # Custom libraries
23
+ from utilities import *
24
+ from landmarks_datasets import *
25
+ from model.deep_learning import *
26
+ from model.models import *
27
+
28
+ # Set random seed
29
+ random.seed(42)
30
+ np.random.seed(42)
31
+ torch.manual_seed(42)
32
+ torch.cuda.manual_seed(42)
33
+
34
+
35
+ # ------------------------------------------------------------------------
36
+ # MAIN
37
+ # ------------------------------------------------------------------------
38
+
39
+ if __name__ == "__main__":
40
+ # Parse arguments from command line
41
+ parser = argparse.ArgumentParser()
42
+ parser.add_argument(
43
+ "-c",
44
+ "--config",
45
+ type=str,
46
+ default="downstream_task/config/config.json",
47
+ help="Path to the JSON config file."
48
+ )
49
+ parser.add_argument(
50
+ "-p",
51
+ "--load_path",
52
+ type=str,
53
+ default=None,
54
+ help="Path to the model to be loaded."
55
+ )
56
+
57
+ args = parser.parse_args()
58
+ config = json.load(open(args.config))
59
+
60
+ # Print system info
61
+ print("----------------------------------------- SYSTEM INFO -----------------------------------------")
62
+ print("Python version: {}".format(sys.version))
63
+ print("Pytorch version: {}".format(torch.__version__))
64
+
65
+ if "CUDA_VISIBLE_DEVICES" in os.environ:
66
+ GPU = os.environ["CUDA_VISIBLE_DEVICES"]
67
+ else:
68
+ GPU = config["gpu"]
69
+ os.environ["CUDA_VISIBLE_DEVICES"] = f"{GPU}"
70
+
71
+ device = "cuda" if torch.cuda.is_available() else "cpu"
72
+ print(f"Torch GPU Name: {torch.cuda.get_device_name(0)}... Using GPU {GPU}" if device == "cuda" else "Torch GPU not available... Using CPU")
73
+
74
+ print("------------------------------------------------------------------------------------------------")
75
+
76
+ # -------------------------------------------- PATHS -------------
77
+ PREFIX = generate_path(config["experiment_path"])
78
+ log_file = f"{PREFIX}/experiments_results.txt"
79
+ DATASET_NAME = config["dataset"]["name"]
80
+ DATASET_PATH = os.path.join(config["dataset"]["path"], DATASET_NAME)
81
+
82
+ # -------------------------------------------- PARAMETERS -------------
83
+ # Dataset parameters
84
+ SIZE = tuple(config["dataset"]["image_size"])
85
+ NUM_CHANNELS = config["dataset"]["image_channels"]
86
+ SIGMA = config["dataset"]["sigma"]
87
+ TRAINING_SAMPLES = config["dataset"]["training_samples"]
88
+ PIN_MEMORY = config["dataset"]["pin_memory"]
89
+ NUM_WORKERS = 2 if config["dataset"]["num_workers"] is None else config["dataset"]["num_workers"]
90
+
91
+ # Model parameters
92
+ MODEL_NAME = config["model"]["name"]
93
+ SSL_MODELS = ["moco", "mocov2", "mocov3", "simclr", "simclrv2", "dino", "barlow_twins", "byol"]
94
+
95
+ if MODEL_NAME == "imagenet":
96
+ MODEL_NAME = "smpUnet"
97
+ elif MODEL_NAME == "ddpm":
98
+ pass
99
+ elif MODEL_NAME in SSL_MODELS:
100
+ NUM_CHANNELS = 3
101
+ else:
102
+ raise Exception("Model not found... Choose between: ddpm, imagenet, moco, mocov2, mocov3, simclr, simclrv2, dino, barlow_twins, byol")
103
+
104
+ BACKBONE_NAME = config["model"]["encoder"]
105
+ # Replace "efficientnet_b0" with "efficientnet-b0" (and similar) to match the expected encoder name
106
+ BACKBONE_NAME = BACKBONE_NAME.replace("_", "-") if "efficientnet" in BACKBONE_NAME else BACKBONE_NAME
107
+
108
+ PRETRAINED = config["training_protocol"]["scratch"]["apply"] == False
109
+ NUM_EPOCHS = config["model"]["epochs"]
110
+ BATCH_SIZE = config["dataset"]["batch_size"]
111
+ GRAD_ACC = config["dataset"]["grad_accumulation"]
112
+ LR = config["model"]["lr"] if PRETRAINED else config["model"]["lr"] / 0.1  # 10x higher learning rate when training from scratch
113
+ OPTIMIZER = config["model"]["optimizer"]
114
+ SCHEDULER = config["model"]["scheduler"]
115
+ LOSS_FUNCTION = config["model"]["loss_function"]
116
+ PATIENCE = GRAD_ACC + 5
117
+ EARLY_STOPPING = PATIENCE * 2 + 1
118
+ print(f"Pretrained: {PRETRAINED} -> the actual learning rate is {LR}")
119
+
120
+ # ---------------------------------------------------------------- DATASET ---------
121
+ if DATASET_NAME == "chest":
122
+ train_dataset = Chest(prefix=DATASET_PATH, phase='train', size=SIZE, num_channels=NUM_CHANNELS, sigma=SIGMA)
123
+ val_dataset = Chest(prefix=DATASET_PATH, phase='validate', size=SIZE, num_channels=NUM_CHANNELS, sigma=SIGMA)
124
+ test_dataset = Chest(prefix=DATASET_PATH, phase='test', size=SIZE, num_channels=NUM_CHANNELS, sigma=SIGMA)
125
+
126
+ elif DATASET_NAME == "hand":
127
+ train_dataset = Hand(prefix=DATASET_PATH, phase='train', size=SIZE, num_channels=NUM_CHANNELS, sigma=SIGMA)
128
+ val_dataset = Hand(prefix=DATASET_PATH, phase='validate', size=SIZE, num_channels=NUM_CHANNELS, sigma=SIGMA)
129
+ test_dataset = Hand(prefix=DATASET_PATH, phase='test', size=SIZE, num_channels=NUM_CHANNELS, sigma=SIGMA)
130
+ elif DATASET_NAME == "cephalo":
131
+ train_dataset = Cephalo(prefix=DATASET_PATH, phase='train', size=SIZE, num_channels=NUM_CHANNELS, sigma=SIGMA)
132
+ val_dataset = Cephalo(prefix=DATASET_PATH, phase='validate', size=SIZE, num_channels=NUM_CHANNELS, sigma=SIGMA)
133
+ test_dataset = Cephalo(prefix=DATASET_PATH, phase='test', size=SIZE, num_channels=NUM_CHANNELS, sigma=SIGMA)
134
+ else:
135
+ raise Exception("Dataset not found")
136
+
137
+ NUM_LANDMARKS = train_dataset.num_landmarks
138
+
139
+ # ---------------------------------------------------------------- DATA LOADING ---------
140
+ # Randomly exclude images to reduce the number of samples in the training dataset
141
+ #random_indices = np.random.choice(len(train_dataset), TRAINING_SAMPLES, replace=False)
142
+ #print(random_indices)
143
+ #train_dataset.indexes = [train_dataset.indexes[i] for i in sorted(random_indices)]
144
+
145
+ if TRAINING_SAMPLES == "all":
146
+ pass
147
+ else:
148
+ assert len(train_dataset) >= int(TRAINING_SAMPLES), "The requested number of training samples exceeds the dataset size"
149
+
150
+ train_dataset.indexes = train_dataset.indexes[:int(TRAINING_SAMPLES)]
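For example, setting "training_samples": "10" in the config keeps only the first 10 files of the training split (each dataset reports len(self.indexes) as its length), which is how the few-shot regimes are produced.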
151
+
152
+ # create dataloaders
153
+ train_dataloader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, pin_memory=PIN_MEMORY, num_workers=NUM_WORKERS, drop_last=False)
154
+ val_dataloader = DataLoader(val_dataset, batch_size=BATCH_SIZE, shuffle=False, pin_memory=PIN_MEMORY, num_workers=NUM_WORKERS)
155
+ test_dataloader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False, pin_memory=PIN_MEMORY, num_workers=NUM_WORKERS)
156
+
157
+ # ---------------------------------------------------------------- LOG FILE ---------
158
+ # Print dataset and experiment info in log file
159
+ res_file = open(log_file, 'a')
160
+ print(f"\n\n\n {datetime.now()} ---------------------- {DATASET_NAME} -------------------------------------------", file=res_file)
161
+ print(f"SIZE: {SIZE} | BATCH: {BATCH_SIZE} | GRAD ACC: {GRAD_ACC} | SIGMA: {SIGMA} | LR: {LR} | CHANNELS: {NUM_CHANNELS} | Train Samples {TRAINING_SAMPLES}", file=res_file)
162
+ print(f"samples -> Train: {len(train_dataset)} | Val: {len(val_dataset)} | Test: {len(test_dataset)}", file=res_file)
163
+ print(f"dataloaders -> Train: {len(train_dataloader)} | Val: {len(val_dataloader)} | Test: {len(test_dataloader)}", file=res_file)
164
+ res_file.close()
165
+
166
+ print(f"\n\n\n {datetime.now()} ---------------------- {DATASET_NAME} -------------------------------------------")
167
+ print(f"SIZE: {SIZE} | BATCH: {BATCH_SIZE} | GRAD ACC: {GRAD_ACC} | SIGMA: {SIGMA} | LR: {LR} | CHANNELS: {NUM_CHANNELS} | Train Samples {TRAINING_SAMPLES}")
168
+ print(f"samples -> Train: {len(train_dataset)} | Val: {len(val_dataset)} | Test: {len(test_dataset)}")
169
+ print(f"dataloaders -> Train: {len(train_dataloader)} | Val: {len(val_dataloader)} | Test: {len(test_dataloader)}")
170
+ # ---------------------------------------------------------------- MODEL ---------
171
+
172
+ if MODEL_NAME == "smpUnet" and BACKBONE_NAME is not None:
173
+ if PRETRAINED == True and config["training_protocol"]["finetuning"]["resume"] == False:
174
+ model = smpUnet(
175
+ encoder_name=BACKBONE_NAME,
176
+ encoder_weights="imagenet",
177
+ in_channels=NUM_CHANNELS,
178
+ classes=NUM_LANDMARKS
179
+ ).to(device)
180
+ model_name = f"{MODEL_NAME}/{model.encoder_name}/{model.encoder_weights}"
181
+ else:
182
+ model = smpUnet(
183
+ encoder_name=BACKBONE_NAME,
184
+ encoder_weights=None,
185
+ in_channels=NUM_CHANNELS,
186
+ classes=NUM_LANDMARKS
187
+ ).to(device)
188
+ model_name = f"{MODEL_NAME}/{model.encoder_name}/random"
189
+
190
+ elif MODEL_NAME in SSL_MODELS and BACKBONE_NAME is not None:
191
+ model = smpUnet(
192
+ encoder_name=BACKBONE_NAME,
193
+ encoder_weights=None,
194
+ in_channels=NUM_CHANNELS,
195
+ classes=NUM_LANDMARKS
196
+ ).to(device)
197
+
198
+ assert os.path.exists(f'{config["training_protocol"]["finetuning"]["path"]}'), f"{BACKBONE_NAME} pretrained model path not found"
199
+
200
+ model.encoder.load_state_dict(torch.load(f'{config["training_protocol"]["finetuning"]["path"]}', map_location=device))
201
+ model_name = f"{MODEL_NAME}/{model.encoder_name}"
202
+
203
+
204
+ elif MODEL_NAME == "ddpm":
205
+ BACKBONE_NAME = ""
206
+ model = Unet(
207
+ dim=SIZE[0],
208
+ channels=NUM_CHANNELS,
209
+ dim_mults=[1,2,4,8],
210
+ self_condition=True,
211
+ resnet_block_groups=4,
212
+ att_heads=4,
213
+ att_res=32
214
+ ).to(device)
215
+
216
+ if PRETRAINED == True and config["training_protocol"]["finetuning"]["resume"] == False:
217
+ model_name = f"{MODEL_NAME}/pretrained"
218
+ checkpoint = torch.load(config["training_protocol"]["finetuning"]["path"], map_location=device)
219
+ model.load_state_dict(checkpoint["model_state_dict"])
220
+ pretrained_epoch = checkpoint.get("epoch", "undefined")
221
+ #print(f"Loaded model weights from {checkpoint['epoch']} epoch with fid {checkpoint['fid']}")
222
+ del checkpoint
223
+ """
224
+ # freeze downsampling layers
225
+ for name, param in model.named_parameters():
226
+ if 'downs' in name:
227
+ param.requires_grad = False
228
+ """
229
+ else:
230
+ model_name = f"{MODEL_NAME}/random"
231
+
232
+ # change the number of output channels of the final convolutional layer
233
+ model.final_conv = nn.Conv2d(model.final_conv.in_channels, NUM_LANDMARKS, 1)
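This 1x1 head swap re-purposes the DDPM U-Net from image reconstruction to heatmap regression: for the chest dataset it turns the output into 6 channels, one heatmap per landmark, while leaving the pretrained trunk untouched.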
234
+
235
+ # ---------------------------------------------------------------- COUNT PARAMS ---------
236
+ table, total_params = count_parameters(model)
237
+ res_file = open(log_file, 'a')
238
+ #print(table, file=res_file)
239
+ print(f"Total Trainable Params: {total_params}", file=res_file)
240
+ res_file.close()
241
+
242
+ # ---------------------------------------------------------------- LOSS FUNCTION ---------
243
+ if LOSS_FUNCTION == "CrossEntropyLoss":
244
+ loss_fn = nn.CrossEntropyLoss()
245
+ else:
246
+ raise Exception("Loss function not found... Choose between: CrossEntropyLoss")
247
+
248
+ # ---------------------------------------------------------------- OPTIMIZER ---------
249
+ if OPTIMIZER == "Adam":
250
+ optimizer = torch.optim.Adam(params=model.parameters(), lr=LR)
251
+ elif OPTIMIZER == "AdamW":
252
+ optimizer = torch.optim.AdamW(params=model.parameters(), lr=LR)
253
+ else:
254
+ raise Exception("Optimizer not found... Choose between: Adam, AdamW")
255
+
256
+ # ---------------------------------------------------------------- SCHEDULER ---------
257
+ if SCHEDULER == "ReduceLROnPlateau":
258
+ scheduler = ReduceLROnPlateau(optimizer, patience=PATIENCE, factor=0.5, verbose=True)
259
+ else:
260
+ raise Exception("Scheduler not found... Choose between: ReduceLROnPlateau")
261
+
262
+
263
+ # ---------------------------------------------------------------- MODEL PATHS ---------
264
+ save_model_path = f"{PREFIX}/{DATASET_NAME}/size{SIZE[0]}x{SIZE[1]}_ch{NUM_CHANNELS}_samples{TRAINING_SAMPLES}/{model_name}"
265
+
266
+ use_validation_set_for_inference = config["inference_protocol"]["use_validation_set_for_inference"] in (True, "true")  # the config stores a JSON boolean, so comparing only against the string "true" would always be False
267
+
268
+ if use_validation_set_for_inference==True and PRETRAINED == True and config["model"]["name"] == "ddpm" and config["training_protocol"]["finetuning"]["resume"] == False:
269
+ save_model_path = f"{save_model_path}/val/epoch{pretrained_epoch}"
270
+
271
+ print(save_model_path)
272
+ save_model_path = generate_path(save_model_path)
273
+
274
+ load_model_path = os.path.join(save_model_path, f"best_checkpoint.pt")
275
+
276
+ # ---------------------------------------------------------------- TRAINING ---------
277
+ start_time = time.time()
278
+
279
+ if config["training_protocol"]["apply"] == True:
280
+
281
+ # Assert if the model is being trained from scratch or if it is being fine-tuned
282
+ assert config["training_protocol"]["scratch"]["apply"] != config["training_protocol"]["finetuning"]["apply"], "Choose only one training protocol (scratch or finetuning)"
283
+ print(f"Training model on the {'validation' if use_validation_set_for_inference==True else 'test'} dataset")
284
+
285
+ # Get the training protocol
286
+ if config["training_protocol"]["scratch"]["apply"] == True:
287
+ loss_results = train_and_validate(model, device, train_dataloader, val_dataloader, optimizer, scheduler, loss_fn, NUM_EPOCHS,
288
+ save_model_path, patience=EARLY_STOPPING, useGradAcc=GRAD_ACC, continue_training=config["training_protocol"]["scratch"]["resume"])
289
+ elif config["training_protocol"]["finetuning"]["apply"] == True:
290
+
291
+ DIFFERENT_DATASET = True if config["training_protocol"]["finetuning"]["different_dataset"] == "true" else False
292
+
293
+ if DIFFERENT_DATASET == True:
294
+ load_path = config["training_protocol"]["finetuning"]["path"]
295
+ assert os.path.exists(load_path), "Pretrained model path not found"
296
+ loss_results = fine_tune(model, device, train_dataloader, val_dataloader, optimizer, scheduler, loss_fn, NUM_EPOCHS,
297
+ load_path, save_model_path, patience=EARLY_STOPPING, useGradAcc=GRAD_ACC)
298
+ else:
299
+ loss_results = train_and_validate(model, device, train_dataloader, val_dataloader, optimizer, scheduler, loss_fn, NUM_EPOCHS,
300
+ save_model_path, patience=EARLY_STOPPING, useGradAcc=GRAD_ACC, continue_training=config["training_protocol"]["finetuning"]["resume"])
301
+ else:
302
+ raise Exception("Training protocol not found... Choose between: scratch, finetuning")
303
+
304
+ # ---------------------------------------------------------------- TESTING --------
305
+ end_time = time.time()
306
+
307
+ if args.load_path is not None:
308
+ load_model_path = args.load_path
309
+
310
+ if config["inference_protocol"]["apply"] == True:
311
+ print(f"Testing model on the {'validation' if use_validation_set_for_inference==True else 'test'} dataset")
312
+ res_file = open(log_file, 'a')
313
+ print(f"Testing model on the {'validation' if use_validation_set_for_inference==True else 'test'} dataset", file=res_file)
314
+ res_file.close()
315
+
316
+ if use_validation_set_for_inference == True:
317
+ test_loss, results, mre, sdr, mse, mAP_heatmaps, mAP_keypoints, iou, epoch = evaluate(model, device, val_dataloader, loss_fn, load_model_path,
318
+ NUM_LANDMARKS, sigma=SIGMA, res_file_path=log_file)
319
+ else:
320
+ test_loss, results, mre, sdr, mse, mAP_heatmaps, mAP_keypoints, iou, epoch = evaluate(model, device, test_dataloader, loss_fn, load_model_path,
321
+ NUM_LANDMARKS, sigma=SIGMA, res_file_path=log_file)
322
+
323
+ # ---------------------------------------------------------------- TELEGRAM ---------
324
+ # Free GPU cache and RAM memory
325
+ #free_gpu_cache()
326
+
327
+
328
+ sdr_str = '\n'.join(f'\tThresholds {k}: {v*100:.2f}' for k, v in sorted(sdr.items()))
329
+
330
+ message = (
331
+ f"<b>{DATASET_NAME}</b> | Train Samples: {TRAINING_SAMPLES} \n"
332
+ f"<b>Model:</b> {model_name} \n"
333
+ f"<b>Shape:</b>[{SIZE}, {SIZE}, {NUM_CHANNELS}] \n"
334
+ f"<b>Sigma:</b> {SIGMA} \n"
335
+ f"<b>Batch:</b> {BATCH_SIZE}x{GRAD_ACC} \n"
336
+ f"<b>Time:</b> {time.strftime('%H:%M:%S', time.gmtime(end_time - start_time))} \n"
337
+ f"<b>MRE:</b> {mre:.2f} \n\n"
338
+ f"<b>SDR:</b> \n{sdr_str} \n"
339
+ )
340
+
341
+ send_telegram_message(message)
342
+
343
+ # Save the results in a file
344
+ results_dir = f"outputs/{DATASET_NAME}_{MODEL_NAME}"
345
+ os.makedirs(f'{results_dir}', exist_ok=True)
346
+
347
+ # Mode 'a' creates the file if it does not exist, so a single append covers both cases
+ results_file = f'{results_dir}/outputs_{DATASET_NAME}_{MODEL_NAME}_{BACKBONE_NAME}_{TRAINING_SAMPLES}.txt'
+ with open(results_file, 'a') as f:
+ print(f"\n\n{DATASET_NAME} | {MODEL_NAME} | {BACKBONE_NAME} | {TRAINING_SAMPLES}", file=f)
+ print(f"Shape: [{SIZE[0]}, {SIZE[1]}, {NUM_CHANNELS}] | Sigma: {SIGMA} | Batch: {BATCH_SIZE}x{GRAD_ACC}", file=f)
+ print(f"Time: {time.strftime('%H:%M:%S', time.gmtime(end_time - start_time))}", file=f)
+ print(f"MRE: {mre:.2f}", file=f)
+ print(f"SDR: \n{sdr_str}", file=f)
+ print(f"MSE: {mse:.2f}", file=f)
+ print(f"IOU: {iou:.2f}", file=f)
+ print(f"mAP Heatmaps: {mAP_heatmaps:.2f}", file=f)
+ print(f"mAP Keypoints: {mAP_keypoints:.2f}", file=f)
+ print(f"Epoch: {epoch}", file=f)
+ print(f"Test Loss: {test_loss:.2f}", file=f)
+ print(f"Total Trainable Params: {total_params}", file=f)
+ print(f"Model Path: {save_model_path}", file=f)
377
+
378
+
379
+
DiffusionXray-FewShot-LandmarkDetection/downstream_task/metrics.py ADDED
@@ -0,0 +1,234 @@
1
+
2
+ import numpy as np
3
+ import scipy.spatial.distance as dist
4
+ import utilities
5
+ import matplotlib.pyplot as plt
6
+ from matplotlib.colors import LinearSegmentedColormap
7
+ from PIL import Image
8
+
9
+ ## -----------------------------------------------------------------------------------------------------------------##
10
+ ## Mean Squared Error ##
11
+ ## -----------------------------------------------------------------------------------------------------------------##
12
+
13
+ def compute_mse(gt_keypoints, pred_keypoints):
14
+ assert gt_keypoints.shape == pred_keypoints.shape, "Ground truth and predicted keypoints must have the same shape"
15
+
16
+ # Compute squared differences
17
+ squared_diff = np.square(gt_keypoints - pred_keypoints)
18
+
19
+ # Compute mean
20
+ mse = np.mean(squared_diff)
21
+
22
+ return mse
23
+
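+ # Worked example (illustrative only):
+ #   gt   = np.array([[0., 0.], [3., 4.]])
+ #   pred = np.array([[0., 1.], [3., 0.]])
+ #   squared differences: [[0, 1], [0, 16]] -> compute_mse(gt, pred) == 17 / 4 == 4.25
+ # Note the mean is taken over all coordinates (N landmarks x 2), not per landmark.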
24
+ ## -----------------------------------------------------------------------------------------------------------------##
25
+ ## mAp with OKS for heatmaps ##
26
+ ## -----------------------------------------------------------------------------------------------------------------##
27
+ def compute_oks_heatmaps(ground_truth, prediction, sigma):
28
+ distance = dist.cdist(ground_truth, prediction, 'euclidean')
29
+ scale = 1
30
+ oks = np.exp(-1 * (distance ** 2) / (2 * (sigma**2) * (scale ** 2)))
31
+ return oks
32
+
33
+ def compute_map_heatmaps(ground_truth_heatmaps, predicted_heatmaps, sigma=0.1, thresholds=np.arange(0.5, 1.0, 0.05)):
34
+ aps = []
35
+
36
+ assert ground_truth_heatmaps.shape == predicted_heatmaps.shape, "Heatmaps should have the same shape"
37
+
38
+ oks = compute_oks_heatmaps(ground_truth_heatmaps, predicted_heatmaps, sigma)
39
+ for threshold in thresholds:
40
+ tp = np.sum(oks >= threshold)
41
+ fp = np.sum(oks < threshold)
42
+ precision = tp / (tp + fp)
43
+ aps.append(precision)
44
+ map_value = np.mean(aps)
45
+
46
+ return map_value
47
+
48
+
49
+ ## -----------------------------------------------------------------------------------------------------------------##
50
+ ## mAp with OKS for keypoints ##
51
+ ## -----------------------------------------------------------------------------------------------------------------##
52
+ def compute_oks_keypoints(ground_truth, prediction, sigma):
53
+ # Calculate the distance between the ground truth and prediction points
54
+ distance = np.sqrt(np.sum((ground_truth - prediction)**2, axis=1))
55
+
56
+ # Calculate the scale, assuming the points are normalized
57
+ #scale = np.max(ground_truth) - np.min(ground_truth)
58
+ scale = 1
59
+ # Calculate the OKS value
60
+ oks = np.exp(-1 * (distance ** 2) / (2 * (sigma**2) * (scale ** 2)))
61
+ return oks
62
+
63
+
64
+ def compute_map_keypoints(ground_truth_keypoints, predicted_keypoints, sigma=0.1, thresholds=np.arange(0.5, 1.0, 0.05)):
65
+ aps = []
66
+
67
+ # Calculate OKS value
68
+ oks = compute_oks_keypoints(ground_truth_keypoints, predicted_keypoints, sigma)
69
+ for threshold in thresholds:
70
+ # Calculate precision
71
+ tp = np.sum(oks >= threshold)
72
+ fp = np.sum(oks < threshold)
73
+ precision = tp / (tp + fp)
74
+ aps.append(precision)
75
+ # Calculate the mean average precision
76
+ map_value = np.mean(aps)
77
+ return map_value
78
+
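+ # Worked example (illustrative only), assuming normalized coordinates as noted above:
+ #   gt = np.array([[0.5, 0.5]]); pred = np.array([[0.5, 0.6]])  -> distance 0.1
+ #   with sigma = 0.1 and scale = 1: oks = exp(-0.1**2 / (2 * 0.1**2)) = exp(-0.5) ~= 0.61
+ #   oks ~= 0.61 clears the thresholds 0.50, 0.55, 0.60 but none of 0.65 ... 0.95,
+ #   so precision is 1 for 3 of the 10 thresholds and compute_map_keypoints(...) ~= 0.30.
+ # Strictly speaking this is a mean precision over OKS thresholds (no recall sweep),
+ # which is how "mAP" is used throughout this file.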
79
+
80
+
81
+ ## -----------------------------------------------------------------------------------------------------------------##
82
+ ## Intersection Over Union ##
83
+ ## -----------------------------------------------------------------------------------------------------------------##
84
+
85
+ def compute_iou_heatmaps(heatmap1, heatmap2):
86
+
87
+ assert heatmap1.shape == heatmap2.shape, "Heatmaps should have the same shape"
88
+
89
+ overlap = np.logical_and(heatmap1, heatmap2)
90
+ union = np.logical_or(heatmap1, heatmap2)
91
+ overlap_area = np.sum(overlap)
92
+ union_area = np.sum(union)
93
+ IoU = overlap_area / union_area
94
+ return IoU
95
+
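+ # Worked example (illustrative only): any nonzero pixel counts as "on" for the logical ops,
+ # so continuous heatmaps are effectively binarized at zero.
+ #   h1 = np.array([[1, 1], [0, 0]]); h2 = np.array([[1, 0], [1, 0]])
+ #   overlap area = 1, union area = 3  ->  compute_iou_heatmaps(h1, h2) == 1/3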
96
+
97
+
98
+ ## -----------------------------------------------------------------------------------------------------------------##
99
+ ## Aux functions ##
100
+ ## -----------------------------------------------------------------------------------------------------------------##
101
+
102
+ from collections.abc import Iterable
103
+
104
+ def radial(pt1, pt2, factor=1):
105
+ if not isinstance(factor,Iterable):
106
+ factor = [factor]*len(pt1)
107
+ return sum(((i-j)*s)**2 for i, j,s in zip(pt1, pt2, factor))**0.5
108
+
109
+ def cal_all_distance(points, gt_points, factor=1):
110
+ '''
111
+ points: [(x,y,z...)]
112
+ gt_points: [(x,y,z...)]
113
+ return : [d1,d2, ...]
114
+ '''
115
+ n1 = len(points)
116
+ n2 = len(gt_points)
117
+ if n1 == 0:
+ print("[Warning]: empty input; returning an empty distance list")
+ return []
+ if n1 != n2:
+ raise Exception("Error: lengths mismatch, {}<>{}".format(n1, n2))
+ return [radial(p, q, factor) for p, q in zip(points, gt_points)]
123
+
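+ # Worked example (illustrative only): `factor` rescales each axis before the euclidean norm,
+ # which is how per-axis pixel spacings are turned into physical distances.
+ #   radial((0, 0), (3, 4))                 == 5.0
+ #   radial((0, 0), (3, 4), factor=(2, 1))  == (36 + 16) ** 0.5 ~= 7.21
+ #   cal_all_distance([(0, 0)], [(3, 4)])   == [5.0]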
124
+
125
+ ## -----------------------------------------------------------------------------------------------------------------##
126
+ ## Mean Radial Error (MRE) ##
127
+ ## -----------------------------------------------------------------------------------------------------------------##
128
+
129
+ """
130
+ MRE (Mean Radial Error):
131
+ This measures the average euclidean distance between predicted landmarks and ground truth landmarks.
132
+ It is calculated by taking the mean of the list of distances (cal_all_distance).
133
+ """
134
+
135
+ def compute_mre(distance_list):
136
+ return np.mean(distance_list)
137
+
138
+ ## -----------------------------------------------------------------------------------------------------------------##
139
+ ## Successful Detection Rate (SDR) ##
140
+ ## -----------------------------------------------------------------------------------------------------------------##
141
+ """
142
+ SDR (Successful Detection Rate):
143
+ This measures the percentage of predicted landmarks that are within a threshold distance of the ground truth.
144
+ It is calculated by get_sdr which counts the number of distances below each threshold and divides by the total number of landmarks.
145
+ """
146
+
147
+ def compute_sdr(distance_list, threshold=[2, 2.5, 3, 4, 6, 9, 10]):
148
+ """
149
+ Compute the Successful Detection Rate (SDR) for a given list of distances and thresholds.
150
+ The SDR is the proportion of predicted points that fall within a certain distance threshold from the ground truth points.
151
+ """
152
+ sdr = {}
153
+ n = len(distance_list)
154
+
155
+ for th in threshold:
156
+ sdr[th] = sum(d <= th for d in distance_list) / n
157
+ return sdr
158
+
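+ # Worked example (illustrative only), together with the MRE above:
+ #   distances = [1.0, 2.0, 3.5, 8.0]
+ #   compute_mre(distances) == 3.625
+ #   compute_sdr(distances) == {2: 0.5, 2.5: 0.5, 3: 0.5, 4: 0.75, 6: 0.75, 9: 1.0, 10: 1.0}
+ # i.e. 50% of the points fall within 2 units of the ground truth, 75% within 4, all within 9.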
159
+ ## -----------------------------------------------------------------------------------------------------------------##
160
+ ## COMPUTE BATCH METRICS ##
161
+ ## -----------------------------------------------------------------------------------------------------------------##
162
+
163
+
164
+ def compute_batch_metrics(gt_batch_keypoints, gt_batch_heatmaps, pred_batch, image_size, num_landmarks, useHeatmaps, sigma):
165
+
166
+ batch_size = pred_batch.shape[0]
167
+ mse_list = []
168
+ map_list1 = []
169
+ map_list2 = []
170
+ iou_list = []
171
+ distance_list = []
172
+
173
+ #sigma = sigma/10
174
+ sigma = 5  # NOTE: the sigma argument is overridden with a fixed value for metric computation
175
+
176
+ # Loop through the batch
177
+ for i in range(batch_size):
178
+ single_gt_keypoints = gt_batch_keypoints[i, :, :].numpy()
179
+ single_gt_heatmaps = gt_batch_heatmaps[i, :, :].numpy()
180
+ single_prediction = pred_batch[i, :, :].numpy()
181
+ single_image_size = tuple(image_size[i].int().tolist())
182
+
183
+ # Re-extract the GT keypoints from the heatmaps so that predicted and ground-truth keypoints share the same reference system and size
184
+ single_gt_keypoints = utilities.extract_landmarks(single_gt_heatmaps, num_landmarks)
185
+
186
+ # Fuse the original heatmaps
187
+ single_gt_heatmaps_fused = utilities.points_to_heatmap(single_gt_keypoints, img_size=single_image_size, sigma=sigma, fuse=True)
188
+ #single_gt_heatmaps_fused = utilities.fuse_heatmaps(single_gt_heatmaps)
189
+
190
+ if useHeatmaps:
191
+ # Extract landmarks from the model's output
192
+ single_pred_keypoints = utilities.extract_landmarks(single_prediction, num_landmarks)
193
+
194
+ # Upscaling the prediction to the original image size to compute metrics
195
+ single_pred_heatmaps = utilities.points_to_heatmap(single_pred_keypoints, img_size=single_image_size, sigma=sigma, fuse=True)
196
+ else:
197
+ single_pred_keypoints = single_prediction
198
+ single_pred_heatmaps = utilities.points_to_heatmap(single_pred_keypoints, img_size=single_image_size, sigma=sigma, fuse=True)
199
+
200
+ gt_scaled_points = np.array(utilities.scale_points(single_gt_keypoints, single_image_size))
201
+ pred_scaled_points = np.array(utilities.scale_points(single_pred_keypoints, single_image_size))
202
+
203
+ # Compute Distance list for MRE and SDR
204
+ if num_landmarks == 6:
205
+ physical_factor = 1 # chest
206
+ elif num_landmarks == 19:
207
+ physical_factor = np.array([2400/single_image_size[0], 1935/single_image_size[1]]) * 0.1 # head
208
+ #physical_factor = 0.46875 # ceph
209
+ #physical_factor = 0.9375
210
+ elif num_landmarks == 37:
211
+ physical_factor = 50/radial(gt_scaled_points[0], gt_scaled_points[4]) # hand
212
+ else:
213
+ raise Exception("Error: Unknown number of landmarks")
214
+
215
+ cur_distance_list = cal_all_distance(pred_scaled_points, gt_scaled_points, physical_factor)
216
+ distance_list += cur_distance_list
217
+
218
+ # Compute MSE
219
+ mse = compute_mse(gt_scaled_points, pred_scaled_points)
220
+ mse_list.append(mse)
221
+
222
+ # Compute mAP with keypoints
223
+ map2 = compute_map_keypoints(gt_scaled_points, pred_scaled_points)
224
+ map_list2.append(map2)
225
+
226
+ # Compute mAP with heatmaps
227
+ map1 = compute_map_heatmaps(single_gt_heatmaps_fused, single_pred_heatmaps)
228
+ map_list1.append(map1)
229
+
230
+ # Compute IoU
231
+ iou = compute_iou_heatmaps(single_gt_heatmaps_fused, single_pred_heatmaps)
232
+ iou_list.append(iou)
233
+
234
+ return mse_list, map_list1, map_list2, iou_list, distance_list
DiffusionXray-FewShot-LandmarkDetection/downstream_task/model/deep_learning.py ADDED
@@ -0,0 +1,725 @@
1
+
2
+ import os
3
+ import numpy as np
4
+ from timeit import default_timer as timer
5
+ from tqdm.auto import tqdm
6
+ import torch
7
+ import metrics
8
+ from torch import nn
9
+ import utilities
10
+ import csv
11
+ import matplotlib.pyplot as plt
12
+ from sklearn.model_selection import KFold
13
+ ## -----------------------------------------------------------------------------------------------------------------##
14
+ ## TRAINING with GRADIENT ACCUMULATION ##
15
+ ## -----------------------------------------------------------------------------------------------------------------##
16
+
17
+ def train_step(model: torch.nn.Module,
18
+ device: torch.device,
19
+ dataloader: torch.utils.data.DataLoader,
20
+ loss_fn: torch.nn.Module,
21
+ optimizer: torch.optim.Optimizer,
22
+ useHeatmaps: bool = False,
23
+ gradient_accumulation_steps: int = 1):
24
+ # Put model in train mode
25
+ model = model.to(device)
26
+ model.train()
27
+
28
+ # Setup train loss value
29
+ train_loss = 0.0
30
+
31
+ # Loop through data loader data batches
32
+ for batch, data in enumerate(dataloader):
33
+
34
+ img_name = data['name']
35
+ images_tensor = data['image']
36
+ landmarks_tensor = data['landmarks']
37
+ heatmaps_tensor = data['heatmaps']
38
+
39
+ # Send data to target device
40
+ X = images_tensor.to(device)
41
+
42
+ if useHeatmaps:
43
+ y = heatmaps_tensor.to(device)
44
+ else:
45
+ y = landmarks_tensor.to(device)
46
+
47
+ #print(f"Batch {batch} - image tensor: {X.shape} - GT tensor: {y.shape}")
48
+
49
+ # Forward pass
50
+ y_pred = model(X)
51
+
52
+ #print(f"y pred shape: {y_pred.shape} - y shape: {y.shape}")
53
+
54
+ # Calculate and accumulate loss
55
+ loss = loss_fn(y_pred, y)
56
+
57
+ # normalize loss to account for batch accumulation
58
+ loss = loss / gradient_accumulation_steps
59
+ train_loss += loss.item()
60
+
61
+ # Loss backward
62
+ loss.backward()
63
+
64
+ # Check if it is time to update the weights
65
+ if ((batch + 1) % gradient_accumulation_steps == 0) or (batch + 1 == len(dataloader)):
66
+ # Optimizer step
67
+ optimizer.step()
68
+ # Reset gradients
69
+ optimizer.zero_grad()
70
+
71
+ # Adjust metrics to get average loss and accuracy per batch
72
+ train_loss /= len(dataloader)
73
+
74
+ return train_loss
75
+
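+ # Worked example (illustrative only): with a dataloader batch size of 4 and
+ # gradient_accumulation_steps = 8, the loss of each batch is divided by 8 and gradients
+ # are accumulated over 8 consecutive batches before optimizer.step() fires, so one update
+ # sees an effective batch of 4 * 8 = 32 with gradients scaled as if it were a single batch.
+ # The final `(batch + 1 == len(dataloader))` clause flushes a partial accumulation window
+ # at the end of the epoch.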
76
+ ## -----------------------------------------------------------------------------------------------------------------##
77
+ ## VALIDATION PART ##
78
+ ## -----------------------------------------------------------------------------------------------------------------##
79
+
80
+ def validate_step(model: torch.nn.Module,
81
+ device: torch.device,
82
+ dataloader: torch.utils.data.DataLoader,
83
+ loss_fn: torch.nn.Module,
84
+ useHeatmaps: bool = False):
85
+ # Put model in eval mode
86
+ model = model.to(device)
87
+ model.eval()
88
+
89
+ # Setup validation loss value
90
+ val_loss = 0.0
91
+
92
+ with torch.no_grad():
93
+ # Loop through DataLoader batches
94
+ for batch, data in enumerate(dataloader):
95
+ images_tensor = data['image']
96
+ landmarks_tensor = data['landmarks']
97
+ heatmaps_tensor = data['heatmaps']
98
+
99
+ # Send data to target device
100
+ X = images_tensor.to(device)
101
+
102
+ if useHeatmaps:
103
+ y = heatmaps_tensor.to(device)
104
+ else:
105
+ y = landmarks_tensor.to(device)
106
+
107
+ # Forward pass
108
+ val_pred_logits = model(X)
109
+
110
+ # Calculate and accumulate loss
111
+ loss = loss_fn(val_pred_logits, y)
112
+ val_loss += loss.item()
113
+
114
+ # Adjust metrics to get average loss per batch
115
+ val_loss = val_loss / len(dataloader)
116
+
117
+ return val_loss
118
+
119
+
120
+ ## -----------------------------------------------------------------------------------------------------------------##
121
+ ## EARLY STOPPING ##
122
+ ## -----------------------------------------------------------------------------------------------------------------##
123
+
124
+ class EarlyStopping:
125
+ """Early stops the training if validation loss doesn't improve after a given patience."""
126
+ def __init__(self, patience=10, delta=0, save_path=None, counter=0, best_val_loss=None):
+ self.patience = patience
+ self.counter = counter
+ self.best_val_loss = best_val_loss
+ self.early_stop = False
+ self.val_loss_min = np.inf
+ self.delta = delta
+ self.path = save_path
134
+
135
+ def call(self, val_loss, model, optimizer, scheduler, loss_fn, results, epochs_without_improvement, epoch):
136
+
137
+ if self.best_val_loss is None:
138
+ self.best_val_loss = val_loss
139
+ save_model(self.path, model, optimizer, scheduler, loss_fn, results, epochs_without_improvement, self.best_val_loss, epoch, called_by_early_stopping=True)
140
+
141
+ elif val_loss >= self.best_val_loss + self.delta:
142
+ self.counter += 1
143
+ print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
144
+ if self.counter >= self.patience:
145
+ self.early_stop = True
146
+ else:
147
+ self.best_val_loss = val_loss
148
+ save_model(self.path, model, optimizer, scheduler, loss_fn, results, epochs_without_improvement, self.best_val_loss, epoch, called_by_early_stopping=True)
149
+ self.counter = 0
150
+
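+ # Behavior sketch (illustrative only): with patience = 3 and delta = 0, the counter increases
+ # on every epoch whose val_loss is >= the best seen so far and resets on any improvement;
+ # after 3 consecutive non-improving epochs `early_stop` flips to True. Every improvement also
+ # snapshots the model via save_model(..., called_by_early_stopping=True) as best_checkpoint.pt.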
151
+
152
+ ## -----------------------------------------------------------------------------------------------------------------##
153
+ ## SAVE AND LOAD A MODEL ##
154
+ ## -----------------------------------------------------------------------------------------------------------------##
155
+ def save_model(save_path, model, optimizer, scheduler, loss_fn, results, epochs_without_improvement, best_val_loss, epoch, called_by_early_stopping=False):
156
+ if not os.path.exists(save_path):
157
+ os.makedirs(save_path)
158
+
159
+ if called_by_early_stopping:
160
+ checkpoint_path = os.path.join(save_path, "best_checkpoint.pt")
161
+ else:
162
+ checkpoint_path = os.path.join(save_path, f"checkpoint_epoch{epoch}.pt")
163
+
164
+ torch.save({
165
+ 'model_state_dict': model.state_dict(),
166
+ 'optimizer_state_dict': optimizer.state_dict(),
167
+ 'scheduler_state_dict': scheduler.state_dict(),
168
+ 'loss_fn': loss_fn.state_dict(),
169
+ 'results': results,
170
+ 'epochs_without_improvement': epochs_without_improvement,
171
+ 'best_val_loss': best_val_loss,
172
+ 'epoch': epoch
173
+ }, checkpoint_path)
174
+ #print(f"Model saved to {checkpoint_path}")
175
+
176
+
177
+ def load_model(load_path, model, optimizer, scheduler, loss_fn, device):
178
+ checkpoint = torch.load(load_path, map_location=torch.device(device))
179
+
180
+ # Load the state_dict into the model only if it exists in the checkpoint
181
+ if 'model_state_dict' in checkpoint:
182
+ model.load_state_dict(checkpoint['model_state_dict'])
183
+ model = model.to(device) # Move the model to the specified device
184
+
185
+ # Load the optimizer state_dict only if it exists in the checkpoint
186
+ if 'optimizer_state_dict' in checkpoint:
187
+ optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
188
+
189
+ # Load the scheduler state_dict only if it exists in the checkpoint
190
+ if 'scheduler_state_dict' in checkpoint:
191
+ scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
192
+
193
+ # Load the loss_fn state_dict only if it exists in the checkpoint
194
+ if 'loss_fn' in checkpoint:
195
+ loss_fn.load_state_dict(checkpoint['loss_fn'])
196
+
197
+ # Load other values only if they exist in the checkpoint
198
+ start_epoch = checkpoint.get('epoch', 0) + 1
199
+ results = checkpoint.get('results', None)
200
+ epochs_without_improvement = checkpoint.get('epochs_without_improvement', 0)
201
+ best_val_loss = checkpoint.get('best_val_loss', None)
202
+ print(f"Model loaded from {load_path} | Starting from epoch {start_epoch} | Best validation loss: {best_val_loss} | Epochs without improvement: {epochs_without_improvement}")
203
+ return model, optimizer, scheduler, loss_fn, start_epoch, results, epochs_without_improvement, best_val_loss
204
+
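+ # Illustrative only: a checkpoint written by save_model can be inspected without a model, e.g.
+ #   ckpt = torch.load("best_checkpoint.pt", map_location="cpu")
+ #   print(ckpt["epoch"], ckpt["best_val_loss"])
+ # load_model restores model/optimizer/scheduler/loss_fn plus the bookkeeping fields, so a
+ # resumed run continues from checkpoint epoch + 1 with the early-stopping counter intact.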
205
+
206
+ ## -----------------------------------------------------------------------------------------------------------------##
207
+ ## TRAINING + VALIDATION PART ##
208
+ ## -----------------------------------------------------------------------------------------------------------------##
209
+
210
+ def train_and_validate(model: torch.nn.Module,
211
+ device: torch.device,
212
+ train_dataloader: torch.utils.data.DataLoader,
213
+ val_dataloader: torch.utils.data.DataLoader,
214
+ optimizer: torch.optim.Optimizer,
215
+ scheduler: torch.optim.lr_scheduler,
216
+ loss_fn: torch.nn.Module,
217
+ epochs: int = 10,
218
+ save_path: str = None,
219
+ useHeatmaps: bool = True,
220
+ patience: int = 10,
221
+ save_all_epochs: bool = False,
222
+ useGradAcc: int = 1,
223
+ continue_training: bool = False):
224
+
225
+ if continue_training:
226
+ model_path = os.path.join(save_path, "best_checkpoint.pt")
+ assert os.path.exists(model_path), "To resume training, a best_checkpoint.pt must exist in the save path."
228
+
229
+ # Load the model from the path
230
+ model, optimizer, scheduler, loss_fn, start_epoch, results, epochs_without_improvement, best_val_loss = load_model(model_path, model, optimizer, scheduler, loss_fn, device)
231
+ else:
232
+ # Create empty results dictionary and initialize epoch
233
+ results = {"train_loss": [], "val_loss": []}
234
+ start_epoch = 1
235
+ best_val_loss = float("inf")
236
+ epochs_without_improvement = 0
237
+
238
+ # Start the timer
239
+ start_time = timer()
240
+
241
+ # Create EarlyStopping instance
242
+ early_stopping = EarlyStopping(patience=patience, save_path=save_path, counter=epochs_without_improvement, best_val_loss=best_val_loss)
243
+
244
+ # Loop through training and validating steps for a number of epochs
245
+ for epoch in tqdm(range(start_epoch, epochs + 1)):
246
+
247
+ assert useGradAcc >= 1, "Gradient accumulation steps must be at least 1"
248
+
249
+ train_loss = train_step(model, device, train_dataloader, loss_fn, optimizer, useHeatmaps, gradient_accumulation_steps=useGradAcc)
250
+
251
+ val_loss = validate_step(model, device, val_dataloader, loss_fn, useHeatmaps)
252
+
253
+ scheduler_type = scheduler.__class__.__name__
254
+ if scheduler_type == "ReduceLROnPlateau":
255
+ scheduler.step(val_loss)
256
+ else:
257
+ # Update the learning rate using the scheduler
258
+ scheduler.step()
259
+
260
+ # Print out what's happening
261
+ print(f"Epoch {epoch} | Train Loss: {train_loss:.7f} | Validation Loss: {val_loss:.7f}")
262
+
263
+ # Update results dictionary
264
+ results["train_loss"].append(train_loss)
265
+ results["val_loss"].append(val_loss)
266
+
267
+ # Save the trained model
268
+ if save_all_epochs is True:
269
+ save_model(save_path, model, optimizer, scheduler, loss_fn, results, epochs_without_improvement, best_val_loss, epoch)
270
+
271
+ # Check for early stopping
272
+ early_stopping.call(val_loss, model, optimizer, scheduler, loss_fn, results, epochs_without_improvement, epoch)
273
+ if early_stopping.early_stop:
274
+ print("Early stopping triggered.")
275
+ break
276
+
277
+ # End the timer and print out how long it took
278
+ end_time = timer()
279
+ print(f"Total training time: {end_time - start_time:.3f} seconds")
280
+
281
+ # Return the filled results at the end of the epochs
282
+ return results
283
+
284
+ ## -----------------------------------------------------------------------------------------------------------------##
285
+ ## FINE-TUNING IN-DOMAIN ##
286
+ ## -----------------------------------------------------------------------------------------------------------------##
287
+ def fine_tune(model: torch.nn.Module,
288
+ device: torch.device,
289
+ train_dataloader: torch.utils.data.DataLoader,
290
+ val_dataloader: torch.utils.data.DataLoader,
291
+ optimizer: torch.optim.Optimizer,
292
+ scheduler: torch.optim.lr_scheduler,
293
+ loss_fn: torch.nn.Module,
294
+ epochs: int = 10,
295
+ load_path: str = None,
296
+ save_path: str = None,
297
+ useHeatmaps: bool = True,
298
+ patience: int = 10,
299
+ useGradAcc: int = 1):
300
+
301
+ assert load_path is not None, "You must provide a path to load the model from."
302
+
303
+ # Load the model from the path
304
+ checkpoint = torch.load(load_path, map_location=torch.device(device))
+ model.load_state_dict(checkpoint.get('model_state_dict', checkpoint), strict=False)  # checkpoints from this repo wrap the weights under 'model_state_dict'
305
+ model = model.to(device) # Move the model to the specified device
306
+
307
+ # Create empty results dictionary and initialize epoch
308
+ results = {"train_loss": [], "val_loss": []}
309
+ start_epoch = 1
310
+ best_val_loss = float("inf")
311
+ epochs_without_improvement = 0
312
+
313
+ # Start the timer
314
+ start_time = timer()
315
+
316
+ # Create EarlyStopping instance
317
+ early_stopping = EarlyStopping(patience=patience, save_path=save_path, counter=epochs_without_improvement, best_val_loss=best_val_loss)
318
+
319
+ # Loop through training and validating steps for a number of epochs
320
+ for epoch in tqdm(range(start_epoch, epochs + 1)):
321
+
322
+ assert useGradAcc >= 1, "Gradient accumulation steps must be at least 1"
323
+
324
+ train_loss = train_step(model, device, train_dataloader, loss_fn, optimizer, useHeatmaps, gradient_accumulation_steps=useGradAcc)
325
+
326
+ val_loss = validate_step(model, device, val_dataloader, loss_fn, useHeatmaps)
327
+
328
+ scheduler_type = scheduler.__class__.__name__
329
+ if scheduler_type == "ReduceLROnPlateau":
330
+ scheduler.step(val_loss)
331
+ else:
332
+ # Update the learning rate using the scheduler
333
+ scheduler.step()
334
+
335
+ # Print out what's happening
336
+ print(f"Epoch {epoch} | Train Loss: {train_loss:.7f} | Validation Loss: {val_loss:.7f}")
337
+
338
+ # Update results dictionary
339
+ results["train_loss"].append(train_loss)
340
+ results["val_loss"].append(val_loss)
341
+
342
+ # Check for early stopping
343
+ early_stopping.call(val_loss, model, optimizer, scheduler, loss_fn, results, epochs_without_improvement, epoch)
344
+ if early_stopping.early_stop:
345
+ print("Early stopping triggered.")
346
+ break
347
+
348
+ # End the timer and print out how long it took
349
+ end_time = timer()
350
+ print(f"Total training time: {end_time - start_time:.3f} seconds")
351
+
352
+ # Return the filled results at the end of the epochs
353
+ return results
354
+
355
+
356
+
357
+ ## -----------------------------------------------------------------------------------------------------------------##
358
+ ## EVALUATION PART ##
359
+ ## -----------------------------------------------------------------------------------------------------------------##
360
+ def test_step(model: torch.nn.Module,
361
+ device: torch.device,
362
+ dataloader: torch.utils.data.DataLoader,
363
+ loss_fn: torch.nn.Module,
364
+ num_landmarks: int,
365
+ useHeatmaps: bool = False,
366
+ sigma: float = 1.5,
367
+ load_path: str = None):
368
+
369
+ # Take the baseline of the path
370
+ if load_path is not None:
371
+ model_dir = os.path.dirname(load_path)
372
+
373
+ # Put model in eval mode
374
+ model = model.to(device)
375
+ model.eval()
376
+ model_name = model.__class__.__name__
377
+ model_encoder = model.encoder.__class__.__name__ if hasattr(model, 'encoder') else ""
378
+
379
+ # Setup test loss and test accuracy values
380
+ test_loss = 0.0
381
+ results = {}
382
+ distances = []
383
+
384
+ with torch.no_grad():
385
+ # Loop through DataLoader batches
386
+ for batch, data in enumerate(dataloader):
387
+ images_name = data['name']
388
+ images_tensor = data['image']
389
+ #image_size = images_tensor.numpy().shape[2:]
390
+ landmarks_tensor = data['landmarks']
391
+ heatmaps_tensor = data['heatmaps']
392
+ original_size = data['original_size']
393
+ resized_size = data['resized_size']
394
+
395
+ # Send data to target device
396
+ X = images_tensor.to(device)
397
+
398
+ if useHeatmaps:
399
+ y = heatmaps_tensor.to(device)
400
+ else:
401
+ y = landmarks_tensor.to(device)
402
+
403
+ # Forward pass
404
+ y_pred = model(X)
405
+
406
+ # Calculate and accumulate loss
407
+ loss = loss_fn(y_pred, y)
408
+ test_loss += loss.item()
409
+
410
+ # Move the prediction and the GT to the CPU
411
+ y_pred = y_pred.cpu()
412
+
413
+ # Save the prediction heatmaps as images in the model directory
414
+ #os.makedirs(f"{model_dir}/predictions", exist_ok=True)
415
+ #utilities.save_heatmaps(X, y_pred, images_name, f"{model_dir}/predictions")
416
+
417
+ # Compute the MSE and mAP between the original landmarks and the predicted landmarks
418
+ mse_list, mAP_list_heatmaps, mAP_list_keypoints, iou_list, distance_list = metrics.compute_batch_metrics(landmarks_tensor, heatmaps_tensor, y_pred, resized_size, num_landmarks, useHeatmaps, sigma)
419
+ # Append to full list in order to compute the MRE and SDR for all the images
420
+ distances.extend(distance_list)
421
+
422
+ # Store image names as keys and their corresponding predictions as values.
423
+ for i, name in enumerate(images_name): # Since they are in batch I loop them
424
+ # Storing prediction and metrics values
425
+ results[name] = {
426
+ 'prediction': y_pred[i],
427
+ 'mse': mse_list[i],
428
+ 'map1': mAP_list_heatmaps[i],
429
+ 'map2': mAP_list_keypoints[i],
430
+ 'iou': iou_list[i]
431
+ }
432
+
433
+ del batch, data, images_name, images_tensor, landmarks_tensor, heatmaps_tensor, original_size, resized_size, X, y, y_pred, loss, mse_list, mAP_list_heatmaps, mAP_list_keypoints, iou_list, distance_list # Free memory
434
+
435
+
436
+ # Adjust metrics to get average loss and accuracy per batch
437
+ test_loss = test_loss / len(dataloader)
438
+
439
+ # Compute metrics on full list
440
+ #print("Dist shape:", len(distances))
441
+ #print("Mean distance:", np.mean(distances))
442
+ #print("Std distance:", np.std(distances))
443
+ #print("Distances under 3px:", len([i for i in distances if i < 3]))
444
+ #print("Distances above 15px:", len([i for i in distances if i > 15]))
445
+
446
+ mre = metrics.compute_mre(distances)
447
+ sdr = metrics.compute_sdr(distances)
448
+
449
+ return test_loss, results, mre, sdr
450
+
451
+
452
+ def evaluate(model: torch.nn.Module,
453
+ device: torch.device,
454
+ test_dataloader: torch.utils.data.DataLoader,
455
+ loss_fn: torch.nn.Module,
456
+ load_path: str,
457
+ num_landmarks: int = 6,
458
+ useHeatmaps: bool = True,
459
+ sigma: float = 1.5,
460
+ currentKfold: int = 1,
461
+ res_file_path: str = "results/readable_res.csv"):
462
+
463
+ checkpoint = torch.load(load_path, map_location=torch.device(device))
464
+ #model.load_state_dict(checkpoint['model'])
465
+
466
+ model.load_state_dict(checkpoint['model_state_dict'])
467
+ print(f"\nModel loaded from {load_path}")
468
+ epoch = checkpoint.get('epoch', "Undefined")
469
+
470
+ # Get the loss and the predictions dictionary
471
+ test_loss, results, mre, sdr = test_step(model, device, test_dataloader, loss_fn, num_landmarks, useHeatmaps, sigma, load_path)
472
+
473
+ total_mse_list = []
474
+ total_mAP_heatmaps_list = []
475
+ total_mAP_keypoints_list = []
476
+ total_iou_list = []
477
+
478
+ # Create a list with all metrics of all images
479
+ for value in results.values():
480
+ total_mse_list.append(value['mse'])
481
+ total_mAP_heatmaps_list.append(value['map1'])
482
+ total_mAP_keypoints_list.append(value['map2'])
483
+ total_iou_list.append(value['iou'])
484
+
485
+ # Compute the mean between all samples
486
+ total_mse_mean = np.mean(total_mse_list)
487
+ total_mAP_heatmaps_mean = np.mean(total_mAP_heatmaps_list)
488
+ total_mAP_keypoints_mean = np.mean(total_mAP_keypoints_list)
489
+ total_iou_mean = np.mean(total_iou_list)
490
+
491
+ # Compute the standard deviation between all samples
492
+ total_mse_std = np.std(total_mse_list)
493
+ total_mAP_heatmaps_std = np.std(total_mAP_heatmaps_list)
494
+ total_mAP_keypoints_std = np.std(total_mAP_keypoints_list)
495
+ total_iou_std = np.std(total_iou_list)
496
+
497
+ # Create a string representation of the sdr dictionary
498
+ sdr_str = '\n'.join(f'\tThresholds {k}: {v*100:.2f}' for k, v in sorted(sdr.items()))
499
+
500
+ # Print and Save results
501
+ res_file = open(res_file_path, 'a')
502
+ print(f"\n{load_path}", file=res_file)
503
+ print(f"Fold {currentKfold} - Epoch: {epoch} | MSE: {total_mse_mean:.2f} ± {total_mse_std:.2f} | mAP heat: {total_mAP_heatmaps_mean:.2f} ± {total_mAP_heatmaps_std:.2f} | mAP key: {total_mAP_keypoints_mean:.2f} ± {total_mAP_keypoints_std:.2f} | IoU: {total_iou_mean:.2f} ± {total_iou_std:.2f} \nMRE: {mre:.2f} \nSDR: \n{sdr_str}", file=res_file)
504
+ res_file.close()
505
+
506
+ print(f"Fold {currentKfold} - Epoch: {epoch} | \nMSE: {total_mse_mean:.2f} ± {total_mse_std:.2f} | \nmAP heat: {total_mAP_heatmaps_mean:.2f} ± {total_mAP_heatmaps_std:.2f} | mAP key: {total_mAP_keypoints_mean:.2f} ± {total_mAP_keypoints_std:.2f} | \nIoU: {total_iou_mean:.2f} ± {total_iou_std:.2f} | \nMRE: {mre:.2f} | \nSDR: \n{sdr_str}")
507
+ del total_mse_list, total_mAP_heatmaps_list, total_mAP_keypoints_list, total_iou_list
508
+
509
+ return test_loss, results, mre, sdr, total_mse_mean, total_mAP_heatmaps_mean, total_mAP_keypoints_mean, total_iou_mean, epoch
510
+
511
+
512
+
513
+
514
+ # ------------------------------------------------------------------------
515
+ # Reinstantiate Model
516
+ # ------------------------------------------------------------------------
517
+
518
+ def reset_all_weights(model: nn.Module) -> None:
519
+ """
520
+ refs:
521
+ - https://discuss.pytorch.org/t/how-to-re-set-alll-parameters-in-a-network/20819/6
522
+ - https://stackoverflow.com/questions/63627997/reset-parameters-of-a-neural-network-in-pytorch
523
+ - https://pytorch.org/docs/stable/generated/torch.nn.Module.html
524
+ """
525
+
526
+ @torch.no_grad()
527
+ def weight_reset(m: nn.Module):
528
+ # - check if the current module has reset_parameters and, if it is callable, call it on m
529
+ reset_parameters = getattr(m, "reset_parameters", None)
530
+ if callable(reset_parameters):
531
+ m.reset_parameters()
532
+
533
+ # Applies fn recursively to every submodule see: https://pytorch.org/docs/stable/generated/torch.nn.Module.html
534
+ model.apply(fn=weight_reset)
535
+
536
+ def reinstantiate_model(model, optimizer, scheduler):
537
+ model_type = model.__class__.__name__
538
+ scheduler_type = scheduler.__class__.__name__
539
+ optimizer_type = optimizer.__class__.__name__
540
+ #print(scheduler_params)
541
+
542
+ reset_all_weights(model)
543
+
544
+ if optimizer_type == 'AdamW':
545
+ optimizer = torch.optim.AdamW(params=model.parameters(), lr=optimizer.param_groups[0]['lr'])
546
+ else:
547
+ raise ValueError(f"Unsupported optimizer type: {optimizer_type}")
548
+
549
+ if scheduler_type == 'ReduceLROnPlateau':
550
+ scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=scheduler.factor, patience=scheduler.patience, verbose=True, mode=scheduler.mode)
551
+ else:
552
+ raise ValueError(f"Unsupported scheduler type: {scheduler_type}")
553
+
554
+ return model, optimizer, scheduler
555
+
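+ # Note (hedged): reinstantiate_model resets the weights but re-creates the optimizer with
+ # optimizer.param_groups[0]['lr'], i.e. the *current* learning rate. If ReduceLROnPlateau
+ # already decayed the LR in a previous fold, the next fold starts from the decayed value
+ # rather than the original one, which may be intentional but is worth knowing.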
556
+
557
+ ## -----------------------------------------------------------------------------------------------------------------##
558
+ ## K-FOLD ##
559
+ ## -----------------------------------------------------------------------------------------------------------------##
560
+
561
+
562
+ def k_fold_train_and_validate(model: torch.nn.Module,
563
+ device: torch.device,
564
+ train_dataset: torch.utils.data.Dataset,
565
+ optimizer: torch.optim.Optimizer,
566
+ scheduler: torch.optim.lr_scheduler,
567
+ loss_fn: torch.nn.Module,
568
+ epochs: int,
569
+ early_stopping: int,
570
+ batch_size: int,
571
+ gradient_accumulation_steps: int,
572
+ num_landmarks: int,
573
+ sigma: int,
574
+ save_model_path: str,
575
+ log_file: str,
576
+ k_folds: int = 5,
577
+ onlyInference: bool = True
578
+ ):
579
+
580
+ if onlyInference:
581
+ k_train_losses = [0]
582
+ k_val_losses = [0]
583
+ else:
584
+ k_train_losses = []
585
+ k_val_losses = []
586
+
587
+ k_test_losses = []
588
+ k_mse = []
589
+ k_iou = []
590
+ k_map_heat = []
591
+ k_map_key = []
592
+ k_mre = []
593
+ k_sdr = {}
594
+
595
+ results_folds = []
596
+
597
+ # Get the total number of samples
598
+ total_size = len(train_dataset)
599
+
600
+ # Divide by the number of folds to get the size of each fold
601
+ fold_size = total_size // k_folds
602
+
603
+ indices = list(range(total_size))
604
+
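+ # Worked example (illustrative only): total_size = 100, k_folds = 5 -> fold_size = 20,
+ # so fold 2 validates on indices 40..59 and trains on 0..39 plus 60..99. Because of the
+ # integer division, with e.g. 103 samples the last 3 indices never appear in a validation
+ # fold (they are only ever used for training).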
605
+
606
+ for fold in range(k_folds):
607
+
608
+ # Assign the fold as the val set
609
+ val_ids = indices[fold*fold_size:(fold+1)*fold_size]
610
+
611
+ # The remaining data will be used for training
612
+ train_ids = indices[:fold*fold_size] + indices[(fold+1)*fold_size:]
613
+
614
+ # Create the subsets
615
+ train_subsampler = torch.utils.data.SubsetRandomSampler(train_ids)
616
+ val_subsampler = torch.utils.data.SubsetRandomSampler(val_ids)
617
+
618
+ # Create the data loaders
619
+ train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, sampler=train_subsampler, num_workers=4, pin_memory=True, drop_last=True)
620
+ val_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, sampler=val_subsampler, num_workers=4, pin_memory=True)
621
+
622
+ save_fold_path = f"{save_model_path}/fold_{fold}"
623
+ print(f"Training fold {fold}...")
624
+ print(f"Path: {save_fold_path}")
625
+
626
+ if not onlyInference:
627
+
628
+ model, optimizer, scheduler = reinstantiate_model(model, optimizer, scheduler)
629
+
630
+ # Train on the current fold
631
+ fold_train_results = train_and_validate(model, device, train_loader, val_loader, optimizer, scheduler, loss_fn, epochs,
632
+ save_fold_path, patience=early_stopping, useGradAcc=gradient_accumulation_steps, continue_training=False)
633
+
634
+ last_train_loss = fold_train_results['train_loss'][-1]
635
+ last_val_loss = fold_train_results['val_loss'][-1]
636
+
637
+ k_train_losses.append(last_train_loss)
638
+ k_val_losses.append(last_val_loss)
639
+
640
+ print(f"FOLD {fold} | Train loss: {last_train_loss} | Val loss: {last_val_loss}")
641
+ del fold_train_results, last_train_loss, last_val_loss, train_loader, train_subsampler, val_subsampler, train_ids, val_ids
642
+
643
+
644
+ # ---------------------- Evaluate performance on the validation fold (the model never trains on these images; they are used only for model selection) -------------------------------
645
+ load_fold_path = os.path.join(save_fold_path, f"best_checkpoint.pt")
646
+ # Get the loss and the predictions dictionary
647
+ test_loss, results, mre, sdr, mse, mAP_heatmaps, mAP_keypoints, iou, epoch = evaluate(model, device, val_loader, loss_fn, load_fold_path,
648
+ num_landmarks, sigma, res_file_path=log_file)
649
+
650
+ k_test_losses.append(test_loss)
651
+
652
+ k_mre.append(mre)
653
+
654
+ # Update the sdr dictionary
655
+ for threshold, value in sdr.items():
656
+ if threshold not in k_sdr:
657
+ k_sdr[threshold] = []
658
+ k_sdr[threshold].append(value)
659
+
660
+ # Create a list with all metrics of all images
661
+ for value in results.values():
662
+ k_mse.append(value['mse'])
663
+ k_map_heat.append(value['map1'])
664
+ k_map_key.append(value['map2'])
665
+ k_iou.append(value['iou'])
666
+
667
+ del test_loss, results, mre, sdr, load_fold_path, val_loader,
668
+
669
+ # Compute the mean and SD for each threshold
670
+ sdr_mean_std = {threshold: (np.mean(values), np.std(values)) for threshold, values in k_sdr.items()}
671
+
672
+ # Compute the mean for the losses
673
+ k_train_loss_mean = np.mean(k_train_losses)
674
+ k_train_loss_std = np.std(k_train_losses)
675
+
676
+ k_val_loss_mean = np.mean(k_val_losses)
677
+ k_val_loss_std = np.std(k_val_losses)
678
+
679
+ k_test_loss_mean = np.mean(k_test_losses)
680
+ k_test_loss_std = np.std(k_test_losses)
681
+
682
+ # Compute the mean between all samples
683
+ k_mse_mean = np.mean(k_mse)
684
+ k_map_heat_mean = np.mean(k_map_heat)
685
+ k_map_key_mean = np.mean(k_map_key)
686
+ k_iou_mean = np.mean(k_iou)
687
+
688
+ # Compute the standard deviation between all samples
689
+ k_mse_std = np.std(k_mse)
690
+ k_map_heat_std = np.std(k_map_heat)
691
+ k_map_key_std = np.std(k_map_key)
692
+ k_iou_std = np.std(k_iou)
693
+
694
+ # Compute the mean MRE and mean SDR
695
+ k_mre_mean = np.mean(k_mre)
696
+ k_mre_std = np.std(k_mre)
697
+
698
+ res_file = open(log_file, 'a')
699
+ print(f"----------------------------------------------------------------- GLOBAL RES for {k_folds} Folds \n",
700
+ f"Train loss ---> Mean: {k_train_loss_mean} | Std: {k_train_loss_std} \n",
701
+ f"Val loss ---> Mean: {k_val_loss_mean} | Std: {k_val_loss_std} \n",
702
+ f"Test loss ---> Mean: {k_test_loss_mean} | Std: {k_test_loss_std} \n",
703
+ f"MSE ---> Mean: {k_mse_mean:.2f} | Std: {k_mse_std:.2f} \n",
704
+ f"mAp heat ---> Mean: {k_map_heat_mean:.2f} | Std: {k_map_heat_std:.2f} \n",
705
+ f"mAp key ---> Mean: {k_map_key_mean:.2f} | Std: {k_map_key_std:.2f} \n",
706
+ f"IOU ---> Mean: {k_iou_mean:.2f} | Std: {k_iou_std:.2f} \n",
707
+ f"MRE ---> Mean: {k_mre_mean:.2f} | Std: {k_mre_std:.2f} \n",
708
+ f"SDR:\n",
709
+ *(f"Threshold {threshold}: Mean: {mean*100:.2f} | Std: {std*100:.2f}\n" for threshold, (mean, std) in sdr_mean_std.items()),
710
+ file=res_file)
711
+ res_file.close()
712
+
713
+
714
+ print(f"----------------------------------------------------------------- GLOBAL RES for {k_folds} Folds \n",
715
+ f"Train loss ---> Mean: {k_train_loss_mean} | Std: {k_train_loss_std} \n",
716
+ f"Val loss ---> Mean: {k_val_loss_mean} | Std: {k_val_loss_std} \n",
717
+ f"Test loss ---> Mean: {k_test_loss_mean} | Std: {k_test_loss_std} \n",
718
+ f"MSE ---> Mean: {k_mse_mean:.2f} | Std: {k_mse_std:.2f} \n",
719
+ f"mAp heat ---> Mean: {k_map_heat_mean:.2f} | Std: {k_map_heat_std:.2f} \n",
720
+ f"mAp key ---> Mean: {k_map_key_mean:.2f} | Std: {k_map_key_std:.2f} \n",
721
+ f"IOU ---> Mean: {k_iou_mean:.2f} | Std: {k_iou_std:.2f} \n",
722
+ f"MRE ---> Mean: {k_mre_mean:.2f} | Std: {k_mre_std:.2f} \n",
723
+ f"SDR:\n",
724
+ *(f"Threshold {threshold}: Mean: {mean*100:.2f} | Std: {std*100:.2f}\n" for threshold, (mean, std) in sdr_mean_std.items()))
725
+ del k_train_losses, k_val_losses, k_test_losses, k_mse, k_iou, k_map_heat, k_map_key, k_mre, k_sdr, results_folds, train_dataset, total_size, fold_size, indices
DiffusionXray-FewShot-LandmarkDetection/downstream_task/model/models.py ADDED
@@ -0,0 +1,463 @@
1
+
2
+
3
+ # https://github.com/abarankab/DDPM/blob/main/ddpm/unet.py
4
+
5
+
6
+ import math
7
+ from inspect import isfunction
8
+ from functools import partial
9
+
10
+ # from tqdm.auto import tqdm
11
+ from einops import rearrange, reduce
12
+ from einops.layers.torch import Rearrange
13
+
14
+ import torch
15
+ from torch import nn, einsum
16
+ import torch.nn.functional as F
17
+
18
+ from torch.utils.checkpoint import checkpoint
19
+
20
+ class EMA:
21
+ def __init__(self, beta):
22
+ super().__init__()
23
+ self.beta = beta
24
+ self.step = 0
25
+
26
+ def update_model_average(self, ma_model, current_model):
27
+ for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()):
28
+ old_weight, up_weight = ma_params.data, current_params.data
29
+ ma_params.data = self.update_average(old_weight, up_weight)
30
+
31
+ def update_average(self, old, new):
32
+ if old is None:
33
+ return new
34
+ return old * self.beta + (1 - self.beta) * new
35
+
36
+ def step_ema(self, ema_model, model, step_start_ema=2000):
37
+ if self.step < step_start_ema:
38
+ self.reset_parameters(ema_model, model)
39
+ self.step += 1
40
+ return
41
+ self.update_model_average(ema_model, model)
42
+ self.step += 1
43
+
44
+ def reset_parameters(self, ema_model, model):
45
+ ema_model.load_state_dict(model.state_dict())
46
+
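+ # Usage sketch (illustrative only, names assumed): keep a frozen copy of the model and call
+ #   ema = EMA(beta=0.995)
+ #   ema.step_ema(ema_model, model)   # once per optimizer step
+ # For the first step_start_ema steps the EMA model is simply synced to the raw weights;
+ # afterwards each parameter follows old * beta + (1 - beta) * new, e.g. with beta = 0.99,
+ # old = 1.0, new = 0.0 the averaged weight becomes 0.99.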
47
+
48
+ def exists(x):
49
+ return x is not None
50
+
51
+
52
+ def default(val, d):
53
+ if exists(val):
54
+ return val
55
+ return d() if isfunction(d) else d
56
+
57
+
58
+ def num_to_groups(num, divisor):
59
+ groups = num // divisor
60
+ remainder = num % divisor
61
+ arr = [divisor] * groups
62
+ if remainder > 0:
63
+ arr.append(remainder)
64
+ return arr
65
+
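+ # Worked example (illustrative only): num_to_groups(10, 3) == [3, 3, 3, 1],
+ # i.e. 10 samples split into chunks of at most 3 (handy for batching a fixed sample count).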
66
+
67
+ class Residual(nn.Module):
68
+ def __init__(self, fn):
69
+ super().__init__()
70
+ self.fn = fn
71
+
72
+ def forward(self, x, *args, **kwargs):
73
+ return self.fn(x, *args, **kwargs) + x
74
+
75
+
76
+ def Upsample(dim, dim_out=None):
77
+ return nn.Sequential(
78
+ nn.Upsample(scale_factor=2, mode="nearest"),
79
+ nn.Conv2d(dim, default(dim_out, dim), 3, padding=1),
80
+ )
81
+
82
+
83
+ def Downsample(dim, dim_out=None):
84
+ # No More Strided Convolutions or Pooling
85
+ return nn.Sequential(
86
+ Rearrange("b c (h p1) (w p2) -> b (c p1 p2) h w", p1=2, p2=2),
87
+ nn.Conv2d(dim * 4, default(dim_out, dim), 1),
88
+ )
89
+
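+ # Shape sketch (illustrative only): for an input of shape (b, c, h, w)
+ #   Downsample: Rearrange packs each 2x2 patch into channels -> (b, 4c, h/2, w/2),
+ #               then a 1x1 conv maps 4c -> dim_out, so no information is discarded
+ #               (unlike strided conv or pooling).
+ #   Upsample:   nearest-neighbor doubling -> (b, c, 2h, 2w), then a 3x3 conv -> dim_out.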
90
+
91
+ class SinusoidalPositionEmbeddings(nn.Module):
92
+ def __init__(self, dim):
93
+ super().__init__()
94
+ self.dim = dim
95
+
96
+ def forward(self, time):
97
+ device = time.device
98
+ half_dim = self.dim // 2
99
+ embeddings = math.log(10000) / (half_dim - 1)
100
+ embeddings = torch.exp(torch.arange(half_dim, device=device) * -embeddings)
101
+ embeddings = time[:, None] * embeddings[None, :]
102
+ embeddings = torch.cat((embeddings.sin(), embeddings.cos()), dim=-1)
103
+ return embeddings
104
+
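+ # Shape sketch (illustrative only): for a batch of timesteps `time` of shape (B,) and dim = 8,
+ # half_dim = 4 frequencies are built as exp(-arange(4) * log(10000) / 3); the outer product
+ # with `time` gives (B, 4), and concatenating sin and cos yields embeddings of shape (B, 8).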
105
+
106
+ class WeightStandardizedConv2d(nn.Conv2d):
107
+ """
108
+ https://arxiv.org/abs/1903.10520
109
+ weight standardization purportedly works synergistically with group normalization
110
+ """
111
+
112
+ def forward(self, x):
113
+ eps = 1e-5 if x.dtype == torch.float32 else 1e-3
114
+
115
+ weight = self.weight
116
+ mean = reduce(weight, "o ... -> o 1 1 1", "mean")
117
+ var = reduce(weight, "o ... -> o 1 1 1", partial(torch.var, unbiased=False))
118
+ normalized_weight = (weight - mean) * (var + eps).rsqrt()
119
+
120
+ return F.conv2d(
121
+ x,
122
+ normalized_weight,
123
+ self.bias,
124
+ self.stride,
125
+ self.padding,
126
+ self.dilation,
127
+ self.groups,
128
+ )
129
+
130
+
131
+ class Block(nn.Module):
132
+ def __init__(self, dim, dim_out, groups=8):
133
+ super().__init__()
134
+ self.proj = WeightStandardizedConv2d(dim, dim_out, 3, padding=1)
135
+ self.norm = nn.GroupNorm(groups, dim_out)
136
+ self.act = nn.SiLU()
137
+
138
+ def forward(self, x, scale_shift=None):
139
+ x = self.proj(x)
140
+ x = self.norm(x)
141
+
142
+ if exists(scale_shift):
143
+ scale, shift = scale_shift
144
+ x = x * (scale + 1) + shift
145
+
146
+ x = self.act(x)
147
+ return x
148
+
149
+
150
+ class ResnetBlock(nn.Module):
151
+ """https://arxiv.org/abs/1512.03385"""
152
+
153
+ def __init__(self, dim, dim_out, *, time_emb_dim=None, groups=8):
154
+ super().__init__()
155
+ self.mlp = (
156
+ nn.Sequential(nn.SiLU(), nn.Linear(time_emb_dim, dim_out * 2))
157
+ if exists(time_emb_dim)
158
+ else None
159
+ )
160
+
161
+ self.block1 = Block(dim, dim_out, groups=groups)
162
+ self.block2 = Block(dim_out, dim_out, groups=groups)
163
+ self.res_conv = nn.Conv2d(dim, dim_out, 1) if dim != dim_out else nn.Identity()
164
+
165
+ def forward(self, x, time_emb=None):
166
+ scale_shift = None
167
+ if exists(self.mlp) and exists(time_emb):
168
+ time_emb = self.mlp(time_emb)
169
+ time_emb = rearrange(time_emb, "b c -> b c 1 1")
170
+ scale_shift = time_emb.chunk(2, dim=1)
171
+
172
+ h = self.block1(x, scale_shift=scale_shift)
173
+ h = self.block2(h)
174
+ return h + self.res_conv(x)
175
+
176
+
177
+ class Attention(nn.Module):
178
+ def __init__(self, dim, heads=4, dim_head=32):
179
+ super().__init__()
180
+ self.scale = dim_head**-0.5
181
+ self.heads = heads
182
+ hidden_dim = dim_head * heads
183
+ self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias=False)
184
+ self.to_out = nn.Conv2d(hidden_dim, dim, 1)
185
+
186
+ def forward(self, x):
187
+ b, c, h, w = x.shape
188
+ qkv = self.to_qkv(x).chunk(3, dim=1)
189
+ q, k, v = map(
190
+ lambda t: rearrange(t, "b (h c) x y -> b h c (x y)", h=self.heads), qkv
191
+ )
192
+ q = q * self.scale
193
+
194
+ sim = einsum("b h d i, b h d j -> b h i j", q, k)
195
+ sim = sim - sim.amax(dim=-1, keepdim=True).detach()
196
+ attn = sim.softmax(dim=-1)
197
+
198
+ out = einsum("b h i j, b h d j -> b h i d", attn, v)
199
+ out = rearrange(out, "b h (x y) d -> b (h d) x y", x=h, y=w)
200
+ return self.to_out(out)
201
+
202
+
203
+ class LinearAttention(nn.Module):
204
+ def __init__(self, dim, heads=4, dim_head=32):
205
+ super().__init__()
206
+ self.scale = dim_head**-0.5
207
+ self.heads = heads
208
+ hidden_dim = dim_head * heads
209
+ self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias=False)
210
+
211
+ self.to_out = nn.Sequential(nn.Conv2d(hidden_dim, dim, 1), nn.GroupNorm(1, dim))
212
+
213
+ def forward(self, x):
214
+ b, c, h, w = x.shape
215
+ qkv = self.to_qkv(x).chunk(3, dim=1)
216
+ q, k, v = map(
217
+ lambda t: rearrange(t, "b (h c) x y -> b h c (x y)", h=self.heads), qkv
218
+ )
219
+
220
+ q = q.softmax(dim=-2)
221
+ k = k.softmax(dim=-1)
222
+
223
+ q = q * self.scale
224
+ context = torch.einsum("b h d n, b h e n -> b h d e", k, v)
225
+
226
+ out = torch.einsum("b h d e, b h d n -> b h e n", context, q)
227
+ out = rearrange(out, "b h c (x y) -> b (h c) x y", h=self.heads, x=h, y=w)
228
+ return self.to_out(out)
229
+
230
+
231
+ class PreNorm(nn.Module):
232
+ def __init__(self, dim, fn):
233
+ super().__init__()
234
+ self.fn = fn
235
+ self.norm = nn.GroupNorm(1, dim)
236
+
237
+ def forward(self, x):
238
+ x = self.norm(x)
239
+ return self.fn(x)
240
+
241
+
242
+
243
+ # ------------------------------------------------------------------------
244
+ # Unet Model with Time Embeddings
245
+ # ------------------------------------------------------------------------
246
+
247
+ class Unet(nn.Module):
248
+ def __init__(
249
+ self,
250
+ dim,
251
+ init_dim=None,
252
+ out_dim=None,
253
+ dim_mults=(1, 2, 4, 8),
254
+ channels=3,
255
+ self_condition=False,
256
+ resnet_block_groups=4,
257
+ att_res=32,
258
+ att_heads=4,
259
+ ):
260
+ super().__init__()
261
+
262
+ # determine dimensions
263
+ self.channels = channels
264
+ self.self_condition = self_condition
265
+ # input_channels = channels * (2 if self_condition else 1)
266
+ input_channels = channels if not self_condition else channels + 1
267
+
268
+ init_dim = default(init_dim, dim)
269
+ self.init_conv = nn.Conv2d(
270
+ input_channels, init_dim, 1, padding=0
271
+ ) # changed to 1 and 0 from 7,3
272
+
273
+ dims = [init_dim, *map(lambda m: dim * m, dim_mults)]
274
+ in_out = list(zip(dims[:-1], dims[1:]))
275
+
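+ # Worked example (illustrative only): dim = 64, dim_mults = (1, 2, 4, 8) gives
+ #   dims   = [64, 64, 128, 256, 512]
+ #   in_out = [(64, 64), (64, 128), (128, 256), (256, 512)]
+ # i.e. four resolution stages on the way down, mirrored on the way up.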
276
+ block_klass = partial(ResnetBlock, groups=resnet_block_groups)
277
+
278
+ # time embeddings
279
+ time_dim = dim * 4
280
+
281
+ self.time_mlp = nn.Sequential(
282
+ SinusoidalPositionEmbeddings(dim),
283
+ nn.Linear(dim, time_dim),
284
+ nn.GELU(),
285
+ nn.Linear(time_dim, time_dim),
286
+ )
287
+
288
+ # layers
289
+ self.downs = nn.ModuleList([])
290
+ self.ups = nn.ModuleList([])
291
+ num_resolutions = len(in_out)
292
+
293
+ for ind, (dim_in, dim_out) in enumerate(in_out):
294
+ is_last = ind >= (num_resolutions - 1)
295
+
296
+ self.downs.append(
297
+ nn.ModuleList(
298
+ [
299
+ block_klass(dim_in, dim_in, time_emb_dim=time_dim),
300
+ block_klass(dim_in, dim_in, time_emb_dim=time_dim),
301
+ Residual(
302
+ PreNorm(dim_in, LinearAttention(dim_in, att_heads, att_res))
303
+ ),
304
+ Downsample(dim_in, dim_out)
305
+ if not is_last
306
+ else nn.Conv2d(dim_in, dim_out, 3, padding=1),
307
+ ]
308
+ )
309
+ )
310
+
311
+ mid_dim = dims[-1]
312
+ self.mid_block1 = block_klass(mid_dim, mid_dim, time_emb_dim=time_dim)
313
+ self.mid_attn = Residual(
314
+ PreNorm(mid_dim, Attention(mid_dim, att_heads, att_res))
315
+ )
316
+ self.mid_block2 = block_klass(mid_dim, mid_dim, time_emb_dim=time_dim)
317
+
318
+ for ind, (dim_in, dim_out) in enumerate(reversed(in_out)):
319
+ is_last = ind == (len(in_out) - 1)
320
+
321
+ self.ups.append(
322
+ nn.ModuleList(
323
+ [
324
+ block_klass(dim_out + dim_in, dim_out, time_emb_dim=time_dim),
325
+ block_klass(dim_out + dim_in, dim_out, time_emb_dim=time_dim),
326
+ Residual(
327
+ PreNorm(
328
+ dim_out, LinearAttention(dim_out, att_heads, att_res)
329
+ )
330
+ ),
331
+ Upsample(dim_out, dim_in)
332
+ if not is_last
333
+ else nn.Conv2d(dim_out, dim_in, 3, padding=1),
334
+ ]
335
+ )
336
+ )
337
+
338
+ self.out_dim = default(out_dim, channels)
339
+
340
+ self.final_res_block = block_klass(dim * 2, dim, time_emb_dim=time_dim)
341
+ self.final_conv = nn.Conv2d(dim, self.out_dim, 1)
342
+
343
+
344
+ def forward(self, x, time=None, x_self_cond=None, checkpointing=True):
345
+ if self.self_condition:
346
+ x_self_cond = default(x_self_cond, lambda: torch.zeros_like(x))
347
+ x = torch.cat((x_self_cond, x), dim=1)
348
+
349
+ x = self.init_conv(x)
350
+ r = x.clone()
351
+
352
+ # Only compute time embedding if time is provided
353
+ if time is not None:
354
+ t = self.time_mlp(time)
355
+ else:
356
+ t = None
357
+
358
+ h = []
359
+
360
+ # If checkpointing is enabled, run the model in a memory efficient way
361
+ if checkpointing:
362
+ for block1, block2, attn, downsample in self.downs:
363
+ x = checkpoint(block1, x, t, use_reentrant=False)
364
+ h.append(x)
365
+
366
+ x = checkpoint(block2, x, t, use_reentrant=False)
367
+ x = checkpoint(attn, x, use_reentrant=False)
368
+ h.append(x)
369
+
370
+ x = checkpoint(downsample, x, use_reentrant=False)
371
+
372
+ x = checkpoint(self.mid_block1, x, t, use_reentrant=False)
373
+ x = checkpoint(self.mid_attn, x, use_reentrant=False)
374
+ x = checkpoint(self.mid_block2, x, t, use_reentrant=False)
375
+
376
+ for block1, block2, attn, upsample in self.ups:
377
+ x = torch.cat((x, h.pop()), dim=1)
378
+ x = checkpoint(block1, x, t, use_reentrant=False)
379
+
380
+ x = torch.cat((x, h.pop()), dim=1)
381
+ x = checkpoint(block2, x, t, use_reentrant=False)
382
+ x = checkpoint(attn, x, use_reentrant=False)
383
+
384
+ x = checkpoint(upsample, x, use_reentrant=False)
385
+
386
+ # If checkpointing is not enabled, run the model normally
387
+ else:
388
+ for block1, block2, attn, downsample in self.downs:
389
+ x = block1(x, t)
390
+ h.append(x)
391
+
392
+ x = block2(x, t)
393
+ x = attn(x)
394
+ h.append(x)
395
+
396
+ x = downsample(x)
397
+
398
+ x = self.mid_block1(x, t)
399
+ x = self.mid_attn(x)
400
+ x = self.mid_block2(x, t)
401
+
402
+ for block1, block2, attn, upsample in self.ups:
403
+ x = torch.cat((x, h.pop()), dim=1)
404
+ x = block1(x, t)
405
+
406
+ x = torch.cat((x, h.pop()), dim=1)
407
+ x = block2(x, t)
408
+ x = attn(x)
409
+
410
+ x = upsample(x)
411
+
412
+ x = torch.cat((x, r), dim=1)
413
+
414
+ x = self.final_res_block(x, t)
415
+ return self.final_conv(x)
416
+
417
+
418
+
419
+
420
+ def cosine_beta_schedule(timesteps, s=0.008):
421
+ """
422
+ cosine schedule as proposed in https://arxiv.org/abs/2102.09672
423
+ """
424
+ steps = timesteps + 1
425
+ x = torch.linspace(0, timesteps, steps)
426
+ alphas_cumprod = torch.cos(((x / timesteps) + s) / (1 + s) * torch.pi * 0.5) ** 2
427
+ alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
428
+ betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
429
+ return torch.clip(betas, 0.0001, 0.9999)
430
+
431
+ def linear_beta_schedule(timesteps, beta_start=0.0001, beta_end=0.02):
432
+ return torch.linspace(beta_start, beta_end, timesteps)
433
+
434
+
435
+ def quadratic_beta_schedule(timesteps, beta_start=0.0001, beta_end=0.02):
436
+ return torch.linspace(beta_start**0.5, beta_end**0.5, timesteps) ** 2
437
+
438
+
439
+ def sigmoid_beta_schedule(timesteps, beta_start=0.0001, beta_end=0.02):
440
+ betas = torch.linspace(-6, 6, timesteps)
441
+ return torch.sigmoid(betas) * (beta_end - beta_start) + beta_start
442
+
443
+
444
+
445
+
446
+
447
+
448
+ # ------------------------------------------------------------------------
449
+ # Unet Model pre-trained on ImageNet
450
+ # ------------------------------------------------------------------------
451
+
452
+ from segmentation_models_pytorch import Unet as smpUnet
453
+
454
+ class smpUnet(smpUnet):
455
+ def __init__(self, encoder_name, encoder_weights, in_channels, classes):
456
+ super().__init__(encoder_name=encoder_name, encoder_weights=encoder_weights, in_channels=in_channels, classes=classes)
457
+ self.encoder_name = encoder_name
458
+ self.encoder_weights = encoder_weights
459
+ self.in_channels = in_channels
460
+ self.classes = classes
461
+
462
+
463
+
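A minimal usage sketch of how these pieces fit together, assuming the file is importable as `ddpm_pretraining.model.nn_blocks`; the sizes below are hypothetical (the real ones come from the JSON configs), and the actual training loop lives elsewhere in the repo:

    # Illustrative only: hypothetical dims and resolution, single-channel X-rays.
    import torch
    from ddpm_pretraining.model.nn_blocks import Unet, linear_beta_schedule

    model = Unet(dim=32, channels=1, dim_mults=(1, 2, 4, 8))

    # Forward-process quantities derived from a beta schedule
    T = 1000
    betas = linear_beta_schedule(T)                     # (T,)
    alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)  # \bar{alpha}_t

    # Noise a clean image x0 at random timesteps t:
    # x_t = sqrt(abar_t) * x0 + sqrt(1 - abar_t) * eps
    x0 = torch.rand(2, 1, 64, 64)
    t = torch.randint(0, T, (2,))
    eps = torch.randn_like(x0)
    abar_t = alphas_cumprod[t].view(-1, 1, 1, 1)
    x_t = abar_t.sqrt() * x0 + (1.0 - abar_t).sqrt() * eps

    # The U-Net predicts the added noise from (x_t, t)
    eps_pred = model(x_t, t, checkpointing=False)
    print(eps_pred.shape)  # torch.Size([2, 1, 64, 64])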
DiffusionXray-FewShot-LandmarkDetection/downstream_task/utilities.py ADDED
@@ -0,0 +1,253 @@
+ import numpy as np
+ import matplotlib.pyplot as plt
+ import cv2
+ import os
+ import requests
+ import torch
+ from GPUtil import showUtilization as gpu_usage
+ from prettytable import PrettyTable
+
+ ## -----------------------------------------------------------------------------------------------------------------##
+ ##                                     CLEAN GPU MEMORY USAGE AND DATASETS                                           ##
+ ## -----------------------------------------------------------------------------------------------------------------##
+
+ def free_gpu_cache():
+     print("Initial GPU Usage")
+     gpu_usage()
+     torch.cuda.synchronize()
+     torch.cuda.empty_cache()
+     print("GPU Usage after emptying the cache")
+     gpu_usage()
+
+
+ # Compute the number of trainable parameters in a model
+ def count_parameters(model):
+     table = PrettyTable(["Modules", "Parameters"])
+     total_params = 0
+     for name, parameter in model.named_parameters():
+         if not parameter.requires_grad:
+             continue
+         params = parameter.numel()
+         table.add_row([name, params])
+         total_params += params
+     # print(table)
+     print(f"Total Trainable Params: {total_params}")
+     return table, total_params
+
+ ## -----------------------------------------------------------------------------------------------------------------##
+ ##                                 HEATMAPS GENERATION FROM LANDMARKS POINTS                                         ##
+ ## -----------------------------------------------------------------------------------------------------------------##
+
+ # Binary (thresholded sum) fusion variant
+ def fuse_heatmaps_binary(heatmaps):
+     fused_heatmap = np.sum(heatmaps, axis=0)
+
+     # Threshold the heatmap so that values below a certain threshold are set to 0
+     binary_fused_heatmap = np.where(fused_heatmap < 0.5, 0, 1)
+
+     assert is_binary_image(binary_fused_heatmap), "Image is not binary"
+
+     return binary_fused_heatmap
+
+
+ def fuse_heatmaps(heatmaps):
+     # Use np.maximum.reduce to get the maximum value of each pixel stack across the heatmaps
+     fused_heatmap = np.maximum.reduce(heatmaps)
+     return fused_heatmap
+
+
+ def scale_points(points: list, img_size: tuple, orig_size: tuple = None, offset=0):
+     # Scale coordinates according to the image size
+     if orig_size:
+         # Points * ratio -> downscaling
+         scaled_points = [tuple([round(p * isize / osize) + offset for p, isize, osize in zip(point, img_size, orig_size)]) for point in points]
+     else:
+         # Points * current size -> upscaling (the "extract_landmarks" function returns points normalized to size (1, 1))
+         scaled_points = [tuple([round(r * sz) + offset for sz, r in zip(point, img_size)]) for point in points]
+
+     return scaled_points
+
+
+ def points_to_heatmap(points: list, img_size: tuple, orig_size: tuple = None, sigma=2, fuse=False, offset=0):
+     # Scale coordinates according to the image size
+     if orig_size:
+         scaled_points = scale_points(points, img_size, orig_size, offset=offset)
+     else:
+         scaled_points = scale_points(points, img_size, offset=offset)
+
+     # Generate heatmaps with the scaled points
+     heatmaps = [generate_heatmap_from_points(point, img_size, sigma) for point in scaled_points]
+
+     if fuse:
+         heatmaps = fuse_heatmaps(heatmaps)
+
+     return np.array(heatmaps)
+
+
+ def generate_heatmap_from_points(point, img_size, sigma):
+     # Create a meshgrid of x,y coordinates for the image size
+     x, y = np.meshgrid(np.arange(img_size[0]), np.arange(img_size[1]))
+     # Calculate the heatmap using a Gaussian function centered at the input point
+     heatmap = np.exp(-((x - point[0]) ** 2 + (y - point[1]) ** 2) / (2 * sigma ** 2))
+     # Threshold the heatmap so that values below a certain threshold are set to 0
+     binary_heatmap = np.where(heatmap < 0.5 * heatmap.max(), 0, 1)
+
+     assert is_binary_image(binary_heatmap), "Image is not binary"
+
+     return binary_heatmap
+
+
+ def is_binary_image(image):
+     binary_check = np.logical_or(image == 0, image == 1)
+     return binary_check.all()
+
+ ## -----------------------------------------------------------------------------------------------------------------##
+ ##                                    LANDMARKS EXTRACTION FROM HEATMAPS                                             ##
+ ## -----------------------------------------------------------------------------------------------------------------##
+
+ def extract_landmarks(heatmaps, num_landmarks):
+     landmarks = np.zeros((num_landmarks, 2), dtype=np.float64)
+     heatmap_height, heatmap_width = heatmaps.shape[1], heatmaps.shape[2]
+
+     # Loop over all the heatmaps (one for each landmark)
+     for i in range(num_landmarks):
+         heatmap_channel = heatmaps[i]  # get heatmap number i
+
+         # Binarize the heatmap channel using a threshold
+         binary_img = np.where(heatmap_channel < 0.5 * heatmap_channel.max(), 0, 1)
+         binary_img = binary_img.astype(np.uint8)  # convert the binary image to uint8
+
+         assert is_binary_image(binary_img), "Image is not binary"
+
+         contours, _ = cv2.findContours(binary_img, cv2.RETR_EXTERNAL,
+                                        cv2.CHAIN_APPROX_SIMPLE)  # find the contours in the binary image
+
+         if contours:
+             max_contour = max(contours, key=cv2.contourArea)  # find the contour with the maximum area
+             M = cv2.moments(max_contour)  # calculate the moments of the maximum contour
+
+             if M['m00'] != 0:  # avoid divide-by-zero errors
+                 centroid_x = M['m10'] / M['m00']  # x-coordinate of the centroid
+                 centroid_y = M['m01'] / M['m00']  # y-coordinate of the centroid
+                 # Normalize the coordinates: x by the width, y by the height
+                 landmarks[i, :] = [centroid_x / heatmap_width,
+                                    centroid_y / heatmap_height]
+
+     return landmarks
+
+
+ ## -----------------------------------------------------------------------------------------------------------------##
+ ##                                            PLOT LOSS CURVES                                                       ##
+ ## -----------------------------------------------------------------------------------------------------------------##
+
+ def plot_loss_curves(results_path: str, save_dir: str = None):
+     # Load the results dictionary
+     results = torch.load(results_path)['results']
+     # Get the loss values of the results dictionary (training and validation)
+     train_loss = results['train_loss']
+     val_loss = results['val_loss']
+
+     # Figure out how many epochs there were
+     epochs = range(1, len(results['train_loss']) + 1)
+
+     # Plot loss
+     plt.figure(figsize=(10, 5))
+     plt.plot(epochs, train_loss, label='train_loss')
+     plt.plot(epochs, val_loss, label='val_loss')
+     plt.title('Loss')
+     plt.xlabel('Epochs')
+     plt.legend()
+
+     if save_dir is not None:
+         if not os.path.exists(save_dir):
+             os.makedirs(save_dir)
+         plt.savefig(os.path.join(save_dir, f"loss_epochs{len(epochs)}"))
+
+     plt.show()
+
+ # -----------------------------------------------------------------------------------------------------------------##
+ #                                        SAVE PATHS AND HEATMAP DUMPS                                                 
+ # -----------------------------------------------------------------------------------------------------------------##
+
+ # Generate the save model path for both the custom model and the segmentation model.
+ def generate_save_model_path(PREFIX, model_name, dataset_name, sigma, size, pretrained=None, backbone=None):
+     if pretrained is not None:
+         pretrained_dir = pretrained
+     else:
+         pretrained_dir = "no_pretrain"
+
+     if backbone is not None:
+         backbone_dir = backbone
+     else:
+         backbone_dir = "no_backbone"
+
+     save_model_path = f'{PREFIX}/results/models/{model_name}/{pretrained_dir}/{backbone_dir}/{dataset_name}/sigma{sigma}_size{str(size).replace(", ", "x")}'
+     return save_model_path
+
+
+ # Generate the path if it does not exist
+ def generate_path(path):
+     if not os.path.exists(path):
+         os.makedirs(path)
+     return path
+
+
+ # Save the original image with the heatmaps overlayed, the single heatmaps, and the fused heatmap
+ def save_heatmaps(batch_images, batch_heatmaps, images_name, save_dir):
+     for i, sample in enumerate(batch_heatmaps):
+
+         original_image = batch_images[i].permute(1, 2, 0).cpu().numpy()
+
+         plt.figure(figsize=(10, 10))
+         plt.imshow(original_image)
+         for j, heatmap in enumerate(sample):
+             plt.imshow(heatmap, cmap='viridis', alpha=0.25)
+
+             # Save the single heatmaps
+             plt.imsave(f"{save_dir}/{images_name[i]}_heatmap_{j}.png", heatmap, cmap='viridis')
+
+         # Save the original image with the heatmaps overlayed
+         plt.savefig(f"{save_dir}/{images_name[i]}_overlayed_heatmaps.png")
+         plt.close()
+
+         # Save the fused heatmap
+         fused_heatmap = fuse_heatmaps(sample)
+         plt.imsave(f"{save_dir}/{images_name[i]}_fused_heatmap.png", fused_heatmap, cmap='viridis')
+
+
+ # -----------------------------------------------------------------------------------------------------------------##
+
+ # Load env variables from a .env file
+ def load_env_variables(env_file):
+     with open(env_file, 'r') as f:
+         for line in f:
+             line = line.strip()
+             # Skip blank lines, comments, and lines without a key=value pair
+             if not line or line.startswith('#') or '=' not in line:
+                 continue
+             # Split only on the first '=' so values may themselves contain '='
+             key, value = line.split('=', 1)
+             # Trim whitespace
+             key = key.strip()
+             value = value.strip()
+             # Remove quotes if present
+             if len(value) >= 2 and value[0] == value[-1] and value.startswith(("'", '"')):
+                 value = value[1:-1]
+
+             os.environ[key] = value
+
+
+ def send_telegram_message(text, env_file='~/.env'):
+     env_file_path = os.path.expanduser(env_file)
+
+     if os.path.exists(env_file_path):
+         load_env_variables(env_file_path)
+     else:
+         print(f"Error: {env_file_path} does not exist")
+         return None
+
+     token = os.getenv('TELEGRAM_TOKEN')
+     chat_id = os.getenv('TELEGRAM_CHAT_ID')
+
+     url_req = "https://api.telegram.org/bot" + token + "/sendMessage"
+     payload = {
+         'chat_id': chat_id,
+         'text': text,
+         'parse_mode': 'HTML'  # or MarkdownV2
+     }
+
+     try:
+         response = requests.get(url_req, params=payload)
+     except Exception as e:
+         # Return early: `response` would otherwise be unbound below
+         print(f"Error sending telegram message: {e}")
+         return None
+
+     return response
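A quick round-trip sketch of the two halves above (illustrative only: the sizes, `sigma`, and the import path `downstream_task.utilities` are assumptions, not the project's configured values):

    import numpy as np
    from downstream_task.utilities import points_to_heatmap, extract_landmarks, scale_points

    # Two landmarks as (x, y) pixel coordinates in the original image
    points = [(120, 340), (400, 80)]
    img_size, orig_size = (512, 512), (1024, 1024)

    # Points -> one binary Gaussian blob per landmark at the training resolution
    heatmaps = points_to_heatmap(points, img_size, orig_size, sigma=2)
    print(heatmaps.shape)  # (2, 512, 512)

    # Heatmaps -> normalized (x, y) in [0, 1] via the centroid of the largest contour
    landmarks = extract_landmarks(heatmaps, num_landmarks=2)

    # Back to pixel coordinates at the training resolution
    print(scale_points(landmarks, img_size))  # approximately [(60, 170), (200, 40)]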
DiffusionXray-FewShot-LandmarkDetection/experiments/launch_imagenet_comparative_study.bash ADDED
@@ -0,0 +1,5 @@
+ #!/bin/bash
+
+ # Print the message to the user
+ echo "Launching the comparative study of ImageNet backbones for the downstream task..."
+ python downstream_task/imagenet_backbones_comparative_study.py
DiffusionXray-FewShot-LandmarkDetection/experiments/launch_landmarks_experiments.bash ADDED
@@ -0,0 +1,108 @@
+ #!/bin/bash
+
+ # Path to the config.json file
+ CONFIG_PATH="downstream_task/config/config.json"
+
+ # Path to the main.py script
+ MAIN_PY_PATH="downstream_task/main.py"
+
+ # Temporary file for the modified config
+ TEMP_CONFIG="temp_config.json"
+
+ # Validate a backbone encoder name; exits the script on an invalid choice
+ validate_backbone() {
+     case "$1" in
+         resnet18|resnet50|resnet101|resnet152|resnext50_32x4d|resnext101_32x8d|vgg11|vgg13|vgg16|vgg19|densenet121|densenet169|densenet201|densenet161)
+             ;;
+         *)
+             echo "Invalid backbone encoder. Please choose 'resnet18', 'resnet50', 'resnet101', 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d', 'vgg11', 'vgg13', 'vgg16', 'vgg19', 'densenet121', 'densenet169', 'densenet201', 'densenet161'."
+             exit 1
+             ;;
+     esac
+ }
+
+ # Ask for new values for the config file
+ read -p "Insert the dataset name to use for the downstream task ('chest' or 'hand' or 'cephalo'): " DATASET_NAME
+
+ # Assert that the dataset name is valid
+ if [ "$DATASET_NAME" != "chest" ] && [ "$DATASET_NAME" != "hand" ] && [ "$DATASET_NAME" != "cephalo" ]; then
+     echo "Invalid dataset name. Please choose 'chest' or 'hand' or 'cephalo'."
+     exit 1
+ fi
+
+ read -p "Insert the name of the model to be used for the downstream task ('ddpm' or 'imagenet' or 'moco' or 'mocov2' or 'mocov3' or 'simclr' or 'simclrv2' or 'dino' or 'barlow_twins' or 'byol'): " MODEL_NAME
+
+ # If MODEL_NAME is "ddpm", ask for the pretrained model path and whether the DDPM pre-training iterations are being tuned
+ if [ "$MODEL_NAME" == "ddpm" ]; then
+     BACKBONE_ENCODER=""
+
+     read -p "Insert the path to the pretrained ddpm model for the downstream task: " PRETRAINED_MODEL_PATH
+
+     # Ask if the user is tuning the DDPM pre-training iterations
+     read -p "Are you tuning the DDPM pre-training iterations? (true or false): " USE_VAL_SET
+
+     # Assert that the value is valid
+     if [ "$USE_VAL_SET" != "true" ] && [ "$USE_VAL_SET" != "false" ]; then
+         echo "Invalid value for tuning DDPM. Please choose 'true' or 'false'."
+         exit 1
+     fi
+
+     # Ask if the dataset used for the downstream task is different from the one used for pretraining
+     read -p "Is the dataset used for the downstream task different from the one used for pretraining? (true or false): " DIFFERENT_DATASET
+
+     # Assert that the value for different dataset is valid
+     if [ "$DIFFERENT_DATASET" == "true" ]; then
+         DIFFERENT_DATASET=true
+     elif [ "$DIFFERENT_DATASET" == "false" ]; then
+         DIFFERENT_DATASET=false
+     else
+         echo "Invalid value for different dataset. Please choose 'true' or 'false'."
+         exit 1
+     fi
+
+ elif [ "$MODEL_NAME" == "imagenet" ]; then
+     PRETRAINED_MODEL_PATH=""
+     USE_VAL_SET=false
+     DIFFERENT_DATASET=false
+
+     # Ask for the backbone encoder to be used for the downstream task
+     read -p "Insert the name of the backbone encoder to be used for the downstream task ('resnet18', 'resnet50', 'resnet101', 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d', 'vgg11', 'vgg13', 'vgg16', 'vgg19', 'densenet121', 'densenet169', 'densenet201', 'densenet161'): " BACKBONE_ENCODER
+
+     # Assert that the backbone encoder is valid
+     validate_backbone "$BACKBONE_ENCODER"
+
+ elif [ "$MODEL_NAME" == "moco" ] || [ "$MODEL_NAME" == "mocov2" ] || [ "$MODEL_NAME" == "mocov3" ] || [ "$MODEL_NAME" == "simclr" ] || [ "$MODEL_NAME" == "simclrv2" ] || [ "$MODEL_NAME" == "dino" ] || [ "$MODEL_NAME" == "barlow_twins" ] || [ "$MODEL_NAME" == "byol" ]; then
+     # Ask for the backbone encoder to be used for the downstream task
+     read -p "Insert the name of the backbone encoder to be used for the downstream task ('resnet18', 'resnet50', 'resnet101', 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d', 'vgg11', 'vgg13', 'vgg16', 'vgg19', 'densenet121', 'densenet169', 'densenet201', 'densenet161'): " BACKBONE_ENCODER
+
+     # Assert that the backbone encoder is valid
+     validate_backbone "$BACKBONE_ENCODER"
+
+     read -p "Insert the path to the $BACKBONE_ENCODER backbone model pre-trained with $MODEL_NAME: " PRETRAINED_MODEL_PATH
+     USE_VAL_SET=false
+     DIFFERENT_DATASET=false
+
+ else
+     echo "Invalid model name. Please choose 'ddpm' or 'imagenet' or 'moco' or 'mocov2' or 'mocov3' or 'simclr' or 'simclrv2' or 'dino' or 'barlow_twins' or 'byol'."
+     exit 1
+ fi
+
+ # Assert that the path to the pretrained model is valid
+ if [ "$PRETRAINED_MODEL_PATH" != "" ] && [ ! -f "$PRETRAINED_MODEL_PATH" ]; then
+     echo "Invalid path to the pretrained model."
+     exit 1
+ fi
+
+
+ read -p "Insert the number of training labeled samples to be used for the downstream task ('all' or a number): " TRAINING_SAMPLES
+
+ # Assert that the number of training samples is valid
+ if [ "$TRAINING_SAMPLES" != "all" ] && ! [[ "$TRAINING_SAMPLES" =~ ^[0-9]+$ ]]; then
+     echo "Invalid number of training samples. Please choose 'all' or a number."
+     exit 1
+ fi
+
+ # Patch the original config with the chosen values
+ #jq '.model.name = $MODEL_NAME | .training_protocol.finetuning.path = $PRETRAINED_MODEL_PATH | .training_protocol.finetuning.different_dataset = $DIFFERENT_DATASET | .inference_protocol.use_validation_set_for_inference = $USE_VAL_SET | .dataset.name = $DATASET_NAME | .dataset.training_samples = $TRAINING_SAMPLES' $CONFIG_PATH > $TEMP_CONFIG
+ jq --arg BACKBONE_ENCODER "$BACKBONE_ENCODER" --arg MODEL_NAME "$MODEL_NAME" --arg PRETRAINED_MODEL_PATH "$PRETRAINED_MODEL_PATH" --arg DIFFERENT_DATASET "$DIFFERENT_DATASET" --arg USE_VAL_SET "$USE_VAL_SET" --arg DATASET_NAME "$DATASET_NAME" --arg TRAINING_SAMPLES "$TRAINING_SAMPLES" '.model.name = $MODEL_NAME | .model.encoder = $BACKBONE_ENCODER | .training_protocol.finetuning.path = $PRETRAINED_MODEL_PATH | .training_protocol.finetuning.different_dataset = $DIFFERENT_DATASET | .inference_protocol.use_validation_set_for_inference = $USE_VAL_SET | .dataset.name = $DATASET_NAME | .dataset.training_samples = $TRAINING_SAMPLES' $CONFIG_PATH > $TEMP_CONFIG
+
+ # Execute the main.py script with the modified config
+ python $MAIN_PY_PATH --config $TEMP_CONFIG
+
+ # Remove the temporary config file
+ rm $TEMP_CONFIG
DiffusionXray-FewShot-LandmarkDetection/experiments/launch_pretraining.bash ADDED
@@ -0,0 +1,57 @@
+ #!/bin/bash
+
+ # Paths to the config.json files
+ DDPM_CONFIG_PATH="ddpm_pretraining/config/config.json"
+ SSL_CONFIG_PATH="ssl_pretraining/config/config.json"
+
+ # Paths to the main.py scripts
+ DDPM_PY_PATH="ddpm_pretraining/main.py"
+ SSL_PY_PATH="ssl_pretraining/main.py"
+
+ # Temporary file for the modified config
+ TEMP_CONFIG="temp_config.json"
+
+ # Ask for new values for the config file
+ read -p "Insert the dataset name to use for the pre-training task ('chest' or 'hand' or 'cephalo'): " DATASET_NAME
+
+ # Assert that the dataset name is valid
+ if [ "$DATASET_NAME" != "chest" ] && [ "$DATASET_NAME" != "hand" ] && [ "$DATASET_NAME" != "cephalo" ]; then
+     echo "Invalid dataset name. Please choose 'chest' or 'hand' or 'cephalo'."
+     exit 1
+ fi
+
+ # Ask for the model to be used for the pre-training task
+ read -p "Insert the name of the model to be used for the pre-training task ('ddpm' or 'moco' or 'mocov2' or 'mocov3' or 'simclr' or 'simclrv2' or 'dino' or 'barlow_twins' or 'byol'): " MODEL_NAME
+
+ # Assert that the model name is valid
+ if [ "$MODEL_NAME" == "ddpm" ]; then
+     # Patch the original config
+     jq --arg DATASET_NAME "$DATASET_NAME" '.dataset.name = $DATASET_NAME' $DDPM_CONFIG_PATH > $TEMP_CONFIG
+
+     # Execute the main.py script with the modified config
+     python $DDPM_PY_PATH --config $TEMP_CONFIG
+
+ elif [ "$MODEL_NAME" == "moco" ] || [ "$MODEL_NAME" == "mocov2" ] || [ "$MODEL_NAME" == "mocov3" ] || [ "$MODEL_NAME" == "simclr" ] || [ "$MODEL_NAME" == "simclrv2" ] || [ "$MODEL_NAME" == "dino" ] || [ "$MODEL_NAME" == "barlow_twins" ] || [ "$MODEL_NAME" == "byol" ]; then
+
+     # Ask for the backbone encoder to be used for the pre-training task
+     read -p "Insert the name of the backbone encoder to be used for the pre-training task ('resnet18', 'resnet50', 'resnet101', 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d', 'vgg11', 'vgg13', 'vgg16', 'vgg19', 'densenet121', 'densenet169', 'densenet201', 'densenet161'): " BACKBONE_ENCODER
+
+     # Assert that the backbone encoder is valid
+     case "$BACKBONE_ENCODER" in
+         resnet18|resnet50|resnet101|resnet152|resnext50_32x4d|resnext101_32x8d|vgg11|vgg13|vgg16|vgg19|densenet121|densenet169|densenet201|densenet161)
+             ;;
+         *)
+             echo "Invalid backbone encoder. Please choose 'resnet18', 'resnet50', 'resnet101', 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d', 'vgg11', 'vgg13', 'vgg16', 'vgg19', 'densenet121', 'densenet169', 'densenet201', 'densenet161'."
+             exit 1
+             ;;
+     esac
+
+     # Patch the original config
+     jq --arg BACKBONE_ENCODER "$BACKBONE_ENCODER" --arg MODEL_NAME "$MODEL_NAME" --arg DATASET_NAME "$DATASET_NAME" '.dataset.name = $DATASET_NAME | .model.encoder = $BACKBONE_ENCODER | .model.name = $MODEL_NAME' $SSL_CONFIG_PATH > $TEMP_CONFIG
+
+     # Execute the main.py script with the modified config
+     python $SSL_PY_PATH --config $TEMP_CONFIG
+
+ else
+     echo "Invalid model name. Please choose 'ddpm' or 'moco' or 'mocov2' or 'mocov3' or 'simclr' or 'simclrv2' or 'dino' or 'barlow_twins' or 'byol'."
+     exit 1
+ fi
+
+ # Remove the temporary config file
+ rm $TEMP_CONFIG
DiffusionXray-FewShot-LandmarkDetection/launch_experiments.bash ADDED
@@ -0,0 +1,29 @@
+ #!/bin/bash
+ # Check if more than one GPU is available; if so, ask which one to use
+ if [ $(nvidia-smi --query-gpu=count --format=csv,noheader | wc -l) -gt 1 ]; then
+     read -p "Insert the GPU number to use: " GPU
+     export CUDA_VISIBLE_DEVICES=$GPU
+ fi
+
+ # Ask which experiment to run
+ read -p "Insert the experiment to run (1 for pre-training task, 2 for ImageNet comparative study, 3 for downstream task experiments): " EXPERIMENT
+
+ # Execute the corresponding experiment
+ case $EXPERIMENT in
+     1)
+         bash experiments/launch_pretraining.bash
+         ;;
+     2)
+         bash experiments/launch_imagenet_comparative_study.bash
+         ;;
+     3)
+         bash experiments/launch_landmarks_experiments.bash
+         ;;
+     *)
+         echo "Invalid experiment number"
+         ;;
+ esac
DiffusionXray-FewShot-LandmarkDetection/requirements.txt ADDED
@@ -0,0 +1,21 @@
+ albumentations==1.4.4
+ einops==0.8.0
+ GPUtil==1.4.0
+ matplotlib==3.8.2
+ numpy==1.23.5
+ opencv_python==4.9.0.80
+ opencv_python_headless==4.9.0.80
+ pandas==2.2.3
+ Pillow==11.0.0
+ prettytable==3.12.0
+ Requests==2.32.3
+ scikit_learn==1.3.2
+ scikit-image  # the 'skimage==0.0' PyPI entry is a stub package; the actual library is scikit-image
+ scipy==1.14.1
+ segmentation_models_pytorch==0.3.3
+ torch==2.1.0+cu118
+ torchmetrics==1.3.0
+ torchvision==0.16.0+cu118
+ tqdm==4.66.1
+ urllib3==1.26.13
DiffusionXray-FewShot-LandmarkDetection/ssl_pretraining/builders/__init__.py ADDED
@@ -0,0 +1,31 @@
+ """
+
+ Self-supervised implementation taken from https://github.com/giakoumoglou/pyssl
+
+ """
+ from .barlow_twins import BarlowTwins
+ from .byol import BYOL
+ from .dino import DINO
+ from .moco import MoCo
+ from .mocov2 import MoCoV2
+ from .mocov3 import MoCoV3
+ from .simclr import SimCLR
+ from .simsiam import SimSiam
+ from .simclrv2 import SimCLRv2
+ from .supcon import SupCon
+ from .swav import SwAV
+
+
+ __all__ = [
+     'BarlowTwins',
+     'BYOL',
+     'DINO',
+     'MoCo',
+     'MoCoV2',
+     'MoCoV3',
+     'SimCLR',
+     'SimCLRv2',
+     'SimSiam',
+     'SupCon',
+     'SwAV',
+ ]
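The launch scripts above select a method by its `MODEL_NAME` string; a minimal sketch of how such a name can be mapped onto these builders (the `BUILDERS` registry here is illustrative glue, not part of the package, and it assumes the repo root is on PYTHONPATH):

    import torch
    import torchvision
    from ssl_pretraining import builders

    # Hypothetical name -> builder mapping mirroring the MODEL_NAME choices
    BUILDERS = {
        'barlow_twins': builders.BarlowTwins,
        'byol': builders.BYOL,
        'dino': builders.DINO,
        'moco': builders.MoCo,
        'mocov2': builders.MoCoV2,
        'mocov3': builders.MoCoV3,
        'simclr': builders.SimCLR,
        'simclrv2': builders.SimCLRv2,
    }

    # Every builder wraps a backbone whose classification head has been removed
    backbone = torchvision.models.resnet18(weights=None)
    feature_size = backbone.fc.in_features
    backbone.fc = torch.nn.Identity()

    model = BUILDERS['byol'](backbone, feature_size)
    loss = model(torch.rand(4, 3, 224, 224))  # forward() returns the SSL loss directly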
DiffusionXray-FewShot-LandmarkDetection/ssl_pretraining/builders/barlow_twins.py ADDED
@@ -0,0 +1,105 @@
+ # Copyright (C) 2023. All rights reserved.
+ # All rights reserved.
+ #
+ # This source code is licensed under the license found in the
+ # LICENSE file in the root directory of this source tree.
+
+ import torch
+ from torch import nn
+ import torchvision.transforms as T
+ from PIL import Image
+
+
+ __all__ = ['BarlowTwins']
+
+
+ class BarlowTwins(nn.Module):
+     """
+     Barlow Twins: Self-Supervised Learning via Redundancy Reduction
+     Link: https://arxiv.org/abs/2103.03230
+     Implementation: https://github.com/facebookresearch/barlowtwins
+     """
+     def __init__(self, backbone, feature_size, projection_dim=8192, hidden_dim=8192, lamda=0.005,
+                  image_size=224, mean=(0.5,), std=(0.229, 0.224, 0.225)):
+         super().__init__()
+         self.lamda = lamda
+         self.image_size = image_size
+         self.mean = mean
+         self.std = std
+         self.backbone = backbone
+         self.projector = Projector(feature_size, hidden_dim, projection_dim)
+         self.encoder = nn.Sequential(self.backbone, self.projector)
+         self.bn = nn.BatchNorm1d(projection_dim, affine=False)
+         self.augment = T.Compose([
+             T.RandomResizedCrop(image_size, interpolation=Image.BICUBIC),
+             T.RandomHorizontalFlip(p=0.5),
+             T.RandomApply([T.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.2, hue=0.1)], p=0.8),
+             T.RandomGrayscale(p=0.2),
+             T.RandomApply([T.GaussianBlur(kernel_size=image_size//20*2+1, sigma=(0.1, 2.0))], p=0.5),
+             T.RandomSolarize(threshold=0.5, p=0.0),
+             T.Normalize(mean=mean, std=std),
+         ])
+         self.augment_prime = T.Compose([
+             T.RandomResizedCrop(image_size, interpolation=Image.BICUBIC),
+             T.RandomHorizontalFlip(p=0.5),
+             T.RandomApply([T.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.2, hue=0.1)], p=0.8),
+             T.RandomGrayscale(p=0.2),
+             T.RandomApply([T.GaussianBlur(kernel_size=image_size//20*2+1, sigma=(0.1, 2.0))], p=0.5),
+             T.RandomSolarize(threshold=0.5, p=0.2),
+             T.Normalize(mean=mean, std=std),
+         ])
+
+     def forward(self, x):
+         x1, x2 = self.augment(x), self.augment_prime(x)
+         z1, z2 = self.encoder(x1), self.encoder(x2)
+         bz = z1.shape[0]
+         # Empirical cross-correlation matrix between the two embeddings
+         c = self.bn(z1).T @ self.bn(z2)
+         c.div_(bz)
+         on_diag = torch.diagonal(c).add_(-1).pow_(2).sum()
+         off_diag = off_diagonal(c).pow_(2).sum()
+         loss = on_diag + self.lamda * off_diag
+         return loss
+
+
+ class Projector(nn.Module):
+     """ Projector for Barlow Twins """
+     def __init__(self, in_dim, hidden_dim=2048, out_dim=128):
+         super().__init__()
+
+         self.layer1 = nn.Sequential(
+             nn.Linear(in_dim, hidden_dim, bias=False),
+             nn.BatchNorm1d(hidden_dim, eps=1e-5, affine=True),
+             nn.ReLU(inplace=True),
+         )
+         self.layer2 = nn.Sequential(
+             nn.Linear(hidden_dim, hidden_dim, bias=False),
+             nn.BatchNorm1d(hidden_dim, eps=1e-5, affine=True),
+             nn.ReLU(inplace=True),
+         )
+         self.layer3 = nn.Sequential(
+             nn.Linear(hidden_dim, out_dim, bias=False),
+         )
+
+     def forward(self, x):
+         x = self.layer1(x)
+         x = self.layer2(x)
+         x = self.layer3(x)
+         return x
+
+
+ def off_diagonal(x):
+     # Return a flattened view of the off-diagonal elements of a square matrix
+     n, m = x.shape
+     return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()
+
+
+ if __name__ == '__main__':
+     import torchvision
+     backbone = torchvision.models.resnet50(weights=None)
+     feature_size = backbone.fc.in_features
+     backbone.fc = torch.nn.Identity()
+
+     model = BarlowTwins(backbone, feature_size)
+
+     x = torch.rand(4, 3, 224, 224)
+     with torch.no_grad():
+         loss = model.forward(x)
+     print(f'loss = {loss}')
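The `off_diagonal` helper deserves a note: flattening an n×n matrix, dropping the last element, and viewing it as (n−1)×(n+1) shifts each row by one position, so the first column holds exactly the diagonal; slicing it off leaves the n²−n off-diagonal entries. A quick illustrative check:

    import torch

    def off_diagonal(x):
        n, m = x.shape
        return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()

    c = torch.arange(16.0).view(4, 4)
    expected = c[~torch.eye(4, dtype=torch.bool)]       # off-diagonals, row by row
    assert torch.equal(off_diagonal(c), expected)        # same 12 entries, same order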
DiffusionXray-FewShot-LandmarkDetection/ssl_pretraining/builders/byol.py ADDED
@@ -0,0 +1,114 @@
+ # Copyright (C) 2023. All rights reserved.
+ # All rights reserved.
+ #
+ # This source code is licensed under the license found in the
+ # LICENSE file in the root directory of this source tree.
+
+ import torch
+ from torch import nn
+ import torch.nn.functional as F
+ import torchvision.transforms as T
+ import copy
+ from PIL import Image
+
+
+ __all__ = ['BYOL']
+
+
+ class BYOL(nn.Module):
+     """
+     BYOL: Bootstrap Your Own Latent: A New Approach to Self-Supervised Learning
+     Link: https://arxiv.org/abs/2006.07733
+     Implementation: https://github.com/deepmind/deepmind-research/tree/master/byol
+     """
+     def __init__(self, backbone, feature_size, projection_dim=256, hidden_dim=4096, tau=0.996,
+                  image_size=224, mean=(0.5,), std=(0.229, 0.224, 0.225)):
+         super().__init__()
+         self.projection_dim = projection_dim
+         self.tau = tau  # EMA update coefficient
+         self.backbone = backbone
+         self.projector = MLP(feature_size, hidden_dim=hidden_dim, out_dim=projection_dim)
+         self.image_size = image_size
+         self.mean = mean
+         self.std = std
+         self.online_encoder = self.encoder = nn.Sequential(self.backbone, self.projector)
+         self.online_predictor = MLP(in_dim=projection_dim, hidden_dim=hidden_dim, out_dim=projection_dim)
+         self.target_encoder = copy.deepcopy(self.online_encoder)  # target must be a deepcopy of online, since we will use the backbone trained by online
+         self._init_target_encoder()
+         self.augment1 = T.Compose([
+             T.RandomResizedCrop(image_size, scale=(0.08, 1.0), ratio=(3.0/4.0, 4.0/3.0), interpolation=Image.BICUBIC),
+             T.RandomHorizontalFlip(p=0.5),
+             T.RandomApply([T.ColorJitter(0.4, 0.4, 0.2, 0.1)], p=0.8),
+             T.RandomGrayscale(p=0.2),
+             T.RandomApply([T.GaussianBlur(kernel_size=image_size//20*2+1, sigma=(0.1, 2.0))], p=0.5),
+             T.Normalize(mean=mean, std=std)
+         ])
+         self.augment2 = T.Compose([
+             T.RandomResizedCrop(image_size, scale=(0.08, 1.0), ratio=(3.0/4.0, 4.0/3.0), interpolation=Image.BICUBIC),
+             T.RandomHorizontalFlip(p=0.5),
+             T.RandomApply([T.ColorJitter(0.4, 0.4, 0.2, 0.1)], p=0.8),
+             T.RandomGrayscale(p=0.2),
+             T.RandomApply([T.GaussianBlur(kernel_size=image_size//20*2+1, sigma=(0.1, 2.0))], p=0.5),
+             T.RandomSolarize(threshold=0.5, p=0.2),
+             T.Normalize(mean=mean, std=std)
+         ])
+
+     def forward(self, x):
+         x1, x2 = self.augment1(x), self.augment2(x)
+         z1_o, z2_o = self.online_encoder(x1), self.online_encoder(x2)
+         p1_o, p2_o = self.online_predictor(z1_o), self.online_predictor(z2_o)
+         with torch.no_grad():
+             self._momentum_update_target_encoder()
+             z1_t, z2_t = self.target_encoder(x1), self.target_encoder(x2)
+         loss = mean_squared_error(p1_o, z2_t) / 2 + mean_squared_error(p2_o, z1_t) / 2
+         return loss
+
+     def _init_target_encoder(self):
+         for param_o, param_t in zip(self.online_encoder.parameters(), self.target_encoder.parameters()):
+             param_t.data.copy_(param_o.data)
+             param_t.requires_grad = False
+
+     @torch.no_grad()
+     def _momentum_update_target_encoder(self):
+         for param_o, param_t in zip(self.online_encoder.parameters(), self.target_encoder.parameters()):
+             param_t.data = self.tau * param_t.data + (1. - self.tau) * param_o.data
+
+
+ def mean_squared_error(p, z):
+     p = F.normalize(p, dim=1)
+     z = F.normalize(z, dim=1)
+     return 2 - 2 * (p * z.detach()).sum(dim=-1).mean()
+
+
+ class MLP(nn.Module):
+     """ Projection Head and Prediction Head for BYOL """
+     def __init__(self, in_dim, hidden_dim=4096, out_dim=256):
+         super().__init__()
+
+         self.layer1 = nn.Sequential(
+             nn.Linear(in_dim, hidden_dim),
+             nn.BatchNorm1d(hidden_dim),
+             nn.ReLU(inplace=True)
+         )
+         self.layer2 = nn.Sequential(
+             nn.Linear(hidden_dim, out_dim),
+         )
+
+     def forward(self, x):
+         x = self.layer1(x)
+         x = self.layer2(x)
+         return x
+
+
+ if __name__ == '__main__':
+     import torchvision
+     backbone = torchvision.models.resnet50(weights=None)
+     feature_size = backbone.fc.in_features
+     backbone.fc = torch.nn.Identity()
+
+     model = BYOL(backbone, feature_size, tau=0.996)
+
+     x = torch.rand(4, 3, 224, 224)
+     with torch.no_grad():
+         loss = model.forward(x)
+     print(f'loss = {loss}')
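`mean_squared_error` here is BYOL's regression loss: after L2 normalization, ‖p − z‖² = 2 − 2·cos(p, z), so the function is exactly the squared Euclidean distance between unit vectors. A quick numerical check (illustrative):

    import torch
    import torch.nn.functional as F

    p, z = torch.randn(8, 256), torch.randn(8, 256)
    pn, zn = F.normalize(p, dim=1), F.normalize(z, dim=1)
    mse = 2 - 2 * (pn * zn).sum(dim=-1)          # the form used in the loss
    dist = (pn - zn).pow(2).sum(dim=-1)          # squared distance of unit vectors
    assert torch.allclose(mse, dist, atol=1e-6)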
DiffusionXray-FewShot-LandmarkDetection/ssl_pretraining/builders/dino.py ADDED
@@ -0,0 +1,172 @@
+ # Copyright (C) 2023. All rights reserved.
+ # All rights reserved.
+ #
+ # This source code is licensed under the license found in the
+ # LICENSE file in the root directory of this source tree.
+
+ import torch
+ from torch import nn
+ import torch.nn.functional as F
+ import torchvision.transforms as T
+ from PIL import Image
+ import copy
+
+
+ __all__ = ['DINO']
+
+
+ class DINO(nn.Module):
+     """
+     DINO: Emerging Properties in Self-Supervised Vision Transformers
+     Link: https://arxiv.org/abs/2104.14294
+     Implementation: https://github.com/facebookresearch/dino
+     """
+     def __init__(self, backbone, feature_size, projection_dim=256, hidden_dim=2048, bottleneck_dim=256, temp_s=0.1, temp_t=0.5, m=0.5, lamda=0.996, num_crops=6,
+                  image_size=224, mean=(0.5,), std=(0.229, 0.224, 0.225)):
+         super().__init__()
+         self.projection_dim = projection_dim
+         self.temp_s = temp_s
+         self.temp_t = temp_t
+         self.register_buffer("center", torch.zeros(1, projection_dim))
+         self.m = m
+         self.lamda = lamda  # EMA update coefficient
+         self.backbone = backbone
+         self.image_size = image_size
+         self.mean = mean
+         self.std = std
+         self.head_student = Head(feature_size, hidden_dim=hidden_dim, bottleneck_dim=bottleneck_dim, out_dim=projection_dim)
+         self.student = self.encoder = nn.Sequential(self.backbone, self.head_student)
+         self.head_teacher = Head(feature_size, hidden_dim=hidden_dim, bottleneck_dim=bottleneck_dim, out_dim=projection_dim)
+         self.teacher = nn.Sequential(copy.deepcopy(backbone), self.head_teacher)
+         self._init_teacher()
+         self.num_crops = num_crops
+         self.augment_global1 = T.Compose([
+             T.RandomResizedCrop(image_size, scale=(0.04, 1.0), interpolation=Image.BICUBIC),
+             T.RandomHorizontalFlip(p=0.5),
+             T.RandomApply([T.ColorJitter(0.4, 0.4, 0.2, 0.1)], p=0.8),
+             T.RandomGrayscale(p=0.2),
+             T.RandomApply([T.GaussianBlur(kernel_size=image_size//20*2+1, sigma=(0.1, 2.0))], p=0.5),
+             T.Normalize(mean=mean, std=std),
+         ])
+         self.augment_global2 = T.Compose([
+             T.RandomResizedCrop(image_size, scale=(0.04, 1.0), interpolation=Image.BICUBIC),
+             T.RandomHorizontalFlip(p=0.5),
+             T.RandomApply([T.ColorJitter(0.4, 0.4, 0.2, 0.1)], p=0.8),
+             T.RandomGrayscale(p=0.2),
+             T.RandomApply([T.GaussianBlur(kernel_size=image_size//20*2+1, sigma=(0.1, 2.0))], p=0.5),
+             T.RandomSolarize(threshold=0.5, p=0.2),
+             T.Normalize(mean=mean, std=std),
+         ])
+         self.augment_local = T.Compose([
+             T.RandomResizedCrop(int(image_size*3/7), scale=(0.05, 1.0), interpolation=Image.BICUBIC),
+             T.RandomHorizontalFlip(p=0.5),
+             T.RandomApply([T.ColorJitter(0.4, 0.4, 0.2, 0.1)], p=0.8),
+             T.RandomGrayscale(p=0.2),
+             T.RandomApply([T.GaussianBlur(kernel_size=image_size//20*2+1, sigma=(0.1, 2.0))], p=0.5),
+             T.Normalize(mean=mean, std=std),
+         ])
+
+     def forward(self, x):
+         x1, x2 = self.augment_global1(x), self.augment_global2(x)
+
+         xc = []
+         if self.num_crops > 0:
+             for _ in range(self.num_crops):
+                 xc.append(self.augment_local(x))
+
+         z1_s, z2_s = self.student(x1), self.student(x2)
+
+         zc_s = []
+         for x_crop in xc:
+             zc_s.append(self.student(x_crop))
+
+         with torch.no_grad():
+             self._momentum_update_teacher()
+             z1_t, z2_t = self.teacher(x1), self.teacher(x2)
+
+         z_s = [z1_s, z2_s] + zc_s
+         z_t = [z1_t, z2_t]
+
+         # Cross-entropy between every teacher (global) view and every student view,
+         # skipping the pairs where both come from the same global view
+         loss, loss_terms = 0, 0
+         for iq, q in enumerate(z_t):
+             for iv, v in enumerate(z_s):
+                 if iv == iq:
+                     continue
+                 loss += cross_entropy_loss(q, v, self.temp_s, self.temp_t, self.center)
+                 loss_terms += 1
+         loss /= loss_terms
+
+         self._update_center(z1_t, z2_t)
+         return loss
+
+     def _init_teacher(self):
+         for param_q, param_k in zip(self.student.parameters(), self.teacher.parameters()):
+             param_k.data.copy_(param_q.data)  # initialize
+             param_k.requires_grad = False  # not updated by gradient
+
+     @torch.no_grad()
+     def _momentum_update_teacher(self):
+         for param_q, param_k in zip(self.student.parameters(), self.teacher.parameters()):
+             param_k.data = self.lamda * param_k.data + (1. - self.lamda) * param_q.data
+
+     @torch.no_grad()
+     def _update_center(self, z1_t, z2_t):
+         self.center = self.m * self.center + (1 - self.m) * torch.cat([z1_t, z2_t]).mean(dim=0)
+
+
+ def cross_entropy_loss(z_t, z_s, temp_s, temp_t, center):
+     z_t = z_t.detach()  # stop gradient through the teacher
+     z_s = z_s / temp_s
+     z_t = F.softmax((z_t - center) / temp_t, dim=1)  # center + sharpen
+     return -(z_t * F.log_softmax(z_s, dim=1)).sum(dim=1).mean()
+
+
+ class Head(nn.Module):
+     """ Projection Head for DINO """
+     def __init__(self, in_dim, hidden_dim=2048, bottleneck_dim=256, out_dim=256):
+         super().__init__()
+
+         self.layer1 = nn.Sequential(
+             nn.Linear(in_dim, hidden_dim),
+             nn.GELU(),
+         )
+         self.layer2 = nn.Sequential(
+             nn.Linear(hidden_dim, hidden_dim),
+             nn.GELU(),
+         )
+         self.layer3 = nn.Sequential(
+             nn.Linear(hidden_dim, bottleneck_dim),
+         )
+         self.apply(self._init_weights)
+
+         self.last_layer = nn.utils.weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False))
+         self.last_layer.weight_g.data.fill_(1)
+         self.last_layer.weight_g.requires_grad = False
+
+     def forward(self, x):
+         x = self.layer1(x)
+         x = self.layer2(x)
+         x = self.layer3(x)
+         x = nn.functional.normalize(x, dim=-1, p=2)
+         x = self.last_layer(x)
+         return x
+
+     def _init_weights(self, m):
+         if isinstance(m, nn.Linear):
+             torch.nn.init.trunc_normal_(m.weight, std=0.02)
+         if isinstance(m, nn.Linear) and m.bias is not None:
+             nn.init.constant_(m.bias, 0)
+
+
+ if __name__ == '__main__':
+     import torchvision
+     backbone = torchvision.models.resnet50(weights=None)
+     feature_size = backbone.fc.in_features
+     backbone.fc = torch.nn.Identity()
+
+     model = DINO(backbone, feature_size)
+
+     x = torch.rand(4, 3, 224, 224)
+     with torch.no_grad():
+         loss = model.forward(x)
+     print(f'loss = {loss}')
DiffusionXray-FewShot-LandmarkDetection/ssl_pretraining/builders/moco.py ADDED
@@ -0,0 +1,118 @@
+ # Copyright (C) 2023. All rights reserved.
+ # All rights reserved.
+ #
+ # This source code is licensed under the license found in the
+ # LICENSE file in the root directory of this source tree.
+
+ import torch
+ from torch import nn
+ import torch.nn.functional as F
+ import torchvision.transforms as T
+ import copy
+
+
+ __all__ = ['MoCo']
+
+
+ class MoCo(nn.Module):
+     """
+     MoCo: Momentum Contrast
+     Link: https://arxiv.org/abs/1911.05722
+     Implementation: https://github.com/facebookresearch/moco
+     """
+     def __init__(self, backbone, feature_size, projection_dim=128, K=65536, m=0.999, temperature=0.07,
+                  image_size=224, mean=(0.5,), std=(0.229, 0.224, 0.225)):
+         super().__init__()
+         self.projection_dim = projection_dim
+         self.K = K
+         self.m = m
+         self.temperature = temperature
+         self.backbone = backbone
+         self.projector = nn.Linear(feature_size, projection_dim)
+         self.image_size = image_size
+         self.mean = mean
+         self.std = std
+         self.encoder_q = self.encoder = nn.Sequential(self.backbone, self.projector)
+         self.encoder_k = copy.deepcopy(self.encoder_q)
+         self._init_encoder_k()
+         self.register_buffer("queue", torch.randn(projection_dim, K))
+         self.queue = nn.functional.normalize(self.queue, dim=0)
+         self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))
+         self.augment = T.Compose([
+             T.RandomResizedCrop(image_size, scale=(0.2, 1.0)),
+             T.RandomGrayscale(p=0.2),
+             T.ColorJitter(0.4, 0.4, 0.4, 0.4),
+             T.RandomHorizontalFlip(),
+             T.Normalize(mean=mean, std=std)
+         ])
+
+     def forward(self, x):
+         x_q, x_k = self.augment(x), self.augment(x)
+         q = self.encoder_q(x_q)
+         q = nn.functional.normalize(q, dim=1)
+         with torch.no_grad():
+             self._momentum_update_encoder_k()
+             # Shuffle the key batch to avoid BatchNorm information leakage
+             x_k, idx_unshuffle = self._batch_shuffle_single_gpu(x_k)
+             k = self.encoder_k(x_k)
+             k = nn.functional.normalize(k, dim=1)
+             k = self._batch_unshuffle_single_gpu(k, idx_unshuffle)
+         loss = infonce_loss(q, k, self.queue, self.temperature)
+         self._dequeue_and_enqueue(k)
+         return loss
+
+     @torch.no_grad()
+     def _init_encoder_k(self):
+         for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
+             param_k.data.copy_(param_q.data)
+             param_k.requires_grad = False
+
+     @torch.no_grad()
+     def _momentum_update_encoder_k(self):
+         for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
+             param_k.data = param_k.data * self.m + param_q.data * (1.0 - self.m)
+
+     @torch.no_grad()
+     def _dequeue_and_enqueue(self, keys):
+         bz = keys.shape[0]
+         ptr = int(self.queue_ptr)
+         assert self.K % bz == 0
+         self.queue[:, ptr:(ptr + bz)] = keys.t()
+         ptr = (ptr + bz) % self.K
+         self.queue_ptr[0] = ptr
+
+     @torch.no_grad()
+     def _batch_shuffle_single_gpu(self, x):
+         idx_shuffle = torch.randperm(x.shape[0]).cuda()
+         idx_unshuffle = torch.argsort(idx_shuffle)
+         return x[idx_shuffle], idx_unshuffle
+
+     @torch.no_grad()
+     def _batch_unshuffle_single_gpu(self, x, idx_unshuffle):
+         return x[idx_unshuffle]
+
+
+ def infonce_loss(q, k, queue, temperature=0.07):
+     """ InfoNCE loss """
+     l_pos = torch.einsum("nc,nc->n", [q, k]).unsqueeze(-1)
+     l_neg = torch.einsum("nc,ck->nk", [q, queue.clone().detach()])
+     logits = torch.cat([l_pos, l_neg], dim=1)
+     logits /= temperature
+     labels = torch.zeros(logits.shape[0], dtype=torch.long).to(q.device)
+     loss = F.cross_entropy(logits, labels)
+     return loss
+
+
+ if __name__ == '__main__':
+     import torchvision
+     backbone = torchvision.models.resnet50(weights=None)
+     feature_size = backbone.fc.in_features
+     backbone.fc = torch.nn.Identity()
+
+     model = MoCo(backbone, feature_size)
+
+     x = torch.rand(4, 3, 224, 224)
+     with torch.no_grad():
+         loss = model.forward(x)
+     print(f'loss = {loss}')
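In `infonce_loss` the positive similarity is prepended as column 0 and the queue supplies K negatives, so the cross-entropy target for every sample is index 0. A shape sketch with illustrative sizes:

    import torch
    import torch.nn.functional as F

    N, C, K = 8, 128, 1024
    q = F.normalize(torch.randn(N, C), dim=1)
    k = F.normalize(torch.randn(N, C), dim=1)
    queue = F.normalize(torch.randn(C, K), dim=0)

    l_pos = torch.einsum("nc,nc->n", [q, k]).unsqueeze(-1)  # (N, 1) positive logits
    l_neg = torch.einsum("nc,ck->nk", [q, queue])           # (N, K) negative logits
    logits = torch.cat([l_pos, l_neg], dim=1) / 0.07        # (N, 1 + K)
    labels = torch.zeros(N, dtype=torch.long)               # positives sit at index 0
    loss = F.cross_entropy(logits, labels)
    print(logits.shape, loss.item())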
DiffusionXray-FewShot-LandmarkDetection/ssl_pretraining/builders/mocov2.py ADDED
@@ -0,0 +1,132 @@
+ # Copyright (C) 2023. All rights reserved.
+ # All rights reserved.
+ #
+ # This source code is licensed under the license found in the
+ # LICENSE file in the root directory of this source tree.
+
+ import torch
+ from torch import nn
+ import torch.nn.functional as F
+ import torchvision.transforms as T
+ import copy
+
+
+ __all__ = ['MoCoV2']
+
+
+ class MoCoV2(nn.Module):
+     """
+     MoCo v2: Momentum Contrast v2
+     Link: https://arxiv.org/abs/2003.04297
+     Implementation: https://github.com/facebookresearch/moco
+     """
+     def __init__(self, backbone, feature_size, projection_dim=128, K=65536, m=0.999, temperature=0.07,
+                  image_size=224, mean=(0.5,), std=(0.229, 0.224, 0.225)):
+         super().__init__()
+         self.projection_dim = projection_dim
+         self.K = K
+         self.m = m
+         self.temperature = temperature
+         self.backbone = backbone
+         self.projector = Projector(feature_size, feature_size, projection_dim)
+         self.image_size = image_size
+         self.mean = mean
+         self.std = std
+         self.encoder_q = self.encoder = nn.Sequential(self.backbone, self.projector)
+         self.encoder_k = copy.deepcopy(self.encoder_q)
+         self._init_encoder_k()
+         self.register_buffer("queue", torch.randn(projection_dim, K))
+         self.queue = nn.functional.normalize(self.queue, dim=0)
+         self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))
+         self.augment = T.Compose([
+             T.RandomResizedCrop(image_size, scale=(0.2, 1.0)),
+             T.RandomApply([T.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),
+             T.RandomGrayscale(p=0.2),
+             T.RandomApply([T.GaussianBlur(kernel_size=image_size//20*2+1, sigma=(0.1, 2.0))], p=0.5),
+             T.RandomHorizontalFlip(),
+             T.Normalize(mean=mean, std=std)
+         ])
+
+     def forward(self, x):
+         x_q, x_k = self.augment(x), self.augment(x)
+         q = self.encoder_q(x_q)
+         q = nn.functional.normalize(q, dim=1)
+         with torch.no_grad():
+             self._momentum_update_encoder_k()
+             x_k, idx_unshuffle = self._batch_shuffle_single_gpu(x_k)
+             k = self.encoder_k(x_k)
+             k = nn.functional.normalize(k, dim=1)
+             k = self._batch_unshuffle_single_gpu(k, idx_unshuffle)
+         loss = infonce_loss(q, k, self.queue, self.temperature)
+         self._dequeue_and_enqueue(k)
+         return loss
+
+     @torch.no_grad()
+     def _init_encoder_k(self):
+         for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
+             param_k.data.copy_(param_q.data)
+             param_k.requires_grad = False
+
+     @torch.no_grad()
+     def _momentum_update_encoder_k(self):
+         for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
+             param_k.data = param_k.data * self.m + param_q.data * (1.0 - self.m)
+
+     @torch.no_grad()
+     def _dequeue_and_enqueue(self, keys):
+         bz = keys.shape[0]
+         ptr = int(self.queue_ptr)
+         assert self.K % bz == 0
+         self.queue[:, ptr:(ptr + bz)] = keys.t()
+         ptr = (ptr + bz) % self.K
+         self.queue_ptr[0] = ptr
+
+     @torch.no_grad()
+     def _batch_shuffle_single_gpu(self, x):
+         idx_shuffle = torch.randperm(x.shape[0]).cuda()
+         idx_unshuffle = torch.argsort(idx_shuffle)
+         return x[idx_shuffle], idx_unshuffle
+
+     @torch.no_grad()
+     def _batch_unshuffle_single_gpu(self, x, idx_unshuffle):
+         return x[idx_unshuffle]
+
+
+ def infonce_loss(q, k, queue, temperature=0.07):
+     """ InfoNCE loss """
+     l_pos = torch.einsum("nc,nc->n", [q, k]).unsqueeze(-1)
+     l_neg = torch.einsum("nc,ck->nk", [q, queue.clone().detach()])
+     logits = torch.cat([l_pos, l_neg], dim=1)
+     logits /= temperature
+     labels = torch.zeros(logits.shape[0], dtype=torch.long).to(q.device)
+     loss = F.cross_entropy(logits, labels)
+     return loss
+
+
+ class Projector(nn.Module):
+     """ Projector for MoCo v2: copied from SimCLR """
+     def __init__(self, in_dim, hidden_dim=None, out_dim=128):
+         super().__init__()
+         if hidden_dim is None:
+             hidden_dim = in_dim
+         self.layer1 = nn.Sequential(
+             nn.Linear(in_dim, hidden_dim),
+             nn.ReLU(inplace=True),
+             nn.Linear(hidden_dim, out_dim),
+         )
+
+     def forward(self, x):
+         x = self.layer1(x)
+         return x
+
+
+ if __name__ == '__main__':
+     import torchvision
+     backbone = torchvision.models.resnet50(weights=None)
+     feature_size = backbone.fc.in_features
+     backbone.fc = torch.nn.Identity()
+
+     model = MoCoV2(backbone, feature_size)
+
+     x = torch.rand(4, 3, 224, 224)
+     with torch.no_grad():
+         loss = model.forward(x)
+     print(f'loss = {loss}')
DiffusionXray-FewShot-LandmarkDetection/ssl_pretraining/builders/mocov3.py ADDED
@@ -0,0 +1,148 @@
+# Copyright (C) 2023. All rights reserved.
+# All rights reserved.
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+
+import torch
+from torch import nn
+import torch.nn.functional as F
+import torchvision.transforms as T
+import copy
+from PIL import Image
+
+
+__all__ = ['MoCoV3']
+
+
+class MoCoV3(nn.Module):
+    """
+    MoCo v3: Momentum Contrast v3
+    Link: https://arxiv.org/abs/2104.02057
+    Implementation: https://github.com/facebookresearch/moco-v3
+    """
+    def __init__(self, backbone, feature_size, projection_dim=256, hidden_dim=2048, temperature=0.5, m=0.999,
+                 image_size=224, mean=(0.5,), std=(0.229, 0.224, 0.225)):
+        super().__init__()
+        self.temperature = temperature
+        self.m = m
+        self.backbone = backbone
+        self.projector = Projector(feature_size, hidden_dim=hidden_dim, out_dim=projection_dim)
+        self.image_size = image_size
+        self.mean = mean
+        self.std = std
+        self.encoder_q = self.encoder = nn.Sequential(self.backbone, self.projector)
+        self.predictor = Predictor(in_dim=projection_dim, hidden_dim=hidden_dim, out_dim=projection_dim)
+        self.encoder_k = copy.deepcopy(self.encoder_q)
+        self._init_encoder_k()
+        self.augment1 = T.Compose([
+            T.RandomResizedCrop(image_size, scale=(0.08, 1.0), ratio=(3.0/4.0, 4.0/3.0), interpolation=Image.BICUBIC),
+            T.RandomHorizontalFlip(p=0.5),
+            T.RandomApply([T.ColorJitter(0.4, 0.4, 0.2, 0.1)], p=0.8),
+            T.RandomGrayscale(p=0.2),
+            T.RandomApply([T.GaussianBlur(kernel_size=image_size//20*2+1, sigma=(0.1, 2.0))], p=0.5),
+            T.Normalize(mean=mean, std=std)
+        ])
+        self.augment2 = T.Compose([
+            T.RandomResizedCrop(image_size, scale=(0.08, 1.0), ratio=(3.0/4.0, 4.0/3.0), interpolation=Image.BICUBIC),
+            T.RandomHorizontalFlip(p=0.5),
+            T.RandomApply([T.ColorJitter(0.4, 0.4, 0.2, 0.1)], p=0.8),
+            T.RandomGrayscale(p=0.2),
+            T.RandomApply([T.GaussianBlur(kernel_size=image_size//20*2+1, sigma=(0.1, 2.0))], p=0.5),
+            T.RandomSolarize(threshold=0.5, p=0.2),
+            T.Normalize(mean=mean, std=std)
+        ])
+
+    def forward(self, x):
+        x1, x2 = self.augment1(x), self.augment2(x)
+        q1 = self.predictor(self.encoder_q(x1))
+        q2 = self.predictor(self.encoder_q(x2))
+        with torch.no_grad():
+            self._update_momentum_encoder()
+            k1 = self.encoder_k(x1)
+            k2 = self.encoder_k(x2)
+        loss = infonce_loss(q1, k2, self.temperature) + infonce_loss(q2, k1, self.temperature)
+        return loss
+
+    @torch.no_grad()
+    def _update_momentum_encoder(self):
+        for param_b, param_m in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
+            param_m.data = param_m.data * self.m + param_b.data * (1. - self.m)
+
+    @torch.no_grad()
+    def _init_encoder_k(self):
+        for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
+            param_k.data.copy_(param_q.data)
+            param_k.requires_grad = False
+
+
+def infonce_loss(q, k, temperature=0.07):
+    """ InfoNCE loss """
+    q = nn.functional.normalize(q, dim=1)
+    k = nn.functional.normalize(k, dim=1)
+    logits = torch.einsum('nc,mc->nm', [q, k])
+    logits /= temperature
+    labels = torch.arange(logits.shape[0], dtype=torch.long).to(q.device)
+    loss = F.cross_entropy(logits, labels)
+    return loss
+
+
+class Projector(nn.Module):
+    """ Projector for SimCLR v2, used in MoCo v3 too """
+    def __init__(self, in_dim, hidden_dim=2048, out_dim=256):
+        super().__init__()
+
+        self.layer1 = nn.Sequential(
+            nn.Linear(in_dim, hidden_dim),
+            nn.BatchNorm1d(hidden_dim, eps=1e-5, affine=True),
+            nn.ReLU(inplace=True),
+        )
+        self.layer2 = nn.Sequential(
+            nn.Linear(hidden_dim, hidden_dim),
+            nn.BatchNorm1d(hidden_dim, eps=1e-5, affine=True),
+            nn.ReLU(inplace=True),
+        )
+        self.layer3 = nn.Sequential(
+            nn.Linear(hidden_dim, out_dim),
+            nn.BatchNorm1d(out_dim, eps=1e-5, affine=True),
+        )
+    def forward(self, x):
+        x = self.layer1(x)
+        x = self.layer2(x)
+        x = self.layer3(x)
+        return x
+
+
+class Predictor(nn.Module):
+    """ Projection Head and Prediction Head for BYOL, used in MoCo v3 too """
+    def __init__(self, in_dim, hidden_dim=4096, out_dim=256):
+        super().__init__()
+
+        self.layer1 = nn.Sequential(
+            nn.Linear(in_dim, hidden_dim),
+            nn.BatchNorm1d(hidden_dim),
+            nn.ReLU(inplace=True)
+        )
+        self.layer2 = nn.Sequential(
+            nn.Linear(hidden_dim, out_dim),
+        )
+
+    def forward(self, x):
+        x = self.layer1(x)
+        x = self.layer2(x)
+        return x
+
+
+if __name__ == '__main__':
+    import torchvision
+    backbone = torchvision.models.resnet50(pretrained=False)
+    feature_size = backbone.fc.in_features
+    backbone.fc = torch.nn.Identity()
+
+    model = MoCoV3(backbone, feature_size)
+
+    x = torch.rand(4, 3, 224, 224)
+    with torch.no_grad():
+        loss = model.forward(x)
+    print(f'loss = {loss}')
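Unlike v2, `MoCoV3.forward` keeps no memory queue: the negatives are the other samples of the same batch, so in this file's `infonce_loss` the positive for query `i` is key `i` and the cross-entropy targets are simply `arange(N)`. A small sanity check of that pairing, on toy tensors that are not part of the repo:

import torch

q = torch.nn.functional.normalize(torch.randn(4, 8), dim=1)
k = q.clone()                                # pretend each key matches its query exactly
logits = torch.einsum('nc,mc->nm', [q, k])   # 4x4 similarity matrix
print(logits.argmax(dim=1))                  # tensor([0, 1, 2, 3]) -> positives on the diagonal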
DiffusionXray-FewShot-LandmarkDetection/ssl_pretraining/builders/simclr.py ADDED
@@ -0,0 +1,99 @@
+# Copyright (C) 2023. All rights reserved.
+# All rights reserved.
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import torch
+from torch import nn
+import torch.nn.functional as F
+import torchvision.transforms as T
+
+
+__all__ = ['SimCLR']
+
+
+class SimCLR(nn.Module):
+    """
+    SimCLR: A Simple Framework for Contrastive Learning of Visual Representations
+    Link: https://arxiv.org/abs/2002.05709
+    Implementation: https://github.com/google-research/simclr
+    """
+    def __init__(self, backbone, feature_size, projection_dim=128, temperature=0.5,
+                 image_size=224, mean=(0.5,), std=(0.229, 0.224, 0.225)):
+        super().__init__()
+        self.projection_dim = projection_dim
+        self.temperature = temperature
+        self.image_size = image_size
+        self.mean = mean
+        self.std = std
+        self.backbone = backbone
+        self.projector = Projector(feature_size, hidden_dim=feature_size, out_dim=projection_dim)
+        self.encoder = nn.Sequential(self.backbone, self.projector)
+        self.augment = T.Compose([
+            T.RandomResizedCrop(image_size, scale=(0.2, 1.0)),
+            T.RandomHorizontalFlip(),
+            T.RandomApply([T.ColorJitter(0.8, 0.8, 0.8, 0.2)], p=0.8),
+            T.RandomGrayscale(p=0.2),
+            T.RandomApply([T.GaussianBlur(kernel_size=image_size//20*2+1, sigma=(0.1, 2.0))], p=0.5),
+            T.Normalize(mean=mean, std=std),
+        ])
+
+    def forward(self, x):
+        x1, x2 = self.augment(x), self.augment(x)
+        z1, z2 = self.encoder(x1), self.encoder(x2)
+        loss = nt_xent_loss(z1, z2, self.temperature)
+        return loss
+
+
+def nt_xent_loss(z1, z2, temperature=0.5):
+    """ NT-Xent loss """
+    z1 = F.normalize(z1, dim=1)
+    z2 = F.normalize(z2, dim=1)
+    N, Z = z1.shape
+    device = z1.device
+    representations = torch.cat([z1, z2], dim=0)
+    similarity_matrix = F.cosine_similarity(representations.unsqueeze(1), representations.unsqueeze(0), dim=-1)
+    l_pos = torch.diag(similarity_matrix, N)
+    r_pos = torch.diag(similarity_matrix, -N)
+    positives = torch.cat([l_pos, r_pos]).view(2 * N, 1)
+    diag = torch.eye(2*N, dtype=torch.bool, device=device)
+    diag[N:, :N] = diag[:N, N:] = diag[:N, :N]
+    negatives = similarity_matrix[~diag].view(2*N, -1)
+    logits = torch.cat([positives, negatives], dim=1)
+    logits /= temperature
+    labels = torch.zeros(2*N, device=device, dtype=torch.int64)
+    loss = F.cross_entropy(logits, labels, reduction='sum')
+    return loss / (2 * N)
+
+
+class Projector(nn.Module):
+    """ Projector for SimCLR """
+    def __init__(self, in_dim, hidden_dim=None, out_dim=128):
+        super().__init__()
+
+        if hidden_dim is None:
+            self.layer1 = nn.Linear(in_dim, out_dim)
+        else:
+            self.layer1 = nn.Sequential(
+                nn.Linear(in_dim, hidden_dim),
+                nn.ReLU(inplace=True),
+                nn.Linear(hidden_dim, out_dim),
+            )
+    def forward(self, x):
+        x = self.layer1(x)
+        return x
+
+
+if __name__ == '__main__':
+    import torchvision
+    backbone = torchvision.models.resnet50(pretrained=False)
+    feature_size = backbone.fc.in_features
+    backbone.fc = torch.nn.Identity()
+
+    model = SimCLR(backbone, feature_size)
+
+    x = torch.rand(4, 3, 224, 224)
+    with torch.no_grad():
+        loss = model.forward(x)
+    print(f'loss = {loss}')
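In `nt_xent_loss`, each of the 2N rows keeps its positive similarity in column 0 and its 2N-2 negatives after it, which is why the cross-entropy targets are all zeros. As a quick check, run in this module's context with toy values: two identical views should give a much lower loss than two independent random ones.

import torch

z = torch.randn(8, 16)
print(nt_xent_loss(z, z).item())                    # aligned views -> small loss
print(nt_xent_loss(z, torch.randn(8, 16)).item())   # unrelated views -> larger loss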
DiffusionXray-FewShot-LandmarkDetection/ssl_pretraining/builders/simclrv2.py ADDED
@@ -0,0 +1,113 @@
+# Copyright (C) 2023. All rights reserved.
+# All rights reserved.
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import torch
+from torch import nn
+import torch.nn.functional as F
+import torchvision.transforms as T
+
+
+__all__ = ['SimCLRv2']
+
+
+
+class SimCLRv2(nn.Module):
+    """
+    SimCLRv2: Big Self-Supervised Models are Strong Semi-Supervised Learners
+    Link: https://arxiv.org/abs/2006.10029
+    Implementation: https://github.com/google-research/simclr
+    """
+    def __init__(self, backbone, feature_size, projection_dim=128, temperature=0.5,
+                 image_size=224, mean=(0.5,), std=(0.229, 0.224, 0.225)):
+        super().__init__()
+        self.projection_dim = projection_dim
+        self.temperature = temperature
+        self.image_size = image_size
+        self.mean = mean
+        self.std = std
+        self.backbone = backbone
+        self.projector = Projector(feature_size, hidden_dim=feature_size, out_dim=projection_dim)
+        self.encoder = nn.Sequential(self.backbone, self.projector)
+        self.augment = T.Compose([
+            T.RandomResizedCrop(image_size, scale=(0.2, 1.0)),
+            T.RandomHorizontalFlip(),
+            T.RandomApply([T.ColorJitter(0.8, 0.8, 0.8, 0.2)], p=0.8),
+            T.RandomGrayscale(p=0.2),
+            T.RandomApply([T.GaussianBlur(kernel_size=image_size//20*2+1, sigma=(0.1, 2.0))], p=0.5),
+            T.Normalize(mean=mean, std=std),
+        ])
+
+    def forward(self, x):
+        x1, x2 = self.augment(x), self.augment(x)
+        z1, z2 = self.encoder(x1), self.encoder(x2)
+        loss = nt_xent_loss(z1, z2, self.temperature)
+        return loss
+
+    @torch.no_grad()
+    def eval(self):
+        # Per SimCLRv2, keep the first projector layer attached to the backbone for downstream use
+        super().eval()
+        self.backbone = nn.Sequential(self.backbone, self.projector.layer1)
+
+
+def nt_xent_loss(z1, z2, temperature=0.5):
+    """ NT-Xent loss """
+    z1 = F.normalize(z1, dim=1)
+    z2 = F.normalize(z2, dim=1)
+    N, Z = z1.shape
+    device = z1.device
+    representations = torch.cat([z1, z2], dim=0)
+    similarity_matrix = F.cosine_similarity(representations.unsqueeze(1), representations.unsqueeze(0), dim=-1)
+    l_pos = torch.diag(similarity_matrix, N)
+    r_pos = torch.diag(similarity_matrix, -N)
+    positives = torch.cat([l_pos, r_pos]).view(2 * N, 1)
+    diag = torch.eye(2*N, dtype=torch.bool, device=device)
+    diag[N:, :N] = diag[:N, N:] = diag[:N, :N]
+    negatives = similarity_matrix[~diag].view(2*N, -1)
+    logits = torch.cat([positives, negatives], dim=1)
+    logits /= temperature
+    labels = torch.zeros(2*N, device=device, dtype=torch.int64)
+    loss = F.cross_entropy(logits, labels, reduction='sum')
+    return loss / (2 * N)
+
+
+class Projector(nn.Module):
+    """ Projector for SimCLR v2 """
+    def __init__(self, in_dim, hidden_dim=2048, out_dim=128):
+        super().__init__()
+
+        self.layer1 = nn.Sequential(
+            nn.Linear(in_dim, hidden_dim),
+            nn.BatchNorm1d(hidden_dim, eps=1e-5, affine=True),
+            nn.ReLU(inplace=True),
+        )
+        self.layer2 = nn.Sequential(
+            nn.Linear(hidden_dim, hidden_dim),
+            nn.BatchNorm1d(hidden_dim, eps=1e-5, affine=True),
+            nn.ReLU(inplace=True),
+        )
+        self.layer3 = nn.Sequential(
+            nn.Linear(hidden_dim, out_dim),
+            nn.BatchNorm1d(out_dim, eps=1e-5, affine=True),
+        )
+    def forward(self, x):
+        x = self.layer1(x)
+        x = self.layer2(x)
+        x = self.layer3(x)
+        return x
+
+
+if __name__ == '__main__':
+    import torchvision
+    backbone = torchvision.models.resnet50(pretrained=False)
+    feature_size = backbone.fc.in_features
+    backbone.fc = torch.nn.Identity()
+
+    model = SimCLRv2(backbone, feature_size)
+
+    x = torch.rand(4, 3, 224, 224)
+    with torch.no_grad():
+        loss = model.forward(x)
+    print(f'loss = {loss}')
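The overridden `eval` above implements the SimCLRv2 recommendation of extracting features after the first projector layer rather than from the raw backbone. Note that it mutates `self.backbone` in place, so it should only be called once. A hedged usage sketch, continuing the `__main__` setup above (the 2048-dim output reflects the ResNet-50 feature size and the default `hidden_dim`):

model = SimCLRv2(backbone, feature_size)
model.eval()                                      # backbone now ends with projector.layer1
feats = model.backbone(torch.rand(2, 3, 224, 224))
print(feats.shape)                                # torch.Size([2, 2048]) with the defaults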
DiffusionXray-FewShot-LandmarkDetection/ssl_pretraining/builders/simsiam.py ADDED
@@ -0,0 +1,114 @@
+# Copyright (C) 2023. All rights reserved.
+# All rights reserved.
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import torch
+from torch import nn
+import torch.nn.functional as F
+import torchvision.transforms as T
+
+
+__all__ = ['SimSiam']
+
+
+class SimSiam(nn.Module):
+    """
+    SimSiam: Exploring Simple Siamese Representation Learning
+    Link: https://arxiv.org/abs/2011.10566
+    Implementation: https://github.com/facebookresearch/simsiam
+    """
+    def __init__(self, backbone, feature_size, projection_dim=2048, hidden_dim_proj=2048, hidden_dim_pred=512,
+                 image_size=224, mean=(0.5,), std=(0.229, 0.224, 0.225)):
+        super().__init__()
+        self.projection_dim = projection_dim
+        self.image_size = image_size
+        self.mean = mean
+        self.std = std
+        self.backbone = backbone
+        self.projector = Projector(feature_size, hidden_dim=hidden_dim_proj, out_dim=projection_dim)
+        self.predictor = Predictor(in_dim=projection_dim, hidden_dim=hidden_dim_pred, out_dim=projection_dim)
+        self.encoder = nn.Sequential(self.backbone, self.projector)
+        self.augment = T.Compose([
+            T.RandomResizedCrop(image_size, scale=(0.2, 1.0)),
+            T.RandomApply([T.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),
+            T.RandomGrayscale(p=0.2),
+            T.RandomApply([T.GaussianBlur(kernel_size=image_size//20*2+1, sigma=(0.1, 2.0))], p=0.5),
+            T.RandomHorizontalFlip(),
+            T.Normalize(mean=mean, std=std)
+        ])
+
+    def forward(self, x):
+        x1, x2 = self.augment(x), self.augment(x)
+        z1, z2 = self.encoder(x1), self.encoder(x2)
+        p1, p2 = self.predictor(z1), self.predictor(z2)
+        loss = negative_cosine_similarity(p1, z2) / 2 + negative_cosine_similarity(p2, z1) / 2
+        return loss
+
+
+def negative_cosine_similarity(p, z):
+    """ Negative Cosine Similarity """
+    z = z.detach()  # stop-gradient on the target branch
+    p = F.normalize(p, dim=1)
+    z = F.normalize(z, dim=1)
+    return -(p*z).sum(dim=1).mean()
+
+
+class Projector(nn.Module):
+    """ Projection Head for SimSiam """
+    def __init__(self, in_dim, hidden_dim=2048, out_dim=2048):
+        super().__init__()
+
+        self.layer1 = nn.Sequential(
+            nn.Linear(in_dim, hidden_dim),
+            nn.BatchNorm1d(hidden_dim),
+            nn.ReLU(inplace=True)
+        )
+        self.layer2 = nn.Sequential(
+            nn.Linear(hidden_dim, hidden_dim),
+            nn.BatchNorm1d(hidden_dim),
+            nn.ReLU(inplace=True)
+        )
+        self.layer3 = nn.Sequential(
+            nn.Linear(hidden_dim, out_dim),
+            nn.BatchNorm1d(out_dim)
+        )
+
+    def forward(self, x):
+        x = self.layer1(x)
+        x = self.layer2(x)
+        x = self.layer3(x)
+        return x
+
+
+class Predictor(nn.Module):
+    """ Predictor for SimSiam """
+    def __init__(self, in_dim=2048, hidden_dim=512, out_dim=2048):
+        super().__init__()
+
+        self.layer1 = nn.Sequential(
+            nn.Linear(in_dim, hidden_dim),
+            nn.BatchNorm1d(hidden_dim),
+            nn.ReLU(inplace=True)
+        )
+        self.layer2 = nn.Linear(hidden_dim, out_dim)
+
+    def forward(self, x):
+        x = self.layer1(x)
+        x = self.layer2(x)
+        return x
+
+
+if __name__ == '__main__':
+    import torchvision
+    backbone = torchvision.models.resnet50(pretrained=False)
+    feature_size = backbone.fc.in_features
+    backbone.fc = torch.nn.Identity()
+
+    model = SimSiam(backbone, feature_size)
+
+    x = torch.rand(4, 3, 224, 224)
+    with torch.no_grad():
+        loss = model.forward(x)
+    print(f'loss = {loss}')
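The `z.detach()` in `negative_cosine_similarity` is the stop-gradient that SimSiam relies on to avoid collapse: gradients flow only through the predictor branch `p`, never through the target `z`. A toy check, run in this module's context with illustrative tensors:

import torch

p = torch.randn(4, 8, requires_grad=True)
z = torch.randn(4, 8, requires_grad=True)
negative_cosine_similarity(p, z).backward()
print(p.grad is not None, z.grad is None)   # True True: nothing reaches the target branch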
DiffusionXray-FewShot-LandmarkDetection/ssl_pretraining/builders/supcon.py ADDED
@@ -0,0 +1,124 @@
+# Copyright (C) 2023. All rights reserved.
+# All rights reserved.
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import torch
+from torch import nn
+import torchvision.transforms as T
+
+
+__all__ = ['SupCon']
+
+
+class SupCon(nn.Module):
+    """
+    SupCon: Supervised Contrastive Learning
+    Link: https://arxiv.org/abs/2004.11362
+    Implementation: https://github.com/HobbitLong/SupContrast
+    """
+    def __init__(self, backbone, feature_size, projection_dim=128, temperature=0.07,
+                 image_size=224, mean=(0.5,), std=(0.229, 0.224, 0.225)):
+        super().__init__()
+        self.projection_dim = projection_dim
+        self.temperature = temperature
+        self.image_size = image_size
+        self.mean = mean
+        self.std = std
+        self.backbone = backbone
+        self.projector = Projector(feature_size, hidden_dim=feature_size, out_dim=projection_dim)
+        self.encoder = nn.Sequential(self.backbone, self.projector)
+        self.augment = T.Compose([
+            T.RandomResizedCrop(image_size, scale=(0.2, 1.)),
+            T.RandomHorizontalFlip(),
+            T.RandomApply([T.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),
+            T.RandomGrayscale(p=0.2),
+            T.Normalize(mean=mean, std=std)
+        ])
+
+    def forward(self, x, y):
+        x1, x2 = self.augment(x), self.augment(x)
+        z1, z2 = self.encoder(x1), self.encoder(x2)
+        z = torch.cat([z1.unsqueeze(1), z2.unsqueeze(1)], dim=1)
+        loss = sup_con_loss(z, y, temperature=self.temperature)
+        return loss
+
+
+def sup_con_loss(features, labels=None, mask=None, temperature=0.07, contrast_mode='all', base_temperature=0.07):
+    """
+    Supervised Contrastive Loss. It also supports the unsupervised contrastive loss in SimCLR.
+    If both labels and mask are None, it degenerates to the SimCLR unsupervised loss.
+    """
+    device = features.device
+    if len(features.shape) < 3:
+        raise ValueError('features needs to be [bsz, n_views, ...], at least 3 dimensions are required')
+    if len(features.shape) > 3:
+        features = features.view(features.shape[0], features.shape[1], -1)
+    if contrast_mode not in ['all', 'one']:
+        raise ValueError('Unknown mode: {}'.format(contrast_mode))
+    bz = features.shape[0]
+    if labels is not None and mask is not None:
+        raise ValueError('Cannot define both labels and mask')
+    elif labels is None and mask is None:
+        mask = torch.eye(bz, dtype=torch.float32).to(device)
+    elif labels is not None:
+        labels = labels.contiguous().view(-1, 1)
+        if labels.shape[0] != bz:
+            raise ValueError('Num of labels does not match num of features')
+        mask = torch.eq(labels, labels.T).float().to(device)
+    else:
+        mask = mask.float().to(device)
+    contrast_count = features.shape[1]
+    contrast_feature = torch.cat(torch.unbind(features, dim=1), dim=0)
+    if contrast_mode == 'one':
+        anchor_feature = features[:, 0]
+        anchor_count = 1
+    elif contrast_mode == 'all':
+        anchor_feature = contrast_feature
+        anchor_count = contrast_count
+    anchor_dot_contrast = torch.div(torch.matmul(anchor_feature, contrast_feature.T), temperature)
+    logits_max, _ = torch.max(anchor_dot_contrast, dim=1, keepdim=True)
+    logits = anchor_dot_contrast - logits_max.detach()
+    mask = mask.repeat(anchor_count, contrast_count)
+    logits_mask = torch.scatter(torch.ones_like(mask), 1, torch.arange(bz * anchor_count).view(-1, 1).to(device), 0)
+    mask = mask * logits_mask
+    exp_logits = torch.exp(logits) * logits_mask
+    log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True))
+    mean_log_prob_pos = (mask * log_prob).sum(1) / mask.sum(1)
+    loss = - (temperature / base_temperature) * mean_log_prob_pos
+    loss = loss.view(anchor_count, bz)
+    return loss.mean()
+
+
+class Projector(nn.Module):
+    """ Projector for SupCon """
+    def __init__(self, in_dim, hidden_dim=None, out_dim=128):
+        super().__init__()
+
+        if hidden_dim is None:
+            self.layer1 = nn.Linear(in_dim, out_dim)
+        else:
+            self.layer1 = nn.Sequential(
+                nn.Linear(in_dim, hidden_dim),
+                nn.ReLU(inplace=True),
+                nn.Linear(hidden_dim, out_dim),
+            )
+    def forward(self, x):
+        x = self.layer1(x)
+        return x
+
+
+if __name__ == '__main__':
+    import torchvision
+    backbone = torchvision.models.resnet50(pretrained=False)
+    feature_size = backbone.fc.in_features
+    backbone.fc = torch.nn.Identity()
+
+    model = SupCon(backbone, feature_size)
+
+    x = torch.rand(4, 3, 224, 224)
+    y = torch.randint(0, 2, (4,))  # integer class labels, so same-class samples become positives
+    with torch.no_grad():
+        loss = model.forward(x, y)
+    print(f'loss = {loss}')
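`sup_con_loss` turns integer labels into a positive-pair mask with `torch.eq(labels, labels.T)`, so every same-class sample (across both views, after the `repeat`) becomes a positive. The mask construction in isolation, on toy labels:

import torch

labels = torch.tensor([0, 1, 0, 2]).view(-1, 1)
mask = torch.eq(labels, labels.T).float()
print(mask)
# samples 0 and 2 are mutual positives; every sample is always its own positive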
DiffusionXray-FewShot-LandmarkDetection/ssl_pretraining/builders/swav.py ADDED
@@ -0,0 +1,179 @@
+# Copyright (C) 2023. All rights reserved.
+# All rights reserved.
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import torch
+from torch import nn
+import torch.nn.functional as F
+import torchvision.transforms as T
+
+
+__all__ = ['SwAV']
+
+
+class SwAV(nn.Module):
+    """
+    SwAV: Unsupervised Learning of Visual Features by Contrasting Cluster Assignments
+    Link: https://arxiv.org/abs/2006.09882
+    Implementation: https://github.com/facebookresearch/swav
+    """
+
+    def __init__(self, backbone, feature_size, projection_dim=128, hidden_dim=2048, temperature=0.1, epsilon=0.05,
+                 sinkhorn_iterations=3, num_prototypes=3000, queue_length=64, use_the_queue=True, num_crops=6,
+                 image_size=224, mean=(0.5,), std=(0.229, 0.224, 0.225)):
+        super().__init__()
+        self.projection_dim = projection_dim
+        self.temperature = temperature
+        self.epsilon = epsilon
+        self.sinkhorn_iterations = sinkhorn_iterations
+        self.num_prototypes = num_prototypes
+        self.queue_length = queue_length
+        self.use_the_queue = use_the_queue
+        self.image_size = image_size
+        self.mean = mean
+        self.std = std
+        self.register_buffer("queue", torch.zeros(2, self.queue_length, self.projection_dim))
+        self.backbone = backbone
+        self.projector = Projector(feature_size, hidden_dim, projection_dim)
+        self.encoder = nn.Sequential(self.backbone, self.projector)
+        self.prototypes = nn.Linear(self.projection_dim, self.num_prototypes, bias=False)
+        self._init_weights()
+        self.num_crops = num_crops
+        self.augment_global = T.Compose([
+            T.RandomResizedCrop(image_size, scale=(0.14, 1.0)),
+            T.RandomApply([T.ColorJitter(0.8, 0.8, 0.8, 0.2)], p=0.8),
+            T.RandomGrayscale(p=0.2),
+            T.RandomHorizontalFlip(p=0.5),
+            T.RandomApply([T.GaussianBlur(kernel_size=image_size//20*2+1, sigma=(0.1, 2.0))], p=0.5),
+            T.Normalize(mean=mean, std=std),
+        ])
+        self.augment_local = T.Compose([
+            T.RandomResizedCrop(int(image_size*3/7), scale=(0.05, 0.14)),
+            T.RandomHorizontalFlip(),
+            T.RandomApply([T.ColorJitter(0.8, 0.8, 0.8, 0.2)], p=0.8),
+            T.RandomGrayscale(p=0.2),
+            T.RandomApply([T.GaussianBlur(kernel_size=image_size//20*2+1, sigma=(0.1, 2.0))], p=0.5),
+            T.Normalize(mean=mean, std=std),
+        ])
+
+    def forward(self, x):
+        x1, x2 = self.augment_global(x), self.augment_global(x)
+        xc = []
+        if self.num_crops > 0:
+            for _ in range(self.num_crops):
+                xc.append(self.augment_local(x))
+        bz = x1.shape[0]
+        with torch.no_grad():  # normalize prototypes
+            w = self.prototypes.weight.data.clone()
+            w = nn.functional.normalize(w, dim=1, p=2)
+            self.prototypes.weight.copy_(w)
+        z1, z2 = self.encoder(x1), self.encoder(x2)
+        z1, z2 = nn.functional.normalize(z1, dim=1, p=2), nn.functional.normalize(z2, dim=1, p=2)
+        # prototype scores keep the gradient path to the encoder; detached copies feed the Sinkhorn targets
+        c1, c2 = self.prototypes(z1), self.prototypes(z2)
+        _c1, _c2 = c1.detach(), c2.detach()
+        with torch.no_grad():
+            if self.queue is not None:
+                if self.use_the_queue:
+                    _c1 = torch.cat((torch.mm(self.queue[0], self.prototypes.weight.t()), _c1))
+                    _c2 = torch.cat((torch.mm(self.queue[1], self.prototypes.weight.t()), _c2))
+                self.queue[0, bz:] = self.queue[0, :-bz].clone()
+                self.queue[0, :bz] = z1
+                self.queue[1, bz:] = self.queue[1, :-bz].clone()
+                self.queue[1, :bz] = z2
+            # keep the assignments of the current batch (queue entries were prepended)
+            q1, q2 = self.sinkhorn(_c1)[-bz:, :], self.sinkhorn(_c2)[-bz:, :]
+        z_c, c_c = [], []
+        for crop in xc:
+            z = self.encoder(crop)
+            z = nn.functional.normalize(z, dim=1, p=2)
+            z_c.append(z)
+            c_c.append(self.prototypes(z))
+        loss = swav_loss(c1, c2, c_c, q1, q2, self.temperature, 2+len(xc))
+        return loss
+
+    @torch.no_grad()
+    def _init_weights(self):
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
+            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
+                nn.init.constant_(m.weight, 1)
+                nn.init.constant_(m.bias, 0)
+
+    @torch.no_grad()
+    def freeze_prototypes(self):
+        for p in self.prototypes.parameters():
+            p.grad = None
+
+    @torch.no_grad()
+    def sinkhorn(self, Q):
+        with torch.no_grad():
+            Q = torch.exp(Q / self.epsilon).t()
+            B = Q.shape[1]
+            K = Q.shape[0]
+            sum_Q = torch.sum(Q)
+            Q /= sum_Q
+            for _ in range(self.sinkhorn_iterations):
+                sum_of_rows = torch.sum(Q, dim=1, keepdim=True)
+                Q /= sum_of_rows
+                Q /= K
+                Q /= torch.sum(Q, dim=0, keepdim=True)
+                Q /= B
+            Q *= B
+            return Q.t()
+
+
+def cross_entropy_loss(q, p):
+    # negative log-likelihood of the soft assignments, as in the SwAV objective
+    return -torch.mean(torch.sum(q * F.log_softmax(p, dim=1), dim=1))
+
+
+def swav_loss(c1, c2, c_c, q1, q2, temperature, num_crops):
+    loss = 0
+
+    p1, p2 = c1/temperature, c2/temperature
+    loss += cross_entropy_loss(q1, p2) / (num_crops - 1)
+    loss += cross_entropy_loss(q2, p1) / (num_crops - 1)
+
+    for c in range(len(c_c)):
+        p = c_c[c] / temperature
+        loss += cross_entropy_loss(q1, p) / (num_crops - 1)
+        loss += cross_entropy_loss(q2, p) / (num_crops - 1)
+
+    return loss/2
+
+
+class Projector(nn.Module):
+    """ Projector for SwAV """
+    def __init__(self, in_dim, hidden_dim=2048, out_dim=128):
+        super().__init__()
+
+        if hidden_dim is None:
+            self.layer1 = nn.Linear(in_dim, out_dim)
+        else:
+            self.layer1 = nn.Sequential(
+                nn.Linear(in_dim, hidden_dim),
+                nn.BatchNorm1d(hidden_dim),
+                nn.ReLU(inplace=True),
+                nn.Linear(hidden_dim, out_dim),
+            )
+    def forward(self, x):
+        x = self.layer1(x)
+        return x
+
+
+if __name__ == '__main__':
+    import torchvision
+    backbone = torchvision.models.resnet50(pretrained=False)
+    feature_size = backbone.fc.in_features
+    backbone.fc = torch.nn.Identity()
+
+    model = SwAV(backbone, feature_size)
+
+    x = torch.rand(4, 3, 224, 224)
+    with torch.no_grad():
+        loss = model.forward(x)
+    print(f'loss = {loss}')
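After the iterations in `sinkhorn`, the transposed output gives each sample a proper assignment distribution over the prototypes: every row sums to 1. A quick check on random scores, reusing the `__main__` setup above (toy sizes, scaled to keep `exp` well-behaved):

import torch

model = SwAV(backbone, feature_size)
scores = torch.randn(16, model.num_prototypes) * 0.1
Q = model.sinkhorn(scores)
print(Q.shape, Q.sum(dim=1)[:4])   # torch.Size([16, 3000]), rows sum to ~1.0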
DiffusionXray-FewShot-LandmarkDetection/ssl_pretraining/config/config.json ADDED
@@ -0,0 +1,22 @@
+{
+    "gpu": 0,
+    "experiment_path": "ssl_pretraining/ssl_pretraining_experiments",
+    "model": {
+        "name": "mocov2",
+        "encoder": "resnet18",
+        "lr": 1e-4,
+        "optimizer": "adamw",
+        "epochs": 20000
+    },
+    "dataset": {
+        "name": "hand",
+        "path": "datasets/",
+        "image_size": 256,
+        "image_channels": 3,
+        "batch_size": 4,
+        "grad_accumulation": 8,
+        "num_workers": null,
+        "pin_memory": true
+    }
+}
+
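With `batch_size` 4 and `grad_accumulation` 8, the optimizer in `main.py` steps once per 32 images, so the effective batch size is 32. A short sketch of how these fields are consumed, mirroring the reads in `main.py` (the path assumes the script is run from the repo root):

import json

config = json.load(open("ssl_pretraining/config/config.json"))
effective_batch = config["dataset"]["batch_size"] * config["dataset"]["grad_accumulation"]
print(config["model"]["name"], effective_batch)   # mocov2 32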
DiffusionXray-FewShot-LandmarkDetection/ssl_pretraining/main.py ADDED
@@ -0,0 +1,350 @@
+import torch
+import numpy as np
+import torchvision
+from builders.moco import MoCo
+from builders.mocov2 import MoCoV2
+from builders.mocov3 import MoCoV3
+from builders.simclr import SimCLR
+from builders.simclrv2 import SimCLRv2
+from builders.dino import DINO
+from builders.byol import BYOL
+from builders.barlow_twins import BarlowTwins
+from segmentation_models_pytorch import Unet as smpUnet
+from utils import *
+from ssl_datasets import *
+import logging
+import os
+import sys
+from tqdm import tqdm
+import time
+import argparse
+import json
+import signal
+import tempfile
+
+# Set random seed
+np.random.seed(42)
+torch.manual_seed(42)
+torch.cuda.manual_seed(42)
+
+
+
+def safe_save(state, filename):
+    """
+    Safely save a model state to a file using atomic operations.
+
+    Args:
+        state (dict): The state to be saved (usually containing model and optimizer states)
+        filename (str): The name of the file to save the state to
+    """
+    # Create a temporary file
+    temp_filename = None
+    try:
+        # Create a temporary file in the same directory as the target file
+        directory = os.path.dirname(filename)
+        with tempfile.NamedTemporaryFile(delete=False, dir=directory) as tmp_file:
+            temp_filename = tmp_file.name
+            # Save the state to the temporary file
+            torch.save(state, temp_filename)
+
+        # If the save was successful, rename the temporary file to the target filename
+        # This operation is atomic on most Unix-like systems
+        os.replace(temp_filename, filename)
+        #print(f"Model safely saved to {filename}")
+    except Exception as e:
+        print(f"Error during safe save: {e}")
+        # If there was an error, remove the temporary file if it was created
+        if temp_filename and os.path.exists(temp_filename):
+            os.remove(temp_filename)
+
+# ------------------------------------------------------------------------
+# MAIN
+# ------------------------------------------------------------------------
+
+if __name__ == "__main__":
+    # Parse arguments from command line
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "-c",
+        "--config",
+        type=str,
+        default="config/config.json",
+        help="Path to the JSON config file."
+    )
+
+    args = parser.parse_args()
+    config = json.load(open(args.config))
+
+    # Config params for training and testing the model
+    ROOT_PATH = config["experiment_path"]
+    DATASET_NAME = config["dataset"]["name"]
+    DATASET_PATH = os.path.join(config["dataset"]["path"], DATASET_NAME)
+    IMAGE_SIZE = config["dataset"]["image_size"]
+    IMAGE_CHANNELS = config["dataset"]["image_channels"]
+    BATCH_SIZE = config["dataset"]["batch_size"]
+    GRAD_ACCUMULATION = config["dataset"]["grad_accumulation"]
+    PIN_MEMORY = config["dataset"]["pin_memory"]
+    NUM_WORKERS = os.cpu_count() if config["dataset"]["num_workers"] is None else config["dataset"]["num_workers"]
+
+    # SSL training params
+    LR = config["model"]["lr"]
+    EPOCHS = config["model"]["epochs"]
+    SSL_METHOD = config["model"]["name"]
+    BACKBONE_NAME = config["model"]["encoder"]
+    OPTIMIZER = config["model"]["optimizer"]
+
+    # Load the dataset
+    train_dataloader, test_dataloader = load_data(DATASET_PATH, IMAGE_SIZE, IMAGE_CHANNELS, BATCH_SIZE, pin_memory=PIN_MEMORY, num_workers=NUM_WORKERS)
+
+    # Save model path and tensorboard writer and path for the experiment
+    PREFIX_PATH = f"{ROOT_PATH}/{DATASET_NAME}/size{IMAGE_SIZE}_ch{IMAGE_CHANNELS}"
+
+    # Create log file for the experiment
+    if not os.path.exists(f'{PREFIX_PATH}/models/{SSL_METHOD}/{BACKBONE_NAME}/log_file.txt'):
+        os.makedirs(f"{PREFIX_PATH}/models/{SSL_METHOD}/{BACKBONE_NAME}/", exist_ok=True)
+
+        with open(f'{PREFIX_PATH}/models/{SSL_METHOD}/{BACKBONE_NAME}/log_file.txt', 'w') as f:
+            pass
+
+    save_model_path = generate_path(f"{PREFIX_PATH}/models/{SSL_METHOD}/{BACKBONE_NAME}")
+    logging.basicConfig(format="%(message)s", level=logging.INFO, filename=f'{PREFIX_PATH}/models/{SSL_METHOD}/{BACKBONE_NAME}/log_file.txt', filemode='a')  # %(asctime)s
+
+
+    print("----------------------------------------- SYSTEM INFO -----------------------------------------")
+    print("Python version: {}".format(sys.version))
+    print("Pytorch version: {}".format(torch.__version__))
+
+    if "CUDA_VISIBLE_DEVICES" in os.environ:
+        GPU = os.environ["CUDA_VISIBLE_DEVICES"]
+    else:
+        GPU = config["gpu"]
+        os.environ["CUDA_VISIBLE_DEVICES"] = f"{GPU}"
+
+    device = "cuda" if torch.cuda.is_available() else "cpu"
+    print(f"Torch GPU Name: {torch.cuda.get_device_name(0)}... Using GPU {GPU}" if device == "cuda" else "Torch GPU not available... Using CPU")
+
+    print("------------------------------------------------------------------------------------------------")
+
+    print("----------------------------------------- CONFIG INFO -----------------------------------------")
+    print(f"Dataset Name: {DATASET_NAME}")
+    print(f"Dataset Path: {DATASET_PATH}")
+    print(f"Image Size: ({IMAGE_SIZE}, {IMAGE_SIZE}, {IMAGE_CHANNELS})")
+    print(f"Batch Size: {BATCH_SIZE} with accumulation of {GRAD_ACCUMULATION}")
+    print(f"SSL Method: {SSL_METHOD}")
+    print(f"Epochs: {EPOCHS}")
+    print(f"Starting Learning Rate: {LR}")
+    print(f"Save Model Path: {save_model_path}")
+    print("------------------------------------------------------------------------------------------------")
+
+    print("----------------------------------------- START TRAINING -----------------------------------------")
+
+    # Initialize backbones for the self-supervised learning method
+    if BACKBONE_NAME == "resnet18":
+        backbone = torchvision.models.resnet18(weights=None)
+        # Change the last layer of the backbone to Identity
+        feature_size = backbone.fc.in_features
+        backbone.fc = torch.nn.Identity()
+
+    elif BACKBONE_NAME == "resnet34":
+        backbone = torchvision.models.resnet34(weights=None)
+        feature_size = backbone.fc.in_features
+        backbone.fc = torch.nn.Identity()
+
+    elif BACKBONE_NAME == "resnet50":
+        backbone = torchvision.models.resnet50(weights=None)
+        feature_size = backbone.fc.in_features
+        backbone.fc = torch.nn.Identity()
+
+    elif BACKBONE_NAME == "resnet101":
+        backbone = torchvision.models.resnet101(weights=None)
+        feature_size = backbone.fc.in_features
+        backbone.fc = torch.nn.Identity()
+
+    elif BACKBONE_NAME == "resnet152":
+        backbone = torchvision.models.resnet152(weights=None)
+        feature_size = backbone.fc.in_features
+        backbone.fc = torch.nn.Identity()
+
+    elif BACKBONE_NAME == "resnext50_32x4d":
+        backbone = torchvision.models.resnext50_32x4d(weights=None)
+        feature_size = backbone.fc.in_features
+        backbone.fc = torch.nn.Identity()
+
+    elif BACKBONE_NAME == "resnext101_32x8d":
+        backbone = torchvision.models.resnext101_32x8d(weights=None)
+        feature_size = backbone.fc.in_features
+        backbone.fc = torch.nn.Identity()
+
+    elif BACKBONE_NAME == "vgg11":
+        backbone = torchvision.models.vgg11(weights=None)
+        feature_size = backbone.classifier[6].in_features
+        backbone.classifier[6] = torch.nn.Identity()
+
+    elif BACKBONE_NAME == "vgg13":
+        backbone = torchvision.models.vgg13(weights=None)
+        feature_size = backbone.classifier[6].in_features
+        backbone.classifier[6] = torch.nn.Identity()
+
+    elif BACKBONE_NAME == "vgg16":
+        backbone = torchvision.models.vgg16(weights=None)
+        feature_size = backbone.classifier[6].in_features
+        backbone.classifier[6] = torch.nn.Identity()
+
+    elif BACKBONE_NAME == "vgg19":
+        backbone = torchvision.models.vgg19(weights=None)
+        feature_size = backbone.classifier[6].in_features
+        backbone.classifier[6] = torch.nn.Identity()
+
+    elif BACKBONE_NAME == "densenet121":
+        backbone = torchvision.models.densenet121(weights=None)
+        feature_size = backbone.classifier.in_features
+        backbone.classifier = torch.nn.Identity()
+
+    elif BACKBONE_NAME == "densenet169":
+        backbone = torchvision.models.densenet169(weights=None)
+        feature_size = backbone.classifier.in_features
+        backbone.classifier = torch.nn.Identity()
+
+    elif BACKBONE_NAME == "densenet201":
+        backbone = torchvision.models.densenet201(weights=None)
+        feature_size = backbone.classifier.in_features
+        backbone.classifier = torch.nn.Identity()
+
+    elif BACKBONE_NAME == "densenet161":
+        backbone = torchvision.models.densenet161(weights=None)
+        feature_size = backbone.classifier.in_features
+        backbone.classifier = torch.nn.Identity()
+    else:
+        raise ValueError(f"Unknown backbone: {BACKBONE_NAME}")
+
+
+    # Initialize the SSL method model and optimizer
+    if SSL_METHOD == "moco":
+        model = MoCo(backbone, feature_size, projection_dim=128, K=65536, m=0.999, temperature=0.07, image_size=IMAGE_SIZE)
+    elif SSL_METHOD == "mocov2":
+        model = MoCoV2(backbone, feature_size, projection_dim=128, K=65536, m=0.999, temperature=0.07, image_size=IMAGE_SIZE)
+    elif SSL_METHOD == "mocov3":
+        model = MoCoV3(backbone, feature_size, projection_dim=256, hidden_dim=2048, temperature=0.5, m=0.999, image_size=IMAGE_SIZE)
+
+    elif SSL_METHOD == "simclr":
+        model = SimCLR(backbone, feature_size, projection_dim=128, temperature=0.5, image_size=IMAGE_SIZE)
+    elif SSL_METHOD == "simclrv2":
+        model = SimCLRv2(backbone, feature_size, projection_dim=128, temperature=0.5, image_size=IMAGE_SIZE)
+
+    elif SSL_METHOD == "dino":
+        model = DINO(backbone, feature_size, projection_dim=256, hidden_dim=2048, bottleneck_dim=256, temp_s=0.1, temp_t=0.5, m=0.5, lamda=0.996, num_crops=6)  # image_size=IMAGE_SIZE)
+
+    elif SSL_METHOD == "byol":
+        model = BYOL(backbone, feature_size, projection_dim=256, hidden_dim=4096, tau=0.996, image_size=IMAGE_SIZE)
+
+    elif SSL_METHOD == "barlow_twins":
+        model = BarlowTwins(backbone, feature_size, projection_dim=8192, hidden_dim=8192, lamda=0.005, image_size=IMAGE_SIZE)
+
+    else:
+        raise ValueError(f"Unknown SSL method: {SSL_METHOD}")
+
+
+    model = model.to(device)
+
+    if OPTIMIZER == "adam":
+        optimizer = torch.optim.Adam(model.parameters(), lr=LR)
+    elif OPTIMIZER == "adamw":
+        optimizer = torch.optim.AdamW(model.parameters(), lr=LR)
+    else:
+        raise ValueError(f"Unknown optimizer: {OPTIMIZER} - Choose either 'adam' or 'adamw'")
+
+
+    # Check if there are any saved models in the save_model_path
+    if os.path.exists(f'{save_model_path}/last_model.pth'):
+
+        # Get the latest model weights
+        model_path = f'{save_model_path}/last_model.pth'
+        checkpoint = torch.load(model_path, map_location=device)
+
+        # Load the model weights and optimizer state
+        model.load_state_dict(checkpoint['model_state_dict'])
+        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
+        starting_epoch = checkpoint['epoch']
+        n_iter = checkpoint['iteration']
+        print(f"Loading model weights from epoch {starting_epoch} and iteration {n_iter}")
+        del checkpoint
+    else:
+        starting_epoch = 0
+        n_iter = 0
+
+    # switch to train mode
+    model.train()
+
+    # train model
+    best_loss = float('inf')
+    early_stopping = 0
+    loss_margin = 0
+    epochs_losses = []
+
+    start_time = time.time()
+
+    try:
+        for epoch in tqdm(range(starting_epoch, EPOCHS), initial=starting_epoch, total=EPOCHS, desc="Epoch"):
+            epoch_loss = 0
+
+            for batch_idx, data in enumerate(train_dataloader):
+                images = data['image'].to(device)
+
+                loss = model(images)
+                loss = loss / GRAD_ACCUMULATION  # normalize the loss to account for gradient accumulation
+
+                loss.backward()
+
+                if ((batch_idx + 1) % GRAD_ACCUMULATION == 0) or (batch_idx + 1 == len(train_dataloader)):
+                    optimizer.step()
+                    optimizer.zero_grad()
+                    n_iter += 1
+
+                    # Safely save the last model weights and optimizer state
+                    state = {
+                        'model_state_dict': model.state_dict(),
+                        'optimizer_state_dict': optimizer.state_dict(),
+                        'epoch': epoch,
+                        'iteration': n_iter
+                    }
+                    safe_save(state, f'{save_model_path}/last_model.pth')
+                    del state
+
+                epoch_loss += loss.item()
+
+                # Save the backbone weights every 2k iterations
+                if n_iter % 2000 == 0 and n_iter != 0 and batch_idx % GRAD_ACCUMULATION == 0:
+                    print(f"Saving model weights at epoch {epoch} and iteration {n_iter}")
+                    logging.info(f"\t\tEPOCH: {epoch} | ITER: {n_iter} | LOSS: {loss.item():.4f}")
+                    # Save the model weights and optimizer state
+                    torch.save(model.backbone.state_dict(), f'{save_model_path}/model_epoch{epoch}_iter{n_iter}.pth')
+
+                if n_iter % 20000 == 0 and n_iter != 0:
+                    print(f"Reaching 20k iterations... exiting training of model {SSL_METHOD} with backbone {BACKBONE_NAME}")
+                    torch.save(model.backbone.state_dict(), f'{save_model_path}/model_epoch{epoch}_iter{n_iter}.pth')
+                    raise StopIteration
+
+            # Average loss for the epoch
+            avg_loss = epoch_loss / len(train_dataloader)
+            epochs_losses.append(avg_loss)
+
+            # Update tqdm description every 10 epochs
+            if epoch % 10 == 0:
+                tqdm.write(f"Epoch {epoch}, Iteration: {n_iter}, Loss: {avg_loss:.4f}")
+
+    # Catch keyboard interrupts, the StopIteration raised at 20k iterations, kill signals, and any other exception
+    except (KeyboardInterrupt, StopIteration, SystemExit, Exception) as e:
+        print(f"\nTraining interrupted ({type(e).__name__}). Saving final state...")
+        state = {
+            'model_state_dict': model.state_dict(),
+            'optimizer_state_dict': optimizer.state_dict(),
+            'epoch': epoch,
+            'iteration': n_iter
+        }
+        safe_save(state, f'{save_model_path}/last_model.pth')
+        print(f"Final state saved in {save_model_path}/last_model.pth - Exiting...")
+
+    finally:
+        print(f"Training completed in {time.strftime('%H:%M:%S', time.gmtime(time.time() - start_time))}")
DiffusionXray-FewShot-LandmarkDetection/ssl_pretraining/ssl_datasets.py ADDED
@@ -0,0 +1,205 @@
+
+
+
+# ------------------------------------------------------------------------
+# Libraries
+# ------------------------------------------------------------------------
+
+import os
+import numpy as np
+from PIL import Image
+from torch.utils.data import Dataset
+import albumentations as A
+import albumentations.pytorch
+
+
+
+# ------------------------------------------------------------------------
+# Chest Dataset
+# ------------------------------------------------------------------------
+
+# Load the dataset from the train and test folders in the root directory
+class ChestDataset(Dataset):
+    def __init__(self, root_dir, channels=1, transform=None, phase='train'):
+
+        self.root_dir = root_dir
+        self.transform = transform
+        self.channels = channels
+
+        self.pth_Image = os.path.join(root_dir, 'pngs')
+
+        # file index
+        files = [i[:-4] for i in sorted(os.listdir(self.pth_Image))]
+
+        exclude_list = ['CHNCXR_0059_0', 'CHNCXR_0178_0', 'CHNCXR_0228_0', 'CHNCXR_0267_0', 'CHNCXR_0295_0', 'CHNCXR_0310_0', 'CHNCXR_0285_0', 'CHNCXR_0276_0', 'CHNCXR_0303_0']
+        if exclude_list is not None:
+            st = set(exclude_list)
+            files = [f for f in files if f not in st]
+
+        n = len(files)
+        train_num = 195
+        val_num = 34
+        test_num = n - train_num - val_num
+        if phase == 'train':
+            self.image_files = files[:train_num+val_num]
+        elif phase == 'test':
+            self.image_files = files[-test_num:]
+        elif phase == 'all':
+            self.image_files = files
+        else:
+            raise Exception(f"Unknown phase: {phase}")
+
+
+    def __len__(self):
+        return len(self.image_files)
+
+    def __getitem__(self, idx):
+        image_name = self.image_files[idx]
+
+        image = self.read_image(os.path.join(self.pth_Image, image_name + '.png'))
+
+        data_dict = {'name': image_name, 'image': image}
+
+        return data_dict
+
+    def read_image(self, image_path):
+
+        if self.channels == 3:
+            image = Image.open(image_path).convert('RGB')
+            image_np = np.array(image).astype(np.float32)
+
+        elif self.channels == 1:
+            image = Image.open(image_path).convert('L')
+            image_np = np.array(image).astype(np.float32)
+            image_np = np.expand_dims(image_np, axis=2)  # add channel dimension
+        else:
+            raise ValueError('Channels must be either 1 or 3')
+
+        if self.transform:
+            image = self.transform(image=image_np)['image']
+
+        return image
+
+
+# ------------------------------------------------------------------------
+# HAND Dataset
+# ------------------------------------------------------------------------
+
+# Load the dataset from the train and test folders in the root directory
+class HandDataset(Dataset):
+    def __init__(self, root_dir, channels=1, transform=None, phase='train'):
+
+        self.root_dir = root_dir
+        self.transform = transform
+        self.channels = channels
+
+        self.pth_Image = os.path.join(root_dir, 'jpg')
+
+        # file index
+        files = [i[:-4] for i in sorted(os.listdir(self.pth_Image))]
+
+        n = len(files)
+        train_num = 550
+        val_num = 59
+        test_num = n - train_num - val_num
+        if phase == 'train':
+            self.image_files = files[:train_num+val_num]
+        elif phase == 'test':
+            self.image_files = files[-test_num:]
+        elif phase == 'all':
+            self.image_files = files
+        else:
+            raise Exception(f"Unknown phase: {phase}")
+
+
+    def __len__(self):
+        return len(self.image_files)
+
+    def __getitem__(self, idx):
+        image_name = self.image_files[idx]
+
+        image = self.read_image(os.path.join(self.pth_Image, image_name + '.jpg'))
+
+        data_dict = {'name': image_name, 'image': image}
+
+        return data_dict
+
+    def read_image(self, image_path):
+
+        if self.channels == 3:
+            image = Image.open(image_path).convert('RGB')
+            image_np = np.array(image).astype(np.float32)
+
+        elif self.channels == 1:
+            image = Image.open(image_path).convert('L')
+            image_np = np.array(image).astype(np.float32)
+            image_np = np.expand_dims(image_np, axis=2)
+        else:
+            raise ValueError('Channels must be either 1 or 3')
+
+        if self.transform:
+            image = self.transform(image=image_np)['image']
+
+        return image
+
+
+# ------------------------------------------------------------------------
+# CEPH Dataset
+# ------------------------------------------------------------------------
+
+class CephaloDataset(Dataset):
+    def __init__(self, root_dir, channels=1, transform=None, phase='train'):
+        self.root_dir = root_dir
+        self.transform = transform
+        self.channels = channels
+
+        self.pth_Image = os.path.join(root_dir, 'jpg')
+
+        # file index
+        files = [i[:-4] for i in sorted(os.listdir(self.pth_Image))]
+
+        n = len(files)
+        train_num = 130
+        val_num = 20
+        test_num = n - train_num - val_num
+
+        if phase == 'train':
+            self.image_files = files[:train_num+val_num]
+        elif phase == 'test':
+            self.image_files = files[-test_num:]
+        elif phase == 'all':
+            self.image_files = files
+        else:
+            raise Exception(f"Unknown phase: {phase}")
+
+    def __len__(self):
+        return len(self.image_files)
+
+    def __getitem__(self, idx):
+        image_name = self.image_files[idx]
+
+        image = self.read_image(os.path.join(self.pth_Image, image_name + '.jpg'))
+
+        data_dict = {'name': image_name, 'image': image}
+
+        return data_dict
+
+    def read_image(self, image_path):
+
+        if self.channels == 3:
+            image = Image.open(image_path).convert('RGB')
+            image_np = np.array(image).astype(np.float32)
+
+        elif self.channels == 1:
+            image = Image.open(image_path).convert('L')
+            image_np = np.array(image).astype(np.float32)
+            image_np = np.expand_dims(image_np, axis=2)
+        else:
+            raise ValueError('Channels must be either 1 or 3')
+
+        if self.transform:
+            image = self.transform(image=image_np)['image']
+
+        return image
+
+
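All three datasets return `{'name', 'image'}` dictionaries and use a fixed, index-based train/test split. A hedged usage sketch, where the dataset path is illustrative and the transform comes from `utils.get_transforms`:

from utils import get_transforms

ds = ChestDataset("datasets/chest", channels=3,
                  transform=get_transforms(256, phase="train"), phase="train")
sample = ds[0]
print(sample["name"], sample["image"].shape)   # e.g. CHNCXR_0001_0 torch.Size([3, 256, 256])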
DiffusionXray-FewShot-LandmarkDetection/ssl_pretraining/utils.py ADDED
@@ -0,0 +1,174 @@
+
+# ------------------------------------------------------------------------
+# Libraries
+# ------------------------------------------------------------------------
+
+# General libraries
+import os
+import cv2
+from matplotlib import pyplot as plt
+from prettytable import PrettyTable
+
+# Deep learning libraries
+import torch
+import torchvision
+from torch.utils.data import DataLoader
+import albumentations as A
+import albumentations.pytorch
+
+# Custom libraries
+from ssl_datasets import ChestDataset, HandDataset, CephaloDataset
+
+# ------------------------------------------------------------------------
+# Logging and Utilities
+# ------------------------------------------------------------------------
+
+# Generate path if it does not exist
+def generate_path(path):
+    if not os.path.exists(path):
+        os.makedirs(path)
+    return path
+
+# Get the current GPU memory usage by tensors in megabytes for a given device
+def gpu_memory_usage(device):
+    allocated = torch.cuda.memory_allocated(device)
+    reserved = torch.cuda.memory_reserved(device)
+    print(f'Allocated memory: {allocated / (1024 ** 2):.2f} MB')
+    print(f'Reserved memory: {reserved / (1024 ** 2):.2f} MB')
+
+# Compute the number of trainable parameters in a model
+def count_parameters(model):
+    table = PrettyTable(["Modules", "Parameters"])
+    total_params = 0
+    for name, parameter in model.named_parameters():
+        if not parameter.requires_grad:
+            continue
+        params = parameter.numel()
+        table.add_row([name, params])
+        total_params += params
+    #print(table)
+    print(f"Total Trainable Params: {total_params}")
+    return table, total_params
+
+# ------------------------------------------------------------------------
+# Visualizations
+# ------------------------------------------------------------------------
+
+def plot_images(images):
+    plt.figure(figsize=(32, 32))
+    plt.imshow(torch.cat([
+        torch.cat([i for i in images.cpu()], dim=-1),
+    ], dim=-2).permute(1, 2, 0).cpu())
+    plt.show()
+
+
+def save_images(images, path, **kwargs):
+
+    x_grid = torchvision.utils.make_grid(images[0], **kwargs)
+    x_hat_grid = torchvision.utils.make_grid(images[1], **kwargs)
+    diff_grid = torchvision.utils.make_grid(images[2], **kwargs)
+
+    # Apply 'viridis' colormap to the difference image
+    #diff_grid = cm.viridis(diff_grid.detach().cpu().numpy())
+
+    grid = torch.cat((x_grid, x_hat_grid, diff_grid), dim=1)
+    ndarr = grid.permute(1, 2, 0).to('cpu').numpy()
+
+    plt.figure(figsize=(10, 10))
+    plt.imshow(ndarr)
+    plt.axis('off')
+    plt.savefig(path, bbox_inches='tight')
+    plt.close()
+
+def check_pixels_range_of_image(tensor):
+    # Ensure the input is a tensor
+    assert torch.is_tensor(tensor), "Input must be a tensor"
+
+    # Flatten the tensor to get all pixel values
+    pixel_values = tensor.view(-1)
+
+    # Compute min and max values
+    min_val = pixel_values.min().item()
+    max_val = pixel_values.max().item()
+
+    #print(f"The range of pixel values is: {min_val} to {max_val}")
+    return min_val, max_val
+
+
+def compute_diff(x, x_hat):
+    # Ensure both tensors are on the same device
+    assert x.device == x_hat.device, "Tensors must be on the same device"
+
+    # Ensure both tensors have the same shape
+    assert x.shape == x_hat.shape, "Tensors must have the same shape"
+
+    x_min, x_max = check_pixels_range_of_image(x)
+    x_hat_min, x_hat_max = check_pixels_range_of_image(x_hat)
+
+    # Ensure both tensors have pixel values in the range [0, 1]
+    #assert x_min >= 0 and x_max <= 1, f"Pixel values of x must be in the range [0, 1]. Actual range: [{x_min}, {x_max}]"
+    #assert x_hat_min >= 0 and x_hat_max <= 1, f"Pixel values of x_hat must be in the range [0, 1]. Actual range: [{x_hat_min}, {x_hat_max}]"
+    #print(f"Pixel values of x are in the range [{x_min}, {x_max}]")
+    #print(f"Pixel values of x_hat are in the range [{x_hat_min}, {x_hat_max}]")
+    # Compute absolute difference
+    diff = torch.abs(x - x_hat)
+
+    # Normalize to the range [0, 1] and return the difference image
+    diff = (diff - diff.min()) / (diff.max() - diff.min())
+
+    return diff
+
+
+# ------------------------------------------------------------------------
+# Data Loading and Preprocessing
+# ------------------------------------------------------------------------
+
+def get_transforms(image_size, phase='train'):
+    resize_image_size = int(image_size*1.02)
+    if phase == 'train':
+        return A.Compose([
+            #A.ShiftScaleRotate(shift_limit=0.02, scale_limit=0, rotate_limit=2, border_mode=cv2.BORDER_REPLICATE, p=0.5),
+            #A.Perspective(scale=(0, 0.02), pad_mode=cv2.BORDER_REPLICATE, p=0.5),
+            A.Resize(image_size, image_size),
+            #A.RandomCrop(height=image_size, width=image_size),
+            #A.HorizontalFlip(p=1),
+            #A.RandomBrightnessContrast(brightness_limit=(-0.1, 0.1), contrast_limit=(-0.2, 0.2), p=0.5),
+            A.Normalize(normalization='min_max'),
+            A.pytorch.ToTensorV2()
+        ])
+
+    elif phase == 'test':
+        return A.Compose([
+            A.Resize(image_size, image_size),
+            A.Normalize(normalization='min_max'),
+            A.pytorch.ToTensorV2()
+        ])
+    else:
+        raise ValueError('phase must be either "train" or "test"')
+
+
+def load_data(dataset_path, image_size, image_channels, batch_size, pin_memory=False, num_workers=os.cpu_count()):
+    dataset_name = os.path.basename(dataset_path)
+
+    transforms_train = get_transforms(image_size, phase='train')
+    transforms_test = get_transforms(image_size, phase='test')
+
+    if dataset_name == 'chest':
+        train_dataset = ChestDataset(dataset_path, channels=image_channels, transform=transforms_train, phase='train')
+        test_dataset = ChestDataset(dataset_path, channels=image_channels, transform=transforms_test, phase='test')
+    elif dataset_name == 'hand':
+        train_dataset = HandDataset(dataset_path, channels=image_channels, transform=transforms_train, phase='train')
+        test_dataset = HandDataset(dataset_path, channels=image_channels, transform=transforms_test, phase='test')
+    elif dataset_name == 'cephalo':
+        train_dataset = CephaloDataset(dataset_path, channels=image_channels, transform=transforms_train, phase='train')
+        test_dataset = CephaloDataset(dataset_path, channels=image_channels, transform=transforms_test, phase='test')
+    else:
+        raise ValueError('Dataset name must be either "chest" or "hand" or "cephalo"')
+
+    train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers, pin_memory=pin_memory, drop_last=True)
+    test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers, pin_memory=pin_memory, drop_last=False)
+
+    return train_dataloader, test_dataloader
+
+
+
+
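`load_data` infers the dataset class from the final path component, so the directory layout has to match the names it checks. A minimal sketch (the path and sizes mirror the default config and are assumptions about the local setup):

train_loader, test_loader = load_data("datasets/hand", image_size=256, image_channels=3,
                                      batch_size=4, pin_memory=True, num_workers=2)
batch = next(iter(train_loader))
print(batch["image"].shape)   # torch.Size([4, 3, 256, 256])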