{
    "imports": [
        "$import datetime",
        "$import numpy",
        "$import torch",
        "$import ignite",
        "$import scripts"
    ],
    "bundle_root": ".",
    "ckpt_path": "$@bundle_root + '/models/model.pt'",
    "device": "$torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')",
    "val_interval": 1,
    "num_iters": 400,
    "batch_size": 600,
    "num_epochs": 100,
    "num_substeps": 3,
    "learning_rate": 0.0001,
    "num_workers": 8,
    "dataset_dir": ".",
    "dataset_file": "$@dataset_dir + '/valvelandmarks.npz'",
    "output_dir": "$datetime.datetime.now().strftime('./results/output_%y%m%d_%H%M%S')",
    "network_def": {
        "_target_": "scripts.valve_landmarks.PointRegressor",
        "in_shape": [
            1,
            256,
            256
        ],
        "out_shape": [
            2,
            10
        ],
        "channels": [
            8,
            16,
            32,
            64,
            128
        ],
        "strides": [
            2,
            2,
            2,
            2,
            2
        ]
    },
    "network": "$@network_def.to(@device)",
    "im_shape": [
        1,
        256,
        256
    ],
    "both_keys": [
        "image",
        "label"
    ],
    "rand_prob": 0.5,
    "train_transforms": {
        "_target_": "Compose",
        "transforms": [
            {
                "_target_": "EnsureTyped",
                "keys": "@both_keys",
                "data_type": "numpy",
                "dtype": "$(numpy.float32, numpy.int32)"
            },
            {
                "_target_": "EnsureTyped",
                "keys": "@both_keys"
            },
            {
                "_target_": "ScaleIntensityd",
                "keys": "image"
            },
            {
                "_target_": "EnsureChannelFirstd",
                "keys": "@both_keys",
                "channel_dim": "no_channel"
            },
            {
                "_target_": "RandAxisFlipd",
                "keys": "@both_keys",
                "prob": "@rand_prob"
            },
            {
                "_target_": "RandRotate90d",
                "keys": "@both_keys",
                "prob": "@rand_prob"
            },
            {
                "_target_": "RandSmoothFieldAdjustIntensityd",
                "keys": "image",
                "prob": "@rand_prob",
                "spatial_size": "@im_shape",
                "rand_size": [
                    5,
                    5
                ],
                "gamma": [
                    0.1,
                    1
                ],
                "mode": "$monai.utils.InterpolateMode.BICUBIC",
                "align_corners": true
            },
            {
                "_target_": "RandGaussianNoised",
                "keys": "image",
                "prob": "@rand_prob",
                "std": 0.05
            },
            {
                "_target_": "scripts.valve_landmarks.RandFourierDropoutd",
                "keys": "image",
                "prob": "@rand_prob"
            },
            {
                "_target_": "scripts.valve_landmarks.RandImageLMDeformd",
                "prob": "@rand_prob",
                "spatial_size": [
                    256,
                    256
                ],
                "rand_size": [
                    7,
                    7
                ],
                "pad": 2,
                "field_mode": "$monai.utils.InterpolateMode.BICUBIC",
                "align_corners": true,
                "def_range": 0.05
            },
            {
                "_target_": "scripts.valve_landmarks.RandLMShiftd",
                "keys": "@both_keys",
                "prob": "@rand_prob",
                "spatial_size": [
                    256,
                    256
                ],
                "max_shift": 8
            },
            {
                "_target_": "Lambdad",
                "keys": "label",
                "func": "$scripts.valve_landmarks.convert_lm_image_t"
            }
        ]
    },
    "eval_transforms": {
        "_target_": "Compose",
        "transforms": [
            {
                "_target_": "EnsureTyped",
                "keys": "@both_keys",
                "data_type": "numpy",
                "dtype": "$(numpy.float32, numpy.int32)"
            },
            {
                "_target_": "EnsureTyped",
                "keys": "@both_keys"
            },
            {
                "_target_": "ScaleIntensityd",
                "keys": "image"
            },
            {
                "_target_": "EnsureChannelFirstd",
                "keys": "@both_keys",
                "channel_dim": "no_channel"
            },
            {
                "_target_": "Lambdad",
                "keys": "label",
                "func": "$scripts.valve_landmarks.convert_lm_image_t"
            }
        ]
    },
    "train_dataset": {
        "_target_": "NPZDictItemDataset",
        "npzfile": "$@dataset_file",
        "keys": {
            "trainImgs": "image",
            "trainLMImgs": "label"
        },
        "transform": "@train_transforms"
    },
    "eval_dataset": {
        "_target_": "NPZDictItemDataset",
        "npzfile": "$@dataset_file",
        "keys": {
            "testImgs": "image",
            "testLMImgs": "label"
        },
        "transform": "@eval_transforms"
    },
    "sampler": {
        "_target_": "torch.utils.data.WeightedRandomSampler",
        "weights": "$torch.ones(len(@train_dataset))",
        "replacement": true,
        "num_samples": "$@num_iters*@batch_size"
    },
    "train_dataloader": {
        "_target_": "ThreadDataLoader",
        "dataset": "@train_dataset",
        "batch_size": "@batch_size",
        "repeats": "@num_substeps",
        "num_workers": "@num_workers",
        "sampler": "@sampler"
    },
    "eval_dataloader": {
        "_target_": "DataLoader",
        "dataset": "@eval_dataset",
        "batch_size": "@batch_size",
        "num_workers": "@num_workers"
    },
    "lossfn": {
        "_target_": "torch.nn.L1Loss"
    },
    "optimizer": {
        "_target_": "torch.optim.Adam",
        "params": "$@network.parameters()",
        "lr": "@learning_rate"
    },
    "evaluator": {
        "_target_": "SupervisedEvaluator",
        "device": "@device",
        "val_data_loader": "@eval_dataloader",
        "network": "@network",
        "key_val_metric": {
            "val_mean_dist": {
                "_target_": "ignite.metrics.MeanPairwiseDistance",
                "output_transform": "$scripts.valve_landmarks._output_lm_trans"
            }
        },
        "metric_cmp_fn": "$lambda current, prev: prev < 0 or current < prev",
        "val_handlers": [
            {
                "_target_": "StatsHandler",
                "output_transform": "$lambda x: None"
            }
        ]
    },
    "handlers": [
        {
            "_target_": "ValidationHandler",
            "validator": "@evaluator",
            "epoch_level": true,
            "interval": "@val_interval"
        },
        {
            "_target_": "CheckpointSaver",
            "save_dir": "@output_dir",
            "save_dict": {
                "net": "@network"
            },
            "save_interval": 1,
            "save_final": true,
            "epoch_level": true
        }
    ],
    "trainer": {
        "_target_": "SupervisedTrainer",
        "max_epochs": "@num_epochs",
        "device": "@device",
        "train_data_loader": "@train_dataloader",
        "network": "@network",
        "loss_function": "@lossfn",
        "optimizer": "@optimizer",
        "key_train_metric": null,
        "train_handlers": "@handlers"
    },
    "training": [
        "$@trainer.run()"
    ]
}