{
"imports": [
"$import torch",
"$from datetime import datetime",
"$from pathlib import Path",
"$from PIL import Image",
"$from scripts.utils import visualize_2d_image"
],
"bundle_root": ".",
"model_dir": "$@bundle_root + '/models'",
"dataset_dir": "/workspace/data/medical",
"output_dir": "$@bundle_root + '/output'",
"create_output_dir": "$Path(@output_dir).mkdir(exist_ok=True)",
"device": "$torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')",
"output_postfix": "$datetime.now().strftime('%Y%m%d_%H%M%S')",
"channel": 0,
"spatial_dims": 2,
"image_channels": 1,
"latent_channels": 1,
"infer_patch_size": [
240,
240
],
"infer_batch_size_img": 1,
"infer_batch_size_slice": 1,
"autoencoder_def": {
"_target_": "monai.networks.nets.autoencoderkl.AutoencoderKL",
"spatial_dims": "@spatial_dims",
"in_channels": "@image_channels",
"out_channels": "@image_channels",
"latent_channels": "@latent_channels",
"channels": [
64,
128,
256
],
"num_res_blocks": 2,
"norm_num_groups": 32,
"norm_eps": 1e-06,
"attention_levels": [
false,
false,
false
],
"with_encoder_nonlocal_attn": true,
"with_decoder_nonlocal_attn": true,
"include_fc": false
},
"load_autoencoder_path": "$@bundle_root + '/models/model_autoencoder.pt'",
"load_autoencoder": "$@autoencoder_def.load_old_state_dict(torch.load(@load_autoencoder_path))",
"autoencoder": "$@autoencoder_def.to(@device)",
"preprocessing_transforms": [
{
"_target_": "LoadImaged",
"keys": "image"
},
{
"_target_": "EnsureChannelFirstd",
"keys": "image"
},
{
"_target_": "Lambdad",
"keys": "image",
"func": "$lambda x: x[@channel, :, :, :]"
},
{
"_target_": "EnsureChannelFirstd",
"keys": "image",
"channel_dim": "no_channel"
},
{
"_target_": "EnsureTyped",
"keys": "image"
},
{
"_target_": "Orientationd",
"keys": "image",
"axcodes": "RAS"
},
{
"_target_": "CenterSpatialCropd",
"keys": "image",
"roi_size": "$[@infer_patch_size[0], @infer_patch_size[1], 20]"
},
{
"_target_": "ScaleIntensityRangePercentilesd",
"keys": "image",
"lower": 0,
"upper": 100,
"b_min": 0,
"b_max": 1
}
],
"crop_transforms": [
{
"_target_": "DivisiblePadd",
"keys": "image",
"k": [
4,
4,
1
]
},
{
"_target_": "RandSpatialCropSamplesd",
"keys": "image",
"random_size": false,
"roi_size": "$[@infer_patch_size[0], @infer_patch_size[1], 1]",
"num_samples": "@infer_batch_size_slice"
},
{
"_target_": "SqueezeDimd",
"keys": "image",
"dim": 3
}
],
"final_transforms": [
{
"_target_": "ScaleIntensityRangePercentilesd",
"keys": "image",
"lower": 0,
"upper": 100,
"b_min": 0,
"b_max": 1
}
],
"preprocessing": {
"_target_": "Compose",
"transforms": "$@preprocessing_transforms + @crop_transforms + @final_transforms"
},
"dataset": {
"_target_": "monai.apps.DecathlonDataset",
"root_dir": "@dataset_dir",
"task": "Task01_BrainTumour",
"section": "validation",
"cache_rate": 0.0,
"num_workers": 8,
"download": false,
"transform": "@preprocessing"
},
"dataloader": {
"_target_": "DataLoader",
"dataset": "@dataset",
"batch_size": 1,
"shuffle": true,
"num_workers": 0
},
"recon_img_pil": "$Image.fromarray(visualize_2d_image(@recon_img), 'RGB')",
"orig_img_pil": "$Image.fromarray(visualize_2d_image(@input_img[0,0,...]), 'RGB')",
"input_img": "$monai.utils.first(@dataloader)['image'].to(@device)",
"recon_img": "$@autoencoder(@input_img)[0][0,0,...]",
"run": [
"$@create_output_dir",
"$@load_autoencoder",
"$@orig_img_pil.save(@output_dir+'/orig_img_'+@output_postfix+'.png')",
"$@recon_img_pil.save(@output_dir+'/recon_img_'+@output_postfix+'.png')"
]
}
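
To execute this config, MONAI's bundle runner resolves the "run" list and evaluates each "$..." expression in order. Below is a minimal sketch of doing the same programmatically with monai.bundle.ConfigParser; the config filename ("configs/inference_autoencoder.json") and the dataset override are assumptions about the bundle layout, not values taken from this file.

from monai.bundle import ConfigParser

parser = ConfigParser()
parser.read_config("configs/inference_autoencoder.json")  # assumed path within the bundle

# Optionally override entries before resolving, e.g. to point at a local
# dataset copy (hypothetical path).
parser["dataset_dir"] = "/data/Task01_BrainTumour"

# Resolving "run" evaluates its expressions in order: create the output
# directory, load the autoencoder weights, then save the original and
# reconstructed slices as PNGs.
parser.get_parsed_content("run")

The equivalent CLI entry point in recent MONAI versions is "python -m monai.bundle run --config_file configs/inference_autoencoder.json", where the run id defaults to "run".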
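The image-saving steps rely on scripts.utils.visualize_2d_image, which ships in the bundle's scripts/ directory and is not shown here. A hypothetical stand-in, assuming it min-max normalizes a 2D slice to uint8 and replicates it to three channels so PIL accepts it with mode 'RGB' (the bundle's actual helper may differ):

import numpy as np
import torch


def visualize_2d_image(image):
    # Hypothetical stand-in: return an HxWx3 uint8 array for a 2D tensor or array.
    if isinstance(image, torch.Tensor):
        image = image.detach().cpu().numpy()
    image = image.astype(np.float32)
    lo, hi = float(image.min()), float(image.max())
    scaled = (image - lo) / (hi - lo + 1e-8)  # min-max normalize; epsilon avoids divide-by-zero
    gray = (scaled * 255).astype(np.uint8)
    return np.stack([gray] * 3, axis=-1)  # three identical channels -> 'RGB'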