# Predictor configuration
# NOTE(review): leading indentation was lost in extraction; the nesting below is
# reconstructed from the key hierarchy — verify against the loading framework's schema.
Predictor:
  Model:
    classpath: UNetpp:UNetpp
    UNetpp:
      outputs_criterions: None
      pretrained: false
  Dataset:
    groups_src:
      CBCT:
        groups_dest:
          CBCT:
            transforms:
              Clip:
                min_value: min
                max_value: percentile:99.5
                save_clip_min: false
                save_clip_max: false
                mask: MASK
              Normalize:
                lazy: false
                channels: None
                min_value: -1
                max_value: 1
              Mask:
                path: MASK
                value_outside: -1
        patch_transforms: None
        is_input: true
      MASK:
        groups_dest:
          MASK:
            transforms: None
        patch_transforms: None
        is_input: false
    augmentations:
      DataAugmentation_0:
        data_augmentations:
          Flip:
            f_prob:
              - 0
              - 0
              - 1
            prob: 1
        nb: 1
      DataAugmentation_1:
        data_augmentations:
          Flip:
            f_prob:
              - 0
              - 1
              - 0
            prob: 1
        nb: 1
    Patch:
      patch_size:
        - 1
        - 512
        - 512
      overlap: None
      mask: None
      pad_value: -1
      extend_slice: 2
    subset: None
    filter: None
    dataset_filenames:
      - ./Dataset/AB:a:mha
      - ./Dataset/TH:a:mha
    use_cache: false
    batch_size: 32
  outputs_dataset:
    Head:Tanh:
      OutputDataset:
        name_class: OutSameAsGroupDataset
        before_reduction_transforms: None
        after_reduction_transforms: None
        final_transforms:
          UnNormalize:UnNormalize: {}
          Mask:
            path: MASK
            value_outside: -1024
          TensorCast:
            dtype: int16
        dataset_filename: Dataset:mha
        group: sCT
        same_as_group: CBCT:CBCT
        patch_combine: None
        inverse_transform: false
        reduction: Mean
  train_name: Out
  manual_seed: 32
  gpu_checkpoints: None
  images_log: None
  combine: Mean
  autocast: false
  data_log: None