# Prediction configuration for the Unet_TS_CT model (TotalSegmentator pipeline).
# NOTE(review): nesting reconstructed from a blame-view paste that had stripped
# all indentation — confirm nesting (especially the trailing scalar keys) against
# the original file before relying on it.
Predictor:
  Model:
    classpath: model:Unet_TS_CT
    Unet_TS_CT:
      outputs_criterions: None
      channels:
        - 1
        - 32
        - 64
        - 128
        - 256
        - 320
        - 320
  Dataset:
    groups_src:
      Volume_0:
        groups_dest:
          Volume:
            transforms:
              TensorCast:
                dtype: float32
                inverse: false
              Canonical:
                inverse: true
              ResampleToResolution:
                spacing:
                  - 1.5
                  - 1.5
                  - 1.5
                inverse: true
              Padding:
                padding:
                  - 32
                  - 32
                  - 32
                  - 32
                  - 32
                  - 32
                mode: constant
                inverse: true
            patch_transforms: None
            is_input: true
        augmentations: None
    Patch:
      patch_size:
        - 96
        - 128
        - 160
      overlap: 32
      mask: None
      pad_value: 0
      extend_slice: 0
    subset: None
    filter: None
    dataset_filenames:
      - ./Dataset/:mha
    use_cache: false
    batch_size: 1
  outputs_dataset:
    Head:Softmax:
      OutputDataset:
        name_class: OutSameAsGroupDataset
        before_reduction_transforms:
          Softmax:
            dim: 0
          Argmax:
            dim: 0
          TensorCast:
            dtype: uint8
            inverse: true
        after_reduction_transforms:
          Sum:
            dim: 0
        final_transforms: None
        dataset_filename: Output:mha
        group: Output
        same_as_group: Volume_0:Volume
        patch_combine: Cosinus
        inverse_transform: true
        reduction: Concat
  train_name: TotalSegmentator
  manual_seed: 32
  gpu_checkpoints: None
  images_log: None
  combine: Concat
  autocast: true
  data_log: None