Predictor:
  Model:
    classpath: Model:UNetpp
    UNetpp:
      outputs_criterions: None
      pretrained: false
  Dataset:
    groups_src:
      Volume_0:
        groups_dest:
          Volume:
            transforms:
              Clip/0:
                min_value: -32000
                max_value: 32000
                save_clip_min: false
                save_clip_max: false
                mask: None
              TensorCast:
                dtype: int16
                inverse: false
              ResampleToResolution:
                spacing:
                - 1
                - 1
                - 3
                inverse: true
              Clip/1:
                min_value: min
                max_value: percentile:99.5
                save_clip_min: false
                save_clip_max: false
                mask: None
              Normalize:
                lazy: false
                channels: None
                min_value: -1
                max_value: 1
                inverse: false
            patch_transforms: None
            is_input: true
    augmentations:
      DataAugmentation_0:
        data_augmentations:
          Flip:
            f_prob:
            - 0
            - 0.5
            - 0.5
            prob: 1
        nb: 2
    Patch:
      patch_size:
      - 1
      - 512
      - 512
      overlap: None
      mask: None
      pad_value: -1
      extend_slice: 2
    subset: None
    filter: None
    dataset_filenames:
    - ./Dataset:mha
    use_cache: false
    batch_size: 8
  outputs_dataset:
    Head:Tanh:
      OutputDataset:
        name_class: OutSameAsGroupDataset
        before_reduction_transforms: 
          UnNormalize:
            min_value: -1024
            max_value: 3071
          TensorCast:
            dtype: int16
            inverse: false
        after_reduction_transforms: 
          InferenceStack:
            dataset: Predictions/ImpactSynth/Output:mha
            name: InferenceStack
            mode: mean
        final_transforms: None
        dataset_filename: Output:mha
        inverse_transform: true
        group: sCT
        same_as_group: Volume_0:Volume
        patch_combine: None
        reduction: Concat
        Concat: {}
  train_name: ImpactSynth
  manual_seed: 32
  gpu_checkpoints: None
  images_log: None
  combine: Concat
  autocast: false
  data_log: None
  Concat: {}