yssszzzzzzzzy committed on
Commit
9134fc5
·
verified ·
1 Parent(s): 8cc27bc

Upload 7 files

Browse files
options/DefocusDeblur_Single_8bit_HINT.yml ADDED
@@ -0,0 +1,135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # general settings
2
+ name: DefocusDeblur_Single_8bit_HINT_n
3
+ model_type: ImageCleanModel
4
+ scale: 1
5
+ num_gpu: 8 # set num_gpu: 0 for cpu mode
6
+ manual_seed: 100
7
+
8
+ # dataset and data loader settings
9
+ datasets:
10
+ train:
11
+ name: TrainSet
12
+ type: Dataset_PairedImage
13
+ dataroot_gt: ./Defocus_Deblurring/Datasets/train/DPDD/target_crops
14
+ dataroot_lq: ./Defocus_Deblurring/Datasets/train/DPDD/inputC_crops
15
+ geometric_augs: true
16
+
17
+ filename_tmpl: '{}'
18
+ io_backend:
19
+ type: disk
20
+
21
+ # data loader
22
+ use_shuffle: true
23
+ num_worker_per_gpu: 8
24
+ batch_size_per_gpu: 8
25
+
26
+ ### -------------Progressive training--------------------------
27
+ mini_batch_sizes: [6,3,2] # Batch size per gpu
28
+ iters: [100000,100000,100000]
29
+ gt_size: 384 # Max patch size for progressive training
30
+ gt_sizes: [128,256,320] # Patch sizes for progressive training.
31
+ ### ------------------------------------------------------------
32
+
33
+ ### ------- Training on single fixed-patch size 128x128---------
34
+ # mini_batch_sizes: [8]
35
+ # iters: [300000]
36
+ # gt_size: 128
37
+ # gt_sizes: [128]
38
+ ### ------------------------------------------------------------
39
+
40
+ dataset_enlarge_ratio: 1
41
+ prefetch_mode: ~
42
+
43
+ val:
44
+ name: ValSet
45
+ type: Dataset_PairedImage
46
+ dataroot_gt: ./Defocus_Deblurring/Datasets/val/DPDD/target_crops
47
+ dataroot_lq: ./Defocus_Deblurring/Datasets/val/DPDD/inputC_crops
48
+ io_backend:
49
+ type: disk
50
+
51
+ # network structures
52
+ network_g:
53
+ type: HINT
54
+ inp_channels: 3
55
+ out_channels: 3
56
+ dim: 48
57
+ num_blocks: [4,6,6,8]
58
+ num_refinement_blocks: 4
59
+ heads: [8,8,8,8]
60
+ ffn_expansion_factor: 2.66
61
+ bias: False
62
+ LayerNorm_type: WithBias
63
+ dual_pixel_task: false
64
+
65
+
66
+ # path
67
+ path:
68
+ pretrain_network_g: ~
69
+ strict_load_g: true
70
+ resume_state: ~
71
+
72
+ # training settings
73
+ train:
74
+ total_iter: 300000
75
+ warmup_iter: -1 # no warm up
76
+ use_grad_clip: true
77
+
78
+ # Split 300k iterations into two cycles.
79
+ # 1st cycle: fixed 3e-4 LR for 92k iters.
80
+ # 2nd cycle: cosine annealing (3e-4 to 1e-6) for 208k iters.
81
+ scheduler:
82
+ type: CosineAnnealingRestartCyclicLR
83
+ periods: [92000, 208000]
84
+ restart_weights: [1,1]
85
+ eta_mins: [0.0003,0.000001]
86
+
87
+ mixing_augs:
88
+ mixup: false
89
+ mixup_beta: 1.2
90
+ use_identity: true
91
+
92
+ optim_g:
93
+ type: AdamW
94
+ lr: !!float 3e-4
95
+ weight_decay: !!float 1e-4
96
+ betas: [0.9, 0.999]
97
+
98
+ # losses
99
+ pixel_opt:
100
+ type: L1Loss
101
+ loss_weight: 1
102
+ reduction: mean
103
+ fft_loss_opt:
104
+ type: FFTLoss
105
+ loss_weight: 0.1
106
+ reduction: mean
107
+
108
+ # validation settings
109
+ val:
110
+ window_size: 8
111
+ val_freq: !!float 4e3
112
+ save_img: false
113
+ rgb2bgr: true
114
+ use_image: false
115
+ max_minibatch: 8
116
+
117
+ metrics:
118
+ psnr: # metric name, can be arbitrary
119
+ type: calculate_psnr
120
+ crop_border: 0
121
+ test_y_channel: false
122
+
123
+ # logging settings
124
+ logger:
125
+ print_freq: 1000
126
+ save_checkpoint_freq: !!float 4e3
127
+ use_tb_logger: true
128
+ wandb:
129
+ project: ~
130
+ resume_id: ~
131
+
132
+ # dist training settings
133
+ dist_params:
134
+ backend: nccl
135
+ port: 29500
options/Deraining_HINT_syn_rain100L.yml ADDED
@@ -0,0 +1,136 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # general settings
2
+ name: Deraining_HINT_rain100L
3
+ model_type: ImageCleanModel
4
+ scale: 1
5
+ num_gpu: 4 # set num_gpu: 0 for cpu mode
6
+ manual_seed: 100
7
+
8
+ # dataset and data loader settings
9
+ datasets:
10
+ train:
11
+ name: TrainSet
12
+ type: Dataset_PairedImage
13
+ dataroot_gt: ./dataset/Rain100L/train/clean
14
+ dataroot_lq: ./dataset/Rain100L/train/rainy
15
+ geometric_augs: true
16
+
17
+ filename_tmpl: '{}'
18
+ io_backend:
19
+ type: disk
20
+
21
+ # data loader
22
+ use_shuffle: true
23
+ num_worker_per_gpu: 8
24
+ batch_size_per_gpu: 8
25
+
26
+ ### -------------Progressive training--------------------------
27
+ mini_batch_sizes: [6,4,3,1] # Batch size per gpu
28
+ iters: [92000,64000,48000,96000]
29
+ gt_size: 384 # Max patch size for progressive training
30
+ gt_sizes: [128,160,192,256] # Patch sizes for progressive training.
31
+ ### ------------------------------------------------------------
32
+
33
+ ### ------- Training on single fixed-patch size 128x128---------
34
+ # mini_batch_sizes: [8]
35
+ # iters: [300000]
36
+ # gt_size: 128
37
+ # gt_sizes: [128]
38
+ ### ------------------------------------------------------------
39
+
40
+ dataset_enlarge_ratio: 1
41
+ prefetch_mode: ~
42
+
43
+ val:
44
+ name: ValSet
45
+ type: Dataset_PairedImage
46
+ dataroot_gt: ./dataset/Rain100L/test/clean
47
+ dataroot_lq: ./dataset/Rain100L/test/rainy
48
+ io_backend:
49
+ type: disk
50
+
51
+ # network structures
52
+ network_g:
53
+ type: HINT
54
+ inp_channels: 3
55
+ out_channels: 3
56
+ dim: 48
57
+ num_blocks: [4,6,6,8]
58
+ num_refinement_blocks: 4
59
+ heads: [8,8,8,8]
60
+ ffn_expansion_factor: 2.66
61
+ bias: False
62
+ LayerNorm_type: WithBias
63
+ dual_pixel_task: false
64
+
65
+
66
+ # path
67
+ path:
68
+ pretrain_network_g: ~
69
+ strict_load_g: true
70
+ resume_state: ~
71
+
72
+ # training settings
73
+ train:
74
+ total_iter: 300000
75
+ warmup_iter: -1 # no warm up
76
+ use_grad_clip: true
77
+
78
+ # Split 300k iterations into two cycles.
79
+ # 1st cycle: fixed 3e-4 LR for 92k iters.
80
+ # 2nd cycle: cosine annealing (3e-4 to 1e-6) for 208k iters.
81
+ scheduler:
82
+ type: CosineAnnealingRestartCyclicLR
83
+ periods: [92000, 208000]
84
+ restart_weights: [1,1]
85
+ eta_mins: [0.0003,0.000001]
86
+
87
+ mixing_augs:
88
+ mixup: false
89
+ mixup_beta: 1.2
90
+ use_identity: true
91
+
92
+ optim_g:
93
+ type: AdamW
94
+ lr: !!float 3e-4
95
+ weight_decay: !!float 1e-4
96
+ betas: [0.9, 0.999]
97
+
98
+ # losses
99
+ pixel_opt:
100
+ type: L1Loss
101
+ loss_weight: 1
102
+ reduction: mean
103
+ fft_loss_opt:
104
+ type: FFTLoss
105
+ loss_weight: 0.1
106
+ reduction: mean
107
+
108
+
109
+ # validation settings
110
+ val:
111
+ window_size: 8
112
+ val_freq: !!float 4e3
113
+ save_img: false
114
+ rgb2bgr: true
115
+ use_image: true
116
+ max_minibatch: 8
117
+
118
+ metrics:
119
+ psnr: # metric name, can be arbitrary
120
+ type: calculate_psnr
121
+ crop_border: 0
122
+ test_y_channel: true
123
+
124
+ # logging settings
125
+ logger:
126
+ print_freq: 1000
127
+ save_checkpoint_freq: !!float 4e3
128
+ use_tb_logger: true
129
+ wandb:
130
+ project: ~
131
+ resume_id: ~
132
+
133
+ # dist training settings
134
+ dist_params:
135
+ backend: nccl
136
+ port: 29500
options/Desnow_snow100k_HINT.yml ADDED
@@ -0,0 +1,129 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # general settings
2
+ name: Desnow_HINT
3
+ model_type: ImageCleanModel
4
+ scale: 1
5
+ num_gpu: 8 # set num_gpu: 0 for cpu mode
6
+ manual_seed: 100
7
+
8
+ # dataset and data loader settings
9
+ datasets:
10
+ train:
11
+ name: TrainSet
12
+ type: Dataset_PairedImage
13
+ dataroot_gt: ./dataset/Snow100K/train2500/Gt
14
+ dataroot_lq: ./dataset/Snow100K/train2500/Snow
15
+ geometric_augs: true
16
+
17
+ filename_tmpl: '{}'
18
+ io_backend:
19
+ type: disk
20
+
21
+ # data loader
22
+ use_shuffle: true
23
+ num_worker_per_gpu: 8
24
+ batch_size_per_gpu: 8
25
+
26
+ ### -------------Progressive training--------------------------
27
+ mini_batch_sizes: [6,5,2,1,1]
28
+ iters: [50000,40000,30000,20000,10000]
29
+ gt_size: 128 # NOTE(review): gt_sizes below go up to 384 — confirm this max patch size should not be 384
30
+ gt_sizes: [128,192,256,320,384]
31
+ ### ------------------------------------------------------------
32
+
33
+ dataset_enlarge_ratio: 1
34
+ prefetch_mode: ~
35
+
36
+ val:
37
+ name: ValSet
38
+ type: Dataset_PairedImage
39
+ dataroot_gt: ./dataset/Snow100K/test2000/Gt
40
+ dataroot_lq: ./dataset/Snow100K/test2000/Snow
41
+ gt_size: 256
42
+ io_backend:
43
+ type: disk
44
+
45
+ # network structures
46
+ network_g:
47
+ type: HINT
48
+ inp_channels: 3
49
+ out_channels: 3
50
+ dim: 48
51
+ num_blocks: [4,6,6,8]
52
+ num_refinement_blocks: 4
53
+ heads: [8,8,8,8]
54
+ ffn_expansion_factor: 2.66
55
+ bias: False
56
+ LayerNorm_type: WithBias
57
+ dual_pixel_task: False
58
+
59
+
60
+ # path
61
+ path:
62
+ pretrain_network_g: ~
63
+ strict_load_g: true
64
+ resume_state: ~
65
+
66
+ # training settings
67
+ train:
68
+ total_iter: 300000
69
+ warmup_iter: -1 # no warm up
70
+ use_grad_clip: true
71
+
72
+ # Split 300k iterations into two cycles.
73
+ # 1st cycle: fixed 3e-4 LR for 92k iters.
74
+ # 2nd cycle: cosine annealing (3e-4 to 1e-6) for 208k iters.
75
+ scheduler:
76
+ type: CosineAnnealingRestartCyclicLR
77
+ periods: [92000, 208000]
78
+ restart_weights: [1,1]
79
+ eta_mins: [0.0003,0.000001]
80
+
81
+ mixing_augs:
82
+ mixup: true
83
+ mixup_beta: 1.2
84
+ use_identity: true
85
+
86
+ optim_g:
87
+ type: AdamW
88
+ lr: !!float 3e-4
89
+ weight_decay: !!float 1e-4
90
+ betas: [0.9, 0.999]
91
+
92
+ # losses
93
+ pixel_opt:
94
+ type: L1Loss
95
+ loss_weight: 1
96
+ reduction: mean
97
+ fft_loss_opt:
98
+ type: FFTLoss
99
+ loss_weight: 0.1
100
+ reduction: mean
101
+
102
+ # validation settings
103
+ val:
104
+ window_size: 8
105
+ val_freq: !!float 4e3
106
+ save_img: false
107
+ rgb2bgr: true
108
+ use_image: false
109
+ max_minibatch: 8
110
+
111
+ metrics:
112
+ psnr: # metric name, can be arbitrary
113
+ type: calculate_psnr
114
+ crop_border: 0
115
+ test_y_channel: false
116
+
117
+ # logging settings
118
+ logger:
119
+ print_freq: 1000
120
+ save_checkpoint_freq: !!float 4e3
121
+ use_tb_logger: true
122
+ wandb:
123
+ project: ~
124
+ resume_id: ~
125
+
126
+ # dist training settings
127
+ dist_params:
128
+ backend: nccl
129
+ port: 29500
options/GaussianColorDenoising_HINT.yml ADDED
@@ -0,0 +1,131 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # general settings
2
+ name: GaussianColorDenoising_HINT
3
+ model_type: ImageCleanModel
4
+ scale: 1
5
+ num_gpu: 8 # set num_gpu: 0 for cpu mode
6
+ manual_seed: 100
7
+
8
+ # dataset and data loader settings
9
+ datasets:
10
+ train:
11
+ name: TrainSet
12
+ type: Dataset_GaussianDenoising
13
+ sigma_type: random
14
+ sigma_range: [0,50]
15
+ in_ch: 3 ## RGB image
16
+ dataroot_gt: ./Denoising/Datasets/train/WB
17
+ dataroot_lq: none
18
+ geometric_augs: true
19
+
20
+ filename_tmpl: '{}'
21
+ io_backend:
22
+ type: disk
23
+
24
+ # data loader
25
+ use_shuffle: true
26
+ num_worker_per_gpu: 8
27
+ batch_size_per_gpu: 8
28
+
29
+ # -------------Progressive training--------------------------
30
+ mini_batch_sizes: [6,4,3,1,1,1] # Batch size per gpu
31
+ iters: [92000,64000,48000,36000,36000,24000]
32
+ gt_size: 256 # Max patch size for progressive training — NOTE(review): gt_sizes below go up to 384; confirm whether this should be 384
33
+ gt_sizes: [128,160,192,256,320,384] # Patch sizes for progressive training.
34
+ ### ------------------------------------------------------------
35
+
36
+ dataset_enlarge_ratio: 1
37
+ prefetch_mode: ~
38
+
39
+ val:
40
+ name: ValSet
41
+ type: Dataset_GaussianDenoising
42
+ sigma_test: 25
43
+ in_ch: 3 ## RGB image
44
+ dataroot_gt: ./Denoising/Datasets/test/CBSD68
45
+ dataroot_lq: none
46
+ gt_size: 256
47
+ io_backend:
48
+ type: disk
49
+
50
+ # network structures
51
+ network_g:
52
+ type: HINT
53
+ inp_channels: 3
54
+ out_channels: 3
55
+ dim: 48
56
+ num_blocks: [4,6,6,8]
57
+ num_refinement_blocks: 4
58
+ heads: [8,8,8,8]
59
+ ffn_expansion_factor: 2.66
60
+ bias: False
61
+ LayerNorm_type: WithBias
62
+ dual_pixel_task: False
63
+ # path
64
+ path:
65
+ pretrain_network_g: ~
66
+ strict_load_g: true
67
+ resume_state: ~
68
+
69
+ # training settings
70
+ train:
71
+ total_iter: 300000
72
+ warmup_iter: -1 # no warm up
73
+ use_grad_clip: true
74
+
75
+ # Split 300k iterations into two cycles.
76
+ # 1st cycle: fixed 3e-4 LR for 92k iters.
77
+ # 2nd cycle: cosine annealing (3e-4 to 1e-6) for 208k iters.
78
+ scheduler:
79
+ type: CosineAnnealingRestartCyclicLR
80
+ periods: [92000, 208000]
81
+ restart_weights: [1,1]
82
+ eta_mins: [0.0003,0.000001]
83
+
84
+ mixing_augs:
85
+ mixup: true
86
+ mixup_beta: 1.2
87
+ use_identity: true
88
+
89
+ optim_g:
90
+ type: AdamW
91
+ lr: !!float 3e-4
92
+ weight_decay: !!float 1e-4
93
+ betas: [0.9, 0.999]
94
+
95
+ # losses
96
+ pixel_opt:
97
+ type: L1Loss
98
+ loss_weight: 1
99
+ reduction: mean
100
+ fft_loss_opt:
101
+ type: FFTLoss
102
+ loss_weight: 0.1
103
+ reduction: mean
104
+ # validation settings
105
+ val:
106
+ window_size: 8
107
+ val_freq: !!float 4e3
108
+ save_img: false
109
+ rgb2bgr: true
110
+ use_image: false
111
+ max_minibatch: 8
112
+
113
+ metrics:
114
+ psnr: # metric name, can be arbitrary
115
+ type: calculate_psnr
116
+ crop_border: 0
117
+ test_y_channel: false
118
+
119
+ # logging settings
120
+ logger:
121
+ print_freq: 1000
122
+ save_checkpoint_freq: !!float 4e3
123
+ use_tb_logger: true
124
+ wandb:
125
+ project: ~
126
+ resume_id: ~
127
+
128
+ # dist training settings
129
+ dist_params:
130
+ backend: nccl
131
+ port: 29500
options/HINT_LOL_v2_real.yml ADDED
@@ -0,0 +1,129 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # general settings
2
+ name: Enhancement_HINT
3
+ model_type: ImageCleanModel
4
+ scale: 1
5
+ num_gpu: 1 # set num_gpu: 0 for cpu mode
6
+ manual_seed: 100
7
+
8
+ # dataset and data loader settings
9
+ datasets:
10
+ train:
11
+ name: TrainSet
12
+ type: Dataset_PairedImage
13
+ dataroot_gt: ./dataset/LOLv2/Real_captured/Train/Normal
14
+ dataroot_lq: ./dataset/LOLv2/Real_captured/Train/Low
15
+ geometric_augs: true
16
+
17
+ filename_tmpl: '{}'
18
+ io_backend:
19
+ type: disk
20
+
21
+ # data loader
22
+ use_shuffle: true
23
+ num_worker_per_gpu: 8
24
+ batch_size_per_gpu: 8
25
+
26
+ ## -------------Progressive training--------------------------
27
+ mini_batch_sizes: [6] # Batch size per gpu
28
+ iters: [150000]
29
+ gt_size: 384 # Max patch size for progressive training
30
+ gt_sizes: [128] # Patch sizes for progressive training.
31
+ ### ------------------------------------------------------------
32
+
33
+
34
+ dataset_enlarge_ratio: 1
35
+ prefetch_mode: ~
36
+
37
+ val:
38
+ name: ValSet
39
+ type: Dataset_PairedImage
40
+ dataroot_gt: /mnt/sda/zsh/dataset/LOLv2/Real_captured/Test/Normal
41
+ dataroot_lq: /mnt/sda/zsh/dataset/LOLv2/Real_captured/Test/Low
42
+ io_backend:
43
+ type: disk
44
+
45
+ # network structures
46
+ network_g:
47
+ type: HINT
48
+ inp_channels: 3
49
+ out_channels: 3
50
+ dim: 48
51
+ num_blocks: [4,6,6,8]
52
+ num_refinement_blocks: 4
53
+ heads: [8,8,8,8]
54
+ ffn_expansion_factor: 2.66
55
+ bias: False
56
+ LayerNorm_type: WithBias
57
+ dual_pixel_task: False
58
+
59
+ # path
60
+ path:
61
+ pretrain_network_g: ~
62
+ strict_load_g: true
63
+ resume_state: ~
64
+
65
+ # training settings
66
+ train:
67
+ total_iter: 150000
68
+ warmup_iter: -1 # no warm up
69
+ use_grad_clip: true
70
+
71
+ # Split 150k iterations into two cycles.
72
+ # 1st cycle: fixed LR for 46k iters.
73
+ # 2nd cycle: cosine annealing (down to 1e-6) for 104k iters.
74
+ scheduler:
75
+ type: CosineAnnealingRestartCyclicLR
76
+ periods: [46000, 104000]
77
+ restart_weights: [1,1]
78
+ eta_mins: [0.0003,0.000001]
79
+
80
+ mixing_augs:
81
+ mixup: true
82
+ mixup_beta: 1.2
83
+ use_identity: true
84
+
85
+ optim_g:
86
+ type: Adam
87
+ lr: !!float 2e-4
88
+ # weight_decay: !!float 1e-4
89
+ betas: [0.9, 0.999]
90
+
91
+ pixel_opt:
92
+ type: L1Loss
93
+ loss_weight: 1
94
+ reduction: mean
95
+
96
+ fft_loss_opt:
97
+ type: FFTLoss
98
+ loss_weight: 0.1
99
+ reduction: mean
100
+
101
+
102
+ # validation settings
103
+ val:
104
+ window_size: 4
105
+ val_freq: !!float 1e3
106
+ save_img: false
107
+ rgb2bgr: true
108
+ use_image: false
109
+ max_minibatch: 8
110
+
111
+ metrics:
112
+ psnr: # metric name, can be arbitrary
113
+ type: calculate_psnr
114
+ crop_border: 0
115
+ test_y_channel: false
116
+
117
+ # logging settings
118
+ logger:
119
+ print_freq: 500
120
+ save_checkpoint_freq: !!float 1e3
121
+ use_tb_logger: true
122
+ wandb:
123
+ project: ~
124
+ resume_id: ~
125
+
126
+ # dist training settings
127
+ dist_params:
128
+ backend: nccl
129
+ port: 29500
options/HINT_LOL_v2_synthetic.yml ADDED
@@ -0,0 +1,128 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # general settings
2
+ name: Enhancement_HINT
3
+ model_type: ImageCleanModel
4
+ scale: 1
5
+ num_gpu: 1 # set num_gpu: 0 for cpu mode
6
+ manual_seed: 100
7
+
8
+ # dataset and data loader settings
9
+ datasets:
10
+ train:
11
+ name: TrainSet
12
+ type: Dataset_PairedImage
13
+ dataroot_gt: ./dataset/LOLv2/Synthetic/Train/Normal
14
+ dataroot_lq: ./dataset/LOLv2/Synthetic/Train/Low
15
+ geometric_augs: true
16
+
17
+ filename_tmpl: '{}'
18
+ io_backend:
19
+ type: disk
20
+
21
+ # data loader
22
+ use_shuffle: true
23
+ num_worker_per_gpu: 8
24
+ batch_size_per_gpu: 8
25
+
26
+ ### -------------Progressive training--------------------------
27
+ mini_batch_sizes: [6,5,2,1,1]
28
+ iters: [50000,40000,30000,20000,10000]
29
+ gt_size: 128 # NOTE(review): gt_sizes below go up to 384 — confirm this max patch size should not be 384
30
+ gt_sizes: [128,192,256,320,384]
31
+ ### ------------------------------------------------------------
32
+
33
+ dataset_enlarge_ratio: 1
34
+ prefetch_mode: ~
35
+
36
+ val:
37
+ name: ValSet
38
+ type: Dataset_PairedImage
39
+ dataroot_gt: /mnt/sda/zsh/dataset/LOLv2/Synthetic/Test/Normal
40
+ dataroot_lq: /mnt/sda/zsh/dataset/LOLv2/Synthetic/Test/Low
41
+ io_backend:
42
+ type: disk
43
+
44
+ # network structures
45
+ network_g:
46
+ type: HINT
47
+ inp_channels: 3
48
+ out_channels: 3
49
+ dim: 48
50
+ num_blocks: [4,6,6,8]
51
+ num_refinement_blocks: 4
52
+ heads: [8,8,8,8]
53
+ ffn_expansion_factor: 2.66
54
+ bias: False
55
+ LayerNorm_type: WithBias
56
+ dual_pixel_task: False
57
+
58
+
59
+ # path
60
+ path:
61
+ pretrain_network_g: ~
62
+ strict_load_g: true
63
+ resume_state: ~
64
+
65
+ # training settings
66
+ train:
67
+ total_iter: 150000
68
+ warmup_iter: -1 # no warm up
69
+ use_grad_clip: true
70
+
71
+ # Split 300k iterations into two cycles.
72
+ # 1st cycle: fixed 3e-4 LR for 92k iters.
73
+ # 2nd cycle: cosine annealing (3e-4 to 1e-6) for 208k iters.
74
+ scheduler:
75
+ type: CosineAnnealingRestartCyclicLR
76
+ periods: [46000, 104000]
77
+ restart_weights: [1,1]
78
+ eta_mins: [0.0003,0.000001]
79
+
80
+ mixing_augs:
81
+ mixup: true
82
+ mixup_beta: 1.2
83
+ use_identity: true
84
+
85
+ optim_g:
86
+ type: Adam
87
+ lr: !!float 2e-4
88
+ # weight_decay: !!float 1e-4
89
+ betas: [0.9, 0.999]
90
+
91
+ pixel_opt:
92
+ type: L1Loss
93
+ loss_weight: 1
94
+ reduction: mean
95
+
96
+ fft_loss_opt:
97
+ type: FFTLoss
98
+ loss_weight: 0.1
99
+ reduction: mean
100
+
101
+ # validation settings
102
+ val:
103
+ window_size: 4
104
+ val_freq: !!float 1e3
105
+ save_img: false
106
+ rgb2bgr: true
107
+ use_image: false
108
+ max_minibatch: 8
109
+
110
+ metrics:
111
+ psnr: # metric name, can be arbitrary
112
+ type: calculate_psnr
113
+ crop_border: 0
114
+ test_y_channel: false
115
+
116
+ # logging settings
117
+ logger:
118
+ print_freq: 500
119
+ save_checkpoint_freq: !!float 1e3
120
+ use_tb_logger: true
121
+ wandb:
122
+ project: ~
123
+ resume_id: ~
124
+
125
+ # dist training settings
126
+ dist_params:
127
+ backend: nccl
128
+ port: 29500
options/RealDehazing_HINT.yml ADDED
@@ -0,0 +1,131 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # general settings
2
+ name: Dehazing_HINT
3
+ model_type: ImageCleanModel
4
+ scale: 1
5
+ num_gpu: 8 # set num_gpu: 0 for cpu mode
6
+ manual_seed: 100
7
+
8
+ # dataset and data loader settings
9
+ datasets:
10
+ train:
11
+ name: TrainSet
12
+ type: Dataset_PairedImage_dehazeSOT
13
+ dataroot_gt: ./dataset/haze
14
+ dataroot_lq: ./dataset/haze
15
+ geometric_augs: true
16
+
17
+ filename_tmpl: '{}'
18
+ io_backend:
19
+ type: disk
20
+
21
+ # data loader
22
+ use_shuffle: true
23
+ num_worker_per_gpu: 8
24
+ batch_size_per_gpu: 8
25
+
26
+ ## -------------Progressive training--------------------------
27
+ mini_batch_sizes: [6,1]
28
+ iters: [200000,100000]
29
+ gt_size: 256
30
+ gt_sizes: [128,256]
31
+ ## ------------------------------------------------------------
32
+
33
+ dataset_enlarge_ratio: 1
34
+ prefetch_mode: ~
35
+
36
+ val:
37
+ name: ValSet
38
+ type: Dataset_PairedImage_dehazeSOT
39
+ dataroot_gt: ./dataset/haze
40
+ dataroot_lq: ./dataset/haze
41
+ gt_size: 256
42
+ io_backend:
43
+ type: disk
44
+
45
+ # network structures
46
+
47
+ # network structures
48
+ network_g:
49
+ type: HINT
50
+ inp_channels: 3
51
+ out_channels: 3
52
+ dim: 48
53
+ num_blocks: [4,6,6,8]
54
+ num_refinement_blocks: 4
55
+ heads: [8,8,8,8]
56
+ ffn_expansion_factor: 2.66
57
+ bias: False
58
+ LayerNorm_type: WithBias
59
+ dual_pixel_task: False
60
+
61
+
62
+ # path
63
+ path:
64
+ pretrain_network_g: ~
65
+ strict_load_g: true
66
+ resume_state: ~
67
+
68
+ # training settings
69
+ train:
70
+ total_iter: 300000
71
+ warmup_iter: -1 # no warm up
72
+ use_grad_clip: true
73
+
74
+ # Split 300k iterations into two cycles.
75
+ # 1st cycle: fixed 3e-4 LR for 92k iters.
76
+ # 2nd cycle: cosine annealing (3e-4 to 1e-6) for 208k iters.
77
+ scheduler:
78
+ type: CosineAnnealingRestartCyclicLR
79
+ periods: [92000, 208000]
80
+ restart_weights: [1,1]
81
+ eta_mins: [0.0003,0.000001]
82
+
83
+ mixing_augs:
84
+ mixup: true
85
+ mixup_beta: 1.2
86
+ use_identity: true
87
+
88
+ optim_g:
89
+ type: AdamW
90
+ lr: !!float 3e-4
91
+ weight_decay: !!float 1e-4
92
+ betas: [0.9, 0.999]
93
+
94
+ # losses
95
+ pixel_opt:
96
+ type: L1Loss
97
+ loss_weight: 1
98
+ reduction: mean
99
+ fft_loss_opt:
100
+ type: FFTLoss
101
+ loss_weight: 0.1
102
+ reduction: mean
103
+
104
+ # validation settings
105
+ val:
106
+ window_size: 8
107
+ val_freq: !!float 4e3
108
+ save_img: false
109
+ rgb2bgr: true
110
+ use_image: false
111
+ max_minibatch: 8
112
+
113
+ metrics:
114
+ psnr: # metric name, can be arbitrary
115
+ type: calculate_psnr
116
+ crop_border: 0
117
+ test_y_channel: false
118
+
119
+ # logging settings
120
+ logger:
121
+ print_freq: 1000
122
+ save_checkpoint_freq: !!float 4e3
123
+ use_tb_logger: true
124
+ wandb:
125
+ project: ~
126
+ resume_id: ~
127
+
128
+ # dist training settings
129
+ dist_params:
130
+ backend: nccl
131
+ port: 29500