jy395 committed on
Commit
9385ebc
·
1 Parent(s): aedcb75

Restore config and code files from old commit

Browse files
simvq_1k/d_star_w_0.1/config.yaml ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # lightning.pytorch==2.2.5
2
+ seed_everything: 0
3
+ trainer:
4
+ accelerator: gpu
5
+ strategy: ddp_find_unused_parameters_true
6
+ devices: 4
7
+ num_nodes: 1
8
+ precision: 16-mixed
9
+ logger:
10
+ class_path: lightning.pytorch.loggers.TensorBoardLogger
11
+ init_args:
12
+ save_dir: vq_log/simvq_1k
13
+ name: null
14
+ version: d_star_w_0.1
15
+ log_graph: false
16
+ default_hp_metric: true
17
+ prefix: ''
18
+ sub_dir: null
19
+ comment: ''
20
+ purge_step: null
21
+ max_queue: 10
22
+ flush_secs: 120
23
+ filename_suffix: ''
24
+ callbacks:
25
+ - class_path: lightning.pytorch.callbacks.ModelCheckpoint
26
+ init_args:
27
+ dirpath: vq_log/simvq_1k/d_star_w_0.1/checkpoints
28
+ filename: null
29
+ monitor: null
30
+ verbose: false
31
+ save_last: null
32
+ save_top_k: -1
33
+ save_weights_only: false
34
+ mode: min
35
+ auto_insert_metric_name: true
36
+ every_n_train_steps: null
37
+ train_time_interval: null
38
+ every_n_epochs: null
39
+ save_on_train_epoch_end: null
40
+ enable_version_counter: true
41
+ - class_path: lightning.pytorch.callbacks.LearningRateMonitor
42
+ init_args:
43
+ logging_interval: step
44
+ log_momentum: false
45
+ log_weight_decay: false
46
+ fast_dev_run: false
47
+ max_epochs: 50
48
+ min_epochs: null
49
+ max_steps: -1
50
+ min_steps: null
51
+ max_time: null
52
+ limit_train_batches: null
53
+ limit_val_batches: null
54
+ limit_test_batches: null
55
+ limit_predict_batches: null
56
+ overfit_batches: 0.0
57
+ val_check_interval: null
58
+ check_val_every_n_epoch: 1
59
+ num_sanity_val_steps: 0
60
+ log_every_n_steps: 100
61
+ enable_checkpointing: null
62
+ enable_progress_bar: null
63
+ enable_model_summary: null
64
+ accumulate_grad_batches: 1
65
+ gradient_clip_val: null
66
+ gradient_clip_algorithm: null
67
+ deterministic: null
68
+ benchmark: null
69
+ inference_mode: true
70
+ use_distributed_sampler: true
71
+ profiler: null
72
+ detect_anomaly: false
73
+ barebones: false
74
+ plugins: null
75
+ sync_batchnorm: false
76
+ reload_dataloaders_every_n_epochs: 0
77
+ default_root_dir: null
78
+ model:
79
+ class_path: taming.models.vq.VQModel
80
+ init_args:
81
+ ddconfig:
82
+ double_z: false
83
+ z_channels: 128
84
+ resolution: 128
85
+ in_channels: 3
86
+ out_ch: 3
87
+ ch: 128
88
+ ch_mult:
89
+ - 1
90
+ - 2
91
+ - 2
92
+ - 4
93
+ num_res_blocks: 2
94
+ lossconfig:
95
+ target: taming.modules.losses.vqperceptual.VQLPIPSWithDiscriminator
96
+ params:
97
+ disc_conditional: false
98
+ disc_in_channels: 3
99
+ disc_start: 1000
100
+ disc_weight: 0.8
101
+ d_star_weight: 0.1
102
+ gen_loss_weight: 0.1
103
+ codebook_weight: 0.1
104
+ commit_weight: 1.0
105
+ codebook_enlarge_ratio: 0
106
+ codebook_enlarge_steps: 2000
107
+ quantconfig:
108
+ target: taming.modules.vqvae.quantize.SimVQ
109
+ params:
110
+ n_e: 1024
111
+ e_dim: 128
112
+ beta: 0.25
113
+ legacy: false
114
+ ckpt_path: pretrained/simvq_1k/epoch=49-step=250250.ckpt
115
+ ignore_keys: []
116
+ image_key: image
117
+ colorize_nlabels: null
118
+ monitor: null
119
+ learning_rate: 0.0001
120
+ warmup_epochs: 1.0
121
+ scheduler_type: None
122
+ accumulate_steps: 1
123
+ min_learning_rate: 0
124
+ use_ema: true
125
+ stage: null
126
+ data:
127
+ class_path: main.DataModuleFromConfig
128
+ init_args:
129
+ batch_size: 64
130
+ train:
131
+ target: taming.data.imagenet.ImageNetTrain
132
+ params:
133
+ config:
134
+ size: 64
135
+ subset: null
136
+ validation:
137
+ target: taming.data.imagenet.ImageNetValidation
138
+ params:
139
+ config:
140
+ size: 64
141
+ subset: null
142
+ test:
143
+ target: taming.data.imagenet.ImageNetValidation
144
+ params:
145
+ config:
146
+ size: 64
147
+ subset: null
148
+ wrap: false
149
+ num_workers: 8
150
+ optimizer: null
151
+ lr_scheduler: null
152
+ ckpt_path: null
simvq_1k/d_star_w_0.1/hparams.yaml ADDED
@@ -0,0 +1 @@
 
 
1
+ {}
simvq_1k/d_star_w_0/config.yaml ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # lightning.pytorch==2.2.5
2
+ seed_everything: 0
3
+ trainer:
4
+ accelerator: gpu
5
+ strategy: ddp_find_unused_parameters_true
6
+ devices: 4
7
+ num_nodes: 1
8
+ precision: 16-mixed
9
+ logger:
10
+ class_path: lightning.pytorch.loggers.TensorBoardLogger
11
+ init_args:
12
+ save_dir: vq_log/simvq_1k
13
+ name: null
14
+ version: d_star_w_0
15
+ log_graph: false
16
+ default_hp_metric: true
17
+ prefix: ''
18
+ sub_dir: null
19
+ comment: ''
20
+ purge_step: null
21
+ max_queue: 10
22
+ flush_secs: 120
23
+ filename_suffix: ''
24
+ callbacks:
25
+ - class_path: lightning.pytorch.callbacks.ModelCheckpoint
26
+ init_args:
27
+ dirpath: vq_log/simvq_1k/d_star_w_0/checkpoints
28
+ filename: null
29
+ monitor: null
30
+ verbose: false
31
+ save_last: null
32
+ save_top_k: -1
33
+ save_weights_only: false
34
+ mode: min
35
+ auto_insert_metric_name: true
36
+ every_n_train_steps: null
37
+ train_time_interval: null
38
+ every_n_epochs: null
39
+ save_on_train_epoch_end: null
40
+ enable_version_counter: true
41
+ - class_path: lightning.pytorch.callbacks.LearningRateMonitor
42
+ init_args:
43
+ logging_interval: step
44
+ log_momentum: false
45
+ log_weight_decay: false
46
+ fast_dev_run: false
47
+ max_epochs: 50
48
+ min_epochs: null
49
+ max_steps: -1
50
+ min_steps: null
51
+ max_time: null
52
+ limit_train_batches: null
53
+ limit_val_batches: null
54
+ limit_test_batches: null
55
+ limit_predict_batches: null
56
+ overfit_batches: 0.0
57
+ val_check_interval: null
58
+ check_val_every_n_epoch: 1
59
+ num_sanity_val_steps: 0
60
+ log_every_n_steps: 100
61
+ enable_checkpointing: null
62
+ enable_progress_bar: null
63
+ enable_model_summary: null
64
+ accumulate_grad_batches: 1
65
+ gradient_clip_val: null
66
+ gradient_clip_algorithm: null
67
+ deterministic: null
68
+ benchmark: null
69
+ inference_mode: true
70
+ use_distributed_sampler: true
71
+ profiler: null
72
+ detect_anomaly: false
73
+ barebones: false
74
+ plugins: null
75
+ sync_batchnorm: false
76
+ reload_dataloaders_every_n_epochs: 0
77
+ default_root_dir: null
78
+ model:
79
+ class_path: taming.models.vq.VQModel
80
+ init_args:
81
+ ddconfig:
82
+ double_z: false
83
+ z_channels: 128
84
+ resolution: 128
85
+ in_channels: 3
86
+ out_ch: 3
87
+ ch: 128
88
+ ch_mult:
89
+ - 1
90
+ - 2
91
+ - 2
92
+ - 4
93
+ num_res_blocks: 2
94
+ lossconfig:
95
+ target: taming.modules.losses.vqperceptual.VQLPIPSWithDiscriminator
96
+ params:
97
+ disc_conditional: false
98
+ disc_in_channels: 3
99
+ disc_start: 1000
100
+ disc_weight: 0.8
101
+ d_star_weight: 0
102
+ gen_loss_weight: 0.1
103
+ codebook_weight: 0.1
104
+ commit_weight: 1.0
105
+ codebook_enlarge_ratio: 0
106
+ codebook_enlarge_steps: 2000
107
+ quantconfig:
108
+ target: taming.modules.vqvae.quantize.SimVQ
109
+ params:
110
+ n_e: 1024
111
+ e_dim: 128
112
+ beta: 0.25
113
+ legacy: false
114
+ ckpt_path: pretrained/simvq_1k/epoch=49-step=250250.ckpt
115
+ ignore_keys: []
116
+ image_key: image
117
+ colorize_nlabels: null
118
+ monitor: null
119
+ learning_rate: 0.0001
120
+ warmup_epochs: 1.0
121
+ scheduler_type: None
122
+ accumulate_steps: 1
123
+ min_learning_rate: 0
124
+ use_ema: true
125
+ stage: null
126
+ data:
127
+ class_path: main.DataModuleFromConfig
128
+ init_args:
129
+ batch_size: 64
130
+ train:
131
+ target: taming.data.imagenet.ImageNetTrain
132
+ params:
133
+ config:
134
+ size: 64
135
+ subset: null
136
+ validation:
137
+ target: taming.data.imagenet.ImageNetValidation
138
+ params:
139
+ config:
140
+ size: 64
141
+ subset: null
142
+ test:
143
+ target: taming.data.imagenet.ImageNetValidation
144
+ params:
145
+ config:
146
+ size: 64
147
+ subset: null
148
+ wrap: false
149
+ num_workers: 8
150
+ optimizer: null
151
+ lr_scheduler: null
152
+ ckpt_path: null
simvq_1k/d_star_w_0/hparams.yaml ADDED
@@ -0,0 +1 @@
 
 
1
+ {}
simvq_1k/d_star_w_1/config.yaml ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # lightning.pytorch==2.2.5
2
+ seed_everything: 0
3
+ trainer:
4
+ accelerator: gpu
5
+ strategy: ddp_find_unused_parameters_true
6
+ devices: 4
7
+ num_nodes: 1
8
+ precision: 16-mixed
9
+ logger:
10
+ class_path: lightning.pytorch.loggers.TensorBoardLogger
11
+ init_args:
12
+ save_dir: vq_log/simvq_1k
13
+ name: null
14
+ version: d_star_w_1
15
+ log_graph: false
16
+ default_hp_metric: true
17
+ prefix: ''
18
+ sub_dir: null
19
+ comment: ''
20
+ purge_step: null
21
+ max_queue: 10
22
+ flush_secs: 120
23
+ filename_suffix: ''
24
+ callbacks:
25
+ - class_path: lightning.pytorch.callbacks.ModelCheckpoint
26
+ init_args:
27
+ dirpath: vq_log/simvq_1k/d_star_w_1/checkpoints
28
+ filename: null
29
+ monitor: null
30
+ verbose: false
31
+ save_last: null
32
+ save_top_k: -1
33
+ save_weights_only: false
34
+ mode: min
35
+ auto_insert_metric_name: true
36
+ every_n_train_steps: null
37
+ train_time_interval: null
38
+ every_n_epochs: null
39
+ save_on_train_epoch_end: null
40
+ enable_version_counter: true
41
+ - class_path: lightning.pytorch.callbacks.LearningRateMonitor
42
+ init_args:
43
+ logging_interval: step
44
+ log_momentum: false
45
+ log_weight_decay: false
46
+ fast_dev_run: false
47
+ max_epochs: 50
48
+ min_epochs: null
49
+ max_steps: -1
50
+ min_steps: null
51
+ max_time: null
52
+ limit_train_batches: null
53
+ limit_val_batches: null
54
+ limit_test_batches: null
55
+ limit_predict_batches: null
56
+ overfit_batches: 0.0
57
+ val_check_interval: null
58
+ check_val_every_n_epoch: 1
59
+ num_sanity_val_steps: 0
60
+ log_every_n_steps: 100
61
+ enable_checkpointing: null
62
+ enable_progress_bar: null
63
+ enable_model_summary: null
64
+ accumulate_grad_batches: 1
65
+ gradient_clip_val: null
66
+ gradient_clip_algorithm: null
67
+ deterministic: null
68
+ benchmark: null
69
+ inference_mode: true
70
+ use_distributed_sampler: true
71
+ profiler: null
72
+ detect_anomaly: false
73
+ barebones: false
74
+ plugins: null
75
+ sync_batchnorm: false
76
+ reload_dataloaders_every_n_epochs: 0
77
+ default_root_dir: null
78
+ model:
79
+ class_path: taming.models.vq.VQModel
80
+ init_args:
81
+ ddconfig:
82
+ double_z: false
83
+ z_channels: 128
84
+ resolution: 128
85
+ in_channels: 3
86
+ out_ch: 3
87
+ ch: 128
88
+ ch_mult:
89
+ - 1
90
+ - 2
91
+ - 2
92
+ - 4
93
+ num_res_blocks: 2
94
+ lossconfig:
95
+ target: taming.modules.losses.vqperceptual.VQLPIPSWithDiscriminator
96
+ params:
97
+ disc_conditional: false
98
+ disc_in_channels: 3
99
+ disc_start: 1000
100
+ disc_weight: 0.8
101
+ d_star_weight: 1
102
+ gen_loss_weight: 0.1
103
+ codebook_weight: 0.1
104
+ commit_weight: 1.0
105
+ codebook_enlarge_ratio: 0
106
+ codebook_enlarge_steps: 2000
107
+ quantconfig:
108
+ target: taming.modules.vqvae.quantize.SimVQ
109
+ params:
110
+ n_e: 1024
111
+ e_dim: 128
112
+ beta: 0.25
113
+ legacy: false
114
+ ckpt_path: pretrained/simvq_1k/epoch=49-step=250250.ckpt
115
+ ignore_keys: []
116
+ image_key: image
117
+ colorize_nlabels: null
118
+ monitor: null
119
+ learning_rate: 0.0001
120
+ warmup_epochs: 1.0
121
+ scheduler_type: None
122
+ accumulate_steps: 1
123
+ min_learning_rate: 0
124
+ use_ema: true
125
+ stage: null
126
+ data:
127
+ class_path: main.DataModuleFromConfig
128
+ init_args:
129
+ batch_size: 64
130
+ train:
131
+ target: taming.data.imagenet.ImageNetTrain
132
+ params:
133
+ config:
134
+ size: 64
135
+ subset: null
136
+ validation:
137
+ target: taming.data.imagenet.ImageNetValidation
138
+ params:
139
+ config:
140
+ size: 64
141
+ subset: null
142
+ test:
143
+ target: taming.data.imagenet.ImageNetValidation
144
+ params:
145
+ config:
146
+ size: 64
147
+ subset: null
148
+ wrap: false
149
+ num_workers: 8
150
+ optimizer: null
151
+ lr_scheduler: null
152
+ ckpt_path: null
simvq_1k/d_star_w_1/hparams.yaml ADDED
@@ -0,0 +1 @@
 
 
1
+ {}
simvq_262k/d_star_w_0.1/config.yaml ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # lightning.pytorch==2.2.5
2
+ seed_everything: 0
3
+ trainer:
4
+ accelerator: gpu
5
+ strategy: ddp_find_unused_parameters_true
6
+ devices: 4
7
+ num_nodes: 1
8
+ precision: 16-mixed
9
+ logger:
10
+ class_path: lightning.pytorch.loggers.TensorBoardLogger
11
+ init_args:
12
+ save_dir: vq_log/simvq_262k
13
+ name: null
14
+ version: d_star_w_0.1
15
+ log_graph: false
16
+ default_hp_metric: true
17
+ prefix: ''
18
+ sub_dir: null
19
+ comment: ''
20
+ purge_step: null
21
+ max_queue: 10
22
+ flush_secs: 120
23
+ filename_suffix: ''
24
+ callbacks:
25
+ - class_path: lightning.pytorch.callbacks.ModelCheckpoint
26
+ init_args:
27
+ dirpath: vq_log/simvq_262k/d_star_w_0.1/checkpoints
28
+ filename: null
29
+ monitor: null
30
+ verbose: false
31
+ save_last: null
32
+ save_top_k: -1
33
+ save_weights_only: false
34
+ mode: min
35
+ auto_insert_metric_name: true
36
+ every_n_train_steps: null
37
+ train_time_interval: null
38
+ every_n_epochs: null
39
+ save_on_train_epoch_end: null
40
+ enable_version_counter: true
41
+ - class_path: lightning.pytorch.callbacks.LearningRateMonitor
42
+ init_args:
43
+ logging_interval: step
44
+ log_momentum: false
45
+ log_weight_decay: false
46
+ fast_dev_run: false
47
+ max_epochs: 50
48
+ min_epochs: null
49
+ max_steps: -1
50
+ min_steps: null
51
+ max_time: null
52
+ limit_train_batches: null
53
+ limit_val_batches: null
54
+ limit_test_batches: null
55
+ limit_predict_batches: null
56
+ overfit_batches: 0.0
57
+ val_check_interval: null
58
+ check_val_every_n_epoch: 1
59
+ num_sanity_val_steps: 0
60
+ log_every_n_steps: 100
61
+ enable_checkpointing: null
62
+ enable_progress_bar: null
63
+ enable_model_summary: null
64
+ accumulate_grad_batches: 1
65
+ gradient_clip_val: null
66
+ gradient_clip_algorithm: null
67
+ deterministic: null
68
+ benchmark: null
69
+ inference_mode: true
70
+ use_distributed_sampler: true
71
+ profiler: null
72
+ detect_anomaly: false
73
+ barebones: false
74
+ plugins: null
75
+ sync_batchnorm: false
76
+ reload_dataloaders_every_n_epochs: 0
77
+ default_root_dir: null
78
+ model:
79
+ class_path: taming.models.vq.VQModel
80
+ init_args:
81
+ ddconfig:
82
+ double_z: false
83
+ z_channels: 128
84
+ resolution: 128
85
+ in_channels: 3
86
+ out_ch: 3
87
+ ch: 128
88
+ ch_mult:
89
+ - 1
90
+ - 2
91
+ - 2
92
+ - 4
93
+ num_res_blocks: 2
94
+ lossconfig:
95
+ target: taming.modules.losses.vqperceptual.VQLPIPSWithDiscriminator
96
+ params:
97
+ disc_conditional: false
98
+ disc_in_channels: 3
99
+ disc_start: 1000
100
+ disc_weight: 0.8
101
+ d_star_weight: 0.1
102
+ gen_loss_weight: 0.1
103
+ codebook_weight: 0.1
104
+ commit_weight: 1.0
105
+ codebook_enlarge_ratio: 0
106
+ codebook_enlarge_steps: 2000
107
+ quantconfig:
108
+ target: taming.modules.vqvae.quantize.SimVQ
109
+ params:
110
+ n_e: 262144
111
+ e_dim: 128
112
+ beta: 0.25
113
+ legacy: false
114
+ ckpt_path: pretrained/simvq_262k/epoch=49-step=250250.ckpt
115
+ ignore_keys: []
116
+ image_key: image
117
+ colorize_nlabels: null
118
+ monitor: null
119
+ learning_rate: 0.0001
120
+ warmup_epochs: 1.0
121
+ scheduler_type: None
122
+ accumulate_steps: 1
123
+ min_learning_rate: 0
124
+ use_ema: true
125
+ stage: null
126
+ data:
127
+ class_path: main.DataModuleFromConfig
128
+ init_args:
129
+ batch_size: 64
130
+ train:
131
+ target: taming.data.imagenet.ImageNetTrain
132
+ params:
133
+ config:
134
+ size: 64
135
+ subset: null
136
+ validation:
137
+ target: taming.data.imagenet.ImageNetValidation
138
+ params:
139
+ config:
140
+ size: 64
141
+ subset: null
142
+ test:
143
+ target: taming.data.imagenet.ImageNetValidation
144
+ params:
145
+ config:
146
+ size: 64
147
+ subset: null
148
+ wrap: false
149
+ num_workers: 8
150
+ optimizer: null
151
+ lr_scheduler: null
152
+ ckpt_path: null
simvq_262k/d_star_w_0.1/hparams.yaml ADDED
@@ -0,0 +1 @@
 
 
1
+ {}
simvq_262k/d_star_w_0/config.yaml ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # lightning.pytorch==2.2.5
2
+ seed_everything: 0
3
+ trainer:
4
+ accelerator: gpu
5
+ strategy: ddp_find_unused_parameters_true
6
+ devices: 4
7
+ num_nodes: 1
8
+ precision: 16-mixed
9
+ logger:
10
+ class_path: lightning.pytorch.loggers.TensorBoardLogger
11
+ init_args:
12
+ save_dir: vq_log/simvq_262k
13
+ name: null
14
+ version: d_star_w_0
15
+ log_graph: false
16
+ default_hp_metric: true
17
+ prefix: ''
18
+ sub_dir: null
19
+ comment: ''
20
+ purge_step: null
21
+ max_queue: 10
22
+ flush_secs: 120
23
+ filename_suffix: ''
24
+ callbacks:
25
+ - class_path: lightning.pytorch.callbacks.ModelCheckpoint
26
+ init_args:
27
+ dirpath: vq_log/simvq_262k/d_star_w_0/checkpoints
28
+ filename: null
29
+ monitor: null
30
+ verbose: false
31
+ save_last: null
32
+ save_top_k: -1
33
+ save_weights_only: false
34
+ mode: min
35
+ auto_insert_metric_name: true
36
+ every_n_train_steps: null
37
+ train_time_interval: null
38
+ every_n_epochs: null
39
+ save_on_train_epoch_end: null
40
+ enable_version_counter: true
41
+ - class_path: lightning.pytorch.callbacks.LearningRateMonitor
42
+ init_args:
43
+ logging_interval: step
44
+ log_momentum: false
45
+ log_weight_decay: false
46
+ fast_dev_run: false
47
+ max_epochs: 50
48
+ min_epochs: null
49
+ max_steps: -1
50
+ min_steps: null
51
+ max_time: null
52
+ limit_train_batches: null
53
+ limit_val_batches: null
54
+ limit_test_batches: null
55
+ limit_predict_batches: null
56
+ overfit_batches: 0.0
57
+ val_check_interval: null
58
+ check_val_every_n_epoch: 1
59
+ num_sanity_val_steps: 0
60
+ log_every_n_steps: 100
61
+ enable_checkpointing: null
62
+ enable_progress_bar: null
63
+ enable_model_summary: null
64
+ accumulate_grad_batches: 1
65
+ gradient_clip_val: null
66
+ gradient_clip_algorithm: null
67
+ deterministic: null
68
+ benchmark: null
69
+ inference_mode: true
70
+ use_distributed_sampler: true
71
+ profiler: null
72
+ detect_anomaly: false
73
+ barebones: false
74
+ plugins: null
75
+ sync_batchnorm: false
76
+ reload_dataloaders_every_n_epochs: 0
77
+ default_root_dir: null
78
+ model:
79
+ class_path: taming.models.vq.VQModel
80
+ init_args:
81
+ ddconfig:
82
+ double_z: false
83
+ z_channels: 128
84
+ resolution: 128
85
+ in_channels: 3
86
+ out_ch: 3
87
+ ch: 128
88
+ ch_mult:
89
+ - 1
90
+ - 2
91
+ - 2
92
+ - 4
93
+ num_res_blocks: 2
94
+ lossconfig:
95
+ target: taming.modules.losses.vqperceptual.VQLPIPSWithDiscriminator
96
+ params:
97
+ disc_conditional: false
98
+ disc_in_channels: 3
99
+ disc_start: 1000
100
+ disc_weight: 0.8
101
+ d_star_weight: 0
102
+ gen_loss_weight: 0.1
103
+ codebook_weight: 0.1
104
+ commit_weight: 1.0
105
+ codebook_enlarge_ratio: 0
106
+ codebook_enlarge_steps: 2000
107
+ quantconfig:
108
+ target: taming.modules.vqvae.quantize.SimVQ
109
+ params:
110
+ n_e: 262144
111
+ e_dim: 128
112
+ beta: 0.25
113
+ legacy: false
114
+ ckpt_path: pretrained/simvq_262k/epoch=49-step=250250.ckpt
115
+ ignore_keys: []
116
+ image_key: image
117
+ colorize_nlabels: null
118
+ monitor: null
119
+ learning_rate: 0.0001
120
+ warmup_epochs: 1.0
121
+ scheduler_type: None
122
+ accumulate_steps: 1
123
+ min_learning_rate: 0
124
+ use_ema: true
125
+ stage: null
126
+ data:
127
+ class_path: main.DataModuleFromConfig
128
+ init_args:
129
+ batch_size: 64
130
+ train:
131
+ target: taming.data.imagenet.ImageNetTrain
132
+ params:
133
+ config:
134
+ size: 64
135
+ subset: null
136
+ validation:
137
+ target: taming.data.imagenet.ImageNetValidation
138
+ params:
139
+ config:
140
+ size: 64
141
+ subset: null
142
+ test:
143
+ target: taming.data.imagenet.ImageNetValidation
144
+ params:
145
+ config:
146
+ size: 64
147
+ subset: null
148
+ wrap: false
149
+ num_workers: 8
150
+ optimizer: null
151
+ lr_scheduler: null
152
+ ckpt_path: null
simvq_262k/d_star_w_0/hparams.yaml ADDED
@@ -0,0 +1 @@
 
 
1
+ {}
simvq_262k/d_star_w_1/config.yaml ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # lightning.pytorch==2.2.5
2
+ seed_everything: 0
3
+ trainer:
4
+ accelerator: gpu
5
+ strategy: ddp_find_unused_parameters_true
6
+ devices: 4
7
+ num_nodes: 1
8
+ precision: 16-mixed
9
+ logger:
10
+ class_path: lightning.pytorch.loggers.TensorBoardLogger
11
+ init_args:
12
+ save_dir: vq_log/simvq_262k
13
+ name: null
14
+ version: d_star_w_1
15
+ log_graph: false
16
+ default_hp_metric: true
17
+ prefix: ''
18
+ sub_dir: null
19
+ comment: ''
20
+ purge_step: null
21
+ max_queue: 10
22
+ flush_secs: 120
23
+ filename_suffix: ''
24
+ callbacks:
25
+ - class_path: lightning.pytorch.callbacks.ModelCheckpoint
26
+ init_args:
27
+ dirpath: vq_log/simvq_262k/d_star_w_1/checkpoints
28
+ filename: null
29
+ monitor: null
30
+ verbose: false
31
+ save_last: null
32
+ save_top_k: -1
33
+ save_weights_only: false
34
+ mode: min
35
+ auto_insert_metric_name: true
36
+ every_n_train_steps: null
37
+ train_time_interval: null
38
+ every_n_epochs: null
39
+ save_on_train_epoch_end: null
40
+ enable_version_counter: true
41
+ - class_path: lightning.pytorch.callbacks.LearningRateMonitor
42
+ init_args:
43
+ logging_interval: step
44
+ log_momentum: false
45
+ log_weight_decay: false
46
+ fast_dev_run: false
47
+ max_epochs: 50
48
+ min_epochs: null
49
+ max_steps: -1
50
+ min_steps: null
51
+ max_time: null
52
+ limit_train_batches: null
53
+ limit_val_batches: null
54
+ limit_test_batches: null
55
+ limit_predict_batches: null
56
+ overfit_batches: 0.0
57
+ val_check_interval: null
58
+ check_val_every_n_epoch: 1
59
+ num_sanity_val_steps: 0
60
+ log_every_n_steps: 100
61
+ enable_checkpointing: null
62
+ enable_progress_bar: null
63
+ enable_model_summary: null
64
+ accumulate_grad_batches: 1
65
+ gradient_clip_val: null
66
+ gradient_clip_algorithm: null
67
+ deterministic: null
68
+ benchmark: null
69
+ inference_mode: true
70
+ use_distributed_sampler: true
71
+ profiler: null
72
+ detect_anomaly: false
73
+ barebones: false
74
+ plugins: null
75
+ sync_batchnorm: false
76
+ reload_dataloaders_every_n_epochs: 0
77
+ default_root_dir: null
78
+ model:
79
+ class_path: taming.models.vq.VQModel
80
+ init_args:
81
+ ddconfig:
82
+ double_z: false
83
+ z_channels: 128
84
+ resolution: 128
85
+ in_channels: 3
86
+ out_ch: 3
87
+ ch: 128
88
+ ch_mult:
89
+ - 1
90
+ - 2
91
+ - 2
92
+ - 4
93
+ num_res_blocks: 2
94
+ lossconfig:
95
+ target: taming.modules.losses.vqperceptual.VQLPIPSWithDiscriminator
96
+ params:
97
+ disc_conditional: false
98
+ disc_in_channels: 3
99
+ disc_start: 1000
100
+ disc_weight: 0.8
101
+ d_star_weight: 1
102
+ gen_loss_weight: 0.1
103
+ codebook_weight: 0.1
104
+ commit_weight: 1.0
105
+ codebook_enlarge_ratio: 0
106
+ codebook_enlarge_steps: 2000
107
+ quantconfig:
108
+ target: taming.modules.vqvae.quantize.SimVQ
109
+ params:
110
+ n_e: 262144
111
+ e_dim: 128
112
+ beta: 0.25
113
+ legacy: false
114
+ ckpt_path: pretrained/simvq_262k/epoch=49-step=250250.ckpt
115
+ ignore_keys: []
116
+ image_key: image
117
+ colorize_nlabels: null
118
+ monitor: null
119
+ learning_rate: 0.0001
120
+ warmup_epochs: 1.0
121
+ scheduler_type: None
122
+ accumulate_steps: 1
123
+ min_learning_rate: 0
124
+ use_ema: true
125
+ stage: null
126
+ data:
127
+ class_path: main.DataModuleFromConfig
128
+ init_args:
129
+ batch_size: 64
130
+ train:
131
+ target: taming.data.imagenet.ImageNetTrain
132
+ params:
133
+ config:
134
+ size: 64
135
+ subset: null
136
+ validation:
137
+ target: taming.data.imagenet.ImageNetValidation
138
+ params:
139
+ config:
140
+ size: 64
141
+ subset: null
142
+ test:
143
+ target: taming.data.imagenet.ImageNetValidation
144
+ params:
145
+ config:
146
+ size: 64
147
+ subset: null
148
+ wrap: false
149
+ num_workers: 8
150
+ optimizer: null
151
+ lr_scheduler: null
152
+ ckpt_path: null
simvq_262k/d_star_w_1/hparams.yaml ADDED
@@ -0,0 +1 @@
 
 
1
+ {}