---
# Training configuration — adversarial (Generator/Discriminator) model with
# G-only pretraining and a ramped adversarial loss.
# NOTE(review): reconstructed from a whitespace-flattened source; section
# nesting (in particular whether EMA belongs under Training) should be
# confirmed against the code that consumes this config.

# Data loading.
Data:
  train_batch_size: 24
  val_batch_size: 8
  num_workers: 8
  prefetch_factor: 4
  dataset_type: S2_6b  # presumably a 6-band Sentinel-2 dataset — confirm with loader
  normalization: normalise_10k

# Model input / checkpoint behavior.
Model:
  in_bands: 6  # matches dataset_type S2_6b above
  continue_training: false
  load_checkpoint: false

# Trainer settings.
Training:
  # GPU device indices to train on.
  gpus:
    - 2
    - 3
  max_epochs: 9999  # effectively "run until stopped"
  val_check_interval: 1.0
  limit_val_batches: 250
  pretrain_g_only: true  # train generator alone first
  g_pretrain_steps: 15000
  adv_loss_ramp_steps: 2500  # steps over which adversarial loss is ramped in
  label_smoothing: true

# Exponential moving average of weights (currently disabled).
EMA:
  enabled: false
  decay: 0.999
  update_after_step: 0
  use_num_updates: true

# Loss weighting and metric parameters.
Losses:
  adv_loss_beta: 0.001
  adv_loss_schedule: cosine
  l1_weight: 1.0
  sam_weight: 0.05
  perceptual_weight: 0.1
  perceptual_metric: vgg
  tv_weight: 0.0  # total-variation term disabled
  max_val: 1.0  # value range used by image metrics (e.g. SSIM) — confirm
  ssim_win: 11

# Generator architecture.
Generator:
  model_type: rcab
  large_kernel_size: 9
  small_kernel_size: 3
  n_channels: 96
  n_blocks: 32
  scaling_factor: 8

# Discriminator architecture.
Discriminator:
  model_type: standard
  n_blocks: 8

# Optimizer learning rates.
Optimizers:
  optim_g_lr: 0.0001
  optim_d_lr: 0.0001

# LR warmup and plateau-style scheduling.
Schedulers:
  g_warmup_steps: 2500
  g_warmup_type: cosine
  metric: val_metrics/l1  # monitored metric name for the scheduler
  patience_g: 50
  patience_d: 50
  factor_g: 0.5
  factor_d: 0.5
  verbose: true

# Validation image logging.
Logging:
  num_val_images: 5