# File: mimc_rl/config/eval_config.yaml
# NOTE(review): the three lines below were page-scrape residue (author byline,
# commit message, commit hash) pasted above the YAML body; kept here as a
# comment so the document parses as valid YAML.
#   author: wangyanhui666 — commit: "fine tune decoder with mask" (9cf79cf)
---
# Evaluation configuration for the MAGE-ViT masked image compression model
# (variable-bit-rate lossless evaluation run).

# --- Model & datasets ---
model: mage_vit_base_patch16
dataset: "imagenet"
dataset_path: '/home/t2vg-a100-G4-10/project/qyp/datasets/imagenet'
kodak_path: '/home/t2vg-a100-G4-10/project/qyp/datasets/kodak/'
eval_path: ''  # empty string — presumably "no separate eval path"; confirm against the loader

# --- Data loading & optimization schedule ---
batch_size: 64  # Batch size per GPU
test_batch_size: 200
num_workers: 4
epochs: 400
blr: 2.e-4  # 1.5e-4 # base learning rate: absolute_lr = base_lr * total_batch_size / 256
min_lr: 5.e-6
accum_iter: 1  # Accumulate gradient iterations (for increasing the effective batch size under memory constraints)
input_size: 256
weight_decay: 0.05
warmup_epochs: 20

# --- Mask-ratio sampling ---
# NOTE(review): mu/std presumably parameterize the mask-ratio sampling
# distribution, clipped to [min, max] — confirm in the training code.
mask_ratio_min: 0.5
mask_ratio_max: 0.9
mask_ratio_mu: 0.55
mask_ratio_std: 0.25
grad_clip: 3.0

# --- Runtime ---
device: "cuda"
distributed: true  # normalized from "True": lowercase is the canonical YAML boolean
seed: 0
resume: '/home/t2vg-a100-G4-10/project/qyp/lossless/checkpoint-330.pth' # Path to the checkpoint to resume from
start_epoch: 0
pin_mem: true
save: true

# --- Experiment naming & output paths ---
name: "exp"
exp_name: "MIM_vbr_eval_lossless"
root: "/home/t2vg-a100-G4-10/project/qyp/mimc_rope"
output_dir: "/home/t2vg-a100-G4-10/project/qyp/mimc_rope/MIM_vbr_eval_lossless" # path where to save, empty for no saving
log_dir: "/home/t2vg-a100-G4-10/project/qyp/mimc_rope/MIM_vbr_eval_lossless/logs_eval" # path where to tensorboard log
# NOTE(review): "strart_epoch" looks like a typo duplicate of "start_epoch"
# above. Kept (value unchanged) in case a consumer reads the misspelled key —
# confirm against the code that loads this config, then delete.
strart_epoch: 0
cuda: true