#!/usr/bin/env bash
# GMFlow training pipeline: four sequential stages
# (chairs -> things -> sintel -> kitti), each stage resuming from the
# final checkpoint written by the previous one.

# Fail fast. Without `pipefail`, each stage's `python ... | tee` pipeline
# reports tee's exit status (always 0), so a crashed stage would silently
# let the next stage start and then fail on a missing --resume checkpoint.
set -euo pipefail

# Number of GPUs passed to torch.distributed.launch for every stage.
readonly NUM_GPUS=4
|
| |
|
| |
|
# Stage 1: train GMFlow from scratch on FlyingChairs.
# All expansions are quoted (ShellCheck SC2086) so the commands stay correct
# even if the checkpoint path ever contains spaces or glob characters.
# NOTE(review): torch.distributed.launch is deprecated upstream in favor of
# torchrun; kept as-is because main.py's `--launcher pytorch` presumably
# expects the --local_rank argv it injects — confirm before migrating.
CHECKPOINT_DIR=checkpoints/chairs-gmflow && \
mkdir -p "${CHECKPOINT_DIR}" && \
python -m torch.distributed.launch --nproc_per_node="${NUM_GPUS}" --master_port=9989 main.py \
--launcher pytorch \
--checkpoint_dir "${CHECKPOINT_DIR}" \
--batch_size 16 \
--val_dataset chairs sintel kitti \
--lr 4e-4 \
--image_size 384 512 \
--padding_factor 16 \
--upsample_factor 8 \
--with_speed_metric \
--val_freq 10000 \
--save_ckpt_freq 10000 \
--num_steps 100000 \
2>&1 | tee -a "${CHECKPOINT_DIR}/train.log"
|
| |
|
| |
|
# Stage 2: fine-tune on FlyingThings3D, resuming from the final chairs
# checkpoint (step_100000.pth). Wider crops (384x768), halved LR, longer run.
# Expansions quoted per ShellCheck SC2086.
CHECKPOINT_DIR=checkpoints/things-gmflow && \
mkdir -p "${CHECKPOINT_DIR}" && \
python -m torch.distributed.launch --nproc_per_node="${NUM_GPUS}" --master_port=9989 main.py \
--launcher pytorch \
--checkpoint_dir "${CHECKPOINT_DIR}" \
--resume checkpoints/chairs-gmflow/step_100000.pth \
--stage things \
--batch_size 8 \
--val_dataset things sintel kitti \
--lr 2e-4 \
--image_size 384 768 \
--padding_factor 16 \
--upsample_factor 8 \
--with_speed_metric \
--val_freq 40000 \
--save_ckpt_freq 50000 \
--num_steps 800000 \
2>&1 | tee -a "${CHECKPOINT_DIR}/train.log"
|
| |
|
| |
|
# Stage 3: fine-tune on Sintel, resuming from the final things checkpoint
# (step_800000.pth). Crop size 320x896.
# Expansions quoted per ShellCheck SC2086.
CHECKPOINT_DIR=checkpoints/sintel-gmflow && \
mkdir -p "${CHECKPOINT_DIR}" && \
python -m torch.distributed.launch --nproc_per_node="${NUM_GPUS}" --master_port=9989 main.py \
--launcher pytorch \
--checkpoint_dir "${CHECKPOINT_DIR}" \
--resume checkpoints/things-gmflow/step_800000.pth \
--stage sintel \
--batch_size 8 \
--val_dataset sintel kitti \
--lr 2e-4 \
--image_size 320 896 \
--padding_factor 16 \
--upsample_factor 8 \
--with_speed_metric \
--val_freq 20000 \
--save_ckpt_freq 20000 \
--num_steps 200000 \
2>&1 | tee -a "${CHECKPOINT_DIR}/train.log"
|
| |
|
| |
|
# Stage 4 (final): fine-tune on KITTI, resuming from the final sintel
# checkpoint (step_200000.pth). Crop size 320x1152.
# Expansions quoted per ShellCheck SC2086.
CHECKPOINT_DIR=checkpoints/kitti-gmflow && \
mkdir -p "${CHECKPOINT_DIR}" && \
python -m torch.distributed.launch --nproc_per_node="${NUM_GPUS}" --master_port=9989 main.py \
--launcher pytorch \
--checkpoint_dir "${CHECKPOINT_DIR}" \
--resume checkpoints/sintel-gmflow/step_200000.pth \
--stage kitti \
--batch_size 8 \
--val_dataset kitti \
--lr 2e-4 \
--image_size 320 1152 \
--padding_factor 16 \
--upsample_factor 8 \
--with_speed_metric \
--val_freq 10000 \
--save_ckpt_freq 10000 \
--num_steps 100000 \
2>&1 | tee -a "${CHECKPOINT_DIR}/train.log"
|
| |
|
| |
|
| |
|
| |
|
| |
|
| |
|
| |
|
| |
|
| |
|
| |
|
| |
|
| |
|
| |
|
| |
|
| |
|
| |
|
| |
|
| |
|
| |
|
| |
|
| |
|
| |
|
| |
|