#SBATCH --job-name=lvrz1
#SBATCH --partition=a6000
#SBATCH --gres=gpu:4
#SBATCH --time=13-11:30:00    # d-hh:mm:ss — max wall-clock time limit for the job
#SBATCH --mem=60000           # CPU memory size (MB)
#SBATCH --cpus-per-task=8     # CPU cores allocated per task
#SBATCH --output=./logs/rzom_m10_mg12_tmp007_4gpu_bs32_ang.log

# NOTE(review): SBATCH directives are only honored when a shebang (e.g. #!/bin/bash)
# precedes them — confirm one exists above this chunk.

# Reset loaded modules and pick the CUDA toolkit matching the cluster GPUs.
ml purge
ml load cuda/11.8

# Activate the conda environment for this run.
eval "$(conda shell.bash hook)"
conda activate risall

# Abort early if the project directory is missing, instead of launching
# training from whatever the current directory happens to be.
cd /data2/projects/chaeyun/LAVT-RIS/ || exit 1

# Disable NCCL peer-to-peer transfers (works around flaky P2P on some nodes)
# and force full-precision FP32 instead of TF32 math.
export NCCL_P2P_DISABLE=1
export NVIDIA_TF32_OVERRIDE=0
# ---- Run configuration (positional arguments) ----
# Usage: sbatch <this script> OUTPUT_DIR EXP_NAME MARGIN TEMP MODE MLW
GPUS=4            # number of GPUs; must match --gres=gpu:4 above
OUTPUT_DIR=$1     # directory where checkpoints/outputs are written
EXP_NAME=$2       # model_id / experiment name
MARGIN=$3         # metric-learning margin value
TEMP=$4           # contrastive temperature
MODE=$5           # metric_mode (e.g. hardpos_only)
MLW=$6            # metric loss weight
PORT=7852         # torchrun master port (keep unique per concurrent job)
# ---- TRAIN ----
# addzero is left at its default (none): zero-target cases are not included in MRaCL.
# Effective batch size = 4 GPUs x --batch-size 8 = 32 (matches "bs32" in the exp name).
# NOTE(review): --split test in a training launch looks suspicious — confirm
# train_refzom_angle.py expects this, or whether it should be the train split.
CUDA_VISIBLE_DEVICES=0,1,2,3 torchrun \
    --nproc_per_node="$GPUS" --master_port="$PORT" train_refzom_angle.py \
    --model lavt_one \
    --dataset ref-zom \
    --splitBy final \
    --split test \
    --output-dir "${OUTPUT_DIR}" \
    --model_id "${EXP_NAME}" \
    --batch-size 8 \
    --lr 0.00005 \
    --wd 1e-2 \
    --swin_type base \
    --pretrained_swin_weights ./pretrained_weights/swin_base_patch4_window12_384_22k.pth \
    --epochs 40 \
    --img_size 480 \
    --metric_learning \
    --margin_value "${MARGIN}" \
    --temperature "${TEMP}" \
    --metric_mode "${MODE}" \
    --metric_loss_weight "${MLW}" \
    --exclude_multiobj

# Original experiment: rzom_m10_mg12_tmp007_4gpu_bs32_ang
# Example invocation:
# sbatch ./scripts/baseline_refzom_angle.sh ./models/rzom_m10_mg12_tmp007_4gpu_bs32_ang rzom_m10_mg12_tmp007_4gpu_bs32_ang 12 0.07 hardpos_only 0.10