#!/bin/bash
#SBATCH --job-name=asda-fltr
#SBATCH --partition=a6000
#SBATCH --gres=gpu:1
#SBATCH --time=13-11:30:00
#SBATCH --mem=25000
#SBATCH --cpus-per-task=3
#SBATCH --output=./exp/gref_m10_tmp007_fine_nofltr_bs28.log

# SLURM job: ASDA training on RefCOCOg (umd split) with metric learning
# (hard-positive refinement, fine fusion, no filtering, batch size 28).
# Requires: conda env "asda", CUDA 11.8 module, repo checkout at the cd path below.

ml purge
ml load cuda/11.8

eval "$(conda shell.bash hook)"
conda activate asda

# Abort if the repo path is missing rather than training from the wrong cwd.
cd /data2/projects/chaeyun/ASDA || exit 1

# NCCL / CUDA tuning for this cluster's interconnect.
export NCCL_P2P_DISABLE=1
export NVIDIA_TF32_OVERRIDE=1
export NCCL_IB_TIMEOUT=100
export NCCL_IB_RETRY_CNT=15
export MASTER_PORT=2721

# Run hyperparameters — keep SAVENAME in sync with the #SBATCH --output name.
BS=28
SAVENAME=gref_m10_tmp007_fine_nofltr_bs28
MARGIN=10
TEMP=0.07
MODE=hardpos_only_refined
FILTER_THRES=0.99
FUSE_MODE=fine

## options
# gref_m10_tmp007_coarse_nofltr_bs28
# gref_m10_tmp007_coarse_fthr065_bs28
# gref_m10_tmp007_coarse_fthr050_bs28
# gref_m10_tmp007_fine_fthr065_bs28
# gref_m10_tmp007_fine_nofltr_bs28

# TRAIN
export CUDA_VISIBLE_DEVICES=0

# Build the argument list as an array so each flag/value survives quoting
# intact (an unquoted string expansion would be fragile — SC2086).
python_args=(
  --dataset refcocog
  --splitBy umd
  --ngpu 1 --batch_size "${BS}"
  --savename "${SAVENAME}" --time 17
  --metric_learning
  --margin_value "${MARGIN}"
  --filter_thres "${FILTER_THRES}"
  --temperature "${TEMP}"
  --metric_mode "${MODE}"
  --fuse_mode "${FUSE_MODE}"
)

python train_gref_sbert.py "${python_args[@]}"

# Reference commands from earlier runs:
# python train.py --dataset refcoco --splitBy unc --ngpu 1 --batch_size 36 --time 17 --savename refcoco_bs36_repro
# python train.py --dataset refcocog --splitBy umd --ngpu 1 --batch_size 28 --time 17 --savename gref_umd_bs28_repro
# python train.py --dataset refcocog --splitBy umd --ngpu 1 --batch_size 36 --time 17 --savename gref_umd_bs36_repro
# export CUDA_VISIBLE_DEVICES=0,1; python train.py --dataset refcocog --splitBy umd --ngpu 2 --batch_size 64 --time 17 --savename gref_umd_bs64_repro
# export CUDA_VISIBLE_DEVICES=0,1; python train.py --dataset refcocog --splitBy umd --ngpu 1 --batch_size 64 --time 17 --savename gref_umd_bs64_repro