# Provenance (Hugging Face page residue, commented out so the script stays valid shell):
# cafa6 / train_slurm.sh — uploaded by tgueloglu via huggingface_hub, commit 9c2bea0 (verified)
# NOTE(review): sbatch expects the '#!/bin/bash' shebang on the first line; if this
# header causes submission issues, delete it entirely.
#!/bin/bash
#SBATCH --job-name=gogpt-train
#SBATCH --output=logs/train_%j.out
#SBATCH --error=logs/train_%j.err
#SBATCH --nodes=2
#SBATCH --ntasks-per-node=1
#SBATCH --gres=gpu:1
#SBATCH --cpus-per-task=6
#SBATCH --mem=200G
#SBATCH --time=7-00:00:00

# Launch distributed gogpt training under SLURM: 2 nodes x 1 GPU, one task per
# node, Lightning/Hydra-style overrides passed on the srun command line.
set -euo pipefail

# NOTE: logs/ must exist BEFORE sbatch submission for --output/--error of THIS
# job to be written; this mkdir only guarantees it for subsequent runs.
mkdir -p logs

# Activate environment.
# NOTE(review): modules are usually loaded before activating a venv — confirm
# this order is intentional for your cluster's module system.
source /home/guloglut/venv/gogpt/bin/activate
module load python/3.11 gcc arrow/21.0.0

echo "Starting training on $(hostname) with ${CUDA_VISIBLE_DEVICES:-<unset>}"
echo "Job ID: $SLURM_JOB_ID"

cd /home/guloglut/models/gogpt-dev || exit 1

# Per-run artifact directory: <base>/<experiment>/<timestamp>.
# Fix: the original used "\${experiment_name}" — the escaped '$' put the
# literal string '\${experiment_name}' into the path, and the variable was
# never defined. Define it here; keep in sync with the 'experiment=' override.
EXPERIMENT_NAME="default"
TIMESTAMP=$(date +'%Y-%m-%d_%H-%M-%S')
LOG_DIR="/home/guloglut/scratch/artifacts/training/${EXPERIMENT_NAME}/${TIMESTAMP}"
PREPROCESSED_PATH="/home/guloglut/models/gogpt-dev/artifacts/preprocessed/cafa6_processed_propagated"
mkdir -p "$LOG_DIR"

# Run training — quote every expansion so empty/odd values can't word-split.
srun python scripts/train.py "experiment=${EXPERIMENT_NAME}" \
  "training.devices=${SLURM_NTASKS_PER_NODE}" \
  "training.num_nodes=${SLURM_NNODES}" \
  training.strategy=ddp_find_unused_parameters_true \
  "data.preprocessed_path=${PREPROCESSED_PATH}" \
  "hydra.run.dir=${LOG_DIR}"