# (Hugging Face upload residue — not part of the script; kept as a comment)
# Uploaded by Yaning1001 using upload-large-folder tool, commit 54f7697 (verified)
#!/bin/bash
# Launch a batch of distributed training runs (torchrun, 3 processes / GPUs each).
#
# Phase 1 launches six runs in parallel in the background. Each background job
# gets a UNIQUE --master_port and a UNIQUE log file — the original script sent
# eight different jobs to the same log_3.out (clobbering each other's output)
# and reused --master_port=22231 for three concurrent jobs, which makes the
# later rendezvous attempts fail.
#
# Later phases (train_ftp.py, LLama3.2-1B, GPT-2) run sequentially in the
# foreground. Their ports are also kept globally unique so a foreground run
# can never collide with a still-running background job on the same port.
set -u  # fail loudly on unset variables

# ---------------------------------------------------------------------------
# Phase 1 (background, parallel): train_deep_wandb.py / train_deep_hop.py
# NOTE(review): these jobs pair up on GPU sets 1,2,3 and 5,6,7, so two runs
# share each GPU set concurrently — confirm the GPUs have memory for both,
# or stagger the launches.
# ---------------------------------------------------------------------------
CUDA_VISIBLE_DEVICES=1,2,3 torchrun --nproc_per_node=3 --master_port=22224 train_deep_wandb.py --perturbation reverse_full --train_set 10M --batch_size 3 --epoch 3 --seed 0 > log_1.out 2>&1 &
CUDA_VISIBLE_DEVICES=5,6,7 torchrun --nproc_per_node=3 --master_port=22225 train_deep_wandb.py --perturbation reverse_partial --train_set 10M --batch_size 3 --epoch 3 --seed 0 > log_2.out 2>&1 &
CUDA_VISIBLE_DEVICES=1,2,3 torchrun --nproc_per_node=3 --master_port=22226 train_deep_wandb.py --perturbation reverse_control --train_set 10M --batch_size 3 --epoch 3 --seed 0 > log_3.out 2>&1 &
CUDA_VISIBLE_DEVICES=5,6,7 torchrun --nproc_per_node=3 --master_port=22227 train_deep_hop.py --perturbation hop_control --train_set 10M --batch_size 3 --epoch 3 --seed 0 > log_4.out 2>&1 &
CUDA_VISIBLE_DEVICES=1,2,3 torchrun --nproc_per_node=3 --master_port=22228 train_deep_hop.py --perturbation hop_words4 --train_set 10M --batch_size 3 --epoch 3 --seed 0 > log_5.out 2>&1 &
CUDA_VISIBLE_DEVICES=5,6,7 torchrun --nproc_per_node=3 --master_port=22229 train_deep_hop.py --perturbation hop_tokens4 --train_set 10M --batch_size 3 --epoch 3 --seed 0 > log_6.out 2>&1 &

# ---------------------------------------------------------------------------
# Phase 1b (background, parallel): shuffle perturbations
# (originally three of these shared port 22231 — now unique)
# ---------------------------------------------------------------------------
CUDA_VISIBLE_DEVICES=2,3,4 torchrun --nproc_per_node=3 --master_port=22230 train_deep_wandb.py --perturbation shuffle_deterministic21 --train_set 10M --batch_size 3 --epoch 3 --seed 0 > log_7.out 2>&1 &
CUDA_VISIBLE_DEVICES=5,6,7 torchrun --nproc_per_node=3 --master_port=22231 train_deep_wandb.py --perturbation shuffle_deterministic57 --train_set 10M --batch_size 3 --epoch 3 --seed 0 > log_8.out 2>&1 &
CUDA_VISIBLE_DEVICES=5,6,7 torchrun --nproc_per_node=3 --master_port=22232 train_deep_wandb.py --perturbation shuffle_deterministic84 --train_set 10M --batch_size 3 --epoch 3 --seed 0 > log_9.out 2>&1 &
CUDA_VISIBLE_DEVICES=2,3,4 torchrun --nproc_per_node=3 --master_port=22233 train_deep_wandb.py --perturbation shuffle_even_odd --train_set 10M --batch_size 3 --epoch 3 --seed 0 > log_10.out 2>&1 &

# ---------------------------------------------------------------------------
# Phase 2 (foreground, sequential): train_ftp.py
# Ports start at 22234 to avoid clashing with the background jobs above,
# which may still be running.
# ---------------------------------------------------------------------------
CUDA_VISIBLE_DEVICES=0,1,2 torchrun --nproc_per_node=3 --master_port=22234 train_ftp.py --perturbation reverse_full --train_set 10M --batch_size 3 --epoch 3 --seed 0
CUDA_VISIBLE_DEVICES=3,4,5 torchrun --nproc_per_node=3 --master_port=22235 train_ftp.py --perturbation reverse_partial --train_set 10M --batch_size 3 --epoch 3 --seed 0
CUDA_VISIBLE_DEVICES=3,4,5 torchrun --nproc_per_node=3 --master_port=22236 train_ftp.py --perturbation reverse_control --train_set 10M --batch_size 3 --epoch 3 --seed 0
CUDA_VISIBLE_DEVICES=3,4,5 torchrun --nproc_per_node=3 --master_port=22237 train_ftp.py --perturbation shuffle_deterministic84 --train_set 10M --batch_size 3 --epoch 3 --seed 0
CUDA_VISIBLE_DEVICES=1,2,3 torchrun --nproc_per_node=3 --master_port=22238 train_ftp.py --perturbation shuffle_nondeterministic --train_set 10M --batch_size 3 --epoch 3 --seed 0

# ---------------------------------------------------------------------------
# Phase 3 (foreground, sequential): LLama3.2-1B
# ---------------------------------------------------------------------------
CUDA_VISIBLE_DEVICES=0,1,2 torchrun --nproc_per_node=3 --master_port=22239 train_llama_1B.py --perturbation reverse_control --train_set 10M --batch_size 3 --epoch 3 --seed 0
CUDA_VISIBLE_DEVICES=3,4,5 torchrun --nproc_per_node=3 --master_port=22240 train_llama_1B.py --perturbation reverse_full --train_set 10M --batch_size 3 --epoch 3 --seed 0

# ---------------------------------------------------------------------------
# Phase 4 (foreground, sequential): GPT-2
# ---------------------------------------------------------------------------
CUDA_VISIBLE_DEVICES=0,1,2 torchrun --nproc_per_node=3 --master_port=22241 train_gpt2.py --perturbation reverse_control --train_set 10M --batch_size 3 --epoch 3 --seed 0
CUDA_VISIBLE_DEVICES=3,4,5 torchrun --nproc_per_node=3 --master_port=22242 train_gpt2.py --perturbation reverse_full --train_set 10M --batch_size 3 --epoch 3 --seed 0
CUDA_VISIBLE_DEVICES=0,1,2 torchrun --nproc_per_node=3 --master_port=22243 train_gpt2.py --perturbation reverse_partial --train_set 10M --batch_size 3 --epoch 3 --seed 0

# Barrier: wait for every background job launched above to finish.
wait
echo "All training tasks have completed."