File size: 5,092 Bytes
54f7697
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
#!/bin/bash
#
# Launch a batch of distributed LM training runs via torchrun (3 GPUs each).
#
# Layout:
#   * Section 1 (train_deep_wandb / train_deep_hop): jobs run CONCURRENTLY in
#     the background. Every job therefore gets a unique --master_port (torchrun
#     rendezvous) and its own log file — the original script reused port 22231
#     three times and wrote eight different jobs' output to the same log_3.out.
#   * Sections 2-4 (train_ftp / Llama-1B / GPT-2): jobs run SEQUENTIALLY in the
#     foreground, but they start while section-1 jobs may still be running, so
#     their ports must not collide with section 1 either.
#
# NOTE(review): several background jobs share CUDA_VISIBLE_DEVICES sets
# (e.g. 1,2,3 is used by three concurrent jobs). That matches the original
# script, but confirm the GPUs have enough memory for co-located runs, or
# stagger the launches. (Jobs were previously dispatched by hand across tmux
# sessions impo1-0 / impo2 / impo2-1 / impo3.)

set -uo pipefail  # error on unset vars / broken pipes; a failing run does not stop later runs

# ---------------------------------------------------------------------------
# Section 1: concurrent background jobs — unique ports 22224-22233,
# one log file per perturbation.
# ---------------------------------------------------------------------------

CUDA_VISIBLE_DEVICES=1,2,3 torchrun --nproc_per_node=3 --master_port=22224 train_deep_wandb.py --perturbation reverse_full    --train_set 10M --batch_size 3 --epoch 3 --seed 0 > log_reverse_full.out    2>&1 &
CUDA_VISIBLE_DEVICES=5,6,7 torchrun --nproc_per_node=3 --master_port=22225 train_deep_wandb.py --perturbation reverse_partial --train_set 10M --batch_size 3 --epoch 3 --seed 0 > log_reverse_partial.out 2>&1 &
CUDA_VISIBLE_DEVICES=1,2,3 torchrun --nproc_per_node=3 --master_port=22226 train_deep_wandb.py --perturbation reverse_control --train_set 10M --batch_size 3 --epoch 3 --seed 0 > log_reverse_control.out 2>&1 &

CUDA_VISIBLE_DEVICES=5,6,7 torchrun --nproc_per_node=3 --master_port=22227 train_deep_hop.py --perturbation hop_control --train_set 10M --batch_size 3 --epoch 3 --seed 0 > log_hop_control.out 2>&1 &
CUDA_VISIBLE_DEVICES=1,2,3 torchrun --nproc_per_node=3 --master_port=22228 train_deep_hop.py --perturbation hop_words4  --train_set 10M --batch_size 3 --epoch 3 --seed 0 > log_hop_words4.out  2>&1 &
CUDA_VISIBLE_DEVICES=5,6,7 torchrun --nproc_per_node=3 --master_port=22229 train_deep_hop.py --perturbation hop_tokens4 --train_set 10M --batch_size 3 --epoch 3 --seed 0 > log_hop_tokens4.out 2>&1 &

# Shuffle perturbations (ports 22231-22233 were all 22231 in the original).
CUDA_VISIBLE_DEVICES=2,3,4 torchrun --nproc_per_node=3 --master_port=22230 train_deep_wandb.py --perturbation shuffle_deterministic21 --train_set 10M --batch_size 3 --epoch 3 --seed 0 > log_shuffle_det21.out    2>&1 &
CUDA_VISIBLE_DEVICES=5,6,7 torchrun --nproc_per_node=3 --master_port=22231 train_deep_wandb.py --perturbation shuffle_deterministic57 --train_set 10M --batch_size 3 --epoch 3 --seed 0 > log_shuffle_det57.out    2>&1 &
CUDA_VISIBLE_DEVICES=5,6,7 torchrun --nproc_per_node=3 --master_port=22232 train_deep_wandb.py --perturbation shuffle_deterministic84 --train_set 10M --batch_size 3 --epoch 3 --seed 0 > log_shuffle_det84.out    2>&1 &
CUDA_VISIBLE_DEVICES=2,3,4 torchrun --nproc_per_node=3 --master_port=22233 train_deep_wandb.py --perturbation shuffle_even_odd       --train_set 10M --batch_size 3 --epoch 3 --seed 0 > log_shuffle_even_odd.out 2>&1 &

# ---------------------------------------------------------------------------
# Section 2: train_ftp — sequential foreground runs.
# Ports moved to 22240+ so they cannot clash with still-running section-1 jobs
# (the original reused 22229/22230 here).
# ---------------------------------------------------------------------------

CUDA_VISIBLE_DEVICES=0,1,2 torchrun --nproc_per_node=3 --master_port=22240 train_ftp.py --perturbation reverse_full             --train_set 10M --batch_size 3 --epoch 3 --seed 0
CUDA_VISIBLE_DEVICES=3,4,5 torchrun --nproc_per_node=3 --master_port=22241 train_ftp.py --perturbation reverse_partial          --train_set 10M --batch_size 3 --epoch 3 --seed 0
CUDA_VISIBLE_DEVICES=3,4,5 torchrun --nproc_per_node=3 --master_port=22242 train_ftp.py --perturbation reverse_control          --train_set 10M --batch_size 3 --epoch 3 --seed 0
CUDA_VISIBLE_DEVICES=3,4,5 torchrun --nproc_per_node=3 --master_port=22243 train_ftp.py --perturbation shuffle_deterministic84  --train_set 10M --batch_size 3 --epoch 3 --seed 0
CUDA_VISIBLE_DEVICES=1,2,3 torchrun --nproc_per_node=3 --master_port=22244 train_ftp.py --perturbation shuffle_nondeterministic --train_set 10M --batch_size 3 --epoch 3 --seed 0

# ---------------------------------------------------------------------------
# Section 3: Llama-3.2-1B — sequential foreground runs.
# ---------------------------------------------------------------------------

CUDA_VISIBLE_DEVICES=0,1,2 torchrun --nproc_per_node=3 --master_port=22245 train_llama_1B.py --perturbation reverse_control --train_set 10M --batch_size 3 --epoch 3 --seed 0
CUDA_VISIBLE_DEVICES=3,4,5 torchrun --nproc_per_node=3 --master_port=22246 train_llama_1B.py --perturbation reverse_full    --train_set 10M --batch_size 3 --epoch 3 --seed 0

# ---------------------------------------------------------------------------
# Section 4: GPT-2 — sequential foreground runs (original ports kept; they
# were already unique).
# ---------------------------------------------------------------------------

CUDA_VISIBLE_DEVICES=0,1,2 torchrun --nproc_per_node=3 --master_port=22235 train_gpt2.py --perturbation reverse_control --train_set 10M --batch_size 3 --epoch 3 --seed 0
CUDA_VISIBLE_DEVICES=3,4,5 torchrun --nproc_per_node=3 --master_port=22236 train_gpt2.py --perturbation reverse_full    --train_set 10M --batch_size 3 --epoch 3 --seed 0
CUDA_VISIBLE_DEVICES=0,1,2 torchrun --nproc_per_node=3 --master_port=22237 train_gpt2.py --perturbation reverse_partial --train_set 10M --batch_size 3 --epoch 3 --seed 0

# Barrier: wait for every section-1 background job to finish.
wait

echo "All training runs have completed."