AlisonWen commited on
Commit
d3b34ec
·
verified ·
1 Parent(s): 14bc7be

Upload folder using huggingface_hub

Browse files
Files changed (37) hide show
  1. .gitignore +2 -0
  2. added_tokens.json +3 -0
  3. arguments.json +92 -0
  4. arguments.pkl +3 -0
  5. config.json +32 -0
  6. environ.txt +238 -0
  7. global_step302/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt +3 -0
  8. global_step302/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt +3 -0
  9. global_step302/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt +3 -0
  10. global_step302/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt +3 -0
  11. global_step302/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt +3 -0
  12. global_step302/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt +3 -0
  13. global_step302/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt +3 -0
  14. global_step302/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt +3 -0
  15. global_step302/zero_pp_rank_0_mp_rank_00_model_states.pt +3 -0
  16. global_step302/zero_pp_rank_1_mp_rank_00_model_states.pt +3 -0
  17. global_step302/zero_pp_rank_2_mp_rank_00_model_states.pt +3 -0
  18. global_step302/zero_pp_rank_3_mp_rank_00_model_states.pt +3 -0
  19. global_step302/zero_pp_rank_4_mp_rank_00_model_states.pt +3 -0
  20. global_step302/zero_pp_rank_5_mp_rank_00_model_states.pt +3 -0
  21. global_step302/zero_pp_rank_6_mp_rank_00_model_states.pt +3 -0
  22. global_step302/zero_pp_rank_7_mp_rank_00_model_states.pt +3 -0
  23. latest +1 -0
  24. pytorch_model.bin/pytorch_model-00001-of-00006.bin +3 -0
  25. pytorch_model.bin/pytorch_model-00002-of-00006.bin +3 -0
  26. pytorch_model.bin/pytorch_model-00003-of-00006.bin +3 -0
  27. pytorch_model.bin/pytorch_model-00004-of-00006.bin +3 -0
  28. pytorch_model.bin/pytorch_model-00005-of-00006.bin +3 -0
  29. pytorch_model.bin/pytorch_model-00006-of-00006.bin +3 -0
  30. pytorch_model.bin/pytorch_model.bin.index.json +3 -0
  31. special_tokens_map.json +30 -0
  32. stderr.log +92 -0
  33. stdout.log +0 -0
  34. tokenizer.json +0 -0
  35. tokenizer.model +3 -0
  36. tokenizer_config.json +56 -0
  37. zero_to_fp32.py +760 -0
.gitignore ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ script.sh
2
+ wandb/
added_tokens.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ {
2
+ "<pad>": 32000
3
+ }
arguments.json ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "actor_model_name_or_path": "AlisonWen/safe-rlhf-sft",
3
+ "reward_model_name_or_path": "AlisonWen/safe-rlhf-rm",
4
+ "reward_critic_model_name_or_path": "AlisonWen/safe-rlhf-rm",
5
+ "max_length": 512,
6
+ "trust_remote_code": true,
7
+ "train_datasets": [
8
+ [
9
+ "PKU-SafeRLHF/train",
10
+ {
11
+ "proportion": 1.0
12
+ }
13
+ ]
14
+ ],
15
+ "ptx_datasets": [
16
+ [
17
+ "alpaca",
18
+ {
19
+ "proportion": 1.0
20
+ }
21
+ ]
22
+ ],
23
+ "eval_datasets": null,
24
+ "kl_coeff": 0.02,
25
+ "clip_range_ratio": 0.2,
26
+ "clip_range_score": 50.0,
27
+ "clip_range_value": 5.0,
28
+ "ptx_coeff": 16.0,
29
+ "epochs": 1,
30
+ "update_iters": 1,
31
+ "per_device_prompt_batch_size": 16,
32
+ "per_device_train_batch_size": 16,
33
+ "per_device_eval_batch_size": 16,
34
+ "gradient_accumulation_steps": 2,
35
+ "actor_lr": 1e-05,
36
+ "actor_weight_decay": 0.01,
37
+ "actor_lr_scheduler_type": "cosine",
38
+ "actor_lr_warmup_ratio": 0.03,
39
+ "actor_gradient_checkpointing": true,
40
+ "critic_lr": 5e-06,
41
+ "critic_weight_decay": 0.0,
42
+ "critic_lr_scheduler_type": "constant",
43
+ "critic_lr_warmup_ratio": 0.03,
44
+ "critic_gradient_checkpointing": true,
45
+ "normalize_reward": false,
46
+ "seed": 100,
47
+ "fp16": false,
48
+ "bf16": true,
49
+ "tf32": true,
50
+ "pth": 1.0,
51
+ "pth_dynamic_adjust": null,
52
+ "pth_meta_lr": 0.0,
53
+ "meta_pth_scale": 10.0,
54
+ "meta_pth_update_interval": 10,
55
+ "linear_pth": null,
56
+ "linear_pth_init": 0.3,
57
+ "linear_pth_end": 1.0,
58
+ "random_reward_poisoning": null,
59
+ "attack_prob": null,
60
+ "attack_strength": null,
61
+ "uniform_random_poisoning": null,
62
+ "malicious_babble_attack": null,
63
+ "dynamic_pth_rate": 0.0,
64
+ "temperature": 1.0,
65
+ "top_p": 1.0,
66
+ "num_return_sequences": 1,
67
+ "repetition_penalty": 1.0,
68
+ "eval_strategy": "epoch",
69
+ "eval_interval": 1000000,
70
+ "need_eval": false,
71
+ "eval_split_ratio": null,
72
+ "output_dir": "/mnt/home/alison.cs13/RobustRLHF/output/ppo-baseline-print-token-prob",
73
+ "log_type": "wandb",
74
+ "log_dir": "/mnt/home/alison.cs13/RobustRLHF/output/ppo-baseline-print-token-prob",
75
+ "log_project": "Safe-RLHF-PPO",
76
+ "log_run_name": "ppo-print-token-prob",
77
+ "save_16bit": false,
78
+ "save_interval": 1000000,
79
+ "local_rank": 0,
80
+ "zero_stage": 3,
81
+ "offload": "none",
82
+ "deepspeed": false,
83
+ "deepspeed_config": null,
84
+ "deepscale": false,
85
+ "deepscale_config": null,
86
+ "global_rank": 0,
87
+ "device": {
88
+ "type": "torch.device",
89
+ "repr": "device(type='cuda', index=0)"
90
+ },
91
+ "total_training_steps": 302
92
+ }
arguments.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:959ba8f43dd58217422c179c7bdf0ff9605cc71beae03ccc4a268210da569acf
3
+ size 1988
config.json ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_attn_implementation_autoset": true,
3
+ "architectures": [
4
+ "LlamaForCausalLM"
5
+ ],
6
+ "attention_bias": false,
7
+ "attention_dropout": 0.0,
8
+ "bos_token_id": 1,
9
+ "eos_token_id": 2,
10
+ "head_dim": 128,
11
+ "hidden_act": "silu",
12
+ "hidden_size": 4096,
13
+ "initializer_range": 0.02,
14
+ "intermediate_size": 11008,
15
+ "max_position_embeddings": 2048,
16
+ "max_sequence_length": 2048,
17
+ "mlp_bias": false,
18
+ "model_type": "llama",
19
+ "num_attention_heads": 32,
20
+ "num_hidden_layers": 32,
21
+ "num_key_value_heads": 32,
22
+ "pad_token_id": 32000,
23
+ "pretraining_tp": 1,
24
+ "rms_norm_eps": 1e-06,
25
+ "rope_scaling": null,
26
+ "rope_theta": 10000.0,
27
+ "tie_word_embeddings": false,
28
+ "torch_dtype": "float16",
29
+ "transformers_version": "4.51.3",
30
+ "use_cache": true,
31
+ "vocab_size": 32001
32
+ }
environ.txt ADDED
@@ -0,0 +1,238 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ BASH_FUNC_ml%%=() { module ml "$@"
2
+ }
3
+ BASH_FUNC_module%%=() { unset _mlshdbg;
4
+ if [ "${MODULES_SILENT_SHELL_DEBUG:-0}" = '1' ]; then
5
+ case "$-" in
6
+ *v*x*)
7
+ set +vx;
8
+ _mlshdbg='vx'
9
+ ;;
10
+ *v*)
11
+ set +v;
12
+ _mlshdbg='v'
13
+ ;;
14
+ *x*)
15
+ set +x;
16
+ _mlshdbg='x'
17
+ ;;
18
+ *)
19
+ _mlshdbg=''
20
+ ;;
21
+ esac;
22
+ fi;
23
+ unset _mlre _mlIFS;
24
+ if [ -n "${IFS+x}" ]; then
25
+ _mlIFS=$IFS;
26
+ fi;
27
+ IFS=' ';
28
+ for _mlv in ${MODULES_RUN_QUARANTINE:-};
29
+ do
30
+ if [ "${_mlv}" = "${_mlv##*[!A-Za-z0-9_]}" -a "${_mlv}" = "${_mlv#[0-9]}" ]; then
31
+ if [ -n "`eval 'echo ${'$_mlv'+x}'`" ]; then
32
+ _mlre="${_mlre:-}${_mlv}_modquar='`eval 'echo ${'$_mlv'}'`' ";
33
+ fi;
34
+ _mlrv="MODULES_RUNENV_${_mlv}";
35
+ _mlre="${_mlre:-}${_mlv}='`eval 'echo ${'$_mlrv':-}'`' ";
36
+ fi;
37
+ done;
38
+ if [ -n "${_mlre:-}" ]; then
39
+ eval `eval ${_mlre} /usr/bin/tclsh /cm/local/apps/environment-modules/4.5.3/libexec/modulecmd.tcl bash '"$@"'`;
40
+ else
41
+ eval `/usr/bin/tclsh /cm/local/apps/environment-modules/4.5.3/libexec/modulecmd.tcl bash "$@"`;
42
+ fi;
43
+ _mlstatus=$?;
44
+ if [ -n "${_mlIFS+x}" ]; then
45
+ IFS=$_mlIFS;
46
+ else
47
+ unset IFS;
48
+ fi;
49
+ unset _mlre _mlv _mlrv _mlIFS;
50
+ if [ -n "${_mlshdbg:-}" ]; then
51
+ set -$_mlshdbg;
52
+ fi;
53
+ unset _mlshdbg;
54
+ return $_mlstatus
55
+ }
56
+ BASH_FUNC_switchml%%=() { typeset swfound=1;
57
+ if [ "${MODULES_USE_COMPAT_VERSION:-0}" = '1' ]; then
58
+ typeset swname='main';
59
+ if [ -e /cm/local/apps/environment-modules/4.5.3/libexec/modulecmd.tcl ]; then
60
+ typeset swfound=0;
61
+ unset MODULES_USE_COMPAT_VERSION;
62
+ fi;
63
+ else
64
+ typeset swname='compatibility';
65
+ if [ -e /cm/local/apps/environment-modules/4.5.3/libexec/modulecmd-compat ]; then
66
+ typeset swfound=0;
67
+ MODULES_USE_COMPAT_VERSION=1;
68
+ export MODULES_USE_COMPAT_VERSION;
69
+ fi;
70
+ fi;
71
+ if [ $swfound -eq 0 ]; then
72
+ echo "Switching to Modules $swname version";
73
+ source /cm/local/apps/environment-modules/4.5.3/init/bash;
74
+ else
75
+ echo "Cannot switch to Modules $swname version, command not found";
76
+ return 1;
77
+ fi
78
+ }
79
+ BROWSER=/home/alison.cs13/.vscode-server/cli/servers/Stable-4437686ffebaf200fa4a6e6e67f735f3edf24ada/server/bin/helpers/browser.sh
80
+ CC=/usr/bin/gcc
81
+ CMD_WLM_CLUSTER_NAME=slurm
82
+ COLORTERM=truecolor
83
+ CONDA_DEFAULT_ENV=safe-rlhf-12.4
84
+ CONDA_EXE=/mnt/home/alison.cs13/anaconda3/bin/conda
85
+ CONDA_PREFIX=/mnt/home/alison.cs13/anaconda3/envs/safe-rlhf-12.4
86
+ CONDA_PREFIX_1=/mnt/home/alison.cs13/anaconda3
87
+ CONDA_PREFIX_2=/mnt/home/alison.cs13/anaconda3/envs/safe-rlhf-12.4
88
+ CONDA_PREFIX_3=/mnt/home/alison.cs13/anaconda3
89
+ CONDA_PROMPT_MODIFIER=(safe-rlhf-12.4)
90
+ CONDA_PYTHON_EXE=/mnt/home/alison.cs13/anaconda3/bin/python
91
+ CONDA_SHLVL=4
92
+ CPATH=/cm/shared/apps/slurm/current/include
93
+ CPATH_modshare=/cm/shared/apps/slurm/current/include:1
94
+ CROSS_RANK=0
95
+ CROSS_SIZE=1
96
+ CUDA_MODULE_LOADING=LAZY
97
+ CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
98
+ CXX=/usr/bin/g++
99
+ DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/1109/bus
100
+ ENABLE_LMOD=0
101
+ ENVIRONMENT=BATCH
102
+ GIT_ASKPASS=/home/alison.cs13/.vscode-server/cli/servers/Stable-4437686ffebaf200fa4a6e6e67f735f3edf24ada/server/extensions/git/dist/askpass.sh
103
+ GPU_DEVICE_ORDINAL=0,1,2,3,4,5,6,7
104
+ HOME=/home/alison.cs13
105
+ HOSTNAME=cnode4-003
106
+ LANG=en_US.UTF-8
107
+ LD_LIBRARY_PATH=/cm/shared/apps/slurm/current/lib64/slurm:/cm/shared/apps/slurm/current/lib64:/cm/local/apps/gcc/13.1.0/lib:/cm/local/apps/gcc/13.1.0/lib64:/cm/local/apps/gcc/13.1.0/lib32
108
+ LD_LIBRARY_PATH_modshare=/cm/shared/apps/slurm/current/lib64:1:/cm/local/apps/gcc/13.1.0/lib64:1:/cm/shared/apps/slurm/current/lib64/slurm:1:/cm/local/apps/gcc/13.1.0/lib:1:/cm/local/apps/gcc/13.1.0/lib32:1
109
+ LESSCLOSE=/usr/bin/lesspipe %s %s
110
+ LESSOPEN=| /usr/bin/lesspipe %s
111
+ LIBRARY_PATH=/cm/shared/apps/slurm/current/lib64/slurm:/cm/shared/apps/slurm/current/lib64
112
+ LIBRARY_PATH_modshare=/cm/shared/apps/slurm/current/lib64:1:/cm/shared/apps/slurm/current/lib64/slurm:1
113
+ LOADEDMODULES=gcc/13.1.0:slurm/slurm/23.02.8
114
+ LOADEDMODULES_modshare=gcc/13.1.0:1:slurm/slurm/23.02.8:1
115
+ LOCAL_RANK=0
116
+ LOCAL_SIZE=8
117
+ LOGLEVEL=WARNING
118
+ LOGNAME=alison.cs13
119
+ LS_COLORS=rs=0:di=01;34:ln=01;36:mh=00:pi=40;33:so=01;35:do=01;35:bd=40;33;01:cd=40;33;01:or=40;31;01:mi=00:su=37;41:sg=30;43:ca=30;41:tw=30;42:ow=34;42:st=37;44:ex=01;32:*.tar=01;31:*.tgz=01;31:*.arc=01;31:*.arj=01;31:*.taz=01;31:*.lha=01;31:*.lz4=01;31:*.lzh=01;31:*.lzma=01;31:*.tlz=01;31:*.txz=01;31:*.tzo=01;31:*.t7z=01;31:*.zip=01;31:*.z=01;31:*.dz=01;31:*.gz=01;31:*.lrz=01;31:*.lz=01;31:*.lzo=01;31:*.xz=01;31:*.zst=01;31:*.tzst=01;31:*.bz2=01;31:*.bz=01;31:*.tbz=01;31:*.tbz2=01;31:*.tz=01;31:*.deb=01;31:*.rpm=01;31:*.jar=01;31:*.war=01;31:*.ear=01;31:*.sar=01;31:*.rar=01;31:*.alz=01;31:*.ace=01;31:*.zoo=01;31:*.cpio=01;31:*.7z=01;31:*.rz=01;31:*.cab=01;31:*.wim=01;31:*.swm=01;31:*.dwm=01;31:*.esd=01;31:*.jpg=01;35:*.jpeg=01;35:*.mjpg=01;35:*.mjpeg=01;35:*.gif=01;35:*.bmp=01;35:*.pbm=01;35:*.pgm=01;35:*.ppm=01;35:*.tga=01;35:*.xbm=01;35:*.xpm=01;35:*.tif=01;35:*.tiff=01;35:*.png=01;35:*.svg=01;35:*.svgz=01;35:*.mng=01;35:*.pcx=01;35:*.mov=01;35:*.mpg=01;35:*.mpeg=01;35:*.m2v=01;35:*.mkv=01;35:*.webm=01;35:*.webp=01;35:*.ogm=01;35:*.mp4=01;35:*.m4v=01;35:*.mp4v=01;35:*.vob=01;35:*.qt=01;35:*.nuv=01;35:*.wmv=01;35:*.asf=01;35:*.rm=01;35:*.rmvb=01;35:*.flc=01;35:*.avi=01;35:*.fli=01;35:*.flv=01;35:*.gl=01;35:*.dl=01;35:*.xcf=01;35:*.xwd=01;35:*.yuv=01;35:*.cgm=01;35:*.emf=01;35:*.ogv=01;35:*.ogx=01;35:*.aac=00;36:*.au=00;36:*.flac=00;36:*.m4a=00;36:*.mid=00;36:*.midi=00;36:*.mka=00;36:*.mp3=00;36:*.mpc=00;36:*.ogg=00;36:*.ra=00;36:*.wav=00;36:*.oga=00;36:*.opus=00;36:*.spx=00;36:*.xspf=00;36:
120
+ MANPATH=/cm/shared/apps/slurm/current/man:/cm/local/apps/environment-modules/4.5.3/share/man:/usr/local/man:/usr/local/share/man:/usr/share/man:/cm/local/apps/environment-modules/current/share/man
121
+ MANPATH_modshare=/usr/local/share/man:1:/cm/local/apps/environment-modules/current/share/man:1:/cm/local/apps/environment-modules/4.5.3/share/man:1:/usr/local/man:1:/usr/share/man:1:/cm/shared/apps/slurm/current/man:1
122
+ MASTER_ADDR=127.0.0.1
123
+ MASTER_PORT=17737
124
+ MODULEPATH=/cm/local/modulefiles:/cm/shared/modulefiles
125
+ MODULESHOME=/cm/local/apps/environment-modules/4.5.3
126
+ MODULES_CMD=/cm/local/apps/environment-modules/4.5.3/libexec/modulecmd.tcl
127
+ MODULES_SET_SHELL_STARTUP=0
128
+ MOTD_SHOWN=pam
129
+ NVCC_FLAGS=--ccbin=/usr/bin/g++
130
+ OLDPWD=/mnt/home/alison.cs13/RobustRLHF/scripts/Inference_logs
131
+ PATH=/cm/shared/apps/slurm/current/sbin:/cm/shared/apps/slurm/current/bin:/mnt/home/alison.cs13/anaconda3/envs/safe-rlhf-12.4/bin:/mnt/home/alison.cs13/anaconda3/condabin:/cm/local/apps/gcc/13.1.0/bin:/home/alison.cs13/.vscode-server/cli/servers/Stable-4437686ffebaf200fa4a6e6e67f735f3edf24ada/server/bin/remote-cli:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/sbin:/usr/sbin:/cm/local/apps/environment-modules/4.5.3/bin
132
+ PATH_modshare=/home/alison.cs13/.vscode-server/cli/servers/Stable-4437686ffebaf200fa4a6e6e67f735f3edf24ada/server/bin/remote-cli:1:/usr/bin:1:/usr/local/bin:1:/cm/shared/apps/slurm/current/bin:1:/mnt/home/alison.cs13/anaconda3/envs/safe-rlhf-12.4/bin:1:/cm/shared/apps/slurm/current/sbin:1:/mnt/home/alison.cs13/anaconda3/condabin:1:/cm/local/apps/gcc/13.1.0/bin:1:/bin:1:/snap/bin:1:/sbin:1:/usr/sbin:1:/usr/games:1:/cm/local/apps/environment-modules/4.5.3/bin:1:/usr/local/sbin:1:/usr/local/games:1
133
+ PMIX_BFROP_BUFFER_TYPE=PMIX_BFROP_BUFFER_NON_DESC
134
+ PMIX_GDS_MODULE=hash
135
+ PMIX_HOSTNAME=cnode4-003
136
+ PMIX_NAMESPACE=slurm.pmix.1002.0
137
+ PMIX_RANK=0
138
+ PMIX_SECURITY_MODE=none
139
+ PMIX_SERVER_TMPDIR=/cm/local/apps/slurm/var/spool/pmix.1002.0/
140
+ PMIX_SERVER_URI2=pmix-server.736309;tcp4://127.0.0.1:39237
141
+ PMIX_SERVER_URI21=pmix-server.736309;tcp4://127.0.0.1:39237
142
+ PMIX_SERVER_URI3=pmix-server.736309;tcp4://127.0.0.1:39237
143
+ PMIX_SERVER_URI4=pmix-server.736309;tcp4://127.0.0.1:39237
144
+ PMIX_SERVER_URI41=pmix-server.736309;tcp4://127.0.0.1:39237
145
+ PMIX_SYSTEM_TMPDIR=/var/empty
146
+ PMIX_VERSION=4.1.3
147
+ PWD=/mnt/home/alison.cs13/RobustRLHF/scripts
148
+ PYTHONHASHSEED=100
149
+ PYTHONPATH=/mnt/home/alison.cs13/RobustRLHF
150
+ RANK=0
151
+ ROCR_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
152
+ SHELL=/bin/bash
153
+ SHLVL=5
154
+ SLURMD_DEBUG=2
155
+ SLURMD_NODENAME=cnode4-003
156
+ SLURM_CLUSTER_NAME=slurm
157
+ SLURM_CONF=/cm/shared/apps/slurm/var/etc/slurm/slurm.conf
158
+ SLURM_CPUS_ON_NODE=8
159
+ SLURM_CPUS_PER_TASK=8
160
+ SLURM_DISTRIBUTION=cyclic
161
+ SLURM_GPUS_ON_NODE=8
162
+ SLURM_GTIDS=0
163
+ SLURM_JOBID=1002
164
+ SLURM_JOB_CPUS_PER_NODE=8
165
+ SLURM_JOB_END_TIME=1748207053
166
+ SLURM_JOB_GID=1109
167
+ SLURM_JOB_GPUS=0,1,2,3,4,5,6,7
168
+ SLURM_JOB_ID=1002
169
+ SLURM_JOB_NAME=Train-PPO-print-token-prob-sample
170
+ SLURM_JOB_NODELIST=cnode4-003
171
+ SLURM_JOB_NUM_NODES=1
172
+ SLURM_JOB_PARTITION=p1
173
+ SLURM_JOB_QOS=normal
174
+ SLURM_JOB_START_TIME=1748077453
175
+ SLURM_JOB_UID=1109
176
+ SLURM_JOB_USER=alison.cs13
177
+ SLURM_LAUNCH_NODE_IPADDR=10.225.1.229
178
+ SLURM_LOCALID=0
179
+ SLURM_MEM_PER_NODE=204800
180
+ SLURM_MPI_TYPE=pmix
181
+ SLURM_NNODES=1
182
+ SLURM_NODEID=0
183
+ SLURM_NODELIST=cnode4-003
184
+ SLURM_NPROCS=1
185
+ SLURM_NTASKS=1
186
+ SLURM_NTASKS_PER_NODE=1
187
+ SLURM_PMIXP_ABORT_AGENT_PORT=41983
188
+ SLURM_PMIX_MAPPING_SERV=(vector,(0,1,1))
189
+ SLURM_PRIO_PROCESS=0
190
+ SLURM_PROCID=0
191
+ SLURM_SRUN_COMM_HOST=10.225.1.229
192
+ SLURM_SRUN_COMM_PORT=37475
193
+ SLURM_STEPID=0
194
+ SLURM_STEP_GPUS=0,1,2,3,4,5,6,7
195
+ SLURM_STEP_ID=0
196
+ SLURM_STEP_LAUNCHER_PORT=37475
197
+ SLURM_STEP_NODELIST=cnode4-003
198
+ SLURM_STEP_NUM_NODES=1
199
+ SLURM_STEP_NUM_TASKS=1
200
+ SLURM_STEP_TASKS_PER_NODE=1
201
+ SLURM_SUBMIT_DIR=/mnt/home/alison.cs13/RobustRLHF/scripts
202
+ SLURM_SUBMIT_HOST=Tunnel4-TRDC-02
203
+ SLURM_TASKS_PER_NODE=1
204
+ SLURM_TASK_PID=736319
205
+ SLURM_TOPOLOGY_ADDR=cnode4-003
206
+ SLURM_TOPOLOGY_ADDR_PATTERN=node
207
+ SLURM_UMASK=0002
208
+ SLURM_WORKING_CLUSTER=slurm:Tunnel4-TRDC-02:6817:9984:109
209
+ SRUN_DEBUG=3
210
+ SSH_CLIENT=127.0.0.1 49680 22
211
+ SSH_CONNECTION=127.0.0.1 49680 127.0.0.1 22
212
+ SSL_CERT_DIR=/usr/lib/ssl/certs
213
+ SSL_CERT_FILE=/usr/lib/ssl/certs/ca-certificates.crt
214
+ TERM=xterm-256color
215
+ TERM_PROGRAM=vscode
216
+ TERM_PROGRAM_VERSION=1.99.0
217
+ TMPDIR=/tmp
218
+ TORCH_CUDA_ARCH_LIST=9.0
219
+ USER=alison.cs13
220
+ VSCODE_GIT_ASKPASS_EXTRA_ARGS=
221
+ VSCODE_GIT_ASKPASS_MAIN=/home/alison.cs13/.vscode-server/cli/servers/Stable-4437686ffebaf200fa4a6e6e67f735f3edf24ada/server/extensions/git/dist/askpass-main.js
222
+ VSCODE_GIT_ASKPASS_NODE=/home/alison.cs13/.vscode-server/cli/servers/Stable-4437686ffebaf200fa4a6e6e67f735f3edf24ada/server/node
223
+ VSCODE_GIT_IPC_HANDLE=/run/user/1109/vscode-git-a784703e7c.sock
224
+ VSCODE_IPC_HOOK_CLI=/run/user/1109/vscode-ipc-a65db9f7-1b43-4575-8f17-1ea637764c0e.sock
225
+ WANDB_MODE=online
226
+ WANDB_SERVICE=2-736816-tcp-localhost-43163
227
+ WORLD_SIZE=8
228
+ XDG_DATA_DIRS=/usr/local/share:/usr/share:/var/lib/snapd/desktop
229
+ XDG_RUNTIME_DIR=/run/user/1109
230
+ XDG_SESSION_CLASS=user
231
+ XDG_SESSION_ID=26677
232
+ XDG_SESSION_TYPE=tty
233
+ ZE_AFFINITY_MASK=0,1,2,3,4,5,6,7
234
+ _=/mnt/home/alison.cs13/anaconda3/envs/safe-rlhf-12.4/bin/deepspeed
235
+ _CE_CONDA=
236
+ _CE_M=
237
+ _LMFILES_=/cm/local/modulefiles/gcc/13.1.0:/cm/local/modulefiles/slurm/slurm/23.02.8
238
+ _LMFILES__modshare=/cm/local/modulefiles/gcc/13.1.0:1:/cm/local/modulefiles/slurm/slurm/23.02.8:1
global_step302/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fc84a7563f82fdbeca0536165de82bd3fd920d4f0613d8f8500d8fe1a3b328e6
3
+ size 10107640320
global_step302/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ad479508915200d922d085e272dae2e6c21aaece3bc49a07cbdd9be0d16a04ad
3
+ size 10107640320
global_step302/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f0c8e41cf587535ebbe7f35069d4a98a7f9a7da1d4e04f45b5b73703f0d7faf4
3
+ size 10107640320
global_step302/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:13b2b2e345fcfd6bd9d80b51512d28bc2ddad911fda1006196de8c999c764a96
3
+ size 10107640320
global_step302/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:161b2558a04e21e150afe54fec6333707541626715cc93cf31c3695ecc3f726e
3
+ size 10107640320
global_step302/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:747b429b7e8d064fc0381df9f3e7f4ac3504e50a6fb52c13f019fe7baede15d2
3
+ size 10107640320
global_step302/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ec3f3603ce15442dbf32cc169116ac01160269fdbb1aefa4c7dbfde90b423fbd
3
+ size 10107640320
global_step302/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ec825ddbe62ee3481b8a2baffd959c5458c816c66eb6159d70ca965f1cf50775
3
+ size 10107640320
global_step302/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4415b047b4e06b7247932c1347e3827fae8a80a5f19e62821d565f8e30dcf8c7
3
+ size 149349
global_step302/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ae97999dc2a5dd8f00aa32bc5036c0f841a90dbe3f927931630972e8e7e7f98a
3
+ size 149349
global_step302/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c84fa7ced451cbc6ceefae233f360a6086756a6f90ecaf8949494d2f4e242f3e
3
+ size 149349
global_step302/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:74438d6582821e44332ec9b3881615f56ee8cf333577b609d9cffd19081845f0
3
+ size 149349
global_step302/zero_pp_rank_4_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0bebc425342e7a295b5ee8a47aac9fa048b681acefe1dae4ac10d696f1fea198
3
+ size 149349
global_step302/zero_pp_rank_5_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b50295a1e671d6325efc664c2d83bdf2061b20c452826ad640d912b4238a520d
3
+ size 149349
global_step302/zero_pp_rank_6_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2425fc3f5983beb1dfc9dacff13c15e788988a2f3c9d60f0038bca5df0b6b11e
3
+ size 149349
global_step302/zero_pp_rank_7_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fa493116a3badbf5723f0b30216620a56c43f76c8244d7aa9dab76a0725655b2
3
+ size 149349
latest ADDED
@@ -0,0 +1 @@
 
 
1
+ global_step302
pytorch_model.bin/pytorch_model-00001-of-00006.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e7fabe1ef7fe62975b4cedf2ef44528cd281ddd4cb3f2d4207799e126d0cb1b3
3
+ size 4840257098
pytorch_model.bin/pytorch_model-00002-of-00006.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2ebce5662d915e2740b01528c6b5eaafeec080417a3f361e2523adffd33494a6
3
+ size 4857018594
pytorch_model.bin/pytorch_model-00003-of-00006.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:13fb180f4b8b30c0ccb723c64673e26ff228ead1f6465e398572b0345bbf817c
3
+ size 4857018658
pytorch_model.bin/pytorch_model-00004-of-00006.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1103e546024ae3d79230a5c6a772b8e3f5c417a4e4c2d1aefa66d6eb92f65a1e
3
+ size 4857018658
pytorch_model.bin/pytorch_model-00005-of-00006.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d498d70fd34ccd3cb77363687982cd7bce311df605d288224a94d3d1154de69c
3
+ size 4857018658
pytorch_model.bin/pytorch_model-00006-of-00006.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b9d6badf9e9203cbb40a48b8f74e5467b6b7743f3b158dd16b1f3f54bcdc4966
3
+ size 2685464064
pytorch_model.bin/pytorch_model.bin.index.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7ccd2629c946f62d9a501020a76de34ce6e4eef0128a17f3157d7e05ad334c75
3
+ size 23950
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<s>",
4
+ "lstrip": false,
5
+ "normalized": true,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "</s>",
11
+ "lstrip": false,
12
+ "normalized": true,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": {
17
+ "content": "<pad>",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "unk_token": {
24
+ "content": "<unk>",
25
+ "lstrip": false,
26
+ "normalized": true,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ }
30
+ }
stderr.log ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ + deepspeed --master_port 17737 --module safe_rlhf.algorithms.ppo --train_datasets PKU-SafeRLHF/train --ptx_datasets alpaca --actor_model_name_or_path AlisonWen/safe-rlhf-sft --reward_model_name_or_path AlisonWen/safe-rlhf-rm --reward_critic_model_name_or_path AlisonWen/safe-rlhf-rm --max_length 512 --temperature 1.0 --num_return_sequences 1 --repetition_penalty 1.0 --trust_remote_code True --epochs 1 --update_iters 1 --per_device_prompt_batch_size 16 --per_device_train_batch_size 16 --gradient_accumulation_steps 1 --actor_lr 1e-5 --actor_weight_decay 0.01 --actor_lr_scheduler_type cosine --actor_lr_warmup_ratio 0.03 --actor_gradient_checkpointing --critic_lr 5e-6 --critic_weight_decay 0.0 --critic_lr_scheduler_type constant --critic_lr_warmup_ratio 0.03 --critic_gradient_checkpointing --normalize_reward False --seed 100 --kl_coeff 0.02 --clip_range_ratio 0.2 --clip_range_score 50.0 --clip_range_value 5.0 --ptx_coeff 16.0 --output_dir /mnt/home/alison.cs13/RobustRLHF/output/ppo-baseline-print-token-prob --log_type wandb --log_project Safe-RLHF-PPO --zero_stage 3 --offload none --bf16 True --tf32 True --pth 1 --log_run_name ppo-print-token-prob
2
+ [rank2]:[W524 17:05:09.072910144 ProcessGroupNCCL.cpp:4115] [PG ID 0 PG GUID 0 Rank 2] using GPU 2 to perform barrier as devices used by this process are currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect.Specify device_ids in barrier() to force use of a particular device,or call init_process_group() with a device_id.
3
+ [rank3]:[W524 17:05:09.071000956 ProcessGroupNCCL.cpp:4115] [PG ID 0 PG GUID 0 Rank 3] using GPU 3 to perform barrier as devices used by this process are currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect.Specify device_ids in barrier() to force use of a particular device,or call init_process_group() with a device_id.
4
+ [rank7]:[W524 17:05:09.095302428 ProcessGroupNCCL.cpp:4115] [PG ID 0 PG GUID 0 Rank 7] using GPU 7 to perform barrier as devices used by this process are currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect.Specify device_ids in barrier() to force use of a particular device,or call init_process_group() with a device_id.
5
+ [rank6]:[W524 17:05:09.205766830 ProcessGroupNCCL.cpp:4115] [PG ID 0 PG GUID 0 Rank 6] using GPU 6 to perform barrier as devices used by this process are currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect.Specify device_ids in barrier() to force use of a particular device,or call init_process_group() with a device_id.
6
+ [rank4]:[W524 17:05:09.205822771 ProcessGroupNCCL.cpp:4115] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 to perform barrier as devices used by this process are currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect.Specify device_ids in barrier() to force use of a particular device,or call init_process_group() with a device_id.
7
+ [rank1]:[W524 17:05:09.242018619 ProcessGroupNCCL.cpp:4115] [PG ID 0 PG GUID 0 Rank 1] using GPU 1 to perform barrier as devices used by this process are currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect.Specify device_ids in barrier() to force use of a particular device,or call init_process_group() with a device_id.
8
+ [rank0]:[W524 17:05:09.371519527 ProcessGroupNCCL.cpp:4115] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 to perform barrier as devices used by this process are currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect.Specify device_ids in barrier() to force use of a particular device,or call init_process_group() with a device_id.
9
+ [rank5]:[W524 17:05:10.422560604 ProcessGroupNCCL.cpp:4115] [PG ID 0 PG GUID 0 Rank 5] using GPU 5 to perform barrier as devices used by this process are currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect.Specify device_ids in barrier() to force use of a particular device,or call init_process_group() with a device_id.
10
+ Using /home/alison.cs13/.cache/torch_extensions/py311_cu124 as PyTorch extensions root...
11
+ Using /home/alison.cs13/.cache/torch_extensions/py311_cu124 as PyTorch extensions root...
12
+ Using /home/alison.cs13/.cache/torch_extensions/py311_cu124 as PyTorch extensions root...
13
+ Using /home/alison.cs13/.cache/torch_extensions/py311_cu124 as PyTorch extensions root...
14
+ Using /home/alison.cs13/.cache/torch_extensions/py311_cu124 as PyTorch extensions root...
15
+ Using /home/alison.cs13/.cache/torch_extensions/py311_cu124 as PyTorch extensions root...
16
+ Using /home/alison.cs13/.cache/torch_extensions/py311_cu124 as PyTorch extensions root...
17
+ Using /home/alison.cs13/.cache/torch_extensions/py311_cu124 as PyTorch extensions root...
18
+ Detected CUDA files, patching ldflags
19
+ Emitting ninja build file /home/alison.cs13/.cache/torch_extensions/py311_cu124/fused_adam/build.ninja...
20
+ /mnt/home/alison.cs13/anaconda3/envs/safe-rlhf-12.4/lib/python3.11/site-packages/torch/utils/cpp_extension.py:1964: UserWarning: TORCH_CUDA_ARCH_LIST is not set, all archs for visible cards are included for compilation.
21
+ If this is not desired, please set os.environ['TORCH_CUDA_ARCH_LIST'].
22
+ warnings.warn(
23
+ Building extension module fused_adam...
24
+ Allowing ninja to set a default number of workers... (overridable by setting the environment variable MAX_JOBS=N)
25
+ Loading extension module fused_adam...
26
+ Loading extension module fused_adam...
27
+ Loading extension module fused_adam...
28
+ Loading extension module fused_adam...
29
+ Loading extension module fused_adam...
30
+ Loading extension module fused_adam...
31
+ Loading extension module fused_adam...
32
+ Loading extension module fused_adam...
33
+ wandb: Currently logged in as: alison-cs09 (alison_wen) to https://api.wandb.ai. Use `wandb login --relogin` to force relogin
34
+ wandb: Tracking run with wandb version 0.19.11
35
+ wandb: Run data is saved locally in /mnt/home/alison.cs13/RobustRLHF/output/ppo-baseline-print-token-prob/wandb/run-20250524_170714-ydatbywb
36
+ wandb: Run `wandb offline` to turn off syncing.
37
+ wandb: Syncing run ppo-print-token-prob
38
+ wandb: ⭐️ View project at https://wandb.ai/alison_wen/Safe-RLHF-PPO
39
+ wandb: 🚀 View run at https://wandb.ai/alison_wen/Safe-RLHF-PPO/runs/ydatbywb
40
+
41
+ `use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.
42
+ `use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.
43
+ `use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.
44
+ `use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.
45
+ `use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.
46
+ `use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.
47
+ `use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.
48
+
49
+
50
+
51
+
52
+ wandb: ERROR Problem finishing run
53
+ Exception ignored in atexit callback: <bound method rank_zero_only.<locals>.wrapper of <safe_rlhf.logger.Logger object at 0x1551a7299d50>>
54
+ Traceback (most recent call last):
55
+ File "/mnt/home/alison.cs13/RobustRLHF/safe_rlhf/utils.py", line 195, in wrapper
56
+ return func(*args, **kwargs)
57
+ ^^^^^^^^^^^^^^^^^^^^^
58
+ File "/mnt/home/alison.cs13/RobustRLHF/safe_rlhf/logger.py", line 230, in close
59
+ self.wandb.finish()
60
+ File "/mnt/home/alison.cs13/anaconda3/envs/safe-rlhf-12.4/lib/python3.11/site-packages/wandb/sdk/wandb_run.py", line 406, in wrapper
61
+ return func(self, *args, **kwargs)
62
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^
63
+ File "/mnt/home/alison.cs13/anaconda3/envs/safe-rlhf-12.4/lib/python3.11/site-packages/wandb/sdk/wandb_run.py", line 503, in wrapper
64
+ return func(self, *args, **kwargs)
65
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^
66
+ File "/mnt/home/alison.cs13/anaconda3/envs/safe-rlhf-12.4/lib/python3.11/site-packages/wandb/sdk/wandb_run.py", line 451, in wrapper
67
+ return func(self, *args, **kwargs)
68
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^
69
+ File "/mnt/home/alison.cs13/anaconda3/envs/safe-rlhf-12.4/lib/python3.11/site-packages/wandb/sdk/wandb_run.py", line 2309, in finish
70
+ return self._finish(exit_code)
71
+ ^^^^^^^^^^^^^^^^^^^^^^^
72
+ File "/mnt/home/alison.cs13/anaconda3/envs/safe-rlhf-12.4/lib/python3.11/site-packages/wandb/sdk/wandb_run.py", line 406, in wrapper
73
+ return func(self, *args, **kwargs)
74
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^
75
+ File "/mnt/home/alison.cs13/anaconda3/envs/safe-rlhf-12.4/lib/python3.11/site-packages/wandb/sdk/wandb_run.py", line 2337, in _finish
76
+ self._atexit_cleanup(exit_code=exit_code)
77
+ File "/mnt/home/alison.cs13/anaconda3/envs/safe-rlhf-12.4/lib/python3.11/site-packages/wandb/sdk/wandb_run.py", line 2550, in _atexit_cleanup
78
+ self._on_finish()
79
+ File "/mnt/home/alison.cs13/anaconda3/envs/safe-rlhf-12.4/lib/python3.11/site-packages/wandb/sdk/wandb_run.py", line 2806, in _on_finish
80
+ wait_with_progress(
81
+ File "/mnt/home/alison.cs13/anaconda3/envs/safe-rlhf-12.4/lib/python3.11/site-packages/wandb/sdk/mailbox/wait_with_progress.py", line 24, in wait_with_progress
82
+ return wait_all_with_progress(
83
+ ^^^^^^^^^^^^^^^^^^^^^^^
84
+ File "/mnt/home/alison.cs13/anaconda3/envs/safe-rlhf-12.4/lib/python3.11/site-packages/wandb/sdk/mailbox/wait_with_progress.py", line 87, in wait_all_with_progress
85
+ return asyncio_compat.run(progress_loop_with_timeout)
86
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
87
+ File "/mnt/home/alison.cs13/anaconda3/envs/safe-rlhf-12.4/lib/python3.11/site-packages/wandb/sdk/lib/asyncio_compat.py", line 27, in run
88
+ future = executor.submit(runner.run, fn)
89
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
90
+ File "/mnt/home/alison.cs13/anaconda3/envs/safe-rlhf-12.4/lib/python3.11/concurrent/futures/thread.py", line 169, in submit
91
+ raise RuntimeError('cannot schedule new futures after '
92
+ RuntimeError: cannot schedule new futures after interpreter shutdown
stdout.log ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
3
+ size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_bos_token": true,
3
+ "add_eos_token": false,
4
+ "add_prefix_space": null,
5
+ "added_tokens_decoder": {
6
+ "0": {
7
+ "content": "<unk>",
8
+ "lstrip": false,
9
+ "normalized": true,
10
+ "rstrip": false,
11
+ "single_word": false,
12
+ "special": true
13
+ },
14
+ "1": {
15
+ "content": "<s>",
16
+ "lstrip": false,
17
+ "normalized": true,
18
+ "rstrip": false,
19
+ "single_word": false,
20
+ "special": true
21
+ },
22
+ "2": {
23
+ "content": "</s>",
24
+ "lstrip": false,
25
+ "normalized": true,
26
+ "rstrip": false,
27
+ "single_word": false,
28
+ "special": true
29
+ },
30
+ "32000": {
31
+ "content": "<pad>",
32
+ "lstrip": false,
33
+ "normalized": false,
34
+ "rstrip": false,
35
+ "single_word": false,
36
+ "special": true
37
+ }
38
+ },
39
+ "bos_token": "<s>",
40
+ "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% elif false == true and not '<<SYS>>' in messages[0]['content'] %}{% set loop_messages = messages %}{% set system_message = 'You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.\\n\\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don\\'t know the answer to a question, please don\\'t share false information.' %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<<SYS>>\\n' + system_message + '\\n<</SYS>>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}{% elif message['role'] == 'system' %}{{ '<<SYS>>\\n' + content.strip() + '\\n<</SYS>>\\n\\n' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content.strip() + ' ' + eos_token }}{% endif %}{% endfor %}",
41
+ "clean_up_tokenization_spaces": false,
42
+ "eos_token": "</s>",
43
+ "extra_special_tokens": {},
44
+ "legacy": true,
45
+ "max_length": 512,
46
+ "model_max_length": 512,
47
+ "pad_token": "<pad>",
48
+ "padding_side": "left",
49
+ "sp_model_kwargs": {},
50
+ "stride": 0,
51
+ "tokenizer_class": "LlamaTokenizer",
52
+ "truncation_side": "right",
53
+ "truncation_strategy": "longest_first",
54
+ "unk_token": "<unk>",
55
+ "use_default_system_prompt": false
56
+ }
zero_to_fp32.py ADDED
@@ -0,0 +1,760 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright (c) Microsoft Corporation.
4
+ # SPDX-License-Identifier: Apache-2.0
5
+
6
+ # DeepSpeed Team
7
+
8
+ # This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets
9
+ # copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
10
+ # the future. Once extracted, the weights don't require DeepSpeed and can be used in any
11
+ # application.
12
+ #
13
+ # example:
14
+ # python zero_to_fp32.py . output_dir/
15
+ # or
16
+ # python zero_to_fp32.py . output_dir/ --safe_serialization
17
+
18
+ import argparse
19
+ import torch
20
+ import glob
21
+ import math
22
+ import os
23
+ import re
24
+ import gc
25
+ import json
26
+ import numpy as np
27
+ from tqdm import tqdm
28
+ from collections import OrderedDict
29
+ from dataclasses import dataclass
30
+
31
+ # while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
32
+ # DeepSpeed data structures it has to be available in the current python environment.
33
+ from deepspeed.utils import logger
34
+ from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
35
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
36
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
37
+
38
+
39
+ @dataclass
40
+ class zero_model_state:
41
+ buffers: dict()
42
+ param_shapes: dict()
43
+ shared_params: list
44
+ ds_version: int
45
+ frozen_param_shapes: dict()
46
+ frozen_param_fragments: dict()
47
+
48
+
49
+ debug = 0
50
+
51
+ # load to cpu
52
+ device = torch.device('cpu')
53
+
54
+
55
+ def atoi(text):
56
+ return int(text) if text.isdigit() else text
57
+
58
+
59
+ def natural_keys(text):
60
+ '''
61
+ alist.sort(key=natural_keys) sorts in human order
62
+ http://nedbatchelder.com/blog/200712/human_sorting.html
63
+ (See Toothy's implementation in the comments)
64
+ '''
65
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
66
+
67
+
68
+ def get_model_state_file(checkpoint_dir, zero_stage):
69
+ if not os.path.isdir(checkpoint_dir):
70
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
71
+
72
+ # there should be only one file
73
+ if zero_stage <= 2:
74
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
75
+ elif zero_stage == 3:
76
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
77
+
78
+ if not os.path.exists(file):
79
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
80
+
81
+ return file
82
+
83
+
84
+ def get_checkpoint_files(checkpoint_dir, glob_pattern):
85
+ # XXX: need to test that this simple glob rule works for multi-node setup too
86
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
87
+
88
+ if len(ckpt_files) == 0:
89
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
90
+
91
+ return ckpt_files
92
+
93
+
94
+ def get_optim_files(checkpoint_dir):
95
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
96
+
97
+
98
+ def get_model_state_files(checkpoint_dir):
99
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
100
+
101
+
102
+ def parse_model_states(files):
103
+ zero_model_states = []
104
+ for file in files:
105
+ state_dict = torch.load(file, map_location=device, weights_only=False)
106
+
107
+ if BUFFER_NAMES not in state_dict:
108
+ raise ValueError(f"{file} is not a model state checkpoint")
109
+ buffer_names = state_dict[BUFFER_NAMES]
110
+ if debug:
111
+ print("Found buffers:", buffer_names)
112
+
113
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
114
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
115
+ param_shapes = state_dict[PARAM_SHAPES]
116
+
117
+ # collect parameters that are included in param_shapes
118
+ param_names = []
119
+ for s in param_shapes:
120
+ for name in s.keys():
121
+ param_names.append(name)
122
+
123
+ # update with frozen parameters
124
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
125
+ if frozen_param_shapes is not None:
126
+ if debug:
127
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
128
+ param_names += list(frozen_param_shapes.keys())
129
+
130
+ # handle shared params
131
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
132
+
133
+ ds_version = state_dict.get(DS_VERSION, None)
134
+
135
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
136
+
137
+ z_model_state = zero_model_state(buffers=buffers,
138
+ param_shapes=param_shapes,
139
+ shared_params=shared_params,
140
+ ds_version=ds_version,
141
+ frozen_param_shapes=frozen_param_shapes,
142
+ frozen_param_fragments=frozen_param_fragments)
143
+ zero_model_states.append(z_model_state)
144
+
145
+ return zero_model_states
146
+
147
+
148
+ def parse_optim_states(files, ds_checkpoint_dir):
149
+ total_files = len(files)
150
+ state_dicts = []
151
+ for f in tqdm(files, desc='Loading checkpoint shards'):
152
+ state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False)
153
+ # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
154
+ # and also handle the case where it was already removed by another helper script
155
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
156
+ state_dicts.append(state_dict)
157
+
158
+ if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
159
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
160
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
161
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
162
+
163
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
164
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
165
+ # use the max of the partition_count to get the dp world_size.
166
+
167
+ if type(world_size) is list:
168
+ world_size = max(world_size)
169
+
170
+ if world_size != total_files:
171
+ raise ValueError(
172
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
173
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
174
+ )
175
+
176
+ # the groups are named differently in each stage
177
+ if zero_stage <= 2:
178
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
179
+ elif zero_stage == 3:
180
+ fp32_groups_key = FP32_FLAT_GROUPS
181
+ else:
182
+ raise ValueError(f"unknown zero stage {zero_stage}")
183
+
184
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
185
+ return zero_stage, world_size, fp32_flat_groups
186
+
187
+
188
+ def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
189
+ """
190
+ Returns fp32 state_dict reconstructed from ds checkpoint
191
+
192
+ Args:
193
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
194
+
195
+ """
196
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
197
+
198
+ optim_files = get_optim_files(ds_checkpoint_dir)
199
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
200
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
201
+
202
+ model_files = get_model_state_files(ds_checkpoint_dir)
203
+
204
+ zero_model_states = parse_model_states(model_files)
205
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
206
+
207
+ if zero_stage <= 2:
208
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
209
+ exclude_frozen_parameters)
210
+ elif zero_stage == 3:
211
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
212
+ exclude_frozen_parameters)
213
+
214
+
215
+ def _zero2_merge_frozen_params(state_dict, zero_model_states):
216
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
217
+ return
218
+
219
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
220
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
221
+
222
+ if debug:
223
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
224
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
225
+
226
+ wanted_params = len(frozen_param_shapes)
227
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
228
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
229
+ print(f'Frozen params: Have {avail_numel} numels to process.')
230
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
231
+
232
+ total_params = 0
233
+ total_numel = 0
234
+ for name, shape in frozen_param_shapes.items():
235
+ total_params += 1
236
+ unpartitioned_numel = shape.numel()
237
+ total_numel += unpartitioned_numel
238
+
239
+ state_dict[name] = frozen_param_fragments[name]
240
+
241
+ if debug:
242
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
243
+
244
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
245
+
246
+
247
+ def _has_callable(obj, fn):
248
+ attr = getattr(obj, fn, None)
249
+ return callable(attr)
250
+
251
+
252
+ def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
253
+ param_shapes = zero_model_states[0].param_shapes
254
+
255
+ # Reconstruction protocol:
256
+ #
257
+ # XXX: document this
258
+
259
+ if debug:
260
+ for i in range(world_size):
261
+ for j in range(len(fp32_flat_groups[0])):
262
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
263
+
264
+ # XXX: memory usage doubles here (zero2)
265
+ num_param_groups = len(fp32_flat_groups[0])
266
+ merged_single_partition_of_fp32_groups = []
267
+ for i in range(num_param_groups):
268
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
269
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
270
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
271
+ avail_numel = sum(
272
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
273
+
274
+ if debug:
275
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
276
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
277
+ # not asserting if there is a mismatch due to possible padding
278
+ print(f"Have {avail_numel} numels to process.")
279
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
280
+
281
+ # params
282
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
283
+ # out-of-core computing solution
284
+ total_numel = 0
285
+ total_params = 0
286
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
287
+ offset = 0
288
+ avail_numel = full_single_fp32_vector.numel()
289
+ for name, shape in shapes.items():
290
+
291
+ unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
292
+ total_numel += unpartitioned_numel
293
+ total_params += 1
294
+
295
+ if debug:
296
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
297
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
298
+ offset += unpartitioned_numel
299
+
300
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
301
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
302
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
303
+ # live optimizer object, so we are checking that the numbers are within the right range
304
+ align_to = 2 * world_size
305
+
306
+ def zero2_align(x):
307
+ return align_to * math.ceil(x / align_to)
308
+
309
+ if debug:
310
+ print(f"original offset={offset}, avail_numel={avail_numel}")
311
+
312
+ offset = zero2_align(offset)
313
+ avail_numel = zero2_align(avail_numel)
314
+
315
+ if debug:
316
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
317
+
318
+ # Sanity check
319
+ if offset != avail_numel:
320
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
321
+
322
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
323
+
324
+
325
+ def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
326
+ exclude_frozen_parameters):
327
+ state_dict = OrderedDict()
328
+
329
+ # buffers
330
+ buffers = zero_model_states[0].buffers
331
+ state_dict.update(buffers)
332
+ if debug:
333
+ print(f"added {len(buffers)} buffers")
334
+
335
+ if not exclude_frozen_parameters:
336
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
337
+
338
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
339
+
340
+ # recover shared parameters
341
+ for pair in zero_model_states[0].shared_params:
342
+ if pair[1] in state_dict:
343
+ state_dict[pair[0]] = state_dict[pair[1]]
344
+
345
+ return state_dict
346
+
347
+
348
+ def zero3_partitioned_param_info(unpartitioned_numel, world_size):
349
+ remainder = unpartitioned_numel % world_size
350
+ padding_numel = (world_size - remainder) if remainder else 0
351
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
352
+ return partitioned_numel, padding_numel
353
+
354
+
355
+ def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
356
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
357
+ return
358
+
359
+ if debug:
360
+ for i in range(world_size):
361
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
362
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
363
+
364
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
365
+ wanted_params = len(frozen_param_shapes)
366
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
367
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
368
+ print(f'Frozen params: Have {avail_numel} numels to process.')
369
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
370
+
371
+ total_params = 0
372
+ total_numel = 0
373
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
374
+ total_params += 1
375
+ unpartitioned_numel = shape.numel()
376
+ total_numel += unpartitioned_numel
377
+
378
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
379
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
380
+
381
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
382
+
383
+ if debug:
384
+ print(
385
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
386
+ )
387
+
388
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
389
+
390
+
391
+ class GatheredTensor:
+     """
+     A pseudo tensor that collects partitioned weights.
+     It is more memory efficient when there are multiple groups.
+     """
+
+     def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape):
+         self.flat_groups = flat_groups
+         self.flat_groups_offset = flat_groups_offset
+         self.offset = offset
+         self.partitioned_numel = partitioned_numel
+         self.shape = shape
+         self.dtype = self.flat_groups[0][0].dtype
+
+     def contiguous(self):
+         """
+         Merge partitioned weights from flat_groups into a single tensor.
+         """
+         end_idx = self.offset + self.partitioned_numel
+         world_size = len(self.flat_groups)
+         pad_flat_param_chunks = []
+
+         for rank_i in range(world_size):
+             # for each rank, we need to collect weights from related group/groups
+             flat_groups_at_rank_i = self.flat_groups[rank_i]
+             start_group_id = None
+             end_group_id = None
+             for group_id in range(len(self.flat_groups_offset)):
+                 if self.flat_groups_offset[group_id] <= self.offset < self.flat_groups_offset[group_id + 1]:
+                     start_group_id = group_id
+                 if self.flat_groups_offset[group_id] < end_idx <= self.flat_groups_offset[group_id + 1]:
+                     end_group_id = group_id
+                     break
+             # collect weights from related group/groups
+             for group_id in range(start_group_id, end_group_id + 1):
+                 flat_tensor = flat_groups_at_rank_i[group_id]
+                 start_offset = self.offset - self.flat_groups_offset[group_id]
+                 end_offset = min(end_idx, self.flat_groups_offset[group_id + 1]) - self.flat_groups_offset[group_id]
+                 pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset])
+
+         # collect weights from all ranks
+         pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0)
+         param = pad_flat_param[:self.shape.numel()].view(self.shape).contiguous()
+         return param
+
+
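+ # Illustrative sketch, not part of the upstream DeepSpeed utility: `_zero3_merge_trainable_params`
+ # below fills the state_dict with GatheredTensor entries, so a consumer can materialize one
+ # parameter at a time instead of holding every full tensor in CPU memory at once.
+ def _materialize_one_by_one(lazy_state_dict):
+     # lazy_state_dict: mapping of parameter name -> GatheredTensor (assumed already built)
+     for name, lazy_tensor in lazy_state_dict.items():
+         full_tensor = lazy_tensor.contiguous()  # concatenate the rank shards and reshape on CPU
+         yield name, full_tensor  # the caller can drop the tensor afterwards to release memory
+
+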
+ def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+     param_shapes = zero_model_states[0].param_shapes
+     avail_numel = sum([flat_group.numel() for flat_group in fp32_flat_groups[0]]) * world_size
+
+     # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
+     # param, re-consolidating each param, while dealing with padding if any
+
+     # merge list of dicts, preserving order
+     param_shapes = {k: v for d in param_shapes for k, v in d.items()}
+
+     if debug:
+         for i in range(world_size):
+             print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
+
+     wanted_params = len(param_shapes)
+     wanted_numel = sum(shape.numel() for shape in param_shapes.values())
+     # not asserting if there is a mismatch due to possible padding
+     avail_numel = fp32_flat_groups[0].numel() * world_size
+     print(f"Trainable params: Have {avail_numel} numels to process.")
+     print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
+
+     # params
+     # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+     # out-of-core computing solution
+     offset = 0
+     total_numel = 0
+     total_params = 0
+     flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]]))
+     for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'):
+         unpartitioned_numel = shape.numel()
+         total_numel += unpartitioned_numel
+         total_params += 1
+         partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+         if debug:
+             print(
+                 f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+             )
+
+         # memory efficient tensor
+         tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape)
+         state_dict[name] = tensor
+         offset += partitioned_numel
+
+     offset *= world_size
+
+     # Sanity check
+     if offset != avail_numel:
+         raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+     print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
+ def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+                                                exclude_frozen_parameters):
+     state_dict = OrderedDict()
+
+     # buffers
+     buffers = zero_model_states[0].buffers
+     state_dict.update(buffers)
+     if debug:
+         print(f"added {len(buffers)} buffers")
+
+     if not exclude_frozen_parameters:
+         _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+     _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+     # recover shared parameters
+     for pair in zero_model_states[0].shared_params:
+         if pair[1] in state_dict:
+             state_dict[pair[0]] = state_dict[pair[1]]
+
+     return state_dict
+
+
+ def to_torch_tensor(state_dict, return_empty_tensor=False):
+     """
+     Convert state_dict of GatheredTensor to torch tensor
+     """
+     torch_state_dict = {}
+     converted_tensors = {}
+     for name, tensor in state_dict.items():
+         tensor_id = id(tensor)
+         if tensor_id in converted_tensors:  # shared tensors
+             shared_tensor = torch_state_dict[converted_tensors[tensor_id]]
+             torch_state_dict[name] = shared_tensor
+         else:
+             converted_tensors[tensor_id] = name
+             if return_empty_tensor:
+                 torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype)
+             else:
+                 torch_state_dict[name] = tensor.contiguous()
+     return torch_state_dict
+
+
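+ # Illustrative sketch, not part of the upstream utility: with return_empty_tensor=True the helper
+ # above builds uninitialized placeholders of the right shape and dtype instead of gathering the
+ # real shards, which is enough to estimate sizes and plan shards (the sharded save path below
+ # relies on exactly this).
+ def _example_plan_shards(lazy_state_dict):
+     # lazy_state_dict is assumed to map parameter names to GatheredTensor entries
+     placeholders = to_torch_tensor(lazy_state_dict, return_empty_tensor=True)
+     total_bytes = sum(t.numel() * t.element_size() for t in placeholders.values())
+     return total_bytes  # rough size estimate without concatenating any checkpoint partitions
+
+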
+ def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
+                                              tag=None,
+                                              exclude_frozen_parameters=False,
+                                              lazy_mode=False):
+     """
+     Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+     ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+     via a model hub.
+
+     Args:
+         - ``checkpoint_dir``: path to the desired checkpoint folder
+         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
+         - ``exclude_frozen_parameters``: exclude frozen parameters
+         - ``lazy_mode``: get state_dict in lazy mode. It returns a dict of pseudo tensors instead of torch tensors, which is more memory efficient.
+           Convert a pseudo tensor to a torch tensor by calling ``.contiguous()``
+
+     Returns:
+         - pytorch ``state_dict``
+
+     A typical usage might be ::
+
+         from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+         # do the training and checkpoint saving
+         state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+         model = model.cpu() # move to cpu
+         model.load_state_dict(state_dict)
+         # submit to model hub or save the model to share with others
+
+     In this example the ``model`` will no longer be usable in the deepspeed context of the same
+     application. i.e. you will need to re-initialize the deepspeed engine, since
+     ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+     If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+     Note: the above usage may not work if your application doesn't have sufficient free CPU memory.
+     You may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
+     the checkpoint. Or you can load the state_dict in lazy mode ::
+
+         from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+         state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True) # not on cpu
+         for name, lazy_tensor in state_dict.items():
+             tensor = lazy_tensor.contiguous() # to cpu
+             print(name, tensor)
+             # del tensor to release memory if it is no longer in use
+     """
+     if tag is None:
+         latest_path = os.path.join(checkpoint_dir, 'latest')
+         if os.path.isfile(latest_path):
+             with open(latest_path, 'r') as fd:
+                 tag = fd.read().strip()
+         else:
+             raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+     ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+     if not os.path.isdir(ds_checkpoint_dir):
+         raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+     state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
+     if lazy_mode:
+         return state_dict
+     else:
+         return to_torch_tensor(state_dict)
+
+
+ def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
+                                                output_dir,
+                                                max_shard_size="5GB",
+                                                safe_serialization=False,
+                                                tag=None,
+                                                exclude_frozen_parameters=False):
+     """
+     Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+     loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+     Args:
+         - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+         - ``output_dir``: directory to the pytorch fp32 state_dict output files
+         - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
+         - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
+         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+         - ``exclude_frozen_parameters``: exclude frozen parameters
+     """
+
+     # Dependency pre-check
+     if safe_serialization:
+         try:
+             from safetensors.torch import save_file
+         except ImportError:
+             print('If you want to use `safe_serialization`, please `pip install safetensors`')
+             raise
+     if max_shard_size is not None:
+         try:
+             from huggingface_hub import split_torch_state_dict_into_shards
+         except ImportError:
+             print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
+             raise
+
+     # Convert zero checkpoint to state_dict
+     state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
+                                                           tag,
+                                                           exclude_frozen_parameters,
+                                                           lazy_mode=True)
+
+     # Shard the model if it is too big.
+     weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
+     if max_shard_size is not None:
+         filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
+         # a memory-efficient approach for sharding
+         empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True)
+         state_dict_split = split_torch_state_dict_into_shards(empty_state_dict,
+                                                               filename_pattern=filename_pattern,
+                                                               max_shard_size=max_shard_size)
+     else:
+         from collections import namedtuple
+         StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
+         state_dict_split = StateDictSplit(is_sharded=False,
+                                           filename_to_tensors={weights_name: list(state_dict.keys())})
+
+     # Save the model by shard
+     os.makedirs(output_dir, exist_ok=True)
+     filename_to_tensors = state_dict_split.filename_to_tensors.items()
+     for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
+         shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors}
+         shard_state_dict = to_torch_tensor(shard_state_dict)
+         output_path = os.path.join(output_dir, shard_file)
+         if safe_serialization:
+             save_file(shard_state_dict, output_path, metadata={"format": "pt"})
+         else:
+             torch.save(shard_state_dict, output_path)
+         # release the memory of the current shard
+         for tensor_name in list(shard_state_dict.keys()):
+             del state_dict[tensor_name]
+             del shard_state_dict[tensor_name]
+         del shard_state_dict
+         gc.collect()
+
+     # Save index if sharded
+     if state_dict_split.is_sharded:
+         index = {
+             "metadata": state_dict_split.metadata,
+             "weight_map": state_dict_split.tensor_to_filename,
+         }
+         save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
+         save_index_file = os.path.join(output_dir, save_index_file)
+         with open(save_index_file, "w", encoding="utf-8") as f:
+             content = json.dumps(index, indent=2, sort_keys=True) + "\n"
+             f.write(content)
+
+
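+ # Illustrative usage sketch, not part of the upstream utility; the paths below are placeholders.
+ # It converts a ZeRO checkpoint folder into sharded safetensors files from Python rather than
+ # through the CLI entry point at the bottom of this script.
+ def _example_convert_to_safetensors(checkpoint_dir="path/to/checkpoint", output_dir="path/to/output"):
+     convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
+                                                output_dir,
+                                                max_shard_size="5GB",
+                                                safe_serialization=True)
+
+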
+ def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+     """
+     1. Put the provided model on cpu
+     2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+     3. Load it into the provided model
+
+     Args:
+         - ``model``: the model object to update
+         - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+     Returns:
+         - ``model``: modified model
+
+     Make sure you have plenty of CPU memory available before you call this function. If you don't
+     have enough, use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+     conveniently placed for you in the checkpoint folder.
+
+     A typical usage might be ::
+
+         from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+         model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+         # submit to model hub or save the model to share with others
+
+     Note that once this was run, the ``model`` will no longer be usable in the deepspeed context
+     of the same application. i.e. you will need to re-initialize the deepspeed engine, since
+     ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+     """
+     logger.info(f"Extracting fp32 weights")
+     state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+     logger.info(f"Overwriting model with fp32 weights")
+     model = model.cpu()
+     model.load_state_dict(state_dict, strict=False)
+
+     return model
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+     parser.add_argument("checkpoint_dir",
+                         type=str,
+                         help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+     parser.add_argument("output_dir",
+                         type=str,
+                         help="directory to the pytorch fp32 state_dict output files "
+                         "(e.g. path/checkpoint-12-output/)")
+     parser.add_argument(
+         "--max_shard_size",
+         type=str,
+         default="5GB",
+         help="The maximum size for a checkpoint before being sharded. Checkpoint shards will then each be of size "
+         "lower than this size. If expressed as a string, needs to be digits followed by a unit (like `5MB`). "
+         "We default it to 5GB in order for models to be able to run easily on free-tier google colab instances "
+         "without CPU OOM issues.")
+     parser.add_argument(
+         "--safe_serialization",
+         default=False,
+         action='store_true',
+         help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
+     parser.add_argument("-t",
+                         "--tag",
+                         type=str,
+                         default=None,
+                         help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
+     parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
+     parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+     args = parser.parse_args()
+
+     debug = args.debug
+
+     convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
+                                                args.output_dir,
+                                                max_shard_size=args.max_shard_size,
+                                                safe_serialization=args.safe_serialization,
+                                                tag=args.tag,
+                                                exclude_frozen_parameters=args.exclude_frozen_parameters)
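+
+ # Example invocation (illustrative; the directory names below are placeholders):
+ #
+ #   python zero_to_fp32.py path/to/checkpoint_dir path/to/output_dir --safe_serialization
+ #
+ # `checkpoint_dir` must contain a `latest` file naming the tag sub-folder that holds the
+ # partitioned ZeRO states (e.g. `global_step14`), or the tag can be passed with -t/--tag.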