simon123905 committed
Commit 5d367ae · verified · 1 parent: 8b274c6

Upload folder using huggingface_hub
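The commit message above names huggingface_hub's folder upload. A minimal sketch of what such an upload step looks like, assuming an authenticated environment (HF_TOKEN set); the repo ID below is a placeholder, not taken from this commit:

python3 - <<EOF
from huggingface_hub import HfApi

api = HfApi()  # picks up HF_TOKEN from the environment
# upload_folder mirrors a local directory into a Hub repo in one commit
api.upload_folder(
    repo_id="your-username/your-repo",            # placeholder
    folder_path="llamagen-siglip-sb-block-causal",
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)
EOF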

Files changed (28)
  1. llamagen-siglip-sb-block-causal/scripts/autoregressive/extract_codes_c2i.sh +15 -0
  2. llamagen-siglip-sb-block-causal/scripts/autoregressive/extract_codes_t2i.sh +15 -0
  3. llamagen-siglip-sb-block-causal/scripts/autoregressive/sample_c2i.sh +31 -0
  4. llamagen-siglip-sb-block-causal/scripts/autoregressive/sample_t2i.sh +54 -0
  5. llamagen-siglip-sb-block-causal/scripts/autoregressive/sample_t2i_coco.sh +11 -0
  6. llamagen-siglip-sb-block-causal/scripts/autoregressive/sample_t2i_ddp.sh +54 -0
  7. llamagen-siglip-sb-block-causal/scripts/autoregressive/sample_t2i_noreverse.sh +54 -0
  8. llamagen-siglip-sb-block-causal/scripts/autoregressive/sample_t2i_parti.sh +11 -0
  9. llamagen-siglip-sb-block-causal/scripts/autoregressive/train_c2i.sh +20 -0
  10. llamagen-siglip-sb-block-causal/scripts/autoregressive/train_c2i_fsdp.sh +113 -0
  11. llamagen-siglip-sb-block-causal/scripts/autoregressive/train_t2i.sh +25 -0
  12. llamagen-siglip-sb-block-causal/scripts/autoregressive/train_t2i_multi_node.sh +160 -0
  13. llamagen-siglip-sb-block-causal/scripts/autoregressive/train_t2i_multi_node_absPos.sh +164 -0
  14. llamagen-siglip-sb-block-causal/scripts/autoregressive/train_t2i_multi_node_overfit.sh +164 -0
  15. llamagen-siglip-sb-block-causal/scripts/autoregressive/train_t2i_stage1.sh +13 -0
  16. llamagen-siglip-sb-block-causal/scripts/autoregressive/train_t2i_stage2.sh +14 -0
  17. llamagen-siglip-sb-block-causal/scripts/language/extract_flan_t5_feat_laion_coco_stage1.sh +11 -0
  18. llamagen-siglip-sb-block-causal/scripts/language/extract_flan_t5_feat_stage2.sh +10 -0
  19. llamagen-siglip-sb-block-causal/scripts/language/extract_flan_t5_feat_trunc_stage2.sh +11 -0
  20. llamagen-siglip-sb-block-causal/scripts/language/extract_flan_t5_imagenet.sh +9 -0
  21. llamagen-siglip-sb-block-causal/scripts/tokenizer/reconstruction_consistency_decoder.sh +8 -0
  22. llamagen-siglip-sb-block-causal/scripts/tokenizer/reconstruction_vae.sh +8 -0
  23. llamagen-siglip-sb-block-causal/scripts/tokenizer/reconstruction_vq.sh +8 -0
  24. llamagen-siglip-sb-block-causal/scripts/tokenizer/reconstruction_vqgan.sh +8 -0
  25. llamagen-siglip-sb-block-causal/scripts/tokenizer/train_vq.sh +7 -0
  26. llamagen-siglip-sb-block-causal/scripts/tokenizer/train_vq_finetune.sh +15 -0
  27. llamagen-siglip-sb-block-causal/scripts/tokenizer/train_vq_finetune_continue.sh +15 -0
  28. llamagen-siglip-sb-block-causal/scripts/tokenizer/val.sh +8 -0
llamagen-siglip-sb-block-causal/scripts/autoregressive/extract_codes_c2i.sh ADDED
@@ -0,0 +1,15 @@
+ #!/bin/bash
+ set -x
+
+ torchrun \
+ --nnodes=1 --nproc_per_node=8 --node_rank=0 \
+ --master_port=12335 \
+ autoregressive/train/extract_codes_c2i.py \
+ --yml-path /home/ma-user/work/p84402465/llamagen-finetune-main/config/selftok_sd3_E31-512_modified.yml \
+ --vq-ckpt /home/ma-user/work/w84402926/SelfTok/llamagen/llamagen-finetune-main/weight/iter_237999.pth \
+ --sd3-pretrained /home/ma-user/work/w84402926/SelfTok/llamagen/llamagen-finetune-main/weight/sd3_medium.ckpt \
+ --data-path /home/ma-user/work/p84402465/imagenet \
+ --code-path /cache/code-imagenet-E31 \
+ --image-size 512
+
+
llamagen-siglip-sb-block-causal/scripts/autoregressive/extract_codes_t2i.sh ADDED
@@ -0,0 +1,15 @@
+ #!/bin/bash
+ set -x
+
+ torchrun \
+ --nnodes=1 --nproc_per_node=8 --node_rank=0 \
+ --master_port=12335 \
+ autoregressive/train/extract_codes_t2i.py \
+ --yml-path /home/ma-user/work/p84402465/llamagen-finetune-main/config/selftok_sd3_E31-512_modified.yml \
+ --vq-ckpt /home/ma-user/work/w84402926/SelfTok/llamagen/llamagen-finetune-main/weight/iter_237999.pth \
+ --sd3-pretrained /home/ma-user/work/w84402926/SelfTok/llamagen/llamagen-finetune-main/weight/sd3_medium.ckpt \
+ --data-path /cache/imagenet \
+ --code-path /cache/code-imagenet-E31_cap \
+ --image-size 512
+
+
llamagen-siglip-sb-block-causal/scripts/autoregressive/sample_c2i.sh ADDED
@@ -0,0 +1,31 @@
+ # #!/bin/bash
+ # set -x
+
+ # torchrun \
+ # --nnodes=1 --nproc_per_node=8 --node_rank=0 \
+ # --master_port=12345 \
+ # autoregressive/sample/sample_c2i_ddp.py \
+ # --vq-ckpt ./pretrained_models/vq_ds16_c2i.pt \
+ # "$@"
+
+ # #!/bin/bash
+ # set -x
+
+ torchrun \
+ --nnodes=1 --nproc_per_node=1 --node_rank=0 \
+ --master_port=12345 \
+ autoregressive/sample/sample_c2i.py \
+ --yml-path="/home/ma-user/work/p84402465/SelftokPipeline-master/configs/renderer/selftok_sd3_E31-512_renderer.yml" \
+ --vq-ckpt="/home/ma-user/work/p84402465/cache/models/pretrain_tokenizer_iter_331999.pth" \
+ --save-path="./sample_c2i_240k_cfg10_largebs.png" \
+ --gpt-model="GPT-XL" \
+ --gpt-ckpt="/home/ma-user/work/p84402465/cache/llamagen-E31-0811/checkpoint-30000/consolidated.pth" \
+ --sd3-pretrained="/home/ma-user/work/w84402926/SelfTok/llamagen/llamagen-finetune-main/weight/sd3_medium.ckpt" \
+ --gpt-type="c2i" \
+ --codebook-size=16384 \
+ --image-size=512 \
+ --cfg-scale 10 \
+ --top-p 1.0 \
+ --top-k 0 \
+ --temperature 1.0 \
+ --from-fsdp
llamagen-siglip-sb-block-causal/scripts/autoregressive/sample_t2i.sh ADDED
@@ -0,0 +1,54 @@
+ #!/bin/bash
+ set -x
+
+ GPUS=1
+
+ CFG=3.0
+
+ MODEL_S3_PATH="s3://bucket-5125-guiyang/selftok/outputs/llamagen_selftok/Selftok-Llamagen-t2i-vit-sb-v16-blockcausal/checkpoint-505000"
+ MOX_OVERWRITE=False
+
+ parent_dir=$(basename "$(dirname "$MODEL_S3_PATH")")
+ base_dir=$(basename "$MODEL_S3_PATH")
+ local_model_path="/cache/ckpts/${parent_dir}/${base_dir}"
+
+ echo "[INFO] model parent_dir = $parent_dir"
+ echo "[INFO] model base_dir = $base_dir"
+ echo "[INFO] local_model_path = $local_model_path"
+
+ python3 - <<EOF
+ import os
+ import moxing as mox
+
+ model_s3 = "$MODEL_S3_PATH"
+ local_path = "$local_model_path"
+ overwrite = $MOX_OVERWRITE
+
+ if not os.path.exists(local_path) or overwrite:
+     os.makedirs(os.path.dirname(local_path), exist_ok=True)
+     print(f"[COPY] {model_s3} -> {local_path}")
+     mox.file.copy_parallel(model_s3, local_path)
+     print("[COPY DONE]")
+ else:
+     print(f"[SKIP] local model already exists: {local_path}")
+ EOF
+
+ torchrun \
+ --nnodes=1 --nproc_per_node=$GPUS --node_rank=0 \
+ --master_port=12346 \
+ autoregressive/sample/sample_t2i.py \
+ --yml-path="/home/ma-user/work/z84399568/selftok_sd3_vit_spatial_block_512res_eval_fsq/configs/mimo/selftok/spatial-block/v16.yml" \
+ --vq-ckpt="/home/ma-user/work/z84399568/selftok_sd3_vit_spatial_block_512res_eval_fsq/iter_155999.pth" \
+ --gpt-model="GPT-XL" \
+ --gpt-ckpt=$local_model_path \
+ --save-image="./results/imagenet_siglip2/${parent_dir}-${base_dir}-cfg-${CFG}" \
+ --sd3-pretrained="/home/ma-user/work/selftok/models/sd3_medium.ckpt" \
+ --t5-path="/home/ma-user/work/selftok/models" \
+ --gpt-type="t2i" \
+ --codebook-size=19683 \
+ --image-size=512 \
+ --cfg-scale $CFG \
+ --top-p 1.0 \
+ --top-k 0 \
+ --temperature 1.0 \
+
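A note on the S3-sync block above: because the heredoc delimiter is unquoted (<<EOF, not <<'EOF'), bash substitutes $MODEL_S3_PATH, $local_model_path, and $MOX_OVERWRITE into the Python source before it runs, so MOX_OVERWRITE=False arrives as the Python literal False. A minimal standalone sketch of the pattern (variable names here are illustrative):

#!/bin/bash
FLAG=False                 # bash string that doubles as a Python literal
python3 - <<EOF            # unquoted EOF: bash expands variables first
flag = $FLAG               # Python actually sees: flag = False
print("flag is", flag)
EOF
# With <<'EOF' instead, the text "$FLAG" would reach Python verbatim
# and fail to parse, so the unquoted form is essential here.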
llamagen-siglip-sb-block-causal/scripts/autoregressive/sample_t2i_coco.sh ADDED
@@ -0,0 +1,11 @@
+ #!/bin/bash
+ set -x
+
+ torchrun \
+ --nnodes=1 --nproc_per_node=8 --node_rank=0 \
+ --master_port=12346 \
+ autoregressive/sample/sample_t2i_ddp.py \
+ --prompt-csv evaluations/t2i/coco_captions.csv \
+ --sample-dir samples_coco \
+ --vq-ckpt ./pretrained_models/vq_ds16_t2i.pt \
+ "$@"
llamagen-siglip-sb-block-causal/scripts/autoregressive/sample_t2i_ddp.sh ADDED
@@ -0,0 +1,54 @@
+ #!/bin/bash
+ set -x
+
+ GPUS=8
+
+ CFG=3.0
+
+ MODEL_S3_PATH="s3://bucket-5125-guiyang/selftok/outputs/llamagen_selftok/Selftok-Llamagen-t2i-vit-sb-v16-blockcausal/checkpoint-485000"
+ MOX_OVERWRITE=False
+
+ parent_dir=$(basename "$(dirname "$MODEL_S3_PATH")")
+ base_dir=$(basename "$MODEL_S3_PATH")
+ local_model_path="/cache/ckpts/${parent_dir}/${base_dir}"
+
+ echo "[INFO] model parent_dir = $parent_dir"
+ echo "[INFO] model base_dir = $base_dir"
+ echo "[INFO] local_model_path = $local_model_path"
+
+ python3 - <<EOF
+ import os
+ import moxing as mox
+
+ model_s3 = "$MODEL_S3_PATH"
+ local_path = "$local_model_path"
+ overwrite = $MOX_OVERWRITE
+
+ if not os.path.exists(local_path) or overwrite:
+     os.makedirs(os.path.dirname(local_path), exist_ok=True)
+     print(f"[COPY] {model_s3} -> {local_path}")
+     mox.file.copy_parallel(model_s3, local_path)
+     print("[COPY DONE]")
+ else:
+     print(f"[SKIP] local model already exists: {local_path}")
+ EOF
+
+ torchrun \
+ --nnodes=1 --nproc_per_node=$GPUS --node_rank=0 \
+ --master_port=12346 \
+ autoregressive/sample/sample_t2i_ddp.py \
+ --yml-path="/home/ma-user/work/z84399568/selftok_sd3_vit_spatial_block_512res_eval_fsq/configs/mimo/selftok/spatial-block/v16.yml" \
+ --vq-ckpt="/home/ma-user/work/z84399568/selftok_sd3_vit_spatial_block_512res_eval_fsq/iter_155999.pth" \
+ --gpt-model="GPT-XL" \
+ --prompt-csv="/home/ma-user/work/p84402465/imagenet_process/imagenet_val_captions.csv" \
+ --gpt-ckpt=$local_model_path \
+ --sd3-pretrained="/home/ma-user/work/selftok/models/sd3_medium.ckpt" \
+ --t5-path="/home/ma-user/work/selftok/models" \
+ --gpt-type="t2i" \
+ --codebook-size=19683 \
+ --image-size=512 \
+ --cfg-scale $CFG \
+ --top-p 1.0 \
+ --top-k 0 \
+ --temperature 1.0 \
+
llamagen-siglip-sb-block-causal/scripts/autoregressive/sample_t2i_noreverse.sh ADDED
@@ -0,0 +1,54 @@
+ #!/bin/bash
+ set -x
+
+ GPUS=1
+
+ CFG=3.0
+
+ MODEL_S3_PATH="s3://bucket-5125-guiyang/selftok/outputs/llamagen_selftok/Selftok-Llamagen-t2i-SigLip-noreverse/checkpoint-330000"
+ MOX_OVERWRITE=False
+
+ parent_dir=$(basename "$(dirname "$MODEL_S3_PATH")")
+ base_dir=$(basename "$MODEL_S3_PATH")
+ local_model_path="/cache/ckpts/${parent_dir}/${base_dir}"
+
+ echo "[INFO] model parent_dir = $parent_dir"
+ echo "[INFO] model base_dir = $base_dir"
+ echo "[INFO] local_model_path = $local_model_path"
+
+ python3 - <<EOF
+ import os
+ import moxing as mox
+
+ model_s3 = "$MODEL_S3_PATH"
+ local_path = "$local_model_path"
+ overwrite = $MOX_OVERWRITE
+
+ if not os.path.exists(local_path) or overwrite:
+     os.makedirs(os.path.dirname(local_path), exist_ok=True)
+     print(f"[COPY] {model_s3} -> {local_path}")
+     mox.file.copy_parallel(model_s3, local_path)
+     print("[COPY DONE]")
+ else:
+     print(f"[SKIP] local model already exists: {local_path}")
+ EOF
+
+ torchrun \
+ --nnodes=1 --nproc_per_node=$GPUS --node_rank=0 \
+ --master_port=12346 \
+ autoregressive/sample/sample_t2i_noreverse.py \
+ --yml-path="/home/ma-user/work/z84378256/Selftok-o-inference-siglip2/mimogpt/configs/sd3-siglip2/512-fsq.yml" \
+ --vq-ckpt="/home/ma-user/work/z84378256/model/selftok-tokenizer-vit-block-stage2/iter_123999.pth" \
+ --gpt-model="GPT-XL" \
+ --gpt-ckpt=$local_model_path \
+ --save-image="./results/imagenet_siglip2_noreverse/${parent_dir}-${base_dir}-cfg-${CFG}" \
+ --sd3-pretrained="/home/ma-user/work/selftok/models/sd3_medium.ckpt" \
+ --t5-path="/home/ma-user/work/selftok/models" \
+ --gpt-type="t2i" \
+ --codebook-size=19683 \
+ --image-size=512 \
+ --cfg-scale $CFG \
+ --top-p 1.0 \
+ --top-k 0 \
+ --temperature 1.0 \
+
llamagen-siglip-sb-block-causal/scripts/autoregressive/sample_t2i_parti.sh ADDED
@@ -0,0 +1,11 @@
+ #!/bin/bash
+ set -x
+
+ torchrun \
+ --nnodes=1 --nproc_per_node=8 --node_rank=0 \
+ --master_port=12347 \
+ autoregressive/sample/sample_t2i_ddp.py \
+ --prompt-csv evaluations/t2i/PartiPrompts.tsv \
+ --sample-dir samples_parti \
+ --vq-ckpt ./pretrained_models/vq_ds16_t2i.pt \
+ "$@"
llamagen-siglip-sb-block-causal/scripts/autoregressive/train_c2i.sh ADDED
@@ -0,0 +1,20 @@
+ #!/bin/bash
+ set -x
+
+ export PYTHONPATH="${PWD}:${PYTHONPATH}"
+ # export WANDB_NAME="run_$(date +%Y%m%d_%H%M%S)"
+ # export WANDB_PROJECT="c2i_selftok"
+ # export WANDB_API_KEY="0b30f581d65172381c1f1a45f928210cab80f1de"
+
+
+ torchrun --nnodes=1 --nproc_per_node=8 --node_rank=0 \
+ --master_port=20045 \
+ autoregressive/train/train_c2i.py \
+ --code-path /home/ma-user/work/w84402926/SelfTok/llamagen/code-imagenet-E31 \
+ --cloud-save-path ./results_ckpt_e31 \
+ --gpt-model GPT-XL --gpt-type c2i \
+ --vocab-size 16384 \
+ --image-size 512 \
+ --global-batch-size 8 \
+ --no-local-save \
+ --no-wandb
llamagen-siglip-sb-block-causal/scripts/autoregressive/train_c2i_fsdp.sh ADDED
@@ -0,0 +1,113 @@
+ # Configure CANN-related environment variables
+ CANN_INSTALL_PATH_CONF='/etc/Ascend/ascend_cann_install.info'
+ sudo cat /etc/Ascend/ascend_cann_install.info
+ DEFAULT_CANN_INSTALL_PATH="/usr/local/Ascend/"
+ CANN_INSTALL_PATH="/usr/local/Ascend/"
+
+ if [ -d ${CANN_INSTALL_PATH}/ascend-toolkit/latest ]; then
+     cat ${CANN_INSTALL_PATH}/ascend-toolkit/set_env.sh
+     source ${CANN_INSTALL_PATH}/ascend-toolkit/set_env.sh
+ else
+     cat ${CANN_INSTALL_PATH}/nnae/set_env.sh
+     source ${CANN_INSTALL_PATH}/nnae/set_env.sh
+ fi
+
+ # Add dependency libraries to the loader path
+ export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/openblas/lib
+ export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib/
+ export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/lib64/
+ export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/lib/
+ export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/lib/aarch64-linux-gnu
+
+ # Custom environment variables
+ export HCCL_WHITELIST_DISABLE=1
+
+ # log
+ export ASCEND_SLOG_PRINT_TO_STDOUT=0 # print logs to stdout (optional)
+ export ASCEND_GLOBAL_LOG_LEVEL=3 # log level: 1 = INFO, 3 = ERROR
+ export ASCEND_GLOBAL_EVENT_ENABLE=0 # event logging disabled by default
+ export ASCEND_LAUNCH_BLOCKING=0 # operator launch mode: 1 = synchronous, 0 = asynchronous
+
+ # System default environment variables; do not modify
+ MASTER_HOST="$VC_WORKER_HOSTS"
+ MASTER_ADDR="${VC_WORKER_HOSTS%%,*}"
+ NNODES="$MA_NUM_HOSTS"
+ NODE_RANK="$VC_TASK_INDEX"
+ NGPUS_PER_NODE="$MA_NUM_GPUS"
+ NUM_PROCESSES=$(($NGPUS_PER_NODE * $NNODES))
+
+ echo "num_machines: ${NNODES}" >> ${DEEPSPEED_CONFIG_PATH}
+ echo "num_processes: ${NUM_PROCESSES}" >> ${DEEPSPEED_CONFIG_PATH}
+
+ echo "------> system config <------"
+ echo "VC_WORKER_HOSTS: ${VC_WORKER_HOSTS}"
+ echo "MASTER_HOST: ${MASTER_HOST}"
+ echo "MASTER_ADDR: ${MASTER_ADDR}"
+ echo "NNODES: ${NNODES}"
+ echo "NODE_RANK: ${NODE_RANK}"
+ echo "NGPUS_PER_NODE: ${NGPUS_PER_NODE}"
+ echo "NUM_PROCESSES: ${NUM_PROCESSES}"
+ echo "${MA_JOB_DIR}"
+ echo "------> <------"
+
+ # https://www.hiascend.com/document/detail/zh/canncommercial/63RC2/modeldevpt/ptmigr/ptmigr_0022.html
+ export HCCL_WHITELIST_DISABLE=1
+
+ if [[ $NODE_RANK == 0 ]]; then
+     EXT_ARGS="--rdzv_conf=is_host=1"
+ else
+     EXT_ARGS=""
+ fi
+
+ # set npu plog env, https://3ms.huawei.com/hi/group/3225441/wiki_6402466.html
+ ma_vj_name=`echo ${MA_VJ_NAME} | sed 's:ma-job:modelarts-job:g'`
+ task_name="worker-${VC_TASK_INDEX}"
+ task_plog_path=${MA_LOG_DIR}/${ma_vj_name}/${task_name}
+
+ mkdir -p ${task_plog_path}
+ export ASCEND_PROCESS_LOG_PATH=${task_plog_path}
+
+ echo "plog path: ${ASCEND_PROCESS_LOG_PATH}"
+
+ npu-smi info
+ pip install wandb
+
+ # set hccl timeout time in seconds
+ export HCCL_CONNECT_TIMEOUT=7200
+
+ export OMP_NUM_THREADS=4
+ export NUMEXPR_MAX_THREADS=4
+
+
+ echo "------> pwd <------"
+ pwd
+ echo "------> files <------"
+ ls
+
+ export ROOT='/cache'
+ python tools/mox_copy.py s3://bucket-2588-wuhu/code/p84402465/code-imagenet-E31/ ${ROOT}/code-imagenet-E31/
+
+
+ export MASTER_PORT="${MASTER_PORT:-12335}"
+
+ CMD="torchrun \
+ --nnodes=${NNODES} \
+ --nproc_per_node=${NGPUS_PER_NODE} \
+ --node_rank=${NODE_RANK} \
+ --master_addr=${MASTER_ADDR} \
+ --master_port=${MASTER_PORT} \
+ autoregressive/train/train_c2i_fsdp.py \
+ --code-path /cache/code-imagenet-E31 \
+ --cloud-save-path results_ckpt_e31_rope \
+ --train-url s3://bucket-2588-wuhu/outputs/p84402465/llamagen-E31-0811 \
+ --gpt-model GPT-XL \
+ --gpt-type c2i \
+ --vocab-size 16384 \
+ --image-size 512 \
+ --global-batch-size 1024 \
+ --no-local-save \
+ --no-wandb"
+
+ echo "------> CMD <------"
+ echo "$CMD"
+ eval "$CMD"
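Stripped of the ModelArts plumbing, the launch above reduces to a standard multi-node torchrun invocation. A minimal sketch with illustrative values for a 2-node, 8-device run (on the cluster these values come from VC_WORKER_HOSTS, MA_NUM_HOSTS, VC_TASK_INDEX, and MA_NUM_GPUS, as in the script):

#!/bin/bash
# Illustrative values; not taken from the cluster configuration above.
NNODES=2
NGPUS_PER_NODE=8
NODE_RANK=0                 # 1 on the second node
MASTER_ADDR=10.0.0.1        # first host in VC_WORKER_HOSTS
MASTER_PORT=12335

torchrun \
    --nnodes=${NNODES} \
    --nproc_per_node=${NGPUS_PER_NODE} \
    --node_rank=${NODE_RANK} \
    --master_addr=${MASTER_ADDR} \
    --master_port=${MASTER_PORT} \
    autoregressive/train/train_c2i_fsdp.py \
    --code-path /cache/code-imagenet-E31 \
    --gpt-model GPT-XL --gpt-type c2i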
llamagen-siglip-sb-block-causal/scripts/autoregressive/train_t2i.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -x
+ export PYTHONPATH="${PWD}:${PYTHONPATH}"
+ export WANDB_NAME="run_$(date +%Y%m%d_%H%M%S)"
+ export WANDB_PROJECT="c2i_selftok"
+ export WANDB_API_KEY="0b30f581d65172381c1f1a45f928210cab80f1de"
+
+ torchrun --nnodes=1 --nproc_per_node=1 --node_rank=0 \
+ --master_port=20045 \
+ autoregressive/train/train_t2i.py \
+ --image-token-path /home/ma-user/work/selftok/imagenet/imagenet_vit_sb_v16_selftok_tokens \
+ --text-token-path /home/ma-user/work/selftok/imagenet/imagenet_cap512_cap_codes \
+ --gpt-model GPT-XL --gpt-type t2i \
+ --dataset t2i_code \
+ --cls-token-num 256 \
+ --vocab-size 16384 \
+ --image-size 512 \
+ --global-batch-size 8 \
+ --ckpt-every 500 \
+ --gradient-accumulation-steps 2 \
+ --cloud-save-path /cache/results_E31_t2i \
+ --s3-save-path s3://bucket-5125-guiyang/selftok/outputs/llamagen/Llamagen-t2i-SigLip \
+ --no-local-save \
+ --no-wandb
+
llamagen-siglip-sb-block-causal/scripts/autoregressive/train_t2i_multi_node.sh ADDED
@@ -0,0 +1,160 @@
+ # #!/bin/bash
+ # set -x
+ # export PYTHONPATH="${PWD}:${PYTHONPATH}"
+ # export WANDB_NAME="run_$(date +%Y%m%d_%H%M%S)"
+ # export WANDB_PROJECT="c2i_selftok"
+ # export WANDB_API_KEY="0b30f581d65172381c1f1a45f928210cab80f1de"
+
+ # torchrun --nnodes=1 --nproc_per_node=8 --node_rank=0 \
+ # --master_port=20045 \
+ # autoregressive/train/train_t2i.py \
+ # --image-token-path /cache/code-imagenet-E31_cap/imagenet_cap512_image_codes \
+ # --text-token-path /cache/code-imagenet-E31_cap/imagenet_cap512_cap_codes \
+ # --gpt-model GPT-XL --gpt-type t2i \
+ # --dataset t2i_code \
+ # --cls-token-num 256 \
+ # --vocab-size 16384 \
+ # --image-size 512 \
+ # --global-batch-size 512 \
+ # --gradient-accumulation-steps 2 \
+ # --cloud-save-path /cache/results_E31_t2i \
+ # --no-local-save \
+ # --no-wandb
+
+
+
+
+
+ # Configure CANN-related environment variables
+ CANN_INSTALL_PATH_CONF='/etc/Ascend/ascend_cann_install.info'
+ sudo cat /etc/Ascend/ascend_cann_install.info
+ DEFAULT_CANN_INSTALL_PATH="/usr/local/Ascend/"
+ CANN_INSTALL_PATH="/usr/local/Ascend/"
+
+ if [ -d ${CANN_INSTALL_PATH}/ascend-toolkit/latest ]; then
+     cat ${CANN_INSTALL_PATH}/ascend-toolkit/set_env.sh
+     source ${CANN_INSTALL_PATH}/ascend-toolkit/set_env.sh
+ else
+     cat ${CANN_INSTALL_PATH}/nnae/set_env.sh
+     source ${CANN_INSTALL_PATH}/nnae/set_env.sh
+ fi
+
+ # Add dependency libraries to the loader path
+ export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/openblas/lib
+ export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib/
+ export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/lib64/
+ export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/lib/
+ export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/lib/aarch64-linux-gnu
+
+ # Custom environment variables
+ export HCCL_WHITELIST_DISABLE=1
+
+ # log
+ export ASCEND_SLOG_PRINT_TO_STDOUT=0 # print logs to stdout (optional)
+ export ASCEND_GLOBAL_LOG_LEVEL=3 # log level: 1 = INFO, 3 = ERROR
+ export ASCEND_GLOBAL_EVENT_ENABLE=0 # event logging disabled by default
+ export ASCEND_LAUNCH_BLOCKING=0 # operator launch mode: 1 = synchronous, 0 = asynchronous
+
+ # System default environment variables; do not modify
+ MASTER_HOST="$VC_WORKER_HOSTS"
+ MASTER_ADDR="${VC_WORKER_HOSTS%%,*}"
+ NNODES="$MA_NUM_HOSTS"
+ NODE_RANK="$VC_TASK_INDEX"
+ NGPUS_PER_NODE="$MA_NUM_GPUS"
+ NUM_PROCESSES=$(($NGPUS_PER_NODE * $NNODES))
+
+ echo "num_machines: ${NNODES}" >> ${DEEPSPEED_CONFIG_PATH}
+ echo "num_processes: ${NUM_PROCESSES}" >> ${DEEPSPEED_CONFIG_PATH}
+
+ echo "------> system config <------"
+ echo "VC_WORKER_HOSTS: ${VC_WORKER_HOSTS}"
+ echo "MASTER_HOST: ${MASTER_HOST}"
+ echo "MASTER_ADDR: ${MASTER_ADDR}"
+ echo "NNODES: ${NNODES}"
+ echo "NODE_RANK: ${NODE_RANK}"
+ echo "NGPUS_PER_NODE: ${NGPUS_PER_NODE}"
+ echo "NUM_PROCESSES: ${NUM_PROCESSES}"
+ echo "${MA_JOB_DIR}"
+ echo "------> <------"
+
+ # https://www.hiascend.com/document/detail/zh/canncommercial/63RC2/modeldevpt/ptmigr/ptmigr_0022.html
+ export HCCL_WHITELIST_DISABLE=1
+
+ if [[ $NODE_RANK == 0 ]]; then
+     EXT_ARGS="--rdzv_conf=is_host=1"
+ else
+     EXT_ARGS=""
+ fi
+
+ # set npu plog env, https://3ms.huawei.com/hi/group/3225441/wiki_6402466.html
+ ma_vj_name=`echo ${MA_VJ_NAME} | sed 's:ma-job:modelarts-job:g'`
+ task_name="worker-${VC_TASK_INDEX}"
+ task_plog_path=${MA_LOG_DIR}/${ma_vj_name}/${task_name}
+
+ mkdir -p ${task_plog_path}
+ export ASCEND_PROCESS_LOG_PATH=${task_plog_path}
+
+ echo "plog path: ${ASCEND_PROCESS_LOG_PATH}"
+
+ npu-smi info
+ pip install wandb
+
+ # set hccl timeout time in seconds
+ export HCCL_CONNECT_TIMEOUT=7200
+
+ export OMP_NUM_THREADS=4
+ export NUMEXPR_MAX_THREADS=4
+
+
+ echo "------> pwd <------"
+ pwd
+ echo "------> files <------"
+ ls
+
+ export ROOT='/cache'
+ # python tools/mox_copy.py s3://bucket-2588-wuhu/code/p84402465/code-imagenet-E31/ ${ROOT}/code-imagenet-E31/
+
+
+ export MASTER_PORT="${MASTER_PORT:-12335}"
+
+ CMD="torchrun \
+ --nnodes=${NNODES} \
+ --nproc_per_node=${NGPUS_PER_NODE} \
+ --node_rank=${NODE_RANK} \
+ --master_addr=${MASTER_ADDR} \
+ --master_port=${MASTER_PORT} \
+ autoregressive/train/train_t2i.py \
+ --image-token-path /home/ma-user/modelarts/user-job-dir/sfs/selftok/imagenet/imagenet_cap512_cap_codes/imagenet_cap512_image_codes \
+ --text-token-path /home/ma-user/modelarts/user-job-dir/sfs/selftok/imagenet/code-imagenet-E31_cap/imagenet_cap512_cap_codes \
+ --gpt-model GPT-XL --gpt-type t2i \
+ --dataset t2i_code \
+ --cls-token-num 256 \
+ --vocab-size 16384 \
+ --image-size 512 \
+ --global-batch-size 512 \
+ --ckpt-every 5000 \
+ --gradient-accumulation-steps 2 \
+ --cloud-save-path /cache/results_Selftok-Llamagen-t2i-RelPosEmb \
+ --s3-save-path s3://bucket-2588-wuhu/code/w84402926/experiment/llamagen_selftok/Selftok-Llamagen-t2i-RelPosEmb \
+ --no-local-save \
+ --no-wandb"
+
+ # torchrun --nnodes=1 --nproc_per_node=8 --node_rank=0 \
+ # --master_port=20045 \
+ # autoregressive/train/train_t2i.py \
+ # --image-token-path /cache/code-imagenet-E31_cap/imagenet_cap512_image_codes \
+ # --text-token-path /cache/code-imagenet-E31_cap/imagenet_cap512_cap_codes \
+ # --gpt-model GPT-XL --gpt-type t2i \
+ # --dataset t2i_code \
+ # --cls-token-num 256 \
+ # --vocab-size 16384 \
+ # --image-size 512 \
+ # --global-batch-size 512 \
+ # --gradient-accumulation-steps 2 \
+ # --cloud-save-path /cache/results_E31_t2i \
+ # --no-local-save \
+ # --no-wandb
+
+ echo "------> CMD <------"
+ echo "$CMD"
+ eval "$CMD"
llamagen-siglip-sb-block-causal/scripts/autoregressive/train_t2i_multi_node_absPos.sh ADDED
@@ -0,0 +1,164 @@
+ # #!/bin/bash
+ # set -x
+ # export PYTHONPATH="${PWD}:${PYTHONPATH}"
+ # export WANDB_NAME="run_$(date +%Y%m%d_%H%M%S)"
+ # export WANDB_PROJECT="c2i_selftok"
+ # export WANDB_API_KEY="0b30f581d65172381c1f1a45f928210cab80f1de"
+
+ # torchrun --nnodes=1 --nproc_per_node=8 --node_rank=0 \
+ # --master_port=20045 \
+ # autoregressive/train/train_t2i.py \
+ # --image-token-path /cache/code-imagenet-E31_cap/imagenet_cap512_image_codes \
+ # --text-token-path /cache/code-imagenet-E31_cap/imagenet_cap512_cap_codes \
+ # --gpt-model GPT-XL --gpt-type t2i \
+ # --dataset t2i_code \
+ # --cls-token-num 256 \
+ # --vocab-size 16384 \
+ # --image-size 512 \
+ # --global-batch-size 512 \
+ # --gradient-accumulation-steps 2 \
+ # --cloud-save-path /cache/results_E31_t2i \
+ # --no-local-save \
+ # --no-wandb
+
+
+
+
+
+ # Configure CANN-related environment variables
+ CANN_INSTALL_PATH_CONF='/etc/Ascend/ascend_cann_install.info'
+ sudo cat /etc/Ascend/ascend_cann_install.info
+ DEFAULT_CANN_INSTALL_PATH="/usr/local/Ascend/"
+ CANN_INSTALL_PATH="/usr/local/Ascend/"
+
+ if [ -d ${CANN_INSTALL_PATH}/ascend-toolkit/latest ]; then
+     cat ${CANN_INSTALL_PATH}/ascend-toolkit/set_env.sh
+     source ${CANN_INSTALL_PATH}/ascend-toolkit/set_env.sh
+ else
+     cat ${CANN_INSTALL_PATH}/nnae/set_env.sh
+     source ${CANN_INSTALL_PATH}/nnae/set_env.sh
+ fi
+
+ # Add dependency libraries to the loader path
+ export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/openblas/lib
+ export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib/
+ export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/lib64/
+ export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/lib/
+ export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/lib/aarch64-linux-gnu
+
+ # Custom environment variables
+ export HCCL_WHITELIST_DISABLE=1
+
+ # log
+ export ASCEND_SLOG_PRINT_TO_STDOUT=0 # print logs to stdout (optional)
+ export ASCEND_GLOBAL_LOG_LEVEL=3 # log level: 1 = INFO, 3 = ERROR
+ export ASCEND_GLOBAL_EVENT_ENABLE=0 # event logging disabled by default
+ export ASCEND_LAUNCH_BLOCKING=0 # operator launch mode: 1 = synchronous, 0 = asynchronous
+
+ # System default environment variables; do not modify
+ MASTER_HOST="$VC_WORKER_HOSTS"
+ MASTER_ADDR="${VC_WORKER_HOSTS%%,*}"
+ NNODES="$MA_NUM_HOSTS"
+ NODE_RANK="$VC_TASK_INDEX"
+ NGPUS_PER_NODE="$MA_NUM_GPUS"
+ NUM_PROCESSES=$(($NGPUS_PER_NODE * $NNODES))
+
+ echo "num_machines: ${NNODES}" >> ${DEEPSPEED_CONFIG_PATH}
+ echo "num_processes: ${NUM_PROCESSES}" >> ${DEEPSPEED_CONFIG_PATH}
+
+ echo "------> system config <------"
+ echo "VC_WORKER_HOSTS: ${VC_WORKER_HOSTS}"
+ echo "MASTER_HOST: ${MASTER_HOST}"
+ echo "MASTER_ADDR: ${MASTER_ADDR}"
+ echo "NNODES: ${NNODES}"
+ echo "NODE_RANK: ${NODE_RANK}"
+ echo "NGPUS_PER_NODE: ${NGPUS_PER_NODE}"
+ echo "NUM_PROCESSES: ${NUM_PROCESSES}"
+ echo "${MA_JOB_DIR}"
+ echo "------> <------"
+
+ # https://www.hiascend.com/document/detail/zh/canncommercial/63RC2/modeldevpt/ptmigr/ptmigr_0022.html
+ export HCCL_WHITELIST_DISABLE=1
+
+ if [[ $NODE_RANK == 0 ]]; then
+     EXT_ARGS="--rdzv_conf=is_host=1"
+ else
+     EXT_ARGS=""
+ fi
+
+ # set npu plog env, https://3ms.huawei.com/hi/group/3225441/wiki_6402466.html
+ ma_vj_name=`echo ${MA_VJ_NAME} | sed 's:ma-job:modelarts-job:g'`
+ task_name="worker-${VC_TASK_INDEX}"
+ task_plog_path=${MA_LOG_DIR}/${ma_vj_name}/${task_name}
+
+ mkdir -p ${task_plog_path}
+ export ASCEND_PROCESS_LOG_PATH=${task_plog_path}
+
+ echo "plog path: ${ASCEND_PROCESS_LOG_PATH}"
+
+ npu-smi info
+ pip install wandb
+ pip install einx
+ pip install fairscale
+ pip install lpips
+ pip install diffusers==0.31.0
+
+ # set hccl timeout time in seconds
+ export HCCL_CONNECT_TIMEOUT=7200
+
+ export OMP_NUM_THREADS=4
+ export NUMEXPR_MAX_THREADS=4
+
+
+ echo "------> pwd <------"
+ pwd
+ echo "------> files <------"
+ ls
+
+ export ROOT='/cache'
+ # python tools/mox_copy.py s3://bucket-2588-wuhu/code/p84402465/code-imagenet-E31/ ${ROOT}/code-imagenet-E31/
+
+
+ export MASTER_PORT="${MASTER_PORT:-12335}"
+
+ CMD="torchrun \
+ --nnodes=${NNODES} \
+ --nproc_per_node=${NGPUS_PER_NODE} \
+ --node_rank=${NODE_RANK} \
+ --master_addr=${MASTER_ADDR} \
+ --master_port=${MASTER_PORT} \
+ autoregressive/train/train_t2i.py \
+ --image-token-path /home/ma-user/modelarts/user-job-dir/sfs/selftok/imagenet/imagenet_vit_sb_v16_selftok_tokens \
+ --text-token-path /home/ma-user/modelarts/user-job-dir/sfs/selftok/imagenet/imagenet_cap512_cap_codes \
+ --gpt-model GPT-XL --gpt-type t2i \
+ --dataset t2i_code \
+ --cls-token-num 256 \
+ --vocab-size 19683 \
+ --image-size 512 \
+ --global-batch-size 256 \
+ --ckpt-every 5000 \
+ --gradient-accumulation-steps 2 \
+ --cloud-save-path /cache/results_Selftok-Llamagen-t2i-AbsPosEmb \
+ --s3-save-path s3://bucket-5125-guiyang/selftok/outputs/llamagen_selftok/Selftok-Llamagen-t2i-vit-sb-v16-blockcausal \
+ --no-local-save \
+ --no-wandb"
+
+ # torchrun --nnodes=1 --nproc_per_node=8 --node_rank=0 \
+ # --master_port=20045 \
+ # autoregressive/train/train_t2i.py \
+ # --image-token-path /cache/code-imagenet-E31_cap/imagenet_cap512_image_codes \
+ # --text-token-path /cache/code-imagenet-E31_cap/imagenet_cap512_cap_codes \
+ # --gpt-model GPT-XL --gpt-type t2i \
+ # --dataset t2i_code \
+ # --cls-token-num 256 \
+ # --vocab-size 16384 \
+ # --image-size 512 \
+ # --global-batch-size 512 \
+ # --gradient-accumulation-steps 2 \
+ # --cloud-save-path /cache/results_E31_t2i \
+ # --no-local-save \
+ # --no-wandb
+
+ echo "------> CMD <------"
+ echo "$CMD"
+ eval "$CMD"
llamagen-siglip-sb-block-causal/scripts/autoregressive/train_t2i_multi_node_overfit.sh ADDED
@@ -0,0 +1,164 @@
+ # #!/bin/bash
+ # set -x
+ # export PYTHONPATH="${PWD}:${PYTHONPATH}"
+ # export WANDB_NAME="run_$(date +%Y%m%d_%H%M%S)"
+ # export WANDB_PROJECT="c2i_selftok"
+ # export WANDB_API_KEY="0b30f581d65172381c1f1a45f928210cab80f1de"
+
+ # torchrun --nnodes=1 --nproc_per_node=8 --node_rank=0 \
+ # --master_port=20045 \
+ # autoregressive/train/train_t2i.py \
+ # --image-token-path /cache/code-imagenet-E31_cap/imagenet_cap512_image_codes \
+ # --text-token-path /cache/code-imagenet-E31_cap/imagenet_cap512_cap_codes \
+ # --gpt-model GPT-XL --gpt-type t2i \
+ # --dataset t2i_code \
+ # --cls-token-num 256 \
+ # --vocab-size 16384 \
+ # --image-size 512 \
+ # --global-batch-size 512 \
+ # --gradient-accumulation-steps 2 \
+ # --cloud-save-path /cache/results_E31_t2i \
+ # --no-local-save \
+ # --no-wandb
+
+
+
+
+
+ # Configure CANN-related environment variables
+ CANN_INSTALL_PATH_CONF='/etc/Ascend/ascend_cann_install.info'
+ sudo cat /etc/Ascend/ascend_cann_install.info
+ DEFAULT_CANN_INSTALL_PATH="/usr/local/Ascend/"
+ CANN_INSTALL_PATH="/usr/local/Ascend/"
+
+ if [ -d ${CANN_INSTALL_PATH}/ascend-toolkit/latest ]; then
+     cat ${CANN_INSTALL_PATH}/ascend-toolkit/set_env.sh
+     source ${CANN_INSTALL_PATH}/ascend-toolkit/set_env.sh
+ else
+     cat ${CANN_INSTALL_PATH}/nnae/set_env.sh
+     source ${CANN_INSTALL_PATH}/nnae/set_env.sh
+ fi
+
+ # Add dependency libraries to the loader path
+ export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/openblas/lib
+ export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib/
+ export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/lib64/
+ export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/lib/
+ export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/lib/aarch64-linux-gnu
+
+ # Custom environment variables
+ export HCCL_WHITELIST_DISABLE=1
+
+ # log
+ export ASCEND_SLOG_PRINT_TO_STDOUT=0 # print logs to stdout (optional)
+ export ASCEND_GLOBAL_LOG_LEVEL=3 # log level: 1 = INFO, 3 = ERROR
+ export ASCEND_GLOBAL_EVENT_ENABLE=0 # event logging disabled by default
+ export ASCEND_LAUNCH_BLOCKING=0 # operator launch mode: 1 = synchronous, 0 = asynchronous
+
+ # System default environment variables; do not modify
+ MASTER_HOST="$VC_WORKER_HOSTS"
+ MASTER_ADDR="${VC_WORKER_HOSTS%%,*}"
+ NNODES="$MA_NUM_HOSTS"
+ NODE_RANK="$VC_TASK_INDEX"
+ NGPUS_PER_NODE="$MA_NUM_GPUS"
+ NUM_PROCESSES=$(($NGPUS_PER_NODE * $NNODES))
+
+ echo "num_machines: ${NNODES}" >> ${DEEPSPEED_CONFIG_PATH}
+ echo "num_processes: ${NUM_PROCESSES}" >> ${DEEPSPEED_CONFIG_PATH}
+
+ echo "------> system config <------"
+ echo "VC_WORKER_HOSTS: ${VC_WORKER_HOSTS}"
+ echo "MASTER_HOST: ${MASTER_HOST}"
+ echo "MASTER_ADDR: ${MASTER_ADDR}"
+ echo "NNODES: ${NNODES}"
+ echo "NODE_RANK: ${NODE_RANK}"
+ echo "NGPUS_PER_NODE: ${NGPUS_PER_NODE}"
+ echo "NUM_PROCESSES: ${NUM_PROCESSES}"
+ echo "${MA_JOB_DIR}"
+ echo "------> <------"
+
+ # https://www.hiascend.com/document/detail/zh/canncommercial/63RC2/modeldevpt/ptmigr/ptmigr_0022.html
+ export HCCL_WHITELIST_DISABLE=1
+
+ if [[ $NODE_RANK == 0 ]]; then
+     EXT_ARGS="--rdzv_conf=is_host=1"
+ else
+     EXT_ARGS=""
+ fi
+
+ # set npu plog env, https://3ms.huawei.com/hi/group/3225441/wiki_6402466.html
+ ma_vj_name=`echo ${MA_VJ_NAME} | sed 's:ma-job:modelarts-job:g'`
+ task_name="worker-${VC_TASK_INDEX}"
+ task_plog_path=${MA_LOG_DIR}/${ma_vj_name}/${task_name}
+
+ mkdir -p ${task_plog_path}
+ export ASCEND_PROCESS_LOG_PATH=${task_plog_path}
+
+ echo "plog path: ${ASCEND_PROCESS_LOG_PATH}"
+
+ npu-smi info
+ pip install wandb
+ pip install einx
+ pip install fairscale
+ pip install lpips
+ pip install diffusers==0.31.0
+
+ # set hccl timeout time in seconds
+ export HCCL_CONNECT_TIMEOUT=7200
+
+ export OMP_NUM_THREADS=4
+ export NUMEXPR_MAX_THREADS=4
+
+
+ echo "------> pwd <------"
+ pwd
+ echo "------> files <------"
+ ls
+
+ export ROOT='/cache'
+ # python tools/mox_copy.py s3://bucket-2588-wuhu/code/p84402465/code-imagenet-E31/ ${ROOT}/code-imagenet-E31/
+
+
+ export MASTER_PORT="${MASTER_PORT:-12335}"
+
+ CMD="torchrun \
+ --nnodes=${NNODES} \
+ --nproc_per_node=${NGPUS_PER_NODE} \
+ --node_rank=${NODE_RANK} \
+ --master_addr=${MASTER_ADDR} \
+ --master_port=${MASTER_PORT} \
+ autoregressive/train/train_t2i.py \
+ --image-token-path /home/ma-user/modelarts/user-job-dir/sfs/selftok/imagenet/imagenet_vit_sb_v16_selftok_tokens \
+ --text-token-path /home/ma-user/modelarts/user-job-dir/sfs/selftok/imagenet/overfits \
+ --gpt-model GPT-XL --gpt-type t2i \
+ --dataset t2i_code \
+ --cls-token-num 256 \
+ --vocab-size 19683 \
+ --image-size 512 \
+ --global-batch-size 256 \
+ --ckpt-every 5000 \
+ --gradient-accumulation-steps 2 \
+ --cloud-save-path /cache/results_Selftok-Llamagen-t2i-AbsPosEmb \
+ --s3-save-path s3://bucket-5125-guiyang/selftok/outputs/llamagen_selftok/Selftok-Llamagen-t2i-vit-sb-v16-blockcausal-overfit \
+ --no-local-save \
+ --no-wandb"
+
+ # torchrun --nnodes=1 --nproc_per_node=8 --node_rank=0 \
+ # --master_port=20045 \
+ # autoregressive/train/train_t2i.py \
+ # --image-token-path /cache/code-imagenet-E31_cap/imagenet_cap512_image_codes \
+ # --text-token-path /cache/code-imagenet-E31_cap/imagenet_cap512_cap_codes \
+ # --gpt-model GPT-XL --gpt-type t2i \
+ # --dataset t2i_code \
+ # --cls-token-num 256 \
+ # --vocab-size 16384 \
+ # --image-size 512 \
+ # --global-batch-size 512 \
+ # --gradient-accumulation-steps 2 \
+ # --cloud-save-path /cache/results_E31_t2i \
+ # --no-local-save \
+ # --no-wandb
+
+ echo "------> CMD <------"
+ echo "$CMD"
+ eval "$CMD"
llamagen-siglip-sb-block-causal/scripts/autoregressive/train_t2i_stage1.sh ADDED
@@ -0,0 +1,13 @@
+ #!/bin/bash
+ set -x
+
+ torchrun \
+ --nnodes=$nnodes --nproc_per_node=$nproc_per_node --node_rank=$node_rank \
+ --master_addr=$master_addr --master_port=$master_port \
+ autoregressive/train/train_t2i.py \
+ --vq-ckpt ./pretrained_models/vq_ds16_t2i.pt \
+ --data-path /path/to/laion_coco50M \
+ --t5-feat-path /path/to/laion_coco50M_flan_t5_xl \
+ --dataset t2i \
+ --image-size 256 \
+ "$@"
llamagen-siglip-sb-block-causal/scripts/autoregressive/train_t2i_stage2.sh ADDED
@@ -0,0 +1,14 @@
+ #!/bin/bash
+ set -x
+
+ torchrun \
+ --nnodes=$nnodes --nproc_per_node=$nproc_per_node --node_rank=$node_rank \
+ --master_addr=$master_addr --master_port=$master_port \
+ autoregressive/train/train_t2i.py \
+ --vq-ckpt ./pretrained_models/vq_ds16_t2i.pt \
+ --data-path /path/to/high_aesthetic_10M \
+ --t5-feat-path /path/to/high_aesthetic_10M_flan_t5_xl \
+ --short-t5-feat-path /path/to/high_aesthetic_10M_trunc_flan_t5_xl \
+ --dataset t2i \
+ --image-size 512 \
+ "$@"
llamagen-siglip-sb-block-causal/scripts/language/extract_flan_t5_feat_laion_coco_stage1.sh ADDED
@@ -0,0 +1,11 @@
+ #!/bin/bash
+ set -x
+
+ torchrun \
+ --nnodes=1 --nproc_per_node=8 --node_rank=0 \
+ --master_port=12337 \
+ language/extract_t5_feature.py \
+ --data-path /path/to/laion_coco50M \
+ --t5-path /path/to/laion_coco50M_flan_t5_xl \
+ --caption-key blip \
+ "$@"
llamagen-siglip-sb-block-causal/scripts/language/extract_flan_t5_feat_stage2.sh ADDED
@@ -0,0 +1,10 @@
+ #!/bin/bash
+ set -x
+
+ torchrun \
+ --nnodes=1 --nproc_per_node=8 --node_rank=0 \
+ --master_port=12337 \
+ language/extract_t5_feature.py \
+ --data-path /path/to/high_aesthetic_10M \
+ --t5-path /path/to/high_aesthetic_10M_flan_t5_xl \
+ "$@"
llamagen-siglip-sb-block-causal/scripts/language/extract_flan_t5_feat_trunc_stage2.sh ADDED
@@ -0,0 +1,11 @@
+ #!/bin/bash
+ set -x
+
+ torchrun \
+ --nnodes=1 --nproc_per_node=8 --node_rank=0 \
+ --master_port=12337 \
+ language/extract_t5_feature.py \
+ --data-path /path/to/high_aesthetic_10M \
+ --t5-path /path/to/high_aesthetic_10M_trunc_flan_t5_xl \
+ --trunc-caption \
+ "$@"
llamagen-siglip-sb-block-causal/scripts/language/extract_flan_t5_imagenet.sh ADDED
@@ -0,0 +1,9 @@
+ # Single node, multi-GPU (use torchrun or your existing init_distributed_mode)
+ torchrun --nproc_per_node=8 language/extract_t5_feature_imagenet.py \
+ --parquet-dir /home/ma-user/work/w84402926/SelfTok/llamagen/imagenet/imagenet-1k-vl-enriched \
+ --out-dir /cache/code-imagenet-E31_cap \
+ --data-start 0 --data-end 206 \
+ --pattern "train-{idx:05d}-of-00207.parquet" \
+ --t5-model-path /home/ma-user/work/w84402926/SelfTok/llamagen/imagenet \
+ --t5-model-type flan-t5-xl \
+ --precision bf16
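The --pattern flag appears to use Python format syntax for the parquet shard names; a sketch of the presumed expansion over --data-start/--data-end (the loop below is an assumption about extract_t5_feature_imagenet.py, not taken from it):

python3 - <<EOF
pattern = "train-{idx:05d}-of-00207.parquet"
for idx in range(0, 3):               # script uses --data-start 0 --data-end 206
    print(pattern.format(idx=idx))    # train-00000-of-00207.parquet, ...
EOF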
llamagen-siglip-sb-block-causal/scripts/tokenizer/reconstruction_consistency_decoder.sh ADDED
@@ -0,0 +1,8 @@
+ #!/bin/bash
+ set -x
+
+ torchrun \
+ --nnodes=1 --nproc_per_node=8 --node_rank=0 \
+ --master_port=12344 \
+ tokenizer/consistencydecoder/reconstruction_cd_ddp.py \
+ "$@"
llamagen-siglip-sb-block-causal/scripts/tokenizer/reconstruction_vae.sh ADDED
@@ -0,0 +1,8 @@
+ #!/bin/bash
+ set -x
+
+ torchrun \
+ --nnodes=1 --nproc_per_node=8 --node_rank=0 \
+ --master_port=12344 \
+ tokenizer/vae/reconstruction_vae_ddp.py \
+ "$@"
llamagen-siglip-sb-block-causal/scripts/tokenizer/reconstruction_vq.sh ADDED
@@ -0,0 +1,8 @@
+ #!/bin/bash
+ set -x
+
+ torchrun \
+ --nnodes=1 --nproc_per_node=8 --node_rank=0 \
+ --master_port=12344 \
+ tokenizer/tokenizer_image/reconstruction_vq_ddp.py \
+ "$@"
llamagen-siglip-sb-block-causal/scripts/tokenizer/reconstruction_vqgan.sh ADDED
@@ -0,0 +1,8 @@
+ #!/bin/bash
+ set -x
+
+ torchrun \
+ --nnodes=1 --nproc_per_node=8 --node_rank=0 \
+ --master_port=12344 \
+ tokenizer/vqgan/reconstruction_vqgan_ddp.py \
+ "$@"
llamagen-siglip-sb-block-causal/scripts/tokenizer/train_vq.sh ADDED
@@ -0,0 +1,7 @@
+ #!/bin/bash
+ set -x
+
+ torchrun \
+ --nnodes=$nnodes --nproc_per_node=$nproc_per_node --node_rank=$node_rank \
+ --master_addr=$master_addr --master_port=$master_port \
+ tokenizer/tokenizer_image/vq_train.py "$@"
llamagen-siglip-sb-block-causal/scripts/tokenizer/train_vq_finetune.sh ADDED
@@ -0,0 +1,15 @@
+ #!/bin/bash
+ set -x
+
+ torchrun \
+ --nnodes=$nnodes --nproc_per_node=$nproc_per_node --node_rank=$node_rank \
+ --master_addr=$master_addr --master_port=$master_port \
+ tokenizer/tokenizer_image/vq_train.py \
+ --finetune \
+ --disc-start 0 \
+ --vq-ckpt ./pretrained_models/vq_ds16_c2i.pt \
+ --dataset t2i_image \
+ --data-path /path/to/high_aesthetic_10M \
+ --data-face-path /path/to/face_2M \
+ --cloud-save-path /path/to/cloud_disk \
+ "$@"
llamagen-siglip-sb-block-causal/scripts/tokenizer/train_vq_finetune_continue.sh ADDED
@@ -0,0 +1,15 @@
+ #!/bin/bash
+ set -x
+
+ torchrun \
+ --nnodes=$nnodes --nproc_per_node=$nproc_per_node --node_rank=$node_rank \
+ --master_addr=$master_addr --master_port=$master_port \
+ tokenizer/tokenizer_image/vq_train.py \
+ --disc-start 0 \
+ --dataset t2i_image \
+ --data-path /path/to/high_aesthetic_10M \
+ --data-face-path /path/to/face_2M \
+ --cloud-save-path /path/to/cloud_disk \
+ "$@"
+
+ # --vq-ckpt xxx.pt
llamagen-siglip-sb-block-causal/scripts/tokenizer/val.sh ADDED
@@ -0,0 +1,8 @@
+ #!/bin/bash
+ set -x
+
+ torchrun \
+ --nnodes=1 --nproc_per_node=8 --node_rank=0 \
+ --master_port=12343 \
+ tokenizer/validation/val_ddp.py \
+ "$@"