tuandunghcmut committed on
Commit 6d45858 · verified · 1 Parent(s): 6be5f84

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.

Files changed (50)
  1. VLMEvalKit_old/InternVL/classification/configs/attn_pooling_probing/attn_pooling_probing_intern_vit_6b_224px_in1k_224to448_64gpu.yaml +36 -0
  2. VLMEvalKit_old/InternVL/classification/configs/attn_pooling_probing/attn_pooling_probing_intern_vit_6b_448px_v1_0_in1k_448_64gpu_imagenet_real.yaml +37 -0
  3. VLMEvalKit_old/InternVL/classification/configs/attn_pooling_probing/attn_pooling_probing_intern_vit_6b_448px_v1_5_in1k_448_64gpu_imagenet_real.yaml +37 -0
  4. VLMEvalKit_old/InternVL/internvl_chat/examples/image1.jpg +0 -0
  5. VLMEvalKit_old/InternVL/internvl_chat/examples/image3.jpg +0 -0
  6. VLMEvalKit_old/InternVL/internvl_chat/shell/data/coco_caption.json +9 -0
  7. VLMEvalKit_old/InternVL/internvl_chat/shell/data/internvl_1_2_finetune.json +65 -0
  8. VLMEvalKit_old/InternVL/internvl_chat/shell/data/internvl_1_2_finetune_custom.json +65 -0
  9. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl1.2/2nd_finetune/internvl_chat_v1_2_hermes2_yi34b_448_res_2nd_finetune_full.sh +76 -0
  10. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl1.2/hermes2_yi34b/internvl_chat_v1_2_hermes2_yi34b_448_res_finetune.sh +74 -0
  11. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl1.5/2nd_finetune/internvl_chat_v1_5_internlm2_1_8b_dynamic_res_2nd_finetune_full.sh +68 -0
  12. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl1.5/2nd_finetune/internvl_chat_v1_5_internlm2_1_8b_dynamic_res_2nd_finetune_lora.sh +69 -0
  13. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl1.5/2nd_finetune/internvl_chat_v1_5_internlm2_20b_dynamic_res_2nd_finetune_full.sh +68 -0
  14. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl1.5/2nd_finetune/internvl_chat_v1_5_internlm2_20b_dynamic_res_2nd_finetune_lora.sh +69 -0
  15. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl1.5/2nd_finetune/internvl_chat_v1_5_phi3_3_8b_dynamic_res_2nd_finetune_full.sh +68 -0
  16. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl1.5/2nd_finetune/internvl_chat_v1_5_phi3_3_8b_dynamic_res_2nd_finetune_lora.sh +69 -0
  17. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl1.5/hermes2_yi34b/internvl_chat_v1_5_hermes2_yi34b_dynamic_res_finetune.sh +76 -0
  18. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl1.5/hermes2_yi34b/internvl_chat_v1_5_hermes2_yi34b_dynamic_res_pretrain.sh +78 -0
  19. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl1.5/internlm2_1_8b/internvl_chat_v1_5_internlm2_1_8b_dynamic_res_finetune.sh +76 -0
  20. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl1.5/internlm2_1_8b/internvl_chat_v1_5_internlm2_1_8b_dynamic_res_pretrain.sh +77 -0
  21. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl1.5/internlm2_20b/internvl_chat_v1_5_internlm2_20b_dynamic_res_finetune.sh +76 -0
  22. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl1.5/internlm2_20b/internvl_chat_v1_5_internlm2_20b_dynamic_res_pretrain.sh +77 -0
  23. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl1.5/phi3_3_8b/internvl_chat_v1_5_phi3_3_8b_dynamic_res_finetune.sh +76 -0
  24. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl1.5/phi3_3_8b/internvl_chat_v1_5_phi3_3_8b_dynamic_res_pretrain.sh +77 -0
  25. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.0/2nd_finetune/internvl2_1b_qwen2_0_5b_dynamic_res_2nd_finetune_full.sh +68 -0
  26. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.0/2nd_finetune/internvl2_2b_internlm2_1_8b_dynamic_res_2nd_finetune_lora_coco.sh +69 -0
  27. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.0/2nd_finetune/internvl2_4b_phi3_3_8b_dynamic_res_2nd_finetune_full.sh +68 -0
  28. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.0/2nd_finetune/internvl2_4b_phi3_3_8b_dynamic_res_2nd_finetune_lora.sh +69 -0
  29. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.0/2nd_finetune/internvl2_76b_hermes2_llama3_70b_dynamic_res_2nd_finetune_full.sh +76 -0
  30. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.0/2nd_finetune/internvl2_76b_hermes2_llama3_70b_dynamic_res_2nd_finetune_lora.sh +69 -0
  31. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.0/2nd_finetune/internvl2_8b_internlm2_7b_dynamic_res_2nd_finetune_full.sh +68 -0
  32. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.0/2nd_finetune/internvl2_8b_internlm2_7b_dynamic_res_2nd_finetune_lora.sh +69 -0
  33. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.0_mpo/README.md +150 -0
  34. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.0_mpo/preference_optimization/internvl2_8b_internlm2_7b_dynamic_res_mpo_full.sh +81 -0
  35. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.5/2nd_finetune/internvl2_5_1b_dynamic_res_2nd_finetune_full.sh +69 -0
  36. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.5/2nd_finetune/internvl2_5_1b_dynamic_res_2nd_finetune_lora.sh +70 -0
  37. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.5/2nd_finetune/internvl2_5_26b_dynamic_res_2nd_finetune_full.sh +69 -0
  38. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.5/2nd_finetune/internvl2_5_26b_dynamic_res_2nd_finetune_lora.sh +70 -0
  39. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.5/2nd_finetune/internvl2_5_2b_dynamic_res_2nd_finetune_full.sh +69 -0
  40. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.5/2nd_finetune/internvl2_5_2b_dynamic_res_2nd_finetune_lora.sh +70 -0
  41. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.5/2nd_finetune/internvl2_5_2b_dynamic_res_2nd_finetune_lora_coco.sh +70 -0
  42. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.5/2nd_finetune/internvl2_5_38b_dynamic_res_2nd_finetune_full.sh +77 -0
  43. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.5/2nd_finetune/internvl2_5_38b_dynamic_res_2nd_finetune_lora.sh +70 -0
  44. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.5/2nd_finetune/internvl2_5_4b_dynamic_res_2nd_finetune_full.sh +69 -0
  45. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.5/2nd_finetune/internvl2_5_4b_dynamic_res_2nd_finetune_lora.sh +70 -0
  46. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.5/2nd_finetune/internvl2_5_78b_dynamic_res_2nd_finetune_full.sh +77 -0
  47. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.5/2nd_finetune/internvl2_5_78b_dynamic_res_2nd_finetune_lora.sh +70 -0
  48. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.5/2nd_finetune/internvl2_5_8b_dynamic_res_2nd_finetune_full.sh +69 -0
  49. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.5/2nd_finetune/internvl2_5_8b_dynamic_res_2nd_finetune_lora.sh +70 -0
  50. VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.5/stage1.5/internvl2_5_26b_internlm2_5_20b_dynamic_res_stage1_5.sh +91 -0
VLMEvalKit_old/InternVL/classification/configs/attn_pooling_probing/attn_pooling_probing_intern_vit_6b_224px_in1k_224to448_64gpu.yaml ADDED
@@ -0,0 +1,36 @@
+DATA:
+  IMG_ON_MEMORY: False
+  BATCH_SIZE: 16  # single GPU batch size
+  TRANSFORM: 'build_transform_for_linear_probe'
+  DATA_PATH: './data/imagenet-1k'
+  IMG_SIZE: 448
+MODEL:
+  TYPE: intern_vit_6b
+  DROP_PATH_RATE: 0.0
+  INTERN_VIT_6B:
+    FREEZE_VIT: True
+    PATCH_SIZE: 14
+    PRETRAIN_SIZE: 224
+    QKV_BIAS: False
+    EMBED_DIM: 3200
+    NUM_HEADS: 25
+    MLP_RATIO: 4
+    INIT_VALUES: 0.1
+    QK_NORMALIZATION: True
+    DEPTH: 48
+    USE_FLASH_ATTN: True
+    PRETRAINED: "./pretrained/intern_vit_6b_224px.pth"
+  CLS_TARGET: 'attention_pooling'
+TRAIN:
+  EMA:
+    ENABLE: True
+    DECAY: 0.998
+  EPOCHS: 10
+  WARMUP_EPOCHS: 1
+  WEIGHT_DECAY: 0.0
+  BASE_LR: 0.1  # 512
+  WARMUP_LR: .0
+  MIN_LR: .0
+  LR_LAYER_DECAY: false
+  OPTIMIZER:
+    NAME: 'sgd'
VLMEvalKit_old/InternVL/classification/configs/attn_pooling_probing/attn_pooling_probing_intern_vit_6b_448px_v1_0_in1k_448_64gpu_imagenet_real.yaml ADDED
@@ -0,0 +1,37 @@
+DATA:
+  IMG_ON_MEMORY: False
+  BATCH_SIZE: 16  # single GPU batch size
+  DATASET: 'imagenet-real'
+  TRANSFORM: 'build_transform_for_linear_probe'
+  DATA_PATH: './data/imagenet-1k'
+  IMG_SIZE: 448
+MODEL:
+  TYPE: intern_vit_6b
+  DROP_PATH_RATE: 0.0
+  INTERN_VIT_6B:
+    FREEZE_VIT: True
+    PATCH_SIZE: 14
+    PRETRAIN_SIZE: 448
+    QKV_BIAS: False
+    EMBED_DIM: 3200
+    NUM_HEADS: 25
+    MLP_RATIO: 4
+    INIT_VALUES: 0.1
+    QK_NORMALIZATION: True
+    DEPTH: 45
+    USE_FLASH_ATTN: True
+    PRETRAINED: "./pretrained/intern_vit_6b_448px_v1_0.pth"
+  CLS_TARGET: 'attention_pooling'
+TRAIN:
+  EMA:
+    ENABLE: True
+    DECAY: 0.998
+  EPOCHS: 10
+  WARMUP_EPOCHS: 1
+  WEIGHT_DECAY: 0.0
+  BASE_LR: 0.1  # 512
+  WARMUP_LR: .0
+  MIN_LR: .0
+  LR_LAYER_DECAY: false
+  OPTIMIZER:
+    NAME: 'sgd'
VLMEvalKit_old/InternVL/classification/configs/attn_pooling_probing/attn_pooling_probing_intern_vit_6b_448px_v1_5_in1k_448_64gpu_imagenet_real.yaml ADDED
@@ -0,0 +1,37 @@
+DATA:
+  IMG_ON_MEMORY: False
+  BATCH_SIZE: 16  # single GPU batch size
+  DATASET: 'imagenet-real'
+  TRANSFORM: 'build_transform_for_linear_probe'
+  DATA_PATH: './data/imagenet-1k'
+  IMG_SIZE: 448
+MODEL:
+  TYPE: intern_vit_6b
+  DROP_PATH_RATE: 0.0
+  INTERN_VIT_6B:
+    FREEZE_VIT: True
+    PATCH_SIZE: 14
+    PRETRAIN_SIZE: 448
+    QKV_BIAS: False
+    EMBED_DIM: 3200
+    NUM_HEADS: 25
+    MLP_RATIO: 4
+    INIT_VALUES: 0.1
+    QK_NORMALIZATION: True
+    DEPTH: 45
+    USE_FLASH_ATTN: True
+    PRETRAINED: "./pretrained/intern_vit_6b_448px_v1_5.pth"
+  CLS_TARGET: 'attention_pooling'
+TRAIN:
+  EMA:
+    ENABLE: True
+    DECAY: 0.998
+  EPOCHS: 10
+  WARMUP_EPOCHS: 1
+  WEIGHT_DECAY: 0.0
+  BASE_LR: 0.1  # 512
+  WARMUP_LR: .0
+  MIN_LR: .0
+  LR_LAYER_DECAY: false
+  OPTIMIZER:
+    NAME: 'sgd'
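The three probing configs above share the same nested layout; as a rough illustration only (the repository's own config loader may be yacs-based rather than plain PyYAML), one could read the first file like this. The GPU count is an assumption taken from the "64gpu" file name, not from the YAML itself.

import yaml

CONFIG = "configs/attn_pooling_probing/attn_pooling_probing_intern_vit_6b_224px_in1k_224to448_64gpu.yaml"
NUM_GPUS = 64  # assumed from the file name

with open(CONFIG) as f:
    cfg = yaml.safe_load(f)

per_gpu = cfg["DATA"]["BATCH_SIZE"]                                  # 16, single-GPU batch size
print("global batch size:", per_gpu * NUM_GPUS)                      # 1024
print("frozen ViT:", cfg["MODEL"]["INTERN_VIT_6B"]["FREEZE_VIT"])    # True
print("classifier head:", cfg["MODEL"]["CLS_TARGET"])                # 'attention_pooling'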
VLMEvalKit_old/InternVL/internvl_chat/examples/image1.jpg ADDED
VLMEvalKit_old/InternVL/internvl_chat/examples/image3.jpg ADDED
VLMEvalKit_old/InternVL/internvl_chat/shell/data/coco_caption.json ADDED
@@ -0,0 +1,9 @@
+{
+  "coco_karpathy_train_567k": {
+    "root": "data/coco/",
+    "annotation": "data/coco/annotations/coco_karpathy_train_567k.jsonl",
+    "data_augment": false,
+    "repeat_time": 1,
+    "length": 566747
+  }
+}
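Each entry in these meta files maps a dataset name to an image root, a JSONL annotation file, a repeat_time, and a precomputed length. A minimal sketch of how such a file could be consumed (a hypothetical helper for inspection, not the trainer's actual loader):

import json

def summarize_meta(path):
    with open(path) as f:
        meta = json.load(f)
    total = 0
    for name, ds in meta.items():
        # repeat_time oversamples (or undersamples) a source relative to its raw length
        effective = int(ds["length"] * ds["repeat_time"])
        total += effective
        print(f"{name}: root={ds['root']} ann={ds['annotation']} samples={effective}")
    print("total training samples:", total)

summarize_meta("shell/data/coco_caption.json")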
VLMEvalKit_old/InternVL/internvl_chat/shell/data/internvl_1_2_finetune.json ADDED
@@ -0,0 +1,65 @@
+{
+  "sharegpt4v_instruct_gpt4-vision_cap100k": {
+    "root": "playground/data/",
+    "annotation": "playground/opensource/sharegpt4v_instruct_gpt4-vision_cap100k.jsonl",
+    "data_augment": false,
+    "repeat_time": 1,
+    "length": 102025
+  },
+  "llava_instruct_150k_zh": {
+    "root": "playground/data/coco/",
+    "annotation": "playground/opensource/llava_instruct_150k_zh.jsonl",
+    "data_augment": false,
+    "repeat_time": 1,
+    "length": 157712
+  },
+  "sharegpt4v_mix665k_cap23k_coco-ap9k_lcs3k_sam9k_div2k": {
+    "root": "playground/data/",
+    "annotation": "playground/opensource/sharegpt4v_mix665k_cap23k_coco-ap9k_lcs3k_sam9k_div2k.jsonl",
+    "data_augment": false,
+    "repeat_time": 1,
+    "length": 665058
+  },
+  "dvqa_train_200k": {
+    "root": "playground/data/dvqa/",
+    "annotation": "playground/opensource/dvqa_train_200k.jsonl",
+    "data_augment": false,
+    "repeat_time": 1,
+    "length": 200000
+  },
+  "chartqa_train_18k": {
+    "root": "playground/data/chartqa/",
+    "annotation": "playground/opensource/chartqa_train_18k.jsonl",
+    "data_augment": false,
+    "repeat_time": 1,
+    "length": 18317
+  },
+  "ai2d_train_12k": {
+    "root": "playground/data/ai2d/",
+    "annotation": "playground/opensource/ai2d_train_12k.jsonl",
+    "data_augment": false,
+    "repeat_time": 1,
+    "length": 12413
+  },
+  "docvqa_train_10k": {
+    "root": "playground/data/docvqa/",
+    "annotation": "playground/opensource/docvqa_train_10k.jsonl",
+    "data_augment": false,
+    "repeat_time": 1,
+    "length": 10211
+  },
+  "geoqa+": {
+    "root": "playground/data/geoqa+/",
+    "annotation": "playground/opensource/geoqa+.jsonl",
+    "data_augment": false,
+    "repeat_time": 1,
+    "length": 72318
+  },
+  "synthdog_en": {
+    "root": "playground/data/synthdog-en/",
+    "annotation": "playground/opensource/synthdog_en.jsonl",
+    "data_augment": false,
+    "repeat_time": 1,
+    "length": 29765
+  }
+}
VLMEvalKit_old/InternVL/internvl_chat/shell/data/internvl_1_2_finetune_custom.json ADDED
@@ -0,0 +1,65 @@
+{
+  "sharegpt4v_instruct_gpt4-vision_cap100k": {
+    "root": "playground/data/",
+    "annotation": "playground/opensource/sharegpt4v_instruct_gpt4-vision_cap100k.jsonl",
+    "data_augment": false,
+    "repeat_time": 1,
+    "length": 102025
+  },
+  "llava_instruct_150k_zh": {
+    "root": "playground/data/coco/",
+    "annotation": "playground/opensource/llava_instruct_150k_zh.jsonl",
+    "data_augment": false,
+    "repeat_time": 1,
+    "length": 157712
+  },
+  "sharegpt4v_mix665k_cap23k_coco-ap9k_lcs3k_sam9k_div2k": {
+    "root": "playground/data/",
+    "annotation": "playground/opensource/sharegpt4v_mix665k_cap23k_coco-ap9k_lcs3k_sam9k_div2k.jsonl",
+    "data_augment": false,
+    "repeat_time": 1,
+    "length": 665058
+  },
+  "dvqa_train_200k": {
+    "root": "playground/data/dvqa/",
+    "annotation": "playground/opensource/dvqa_train_200k.jsonl",
+    "data_augment": false,
+    "repeat_time": 1,
+    "length": 200000
+  },
+  "chartqa_train_18k": {
+    "root": "playground/data/chartqa/",
+    "annotation": "playground/opensource/chartqa_train_18k.jsonl",
+    "data_augment": false,
+    "repeat_time": 1,
+    "length": 18317
+  },
+  "ai2d_train_12k": {
+    "root": "playground/data/ai2d/",
+    "annotation": "playground/opensource/ai2d_train_12k.jsonl",
+    "data_augment": false,
+    "repeat_time": 1,
+    "length": 12413
+  },
+  "docvqa_train_10k": {
+    "root": "playground/data/docvqa/",
+    "annotation": "playground/opensource/docvqa_train_10k.jsonl",
+    "data_augment": false,
+    "repeat_time": 1,
+    "length": 10211
+  },
+  "geoqa+": {
+    "root": "playground/data/geoqa+/",
+    "annotation": "playground/opensource/geoqa+.jsonl",
+    "data_augment": false,
+    "repeat_time": 1,
+    "length": 72318
+  },
+  "synthdog_en": {
+    "root": "playground/data/synthdog-en/",
+    "annotation": "playground/opensource/synthdog_en.jsonl",
+    "data_augment": false,
+    "repeat_time": 1,
+    "length": 29765
+  }
+}
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl1.2/2nd_finetune/internvl_chat_v1_2_hermes2_yi34b_448_res_2nd_finetune_full.sh ADDED
@@ -0,0 +1,76 @@
+set -x
+
+PARTITION=${PARTITION:-"INTERN2"}
+GPUS=${GPUS:-16}
+GPUS_PER_NODE=${GPUS_PER_NODE:-8}
+QUOTA_TYPE=${QUOTA_TYPE:-"reserved"}
+NODES=$((GPUS / GPUS_PER_NODE))
+CPUS_PER_TASK=${CPUS_PER_TASK:-10}
+SRUN_ARGS=${SRUN_ARGS:-""}
+BATCH_SIZE=${BATCH_SIZE:-128}
+PER_DEVICE_BATCH_SIZE=${PER_DEVICE_BATCH_SIZE:-4}
+GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))
+
+
+export PYTHONPATH="${PYTHONPATH}:$(pwd)"
+export MASTER_PORT=34229
+export TF_CPP_MIN_LOG_LEVEL=3
+
+OUTPUT_DIR='work_dirs/internvl_chat_v1_2/internvl_chat_v1_2_hermes2_yi34b_448_res_2nd_finetune_full'
+
+if [ ! -d "$OUTPUT_DIR" ]; then
+  mkdir -p "$OUTPUT_DIR"
+fi
+
+# number of gpus: 16
+# batch size per gpu: 4
+# gradient accumulation steps: 2
+# total batch size: 128
+# epoch: 1
+srun -p ${PARTITION} \
+  --gres=gpu:${GPUS_PER_NODE} \
+  --nodes=${NODES} \
+  --ntasks=${GPUS} \
+  --ntasks-per-node=${GPUS_PER_NODE} \
+  --cpus-per-task=${CPUS_PER_TASK} \
+  --kill-on-bad-exit=1 \
+  --quotatype=${QUOTA_TYPE} \
+  ${SRUN_ARGS} \
+  python -u internvl/train/internvl_chat_finetune.py \
+  --model_name_or_path "./pretrained/InternVL-Chat-V1-2-Plus" \
+  --conv_style "Hermes-2" \
+  --output_dir ${OUTPUT_DIR} \
+  --meta_path "./shell/data/internvl_1_2_finetune_custom.json" \
+  --overwrite_output_dir True \
+  --force_image_size 448 \
+  --max_dynamic_patch 1 \
+  --down_sample_ratio 0.5 \
+  --drop_path_rate 0.0 \
+  --freeze_llm False \
+  --freeze_mlp False \
+  --freeze_backbone True \
+  --vision_select_layer -1 \
+  --dataloader_num_workers 4 \
+  --bf16 True \
+  --num_train_epochs 1 \
+  --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \
+  --gradient_accumulation_steps ${GRADIENT_ACC} \
+  --evaluation_strategy "no" \
+  --save_strategy "steps" \
+  --save_steps 200 \
+  --save_total_limit 1 \
+  --learning_rate 1e-5 \
+  --weight_decay 0.05 \
+  --warmup_ratio 0.03 \
+  --lr_scheduler_type "cosine" \
+  --logging_steps 1 \
+  --max_seq_length 2048 \
+  --do_train True \
+  --grad_checkpoint True \
+  --group_by_length True \
+  --dynamic_image_size False \
+  --use_thumbnail False \
+  --ps_version 'v1' \
+  --deepspeed "zero_stage3_config_34b.json" \
+  --report_to "tensorboard" \
+  2>&1 | tee -a "${OUTPUT_DIR}/training_log.txt"
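Every launcher in this commit derives GRADIENT_ACC from the same identity: the global batch size equals the per-device batch size times the number of GPUs times the gradient-accumulation steps. A standalone sanity check of that arithmetic, mirroring the shell expression $((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS)):

def grad_acc_steps(global_batch, per_device_batch, num_gpus):
    # Same computation as the shell scripts, with an explicit divisibility check.
    steps = global_batch // (per_device_batch * num_gpus)
    assert steps * per_device_batch * num_gpus == global_batch, "batch sizes must divide evenly"
    return steps

# Values from the script above: 128 total, 4 per GPU, 16 GPUs -> 2 accumulation steps.
print(grad_acc_steps(128, 4, 16))
# Values from the Slurm pretraining scripts further down: 2048 total, 2 per GPU, 256 GPUs -> 4.
print(grad_acc_steps(2048, 2, 256))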
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl1.2/hermes2_yi34b/internvl_chat_v1_2_hermes2_yi34b_448_res_finetune.sh ADDED
@@ -0,0 +1,74 @@
+set -x
+
+PARTITION=${PARTITION:-"INTERN2"}
+GPUS=${GPUS:-64}
+GPUS_PER_NODE=${GPUS_PER_NODE:-8}
+QUOTA_TYPE=${QUOTA_TYPE:-"reserved"}
+NODES=$((GPUS / GPUS_PER_NODE))
+CPUS_PER_TASK=${CPUS_PER_TASK:-10}
+SRUN_ARGS=${SRUN_ARGS:-""}
+BATCH_SIZE=${BATCH_SIZE:-512}
+PER_DEVICE_BATCH_SIZE=${PER_DEVICE_BATCH_SIZE:-8}
+GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))
+
+
+export PYTHONPATH="${PYTHONPATH}:$(pwd)"
+export MASTER_PORT=34229
+export TF_CPP_MIN_LOG_LEVEL=3
+
+OUTPUT_DIR='work_dirs/internvl_chat_v1_2/internvl_chat_v1_2_hermes2_yi34b_448_res_finetune'
+
+if [ ! -d "$OUTPUT_DIR" ]; then
+  mkdir -p "$OUTPUT_DIR"
+fi
+
+# number of gpus: 64
+# batch size per gpu: 8
+# gradient accumulation steps: 1
+# total batch size: 512
+# epoch: 1
+srun -p ${PARTITION} \
+  --gres=gpu:${GPUS_PER_NODE} \
+  --nodes=${NODES} \
+  --ntasks=${GPUS} \
+  --ntasks-per-node=${GPUS_PER_NODE} \
+  --cpus-per-task=${CPUS_PER_TASK} \
+  --kill-on-bad-exit=1 \
+  --quotatype=${QUOTA_TYPE} \
+  ${SRUN_ARGS} \
+  python -u internvl/train/internvl_chat_finetune.py \
+  --vision_path "./pretrained/InternViT-6B-448px-V1-2" \
+  --mlp_path "./pretrained/InternViT-6B-448px-V1-2/mlp_projector/hermes_2_yi_34b.pth" \
+  --llm_path "./pretrained/Nous-Hermes-2-Yi-34B" \
+  --conv_style "Hermes-2" \
+  --output_dir ${OUTPUT_DIR} \
+  --meta_path "./shell/data/internvl_1_2_finetune.json" \
+  --overwrite_output_dir True \
+  --force_image_size 448 \
+  --down_sample_ratio 0.5 \
+  --drop_path_rate 0.4 \
+  --freeze_llm False \
+  --freeze_mlp False \
+  --freeze_backbone False \
+  --vision_select_layer -1 \
+  --dataloader_num_workers 4 \
+  --bf16 True \
+  --num_train_epochs 1 \
+  --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \
+  --gradient_accumulation_steps ${GRADIENT_ACC} \
+  --evaluation_strategy "no" \
+  --save_strategy "steps" \
+  --save_steps 200 \
+  --save_total_limit 3 \
+  --learning_rate 1e-5 \
+  --weight_decay 0.05 \
+  --warmup_ratio 0.03 \
+  --lr_scheduler_type "cosine" \
+  --logging_steps 1 \
+  --max_seq_length 2048 \
+  --do_train True \
+  --grad_checkpoint True \
+  --group_by_length True \
+  --deepspeed "zero_stage3_config_34b.json" \
+  --report_to "tensorboard" \
+  2>&1 | tee -a "${OUTPUT_DIR}/training_log.txt"
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl1.5/2nd_finetune/internvl_chat_v1_5_internlm2_1_8b_dynamic_res_2nd_finetune_full.sh ADDED
@@ -0,0 +1,68 @@
+set -x
+
+GPUS=${GPUS:-8}
+BATCH_SIZE=${BATCH_SIZE:-128}
+PER_DEVICE_BATCH_SIZE=${PER_DEVICE_BATCH_SIZE:-4}
+GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))
+
+
+export PYTHONPATH="${PYTHONPATH}:$(pwd)"
+export MASTER_PORT=34229
+export TF_CPP_MIN_LOG_LEVEL=3
+export LAUNCHER=pytorch
+
+OUTPUT_DIR='work_dirs/internvl_chat_v1_5/internvl_chat_v1_5_internlm2_1_8b_dynamic_res_2nd_finetune_full'
+
+if [ ! -d "$OUTPUT_DIR" ]; then
+  mkdir -p "$OUTPUT_DIR"
+fi
+
+# number of gpus: 8
+# batch size per gpu: 4
+# gradient accumulation steps: 4
+# total batch size: 128
+# epoch: 1
+torchrun \
+  --nnodes=1 \
+  --node_rank=0 \
+  --master_addr=127.0.0.1 \
+  --nproc_per_node=${GPUS} \
+  --master_port=${MASTER_PORT} \
+  internvl/train/internvl_chat_finetune.py \
+  --model_name_or_path "./pretrained/Mini-InternVL-Chat-2B-V1-5" \
+  --conv_style "internlm2-chat" \
+  --output_dir ${OUTPUT_DIR} \
+  --meta_path "./shell/data/internvl_1_2_finetune_custom.json" \
+  --overwrite_output_dir True \
+  --force_image_size 448 \
+  --max_dynamic_patch 12 \
+  --down_sample_ratio 0.5 \
+  --drop_path_rate 0.1 \
+  --freeze_llm False \
+  --freeze_mlp False \
+  --freeze_backbone True \
+  --vision_select_layer -1 \
+  --dataloader_num_workers 4 \
+  --bf16 True \
+  --num_train_epochs 1 \
+  --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \
+  --gradient_accumulation_steps ${GRADIENT_ACC} \
+  --evaluation_strategy "no" \
+  --save_strategy "steps" \
+  --save_steps 200 \
+  --save_total_limit 1 \
+  --learning_rate 4e-5 \
+  --weight_decay 0.01 \
+  --warmup_ratio 0.03 \
+  --lr_scheduler_type "cosine" \
+  --logging_steps 1 \
+  --max_seq_length 4096 \
+  --do_train True \
+  --grad_checkpoint True \
+  --group_by_length True \
+  --dynamic_image_size True \
+  --use_thumbnail True \
+  --ps_version 'v2' \
+  --deepspeed "zero_stage1_config.json" \
+  --report_to "tensorboard" \
+  2>&1 | tee -a "${OUTPUT_DIR}/training_log.txt"
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl1.5/2nd_finetune/internvl_chat_v1_5_internlm2_1_8b_dynamic_res_2nd_finetune_lora.sh ADDED
@@ -0,0 +1,69 @@
+set -x
+
+GPUS=${GPUS:-2}
+BATCH_SIZE=${BATCH_SIZE:-16}
+PER_DEVICE_BATCH_SIZE=${PER_DEVICE_BATCH_SIZE:-4}
+GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))
+
+
+export PYTHONPATH="${PYTHONPATH}:$(pwd)"
+export MASTER_PORT=34229
+export TF_CPP_MIN_LOG_LEVEL=3
+export LAUNCHER=pytorch
+
+OUTPUT_DIR='work_dirs/internvl_chat_v1_5/internvl_chat_v1_5_internlm2_1_8b_dynamic_res_2nd_finetune_lora'
+
+if [ ! -d "$OUTPUT_DIR" ]; then
+  mkdir -p "$OUTPUT_DIR"
+fi
+
+# number of gpus: 2
+# batch size per gpu: 4
+# gradient accumulation steps: 2
+# total batch size: 16
+# epoch: 1
+torchrun \
+  --nnodes=1 \
+  --node_rank=0 \
+  --master_addr=127.0.0.1 \
+  --nproc_per_node=${GPUS} \
+  --master_port=${MASTER_PORT} \
+  internvl/train/internvl_chat_finetune.py \
+  --model_name_or_path "./pretrained/Mini-InternVL-Chat-2B-V1-5" \
+  --conv_style "internlm2-chat" \
+  --output_dir ${OUTPUT_DIR} \
+  --meta_path "./shell/data/internvl_1_2_finetune_custom.json" \
+  --overwrite_output_dir True \
+  --force_image_size 448 \
+  --max_dynamic_patch 12 \
+  --down_sample_ratio 0.5 \
+  --drop_path_rate 0.0 \
+  --freeze_llm True \
+  --freeze_mlp True \
+  --freeze_backbone True \
+  --use_llm_lora 16 \
+  --vision_select_layer -1 \
+  --dataloader_num_workers 4 \
+  --bf16 True \
+  --num_train_epochs 1 \
+  --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \
+  --gradient_accumulation_steps ${GRADIENT_ACC} \
+  --evaluation_strategy "no" \
+  --save_strategy "steps" \
+  --save_steps 200 \
+  --save_total_limit 1 \
+  --learning_rate 4e-5 \
+  --weight_decay 0.01 \
+  --warmup_ratio 0.03 \
+  --lr_scheduler_type "cosine" \
+  --logging_steps 1 \
+  --max_seq_length 4096 \
+  --do_train True \
+  --grad_checkpoint True \
+  --group_by_length True \
+  --dynamic_image_size True \
+  --use_thumbnail True \
+  --ps_version 'v2' \
+  --deepspeed "zero_stage1_config.json" \
+  --report_to "tensorboard" \
+  2>&1 | tee -a "${OUTPUT_DIR}/training_log.txt"
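The LoRA variants keep the ViT backbone, MLP projector, and LLM frozen and pass --use_llm_lora 16, which by its name suggests rank-16 adapters on the language model. Purely as an illustration of that idea (the flag's exact behavior is defined inside internvl/train; the alpha, dropout, and target module names below are assumptions, not taken from this repository):

# Rough PEFT-style picture of rank-16 LoRA on a frozen LLM; illustrative only.
from peft import LoraConfig

lora_cfg = LoraConfig(
    r=16,                       # matches the value passed to --use_llm_lora
    lora_alpha=32,              # assumed scaling factor
    lora_dropout=0.05,          # assumed dropout
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],  # placeholder names; InternLM2 uses its own module names
    task_type="CAUSAL_LM",
)
print(lora_cfg)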
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl1.5/2nd_finetune/internvl_chat_v1_5_internlm2_20b_dynamic_res_2nd_finetune_full.sh ADDED
@@ -0,0 +1,68 @@
+set -x
+
+GPUS=${GPUS:-8}
+BATCH_SIZE=${BATCH_SIZE:-128}
+PER_DEVICE_BATCH_SIZE=${PER_DEVICE_BATCH_SIZE:-2}
+GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))
+
+
+export PYTHONPATH="${PYTHONPATH}:$(pwd)"
+export MASTER_PORT=34229
+export TF_CPP_MIN_LOG_LEVEL=3
+export LAUNCHER=pytorch
+
+OUTPUT_DIR='work_dirs/internvl_chat_v1_5/internvl_chat_v1_5_internlm2_20b_dynamic_res_2nd_finetune_full'
+
+if [ ! -d "$OUTPUT_DIR" ]; then
+  mkdir -p "$OUTPUT_DIR"
+fi
+
+# number of gpus: 8
+# batch size per gpu: 2
+# gradient accumulation steps: 8
+# total batch size: 128
+# epoch: 1
+torchrun \
+  --nnodes=1 \
+  --node_rank=0 \
+  --master_addr=127.0.0.1 \
+  --nproc_per_node=${GPUS} \
+  --master_port=${MASTER_PORT} \
+  internvl/train/internvl_chat_finetune.py \
+  --model_name_or_path "./pretrained/InternVL-Chat-V1-5" \
+  --conv_style "internlm2-chat" \
+  --output_dir ${OUTPUT_DIR} \
+  --meta_path "./shell/data/internvl_1_2_finetune_custom.json" \
+  --overwrite_output_dir True \
+  --force_image_size 448 \
+  --max_dynamic_patch 12 \
+  --down_sample_ratio 0.5 \
+  --drop_path_rate 0.4 \
+  --freeze_llm False \
+  --freeze_mlp False \
+  --freeze_backbone True \
+  --vision_select_layer -1 \
+  --dataloader_num_workers 4 \
+  --bf16 True \
+  --num_train_epochs 1 \
+  --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \
+  --gradient_accumulation_steps ${GRADIENT_ACC} \
+  --evaluation_strategy "no" \
+  --save_strategy "steps" \
+  --save_steps 200 \
+  --save_total_limit 1 \
+  --learning_rate 2e-5 \
+  --weight_decay 0.05 \
+  --warmup_ratio 0.03 \
+  --lr_scheduler_type "cosine" \
+  --logging_steps 1 \
+  --max_seq_length 4096 \
+  --do_train True \
+  --grad_checkpoint True \
+  --group_by_length True \
+  --dynamic_image_size True \
+  --use_thumbnail True \
+  --ps_version 'v2' \
+  --deepspeed "zero_stage3_config.json" \
+  --report_to "tensorboard" \
+  2>&1 | tee -a "${OUTPUT_DIR}/training_log.txt"
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl1.5/2nd_finetune/internvl_chat_v1_5_internlm2_20b_dynamic_res_2nd_finetune_lora.sh ADDED
@@ -0,0 +1,69 @@
+set -x
+
+GPUS=${GPUS:-2}
+BATCH_SIZE=${BATCH_SIZE:-16}
+PER_DEVICE_BATCH_SIZE=${PER_DEVICE_BATCH_SIZE:-2}
+GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))
+
+
+export PYTHONPATH="${PYTHONPATH}:$(pwd)"
+export MASTER_PORT=34229
+export TF_CPP_MIN_LOG_LEVEL=3
+export LAUNCHER=pytorch
+
+OUTPUT_DIR='work_dirs/internvl_chat_v1_5/internvl_chat_v1_5_internlm2_20b_dynamic_res_2nd_finetune_lora'
+
+if [ ! -d "$OUTPUT_DIR" ]; then
+  mkdir -p "$OUTPUT_DIR"
+fi
+
+# number of gpus: 2
+# batch size per gpu: 2
+# gradient accumulation steps: 4
+# total batch size: 16
+# epoch: 1
+torchrun \
+  --nnodes=1 \
+  --node_rank=0 \
+  --master_addr=127.0.0.1 \
+  --nproc_per_node=${GPUS} \
+  --master_port=${MASTER_PORT} \
+  internvl/train/internvl_chat_finetune.py \
+  --model_name_or_path "./pretrained/InternVL-Chat-V1-5" \
+  --conv_style "internlm2-chat" \
+  --output_dir ${OUTPUT_DIR} \
+  --meta_path "./shell/data/internvl_1_2_finetune_custom.json" \
+  --overwrite_output_dir True \
+  --force_image_size 448 \
+  --max_dynamic_patch 12 \
+  --down_sample_ratio 0.5 \
+  --drop_path_rate 0.0 \
+  --freeze_llm True \
+  --freeze_mlp True \
+  --freeze_backbone True \
+  --use_llm_lora 16 \
+  --vision_select_layer -1 \
+  --dataloader_num_workers 4 \
+  --bf16 True \
+  --num_train_epochs 1 \
+  --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \
+  --gradient_accumulation_steps ${GRADIENT_ACC} \
+  --evaluation_strategy "no" \
+  --save_strategy "steps" \
+  --save_steps 200 \
+  --save_total_limit 1 \
+  --learning_rate 2e-5 \
+  --weight_decay 0.05 \
+  --warmup_ratio 0.03 \
+  --lr_scheduler_type "cosine" \
+  --logging_steps 1 \
+  --max_seq_length 4096 \
+  --do_train True \
+  --grad_checkpoint True \
+  --group_by_length True \
+  --dynamic_image_size True \
+  --use_thumbnail True \
+  --ps_version 'v2' \
+  --deepspeed "zero_stage3_config.json" \
+  --report_to "tensorboard" \
+  2>&1 | tee -a "${OUTPUT_DIR}/training_log.txt"
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl1.5/2nd_finetune/internvl_chat_v1_5_phi3_3_8b_dynamic_res_2nd_finetune_full.sh ADDED
@@ -0,0 +1,68 @@
+set -x
+
+GPUS=${GPUS:-8}
+BATCH_SIZE=${BATCH_SIZE:-128}
+PER_DEVICE_BATCH_SIZE=${PER_DEVICE_BATCH_SIZE:-4}
+GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))
+
+
+export PYTHONPATH="${PYTHONPATH}:$(pwd)"
+export MASTER_PORT=34229
+export TF_CPP_MIN_LOG_LEVEL=3
+export LAUNCHER=pytorch
+
+OUTPUT_DIR='work_dirs/internvl_chat_v1_5/internvl_chat_v1_5_phi3_3_8b_dynamic_res_2nd_finetune_full'
+
+if [ ! -d "$OUTPUT_DIR" ]; then
+  mkdir -p "$OUTPUT_DIR"
+fi
+
+# number of gpus: 8
+# batch size per gpu: 4
+# gradient accumulation steps: 4
+# total batch size: 128
+# epoch: 1
+torchrun \
+  --nnodes=1 \
+  --node_rank=0 \
+  --master_addr=127.0.0.1 \
+  --nproc_per_node=${GPUS} \
+  --master_port=${MASTER_PORT} \
+  internvl/train/internvl_chat_finetune.py \
+  --model_name_or_path "./pretrained/Mini-InternVL-Chat-4B-V1-5" \
+  --conv_style "phi3-chat" \
+  --output_dir ${OUTPUT_DIR} \
+  --meta_path "./shell/data/internvl_1_2_finetune_custom.json" \
+  --overwrite_output_dir True \
+  --force_image_size 448 \
+  --max_dynamic_patch 12 \
+  --down_sample_ratio 0.5 \
+  --drop_path_rate 0.1 \
+  --freeze_llm False \
+  --freeze_mlp False \
+  --freeze_backbone True \
+  --vision_select_layer -1 \
+  --dataloader_num_workers 4 \
+  --bf16 True \
+  --num_train_epochs 1 \
+  --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \
+  --gradient_accumulation_steps ${GRADIENT_ACC} \
+  --evaluation_strategy "no" \
+  --save_strategy "steps" \
+  --save_steps 200 \
+  --save_total_limit 1 \
+  --learning_rate 4e-5 \
+  --weight_decay 0.05 \
+  --warmup_ratio 0.03 \
+  --lr_scheduler_type "cosine" \
+  --logging_steps 1 \
+  --max_seq_length 4096 \
+  --do_train True \
+  --grad_checkpoint True \
+  --group_by_length True \
+  --dynamic_image_size True \
+  --use_thumbnail True \
+  --ps_version 'v2' \
+  --deepspeed "zero_stage1_config.json" \
+  --report_to "tensorboard" \
+  2>&1 | tee -a "${OUTPUT_DIR}/training_log.txt"
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl1.5/2nd_finetune/internvl_chat_v1_5_phi3_3_8b_dynamic_res_2nd_finetune_lora.sh ADDED
@@ -0,0 +1,69 @@
+set -x
+
+GPUS=${GPUS:-2}
+BATCH_SIZE=${BATCH_SIZE:-16}
+PER_DEVICE_BATCH_SIZE=${PER_DEVICE_BATCH_SIZE:-4}
+GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))
+
+
+export PYTHONPATH="${PYTHONPATH}:$(pwd)"
+export MASTER_PORT=34229
+export TF_CPP_MIN_LOG_LEVEL=3
+export LAUNCHER=pytorch
+
+OUTPUT_DIR='work_dirs/internvl_chat_v1_5/internvl_chat_v1_5_phi3_3_8b_dynamic_res_2nd_finetune_lora'
+
+if [ ! -d "$OUTPUT_DIR" ]; then
+  mkdir -p "$OUTPUT_DIR"
+fi
+
+# number of gpus: 2
+# batch size per gpu: 4
+# gradient accumulation steps: 2
+# total batch size: 16
+# epoch: 1
+torchrun \
+  --nnodes=1 \
+  --node_rank=0 \
+  --master_addr=127.0.0.1 \
+  --nproc_per_node=${GPUS} \
+  --master_port=${MASTER_PORT} \
+  internvl/train/internvl_chat_finetune.py \
+  --model_name_or_path "./pretrained/Mini-InternVL-Chat-4B-V1-5" \
+  --conv_style "phi3-chat" \
+  --output_dir ${OUTPUT_DIR} \
+  --meta_path "./shell/data/internvl_1_2_finetune_custom.json" \
+  --overwrite_output_dir True \
+  --force_image_size 448 \
+  --max_dynamic_patch 12 \
+  --down_sample_ratio 0.5 \
+  --drop_path_rate 0.0 \
+  --freeze_llm True \
+  --freeze_mlp True \
+  --freeze_backbone True \
+  --use_llm_lora 16 \
+  --vision_select_layer -1 \
+  --dataloader_num_workers 4 \
+  --bf16 True \
+  --num_train_epochs 1 \
+  --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \
+  --gradient_accumulation_steps ${GRADIENT_ACC} \
+  --evaluation_strategy "no" \
+  --save_strategy "steps" \
+  --save_steps 200 \
+  --save_total_limit 1 \
+  --learning_rate 4e-5 \
+  --weight_decay 0.05 \
+  --warmup_ratio 0.03 \
+  --lr_scheduler_type "cosine" \
+  --logging_steps 1 \
+  --max_seq_length 4096 \
+  --do_train True \
+  --grad_checkpoint True \
+  --group_by_length True \
+  --dynamic_image_size True \
+  --use_thumbnail True \
+  --ps_version 'v2' \
+  --deepspeed "zero_stage1_config.json" \
+  --report_to "tensorboard" \
+  2>&1 | tee -a "${OUTPUT_DIR}/training_log.txt"
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl1.5/hermes2_yi34b/internvl_chat_v1_5_hermes2_yi34b_dynamic_res_finetune.sh ADDED
@@ -0,0 +1,76 @@
+set -x
+
+PARTITION=${PARTITION:-"INTERN2"}
+GPUS=${GPUS:-256}
+GPUS_PER_NODE=${GPUS_PER_NODE:-8}
+QUOTA_TYPE=${QUOTA_TYPE:-"reserved"}
+NODES=$((GPUS / GPUS_PER_NODE))
+CPUS_PER_TASK=${CPUS_PER_TASK:-10}
+SRUN_ARGS=${SRUN_ARGS:-""}
+BATCH_SIZE=${BATCH_SIZE:-1024}
+PER_DEVICE_BATCH_SIZE=${PER_DEVICE_BATCH_SIZE:-4}
+GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))
+
+
+export PYTHONPATH="${PYTHONPATH}:$(pwd)"
+export MASTER_PORT=34229
+export TF_CPP_MIN_LOG_LEVEL=3
+
+OUTPUT_DIR='work_dirs/internvl_chat_v1_5/internvl_chat_v1_5_hermes2_yi34b_dynamic_res_finetune'
+
+if [ ! -d "$OUTPUT_DIR" ]; then
+  mkdir -p "$OUTPUT_DIR"
+fi
+
+# number of gpus: 256
+# batch size per gpu: 4
+# gradient accumulation steps: 1
+# total batch size: 1024
+# epoch: 1
+srun -p ${PARTITION} \
+  --gres=gpu:${GPUS_PER_NODE} \
+  --nodes=${NODES} \
+  --ntasks=${GPUS} \
+  --ntasks-per-node=${GPUS_PER_NODE} \
+  --cpus-per-task=${CPUS_PER_TASK} \
+  --kill-on-bad-exit=1 \
+  --quotatype=${QUOTA_TYPE} \
+  ${SRUN_ARGS} \
+  python -u internvl/train/internvl_chat_finetune.py \
+  --model_name_or_path "./work_dirs/internvl_chat_v1_5_hermes2_yi34b_dynamic_res_pretrain" \
+  --conv_style "Hermes-2" \
+  --output_dir ${OUTPUT_DIR} \
+  --meta_path "path/to/finetune/data.json" \
+  --overwrite_output_dir True \
+  --force_image_size 448 \
+  --max_dynamic_patch 12 \
+  --down_sample_ratio 0.5 \
+  --drop_path_rate 0.4 \
+  --freeze_llm False \
+  --freeze_mlp False \
+  --freeze_backbone False \
+  --vision_select_layer -1 \
+  --dataloader_num_workers 4 \
+  --bf16 True \
+  --num_train_epochs 1 \
+  --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \
+  --gradient_accumulation_steps ${GRADIENT_ACC} \
+  --evaluation_strategy "no" \
+  --save_strategy "steps" \
+  --save_steps 200 \
+  --save_total_limit 3 \
+  --learning_rate 2e-5 \
+  --weight_decay 0.05 \
+  --warmup_ratio 0.03 \
+  --lr_scheduler_type "cosine" \
+  --logging_steps 1 \
+  --max_seq_length 4096 \
+  --do_train True \
+  --grad_checkpoint True \
+  --group_by_length True \
+  --dynamic_image_size True \
+  --use_thumbnail True \
+  --ps_version 'v2' \
+  --deepspeed "zero_stage3_config_34b.json" \
+  --report_to "tensorboard" \
+  2>&1 | tee -a "${OUTPUT_DIR}/training_log.txt"
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl1.5/hermes2_yi34b/internvl_chat_v1_5_hermes2_yi34b_dynamic_res_pretrain.sh ADDED
@@ -0,0 +1,78 @@
+set -x
+
+PARTITION=${PARTITION:-"INTERN2"}
+GPUS=${GPUS:-256}
+GPUS_PER_NODE=${GPUS_PER_NODE:-8}
+QUOTA_TYPE=${QUOTA_TYPE:-"reserved"}
+NODES=$((GPUS / GPUS_PER_NODE))
+CPUS_PER_TASK=${CPUS_PER_TASK:-10}
+SRUN_ARGS=${SRUN_ARGS:-""}
+BATCH_SIZE=${BATCH_SIZE:-2048}
+PER_DEVICE_BATCH_SIZE=${PER_DEVICE_BATCH_SIZE:-2}
+GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))
+
+
+export PYTHONPATH="${PYTHONPATH}:$(pwd)"
+export MASTER_PORT=34229
+export TF_CPP_MIN_LOG_LEVEL=3
+
+OUTPUT_DIR='work_dirs/internvl_chat_v1_5/internvl_chat_v1_5_hermes2_yi34b_dynamic_res_pretrain'
+
+if [ ! -d "$OUTPUT_DIR" ]; then
+  mkdir -p "$OUTPUT_DIR"
+fi
+
+# number of gpus: 256
+# batch size per gpu: 2
+# gradient accumulation steps: 4
+# total batch size: 2048
+# epoch: 1
+srun -p ${PARTITION} \
+  --gres=gpu:${GPUS_PER_NODE} \
+  --nodes=${NODES} \
+  --ntasks=${GPUS} \
+  --ntasks-per-node=${GPUS_PER_NODE} \
+  --cpus-per-task=${CPUS_PER_TASK} \
+  --kill-on-bad-exit=1 \
+  --quotatype=${QUOTA_TYPE} \
+  ${SRUN_ARGS} \
+  python -u internvl/train/internvl_chat_pretrain.py \
+  --vision_path "./pretrained/InternViT-6B-448px-V1-5" \
+  --mlp_path "./pretrained/InternViT-6B-448px-V1-2/mlp_projector/hermes_2_yi_34b.pth" \
+  --llm_path "./pretrained/Nous-Hermes-2-Yi-34B" \
+  --conv_style "Hermes-2" \
+  --output_dir ${OUTPUT_DIR} \
+  --meta_path "path/to/pretrain/data.json" \
+  --overwrite_output_dir True \
+  --force_image_size 448 \
+  --max_dynamic_patch 12 \
+  --down_sample_ratio 0.5 \
+  --drop_path_rate 0.0 \
+  --freeze_llm True \
+  --freeze_mlp False \
+  --freeze_backbone True \
+  --vision_select_layer -1 \
+  --dataloader_num_workers 4 \
+  --bf16 True \
+  --num_train_epochs 1 \
+  --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \
+  --gradient_accumulation_steps ${GRADIENT_ACC} \
+  --evaluation_strategy "no" \
+  --save_strategy "steps" \
+  --save_steps 200 \
+  --save_total_limit 3 \
+  --learning_rate 1e-4 \
+  --weight_decay 0.05 \
+  --warmup_steps 100 \
+  --lr_scheduler_type "cosine" \
+  --logging_steps 1 \
+  --max_seq_length 4096 \
+  --do_train True \
+  --grad_checkpoint True \
+  --group_by_length False \
+  --dynamic_image_size True \
+  --use_thumbnail True \
+  --ps_version 'v2' \
+  --deepspeed "zero_stage3_config_34b.json" \
+  --report_to "tensorboard" \
+  2>&1 | tee -a "${OUTPUT_DIR}/training_log.txt"
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl1.5/internlm2_1_8b/internvl_chat_v1_5_internlm2_1_8b_dynamic_res_finetune.sh ADDED
@@ -0,0 +1,76 @@
+set -x
+
+PARTITION=${PARTITION:-"INTERN2"}
+GPUS=${GPUS:-128}
+GPUS_PER_NODE=${GPUS_PER_NODE:-8}
+QUOTA_TYPE=${QUOTA_TYPE:-"reserved"}
+NODES=$((GPUS / GPUS_PER_NODE))
+CPUS_PER_TASK=${CPUS_PER_TASK:-10}
+SRUN_ARGS=${SRUN_ARGS:-""}
+BATCH_SIZE=${BATCH_SIZE:-1024}
+PER_DEVICE_BATCH_SIZE=${PER_DEVICE_BATCH_SIZE:-4}
+GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))
+
+
+export PYTHONPATH="${PYTHONPATH}:$(pwd)"
+export MASTER_PORT=34229
+export TF_CPP_MIN_LOG_LEVEL=3
+
+OUTPUT_DIR='work_dirs/internvl_chat_v1_5/internvl_chat_v1_5_internlm2_1_8b_dynamic_res_finetune'
+
+if [ ! -d "$OUTPUT_DIR" ]; then
+  mkdir -p "$OUTPUT_DIR"
+fi
+
+# number of gpus: 128
+# batch size per gpu: 4
+# gradient accumulation steps: 2
+# total batch size: 1024
+# epoch: 1
+srun -p ${PARTITION} \
+  --gres=gpu:${GPUS_PER_NODE} \
+  --nodes=${NODES} \
+  --ntasks=${GPUS} \
+  --ntasks-per-node=${GPUS_PER_NODE} \
+  --cpus-per-task=${CPUS_PER_TASK} \
+  --kill-on-bad-exit=1 \
+  --quotatype=${QUOTA_TYPE} \
+  ${SRUN_ARGS} \
+  python -u internvl/train/internvl_chat_finetune.py \
+  --model_name_or_path "./work_dirs/internvl_chat_v1_5_internlm2_1_8b_dynamic_res_pretrain" \
+  --conv_style "internlm2-chat" \
+  --output_dir ${OUTPUT_DIR} \
+  --meta_path "path/to/finetune/data.json" \
+  --overwrite_output_dir True \
+  --force_image_size 448 \
+  --max_dynamic_patch 12 \
+  --down_sample_ratio 0.5 \
+  --drop_path_rate 0.1 \
+  --freeze_llm False \
+  --freeze_mlp False \
+  --freeze_backbone False \
+  --vision_select_layer -1 \
+  --dataloader_num_workers 4 \
+  --bf16 True \
+  --num_train_epochs 1 \
+  --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \
+  --gradient_accumulation_steps ${GRADIENT_ACC} \
+  --evaluation_strategy "no" \
+  --save_strategy "steps" \
+  --save_steps 200 \
+  --save_total_limit 3 \
+  --learning_rate 4e-5 \
+  --weight_decay 0.01 \
+  --warmup_ratio 0.03 \
+  --lr_scheduler_type "cosine" \
+  --logging_steps 1 \
+  --max_seq_length 8192 \
+  --do_train True \
+  --grad_checkpoint True \
+  --group_by_length True \
+  --dynamic_image_size True \
+  --use_thumbnail True \
+  --ps_version 'v2' \
+  --deepspeed "zero_stage1_config.json" \
+  --report_to "tensorboard" \
+  2>&1 | tee -a "${OUTPUT_DIR}/training_log.txt"
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl1.5/internlm2_1_8b/internvl_chat_v1_5_internlm2_1_8b_dynamic_res_pretrain.sh ADDED
@@ -0,0 +1,77 @@
+set -x
+
+PARTITION=${PARTITION:-"INTERN2"}
+GPUS=${GPUS:-128}
+GPUS_PER_NODE=${GPUS_PER_NODE:-8}
+QUOTA_TYPE=${QUOTA_TYPE:-"reserved"}
+NODES=$((GPUS / GPUS_PER_NODE))
+CPUS_PER_TASK=${CPUS_PER_TASK:-10}
+SRUN_ARGS=${SRUN_ARGS:-""}
+BATCH_SIZE=${BATCH_SIZE:-2048}
+PER_DEVICE_BATCH_SIZE=${PER_DEVICE_BATCH_SIZE:-8}
+GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))
+
+
+export PYTHONPATH="${PYTHONPATH}:$(pwd)"
+export MASTER_PORT=34229
+export TF_CPP_MIN_LOG_LEVEL=3
+
+OUTPUT_DIR='work_dirs/internvl_chat_v1_5/internvl_chat_v1_5_internlm2_1_8b_dynamic_res_pretrain'
+
+if [ ! -d "$OUTPUT_DIR" ]; then
+  mkdir -p "$OUTPUT_DIR"
+fi
+
+# number of gpus: 128
+# batch size per gpu: 8
+# gradient accumulation steps: 2
+# total batch size: 2048
+# epoch: 1
+srun -p ${PARTITION} \
+  --gres=gpu:${GPUS_PER_NODE} \
+  --nodes=${NODES} \
+  --ntasks=${GPUS} \
+  --ntasks-per-node=${GPUS_PER_NODE} \
+  --cpus-per-task=${CPUS_PER_TASK} \
+  --kill-on-bad-exit=1 \
+  --quotatype=${QUOTA_TYPE} \
+  ${SRUN_ARGS} \
+  python -u internvl/train/internvl_chat_pretrain.py \
+  --vision_path "./pretrained/InternViT-300M-448px" \
+  --llm_path "./pretrained/internlm2-chat-1_8b" \
+  --conv_style "internlm2-chat" \
+  --output_dir ${OUTPUT_DIR} \
+  --meta_path "path/to/pretrain/data.json" \
+  --overwrite_output_dir True \
+  --force_image_size 448 \
+  --max_dynamic_patch 12 \
+  --down_sample_ratio 0.5 \
+  --drop_path_rate 0.1 \
+  --freeze_llm True \
+  --freeze_mlp False \
+  --freeze_backbone False \
+  --vision_select_layer -1 \
+  --dataloader_num_workers 4 \
+  --bf16 True \
+  --num_train_epochs 1 \
+  --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \
+  --gradient_accumulation_steps ${GRADIENT_ACC} \
+  --evaluation_strategy "no" \
+  --save_strategy "steps" \
+  --save_steps 200 \
+  --save_total_limit 3 \
+  --learning_rate 2e-5 \
+  --weight_decay 0.01 \
+  --warmup_steps 100 \
+  --lr_scheduler_type "cosine" \
+  --logging_steps 1 \
+  --max_seq_length 4096 \
+  --do_train True \
+  --grad_checkpoint True \
+  --group_by_length False \
+  --dynamic_image_size True \
+  --use_thumbnail True \
+  --ps_version 'v2' \
+  --deepspeed "zero_stage1_config.json" \
+  --report_to "tensorboard" \
+  2>&1 | tee -a "${OUTPUT_DIR}/training_log.txt"
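Across these v1.5 recipes the pretraining stage keeps the LLM frozen while training the projector (and, for the smaller ViT, the backbone), i.e. --freeze_llm True --freeze_mlp False, and the corresponding finetuning stage then unfreezes everything. As a hedged sketch of what such freeze flags typically translate to (set_trainable and the attribute names below are hypothetical, not this repository's API; the actual wiring lives in internvl/train):

# Hypothetical illustration of mapping freeze_* flags onto requires_grad.
def set_trainable(model, freeze_backbone, freeze_mlp, freeze_llm):
    groups = {
        "vision_model": not freeze_backbone,   # ViT backbone
        "mlp1": not freeze_mlp,                # vision-to-LLM projector
        "language_model": not freeze_llm,      # LLM
    }
    for name, trainable in groups.items():
        module = getattr(model, name, None)
        if module is None:
            continue
        for p in module.parameters():
            p.requires_grad = trainable

# Pretraining above: set_trainable(model, freeze_backbone=False, freeze_mlp=False, freeze_llm=True)
# Finetuning:        set_trainable(model, freeze_backbone=False, freeze_mlp=False, freeze_llm=False)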
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl1.5/internlm2_20b/internvl_chat_v1_5_internlm2_20b_dynamic_res_finetune.sh ADDED
@@ -0,0 +1,76 @@
+set -x
+
+PARTITION=${PARTITION:-"INTERN2"}
+GPUS=${GPUS:-256}
+GPUS_PER_NODE=${GPUS_PER_NODE:-8}
+QUOTA_TYPE=${QUOTA_TYPE:-"reserved"}
+NODES=$((GPUS / GPUS_PER_NODE))
+CPUS_PER_TASK=${CPUS_PER_TASK:-10}
+SRUN_ARGS=${SRUN_ARGS:-""}
+BATCH_SIZE=${BATCH_SIZE:-1024}
+PER_DEVICE_BATCH_SIZE=${PER_DEVICE_BATCH_SIZE:-4}
+GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))
+
+
+export PYTHONPATH="${PYTHONPATH}:$(pwd)"
+export MASTER_PORT=34229
+export TF_CPP_MIN_LOG_LEVEL=3
+
+OUTPUT_DIR='work_dirs/internvl_chat_v1_5/internvl_chat_v1_5_internlm2_20b_dynamic_res_finetune'
+
+if [ ! -d "$OUTPUT_DIR" ]; then
+  mkdir -p "$OUTPUT_DIR"
+fi
+
+# number of gpus: 256
+# batch size per gpu: 4
+# gradient accumulation steps: 1
+# total batch size: 1024
+# epoch: 1
+srun -p ${PARTITION} \
+  --gres=gpu:${GPUS_PER_NODE} \
+  --nodes=${NODES} \
+  --ntasks=${GPUS} \
+  --ntasks-per-node=${GPUS_PER_NODE} \
+  --cpus-per-task=${CPUS_PER_TASK} \
+  --kill-on-bad-exit=1 \
+  --quotatype=${QUOTA_TYPE} \
+  ${SRUN_ARGS} \
+  python -u internvl/train/internvl_chat_finetune.py \
+  --model_name_or_path "./work_dirs/internvl_chat_v1_5_internlm2_20b_dynamic_res_pretrain" \
+  --conv_style "internlm2-chat" \
+  --output_dir ${OUTPUT_DIR} \
+  --meta_path "path/to/finetune/data.json" \
+  --overwrite_output_dir True \
+  --force_image_size 448 \
+  --max_dynamic_patch 12 \
+  --down_sample_ratio 0.5 \
+  --drop_path_rate 0.4 \
+  --freeze_llm False \
+  --freeze_mlp False \
+  --freeze_backbone False \
+  --vision_select_layer -1 \
+  --dataloader_num_workers 4 \
+  --bf16 True \
+  --num_train_epochs 1 \
+  --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \
+  --gradient_accumulation_steps ${GRADIENT_ACC} \
+  --evaluation_strategy "no" \
+  --save_strategy "steps" \
+  --save_steps 200 \
+  --save_total_limit 3 \
+  --learning_rate 2e-5 \
+  --weight_decay 0.05 \
+  --warmup_ratio 0.03 \
+  --lr_scheduler_type "cosine" \
+  --logging_steps 1 \
+  --max_seq_length 4096 \
+  --do_train True \
+  --grad_checkpoint True \
+  --group_by_length True \
+  --dynamic_image_size True \
+  --use_thumbnail True \
+  --ps_version 'v2' \
+  --deepspeed "zero_stage3_config.json" \
+  --report_to "tensorboard" \
+  2>&1 | tee -a "${OUTPUT_DIR}/training_log.txt"
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl1.5/internlm2_20b/internvl_chat_v1_5_internlm2_20b_dynamic_res_pretrain.sh ADDED
@@ -0,0 +1,77 @@
+set -x
+
+PARTITION=${PARTITION:-"INTERN2"}
+GPUS=${GPUS:-256}
+GPUS_PER_NODE=${GPUS_PER_NODE:-8}
+QUOTA_TYPE=${QUOTA_TYPE:-"reserved"}
+NODES=$((GPUS / GPUS_PER_NODE))
+CPUS_PER_TASK=${CPUS_PER_TASK:-10}
+SRUN_ARGS=${SRUN_ARGS:-""}
+BATCH_SIZE=${BATCH_SIZE:-2048}
+PER_DEVICE_BATCH_SIZE=${PER_DEVICE_BATCH_SIZE:-4}
+GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))
+
+
+export PYTHONPATH="${PYTHONPATH}:$(pwd)"
+export MASTER_PORT=34229
+export TF_CPP_MIN_LOG_LEVEL=3
+
+OUTPUT_DIR='work_dirs/internvl_chat_v1_5/internvl_chat_v1_5_internlm2_20b_dynamic_res_pretrain'
+
+if [ ! -d "$OUTPUT_DIR" ]; then
+  mkdir -p "$OUTPUT_DIR"
+fi
+
+# number of gpus: 256
+# batch size per gpu: 4
+# gradient accumulation steps: 2
+# total batch size: 2048
+# epoch: 1
+srun -p ${PARTITION} \
+  --gres=gpu:${GPUS_PER_NODE} \
+  --nodes=${NODES} \
+  --ntasks=${GPUS} \
+  --ntasks-per-node=${GPUS_PER_NODE} \
+  --cpus-per-task=${CPUS_PER_TASK} \
+  --kill-on-bad-exit=1 \
+  --quotatype=${QUOTA_TYPE} \
+  ${SRUN_ARGS} \
+  python -u internvl/train/internvl_chat_pretrain.py \
+  --vision_path "./pretrained/InternViT-6B-448px-V1-5" \
+  --llm_path "./pretrained/internlm2-chat-20b" \
+  --conv_style "internlm2-chat" \
+  --output_dir ${OUTPUT_DIR} \
+  --meta_path "path/to/pretrain/data.json" \
+  --overwrite_output_dir True \
+  --force_image_size 448 \
+  --max_dynamic_patch 12 \
+  --down_sample_ratio 0.5 \
+  --drop_path_rate 0.2 \
+  --freeze_llm True \
+  --freeze_mlp False \
+  --freeze_backbone False \
+  --vision_select_layer -1 \
+  --dataloader_num_workers 4 \
+  --bf16 True \
+  --num_train_epochs 1 \
+  --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \
+  --gradient_accumulation_steps ${GRADIENT_ACC} \
+  --evaluation_strategy "no" \
+  --save_strategy "steps" \
+  --save_steps 200 \
+  --save_total_limit 3 \
+  --learning_rate 1e-5 \
+  --weight_decay 0.05 \
+  --warmup_steps 100 \
+  --lr_scheduler_type "cosine" \
+  --logging_steps 1 \
+  --max_seq_length 4096 \
+  --do_train True \
+  --grad_checkpoint True \
+  --group_by_length False \
+  --dynamic_image_size True \
+  --use_thumbnail True \
+  --ps_version 'v2' \
+  --deepspeed "zero_stage3_config.json" \
+  --report_to "tensorboard" \
+  2>&1 | tee -a "${OUTPUT_DIR}/training_log.txt"
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl1.5/phi3_3_8b/internvl_chat_v1_5_phi3_3_8b_dynamic_res_finetune.sh ADDED
@@ -0,0 +1,76 @@
+set -x
+
+PARTITION=${PARTITION:-"INTERN2"}
+GPUS=${GPUS:-128}
+GPUS_PER_NODE=${GPUS_PER_NODE:-8}
+QUOTA_TYPE=${QUOTA_TYPE:-"reserved"}
+NODES=$((GPUS / GPUS_PER_NODE))
+CPUS_PER_TASK=${CPUS_PER_TASK:-10}
+SRUN_ARGS=${SRUN_ARGS:-""}
+BATCH_SIZE=${BATCH_SIZE:-1024}
+PER_DEVICE_BATCH_SIZE=${PER_DEVICE_BATCH_SIZE:-4}
+GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))
+
+
+export PYTHONPATH="${PYTHONPATH}:$(pwd)"
+export MASTER_PORT=34229
+export TF_CPP_MIN_LOG_LEVEL=3
+
+OUTPUT_DIR='work_dirs/internvl_chat_v1_5/internvl_chat_v1_5_phi3_3_8b_dynamic_res_finetune'
+
+if [ ! -d "$OUTPUT_DIR" ]; then
+  mkdir -p "$OUTPUT_DIR"
+fi
+
+# number of gpus: 128
+# batch size per gpu: 4
+# gradient accumulation steps: 2
+# total batch size: 1024
+# epoch: 1
+srun -p ${PARTITION} \
+  --gres=gpu:${GPUS_PER_NODE} \
+  --nodes=${NODES} \
+  --ntasks=${GPUS} \
+  --ntasks-per-node=${GPUS_PER_NODE} \
+  --cpus-per-task=${CPUS_PER_TASK} \
+  --kill-on-bad-exit=1 \
+  --quotatype=${QUOTA_TYPE} \
+  ${SRUN_ARGS} \
+  python -u internvl/train/internvl_chat_finetune.py \
+  --model_name_or_path "./work_dirs/internvl_chat_v1_5_phi3_3_8b_dynamic_res_finetune" \
+  --conv_style "phi3-chat" \
+  --output_dir ${OUTPUT_DIR} \
+  --meta_path "path/to/finetune/data.json" \
+  --overwrite_output_dir True \
+  --force_image_size 448 \
+  --max_dynamic_patch 12 \
+  --down_sample_ratio 0.5 \
+  --drop_path_rate 0.1 \
+  --freeze_llm False \
+  --freeze_mlp False \
+  --freeze_backbone False \
+  --vision_select_layer -1 \
+  --dataloader_num_workers 4 \
+  --bf16 True \
+  --num_train_epochs 1 \
+  --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \
+  --gradient_accumulation_steps ${GRADIENT_ACC} \
+  --evaluation_strategy "no" \
+  --save_strategy "steps" \
+  --save_steps 200 \
+  --save_total_limit 3 \
+  --learning_rate 4e-5 \
+  --weight_decay 0.05 \
+  --warmup_ratio 0.03 \
+  --lr_scheduler_type "cosine" \
+  --logging_steps 1 \
+  --max_seq_length 8192 \
+  --do_train True \
+  --grad_checkpoint True \
+  --group_by_length True \
+  --dynamic_image_size True \
+  --use_thumbnail True \
+  --ps_version 'v2' \
+  --deepspeed "zero_stage1_config.json" \
+  --report_to "tensorboard" \
+  2>&1 | tee -a "${OUTPUT_DIR}/training_log.txt"
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl1.5/phi3_3_8b/internvl_chat_v1_5_phi3_3_8b_dynamic_res_pretrain.sh ADDED
@@ -0,0 +1,77 @@
+set -x
+
+PARTITION=${PARTITION:-"INTERN2"}
+GPUS=${GPUS:-128}
+GPUS_PER_NODE=${GPUS_PER_NODE:-8}
+QUOTA_TYPE=${QUOTA_TYPE:-"reserved"}
+NODES=$((GPUS / GPUS_PER_NODE))
+CPUS_PER_TASK=${CPUS_PER_TASK:-10}
+SRUN_ARGS=${SRUN_ARGS:-""}
+BATCH_SIZE=${BATCH_SIZE:-2048}
+PER_DEVICE_BATCH_SIZE=${PER_DEVICE_BATCH_SIZE:-8}
+GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))
+
+
+export PYTHONPATH="${PYTHONPATH}:$(pwd)"
+export MASTER_PORT=34229
+export TF_CPP_MIN_LOG_LEVEL=3
+
+OUTPUT_DIR='work_dirs/internvl_chat_v1_5/internvl_chat_v1_5_phi3_3_8b_dynamic_res_pretrain'
+
+if [ ! -d "$OUTPUT_DIR" ]; then
+  mkdir -p "$OUTPUT_DIR"
+fi
+
+# number of gpus: 128
+# batch size per gpu: 8
+# gradient accumulation steps: 2
+# total batch size: 2048
+# epoch: 1
+srun -p ${PARTITION} \
+  --gres=gpu:${GPUS_PER_NODE} \
+  --nodes=${NODES} \
+  --ntasks=${GPUS} \
+  --ntasks-per-node=${GPUS_PER_NODE} \
+  --cpus-per-task=${CPUS_PER_TASK} \
+  --kill-on-bad-exit=1 \
+  --quotatype=${QUOTA_TYPE} \
+  ${SRUN_ARGS} \
+  python -u internvl/train/internvl_chat_pretrain.py \
+  --vision_path "./pretrained/InternViT-300M-448px" \
+  --llm_path "./pretrained/Phi-3-mini-128k-instruct" \
+  --conv_style "phi3-chat" \
+  --output_dir ${OUTPUT_DIR} \
+  --meta_path "path/to/pretrain/data.json" \
+  --overwrite_output_dir True \
+  --force_image_size 448 \
+  --max_dynamic_patch 12 \
+  --down_sample_ratio 0.5 \
+  --drop_path_rate 0.0 \
+  --freeze_llm True \
+  --freeze_mlp False \
+  --freeze_backbone True \
+  --vision_select_layer -1 \
+  --dataloader_num_workers 4 \
+  --bf16 True \
+  --num_train_epochs 1 \
+  --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \
+  --gradient_accumulation_steps ${GRADIENT_ACC} \
+  --evaluation_strategy "no" \
+  --save_strategy "steps" \
+  --save_steps 200 \
+  --save_total_limit 3 \
+  --learning_rate 2e-4 \
+  --weight_decay 0.05 \
+  --warmup_steps 100 \
+  --lr_scheduler_type "cosine" \
+  --logging_steps 1 \
+  --max_seq_length 4096 \
+  --do_train True \
+  --grad_checkpoint True \
+  --group_by_length False \
+  --dynamic_image_size True \
+  --use_thumbnail True \
+  --ps_version 'v2' \
+  --deepspeed "zero_stage1_config.json" \
+  --report_to "tensorboard" \
+  2>&1 | tee -a "${OUTPUT_DIR}/training_log.txt"
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.0/2nd_finetune/internvl2_1b_qwen2_0_5b_dynamic_res_2nd_finetune_full.sh ADDED
@@ -0,0 +1,68 @@
1
+ set -x
2
+
3
+ GPUS=${GPUS:-8}
4
+ BATCH_SIZE=${BATCH_SIZE:-128}
5
+ PER_DEVICE_BATCH_SIZE=${PER_DEVICE_BATCH_SIZE:-4}
6
+ GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))
7
+
8
+
9
+ export PYTHONPATH="${PYTHONPATH}:$(pwd)"
10
+ export MASTER_PORT=34229
11
+ export TF_CPP_MIN_LOG_LEVEL=3
12
+ export LAUNCHER=pytorch
13
+
14
+ OUTPUT_DIR='work_dirs/internvl_chat_v2_0/internvl2_1b_qwen2_0_5b_dynamic_res_2nd_finetune_full'
15
+
16
+ if [ ! -d "$OUTPUT_DIR" ]; then
17
+ mkdir -p "$OUTPUT_DIR"
18
+ fi
19
+
20
+ # number of gpus: 8
21
+ # batch size per gpu: 4
22
+ # gradient accumulation steps: 4
23
+ # total batch size: 128
24
+ # epoch: 1
25
+ torchrun \
26
+ --nnodes=1 \
27
+ --node_rank=0 \
28
+ --master_addr=127.0.0.1 \
29
+ --nproc_per_node=${GPUS} \
30
+ --master_port=${MASTER_PORT} \
31
+ internvl/train/internvl_chat_finetune.py \
32
+ --model_name_or_path "./pretrained/InternVL2-1B" \
33
+ --conv_style "Hermes-2" \
34
+ --output_dir ${OUTPUT_DIR} \
35
+ --meta_path "./shell/data/internvl_1_2_finetune_custom.json" \
36
+ --overwrite_output_dir True \
37
+ --force_image_size 448 \
38
+ --max_dynamic_patch 6 \
39
+ --down_sample_ratio 0.5 \
40
+ --drop_path_rate 0.1 \
41
+ --freeze_llm False \
42
+ --freeze_mlp False \
43
+ --freeze_backbone True \
44
+ --vision_select_layer -1 \
45
+ --dataloader_num_workers 4 \
46
+ --bf16 True \
47
+ --num_train_epochs 1 \
48
+ --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \
49
+ --gradient_accumulation_steps ${GRADIENT_ACC} \
50
+ --evaluation_strategy "no" \
51
+ --save_strategy "steps" \
52
+ --save_steps 200 \
53
+ --save_total_limit 1 \
54
+ --learning_rate 4e-5 \
55
+ --weight_decay 0.01 \
56
+ --warmup_ratio 0.03 \
57
+ --lr_scheduler_type "cosine" \
58
+ --logging_steps 1 \
59
+ --max_seq_length 4096 \
60
+ --do_train True \
61
+ --grad_checkpoint True \
62
+ --group_by_length True \
63
+ --dynamic_image_size True \
64
+ --use_thumbnail True \
65
+ --ps_version 'v2' \
66
+ --deepspeed "zero_stage1_config.json" \
67
+ --report_to "tensorboard" \
68
+ 2>&1 | tee -a "${OUTPUT_DIR}/training_log.txt"
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.0/2nd_finetune/internvl2_2b_internlm2_1_8b_dynamic_res_2nd_finetune_lora_coco.sh ADDED
@@ -0,0 +1,69 @@
1
+ set -x
2
+
3
+ GPUS=${GPUS:-8}
4
+ BATCH_SIZE=${BATCH_SIZE:-512}
5
+ PER_DEVICE_BATCH_SIZE=${PER_DEVICE_BATCH_SIZE:-4}
6
+ GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))
7
+
8
+
9
+ export PYTHONPATH="${PYTHONPATH}:$(pwd)"
10
+ export MASTER_PORT=34229
11
+ export TF_CPP_MIN_LOG_LEVEL=3
12
+ export LAUNCHER=pytorch
13
+
14
+ OUTPUT_DIR='work_dirs/internvl_chat_v2_0/internvl2_2b_internlm2_1_8b_dynamic_res_2nd_finetune_lora_coco'
15
+
16
+ if [ ! -d "$OUTPUT_DIR" ]; then
17
+ mkdir -p "$OUTPUT_DIR"
18
+ fi
19
+
20
+ # number of gpus: 8
21
+ # batch size per gpu: 4
22
+ # gradient accumulation steps: 16
23
+ # total batch size: 512
24
+ # epoch: 1
25
+ torchrun \
26
+ --nnodes=1 \
27
+ --node_rank=0 \
28
+ --master_addr=127.0.0.1 \
29
+ --nproc_per_node=${GPUS} \
30
+ --master_port=${MASTER_PORT} \
31
+ internvl/train/internvl_chat_finetune.py \
32
+ --model_name_or_path "./pretrained/InternVL2-2B" \
33
+ --conv_style "internlm2-chat" \
34
+ --output_dir ${OUTPUT_DIR} \
35
+ --meta_path "./shell/data/coco_caption.json" \
36
+ --overwrite_output_dir True \
37
+ --force_image_size 448 \
38
+ --max_dynamic_patch 6 \
39
+ --down_sample_ratio 0.5 \
40
+ --drop_path_rate 0.0 \
41
+ --freeze_llm True \
42
+ --freeze_mlp True \
43
+ --freeze_backbone True \
44
+ --use_llm_lora 128 \
45
+ --vision_select_layer -1 \
46
+ --dataloader_num_workers 4 \
47
+ --bf16 True \
48
+ --num_train_epochs 1 \
49
+ --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \
50
+ --gradient_accumulation_steps ${GRADIENT_ACC} \
51
+ --evaluation_strategy "no" \
52
+ --save_strategy "steps" \
53
+ --save_steps 200 \
54
+ --save_total_limit 1 \
55
+ --learning_rate 4e-5 \
56
+ --weight_decay 0.01 \
57
+ --warmup_ratio 0.03 \
58
+ --lr_scheduler_type "cosine" \
59
+ --logging_steps 1 \
60
+ --max_seq_length 4096 \
61
+ --do_train True \
62
+ --grad_checkpoint True \
63
+ --group_by_length True \
64
+ --dynamic_image_size True \
65
+ --use_thumbnail True \
66
+ --ps_version 'v2' \
67
+ --deepspeed "zero_stage1_config.json" \
68
+ --report_to "tensorboard" \
69
+ 2>&1 | tee -a "${OUTPUT_DIR}/training_log.txt"
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.0/2nd_finetune/internvl2_4b_phi3_3_8b_dynamic_res_2nd_finetune_full.sh ADDED
@@ -0,0 +1,68 @@
1
+ set -x
2
+
3
+ GPUS=${GPUS:-8}
4
+ BATCH_SIZE=${BATCH_SIZE:-128}
5
+ PER_DEVICE_BATCH_SIZE=${PER_DEVICE_BATCH_SIZE:-4}
6
+ GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))
7
+
8
+
9
+ export PYTHONPATH="${PYTHONPATH}:$(pwd)"
10
+ export MASTER_PORT=34229
11
+ export TF_CPP_MIN_LOG_LEVEL=3
12
+ export LAUNCHER=pytorch
13
+
14
+ OUTPUT_DIR='work_dirs/internvl_chat_v2_0/internvl2_4b_phi3_3_8b_dynamic_res_2nd_finetune_full'
15
+
16
+ if [ ! -d "$OUTPUT_DIR" ]; then
17
+ mkdir -p "$OUTPUT_DIR"
18
+ fi
19
+
20
+ # number of gpus: 8
21
+ # batch size per gpu: 4
22
+ # gradient accumulation steps: 4
23
+ # total batch size: 128
24
+ # epoch: 1
25
+ torchrun \
26
+ --nnodes=1 \
27
+ --node_rank=0 \
28
+ --master_addr=127.0.0.1 \
29
+ --nproc_per_node=${GPUS} \
30
+ --master_port=${MASTER_PORT} \
31
+ internvl/train/internvl_chat_finetune.py \
32
+ --model_name_or_path "./pretrained/InternVL2-4B" \
33
+ --conv_style "phi3-chat" \
34
+ --output_dir ${OUTPUT_DIR} \
35
+ --meta_path "./shell/data/internvl_1_2_finetune_custom.json" \
36
+ --overwrite_output_dir True \
37
+ --force_image_size 448 \
38
+ --max_dynamic_patch 6 \
39
+ --down_sample_ratio 0.5 \
40
+ --drop_path_rate 0.1 \
41
+ --freeze_llm False \
42
+ --freeze_mlp False \
43
+ --freeze_backbone True \
44
+ --vision_select_layer -1 \
45
+ --dataloader_num_workers 4 \
46
+ --bf16 True \
47
+ --num_train_epochs 1 \
48
+ --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \
49
+ --gradient_accumulation_steps ${GRADIENT_ACC} \
50
+ --evaluation_strategy "no" \
51
+ --save_strategy "steps" \
52
+ --save_steps 200 \
53
+ --save_total_limit 1 \
54
+ --learning_rate 4e-5 \
55
+ --weight_decay 0.01 \
56
+ --warmup_ratio 0.03 \
57
+ --lr_scheduler_type "cosine" \
58
+ --logging_steps 1 \
59
+ --max_seq_length 4096 \
60
+ --do_train True \
61
+ --grad_checkpoint True \
62
+ --group_by_length True \
63
+ --dynamic_image_size True \
64
+ --use_thumbnail True \
65
+ --ps_version 'v2' \
66
+ --deepspeed "zero_stage1_config.json" \
67
+ --report_to "tensorboard" \
68
+ 2>&1 | tee -a "${OUTPUT_DIR}/training_log.txt"
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.0/2nd_finetune/internvl2_4b_phi3_3_8b_dynamic_res_2nd_finetune_lora.sh ADDED
@@ -0,0 +1,69 @@
1
+ set -x
2
+
3
+ GPUS=${GPUS:-2}
4
+ BATCH_SIZE=${BATCH_SIZE:-16}
5
+ PER_DEVICE_BATCH_SIZE=${PER_DEVICE_BATCH_SIZE:-4}
6
+ GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))
7
+
8
+
9
+ export PYTHONPATH="${PYTHONPATH}:$(pwd)"
10
+ export MASTER_PORT=34229
11
+ export TF_CPP_MIN_LOG_LEVEL=3
12
+ export LAUNCHER=pytorch
13
+
14
+ OUTPUT_DIR='work_dirs/internvl_chat_v2_0/internvl2_4b_phi3_3_8b_dynamic_res_2nd_finetune_lora'
15
+
16
+ if [ ! -d "$OUTPUT_DIR" ]; then
17
+ mkdir -p "$OUTPUT_DIR"
18
+ fi
19
+
20
+ # number of gpus: 2
21
+ # batch size per gpu: 4
22
+ # gradient accumulation steps: 2
23
+ # total batch size: 16
24
+ # epoch: 1
25
+ torchrun \
26
+ --nnodes=1 \
27
+ --node_rank=0 \
28
+ --master_addr=127.0.0.1 \
29
+ --nproc_per_node=${GPUS} \
30
+ --master_port=${MASTER_PORT} \
31
+ internvl/train/internvl_chat_finetune.py \
32
+ --model_name_or_path "./pretrained/InternVL2-4B" \
33
+ --conv_style "phi3-chat" \
34
+ --output_dir ${OUTPUT_DIR} \
35
+ --meta_path "./shell/data/internvl_1_2_finetune_custom.json" \
36
+ --overwrite_output_dir True \
37
+ --force_image_size 448 \
38
+ --max_dynamic_patch 6 \
39
+ --down_sample_ratio 0.5 \
40
+ --drop_path_rate 0.0 \
41
+ --freeze_llm True \
42
+ --freeze_mlp True \
43
+ --freeze_backbone True \
44
+ --use_llm_lora 16 \
45
+ --vision_select_layer -1 \
46
+ --dataloader_num_workers 4 \
47
+ --bf16 True \
48
+ --num_train_epochs 1 \
49
+ --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \
50
+ --gradient_accumulation_steps ${GRADIENT_ACC} \
51
+ --evaluation_strategy "no" \
52
+ --save_strategy "steps" \
53
+ --save_steps 200 \
54
+ --save_total_limit 1 \
55
+ --learning_rate 4e-5 \
56
+ --weight_decay 0.01 \
57
+ --warmup_ratio 0.03 \
58
+ --lr_scheduler_type "cosine" \
59
+ --logging_steps 1 \
60
+ --max_seq_length 4096 \
61
+ --do_train True \
62
+ --grad_checkpoint True \
63
+ --group_by_length True \
64
+ --dynamic_image_size True \
65
+ --use_thumbnail True \
66
+ --ps_version 'v2' \
67
+ --deepspeed "zero_stage1_config.json" \
68
+ --report_to "tensorboard" \
69
+ 2>&1 | tee -a "${OUTPUT_DIR}/training_log.txt"
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.0/2nd_finetune/internvl2_76b_hermes2_llama3_70b_dynamic_res_2nd_finetune_full.sh ADDED
@@ -0,0 +1,76 @@
1
+ set -x
2
+
3
+ PARTITION=${PARTITION:-"INTERN2"}
4
+ GPUS=${GPUS:-32}
5
+ GPUS_PER_NODE=${GPUS_PER_NODE:-8}
6
+ QUOTA_TYPE=${QUOTA_TYPE:-"reserved"}
7
+ NODES=$((GPUS / GPUS_PER_NODE))
8
+ CPUS_PER_TASK=${CPUS_PER_TASK:-10}
9
+ SRUN_ARGS=${SRUN_ARGS:-""}
10
+ BATCH_SIZE=${BATCH_SIZE:-128}
11
+ PER_DEVICE_BATCH_SIZE=${PER_DEVICE_BATCH_SIZE:-1}
12
+ GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))
13
+
14
+
15
+ export PYTHONPATH="${PYTHONPATH}:$(pwd)"
16
+ export MASTER_PORT=34229
17
+ export TF_CPP_MIN_LOG_LEVEL=3
18
+
19
+ OUTPUT_DIR='work_dirs/internvl_chat_v2_0/internvl2_76b_hermes2_llama3_70b_dynamic_res_2nd_finetune_full'
20
+
21
+ if [ ! -d "$OUTPUT_DIR" ]; then
22
+ mkdir -p "$OUTPUT_DIR"
23
+ fi
24
+
25
+ # number of gpus: 32
26
+ # batch size per gpu: 1
27
+ # gradient accumulation steps: 4
28
+ # total batch size: 128
29
+ # epoch: 1
30
+ srun -p ${PARTITION} \
31
+ --gres=gpu:${GPUS_PER_NODE} \
32
+ --nodes=${NODES} \
33
+ --ntasks=${GPUS} \
34
+ --ntasks-per-node=${GPUS_PER_NODE} \
35
+ --cpus-per-task=${CPUS_PER_TASK} \
36
+ --kill-on-bad-exit=1 \
37
+ --quotatype=${QUOTA_TYPE} \
38
+ ${SRUN_ARGS} \
39
+ python -u internvl/train/internvl_chat_finetune.py \
40
+ --model_name_or_path "./pretrained/InternVL2-Llama3-76B" \
41
+ --conv_style "internlm2-chat" \
42
+ --output_dir ${OUTPUT_DIR} \
43
+ --meta_path "./shell/data/internvl_1_2_finetune_custom.json" \
44
+ --overwrite_output_dir True \
45
+ --force_image_size 448 \
46
+ --max_dynamic_patch 6 \
47
+ --down_sample_ratio 0.5 \
48
+ --drop_path_rate 0.4 \
49
+ --freeze_llm False \
50
+ --freeze_mlp False \
51
+ --freeze_backbone True \
52
+ --vision_select_layer -1 \
53
+ --dataloader_num_workers 4 \
54
+ --bf16 True \
55
+ --num_train_epochs 1 \
56
+ --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \
57
+ --gradient_accumulation_steps ${GRADIENT_ACC} \
58
+ --evaluation_strategy "no" \
59
+ --save_strategy "steps" \
60
+ --save_steps 200 \
61
+ --save_total_limit 1 \
62
+ --learning_rate 2e-5 \
63
+ --weight_decay 0.05 \
64
+ --warmup_ratio 0.03 \
65
+ --lr_scheduler_type "cosine" \
66
+ --logging_steps 1 \
67
+ --max_seq_length 4096 \
68
+ --do_train True \
69
+ --grad_checkpoint True \
70
+ --group_by_length True \
71
+ --dynamic_image_size True \
72
+ --use_thumbnail True \
73
+ --ps_version 'v2' \
74
+ --deepspeed "zero_stage3_config_100b.json" \
75
+ --report_to "tensorboard" \
76
+ 2>&1 | tee -a "${OUTPUT_DIR}/training_log.txt"
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.0/2nd_finetune/internvl2_76b_hermes2_llama3_70b_dynamic_res_2nd_finetune_lora.sh ADDED
@@ -0,0 +1,69 @@
1
+ set -x
2
+
3
+ GPUS=${GPUS:-8}
4
+ BATCH_SIZE=${BATCH_SIZE:-16}
5
+ PER_DEVICE_BATCH_SIZE=${PER_DEVICE_BATCH_SIZE:-1}
6
+ GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))
7
+
8
+
9
+ export PYTHONPATH="${PYTHONPATH}:$(pwd)"
10
+ export MASTER_PORT=34229
11
+ export TF_CPP_MIN_LOG_LEVEL=3
12
+ export LAUNCHER=pytorch
13
+
14
+ OUTPUT_DIR='work_dirs/internvl_chat_v2_0/internvl2_76b_hermes2_llama3_70b_dynamic_res_2nd_finetune_lora'
15
+
16
+ if [ ! -d "$OUTPUT_DIR" ]; then
17
+ mkdir -p "$OUTPUT_DIR"
18
+ fi
19
+
20
+ # number of gpus: 8
21
+ # batch size per gpu: 1
22
+ # gradient accumulation steps: 2
23
+ # total batch size: 16
24
+ # epoch: 1
25
+ torchrun \
26
+ --nnodes=1 \
27
+ --node_rank=0 \
28
+ --master_addr=127.0.0.1 \
29
+ --nproc_per_node=${GPUS} \
30
+ --master_port=${MASTER_PORT} \
31
+ internvl/train/internvl_chat_finetune.py \
32
+ --model_name_or_path "./pretrained/InternVL2-Llama3-76B" \
33
+ --conv_style "internlm2-chat" \
34
+ --output_dir ${OUTPUT_DIR} \
35
+ --meta_path "./shell/data/internvl_1_2_finetune_custom.json" \
36
+ --overwrite_output_dir True \
37
+ --force_image_size 448 \
38
+ --max_dynamic_patch 6 \
39
+ --down_sample_ratio 0.5 \
40
+ --drop_path_rate 0.0 \
41
+ --freeze_llm True \
42
+ --freeze_mlp True \
43
+ --freeze_backbone True \
44
+ --use_llm_lora 16 \
45
+ --vision_select_layer -1 \
46
+ --dataloader_num_workers 4 \
47
+ --bf16 True \
48
+ --num_train_epochs 1 \
49
+ --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \
50
+ --gradient_accumulation_steps ${GRADIENT_ACC} \
51
+ --evaluation_strategy "no" \
52
+ --save_strategy "steps" \
53
+ --save_steps 200 \
54
+ --save_total_limit 1 \
55
+ --learning_rate 2e-5 \
56
+ --weight_decay 0.05 \
57
+ --warmup_ratio 0.03 \
58
+ --lr_scheduler_type "cosine" \
59
+ --logging_steps 1 \
60
+ --max_seq_length 4096 \
61
+ --do_train True \
62
+ --grad_checkpoint True \
63
+ --group_by_length True \
64
+ --dynamic_image_size True \
65
+ --use_thumbnail True \
66
+ --ps_version 'v2' \
67
+ --deepspeed "zero_stage3_config_100b.json" \
68
+ --report_to "tensorboard" \
69
+ 2>&1 | tee -a "${OUTPUT_DIR}/training_log.txt"
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.0/2nd_finetune/internvl2_8b_internlm2_7b_dynamic_res_2nd_finetune_full.sh ADDED
@@ -0,0 +1,68 @@
1
+ set -x
2
+
3
+ GPUS=${GPUS:-8}
4
+ BATCH_SIZE=${BATCH_SIZE:-128}
5
+ PER_DEVICE_BATCH_SIZE=${PER_DEVICE_BATCH_SIZE:-4}
6
+ GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))
7
+
8
+
9
+ export PYTHONPATH="${PYTHONPATH}:$(pwd)"
10
+ export MASTER_PORT=34229
11
+ export TF_CPP_MIN_LOG_LEVEL=3
12
+ export LAUNCHER=pytorch
13
+
14
+ OUTPUT_DIR='work_dirs/internvl_chat_v2_0/internvl2_8b_internlm2_7b_dynamic_res_2nd_finetune_full'
15
+
16
+ if [ ! -d "$OUTPUT_DIR" ]; then
17
+ mkdir -p "$OUTPUT_DIR"
18
+ fi
19
+
20
+ # number of gpus: 8
21
+ # batch size per gpu: 4
22
+ # gradient accumulation steps: 4
23
+ # total batch size: 128
24
+ # epoch: 1
25
+ torchrun \
26
+ --nnodes=1 \
27
+ --node_rank=0 \
28
+ --master_addr=127.0.0.1 \
29
+ --nproc_per_node=${GPUS} \
30
+ --master_port=${MASTER_PORT} \
31
+ internvl/train/internvl_chat_finetune.py \
32
+ --model_name_or_path "./pretrained/InternVL2-8B" \
33
+ --conv_style "internlm2-chat" \
34
+ --output_dir ${OUTPUT_DIR} \
35
+ --meta_path "./shell/data/internvl_1_2_finetune_custom.json" \
36
+ --overwrite_output_dir True \
37
+ --force_image_size 448 \
38
+ --max_dynamic_patch 6 \
39
+ --down_sample_ratio 0.5 \
40
+ --drop_path_rate 0.1 \
41
+ --freeze_llm False \
42
+ --freeze_mlp False \
43
+ --freeze_backbone True \
44
+ --vision_select_layer -1 \
45
+ --dataloader_num_workers 4 \
46
+ --bf16 True \
47
+ --num_train_epochs 1 \
48
+ --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \
49
+ --gradient_accumulation_steps ${GRADIENT_ACC} \
50
+ --evaluation_strategy "no" \
51
+ --save_strategy "steps" \
52
+ --save_steps 200 \
53
+ --save_total_limit 1 \
54
+ --learning_rate 4e-5 \
55
+ --weight_decay 0.05 \
56
+ --warmup_ratio 0.03 \
57
+ --lr_scheduler_type "cosine" \
58
+ --logging_steps 1 \
59
+ --max_seq_length 4096 \
60
+ --do_train True \
61
+ --grad_checkpoint True \
62
+ --group_by_length True \
63
+ --dynamic_image_size True \
64
+ --use_thumbnail True \
65
+ --ps_version 'v2' \
66
+ --deepspeed "zero_stage1_config.json" \
67
+ --report_to "tensorboard" \
68
+ 2>&1 | tee -a "${OUTPUT_DIR}/training_log.txt"
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.0/2nd_finetune/internvl2_8b_internlm2_7b_dynamic_res_2nd_finetune_lora.sh ADDED
@@ -0,0 +1,69 @@
1
+ set -x
2
+
3
+ GPUS=${GPUS:-2}
4
+ BATCH_SIZE=${BATCH_SIZE:-16}
5
+ PER_DEVICE_BATCH_SIZE=${PER_DEVICE_BATCH_SIZE:-4}
6
+ GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))
7
+
8
+
9
+ export PYTHONPATH="${PYTHONPATH}:$(pwd)"
10
+ export MASTER_PORT=34229
11
+ export TF_CPP_MIN_LOG_LEVEL=3
12
+ export LAUNCHER=pytorch
13
+
14
+ OUTPUT_DIR='work_dirs/internvl_chat_v2_0/internvl2_8b_internlm2_7b_dynamic_res_2nd_finetune_lora'
15
+
16
+ if [ ! -d "$OUTPUT_DIR" ]; then
17
+ mkdir -p "$OUTPUT_DIR"
18
+ fi
19
+
20
+ # number of gpus: 2
21
+ # batch size per gpu: 4
22
+ # gradient accumulation steps: 2
23
+ # total batch size: 16
24
+ # epoch: 1
25
+ torchrun \
26
+ --nnodes=1 \
27
+ --node_rank=0 \
28
+ --master_addr=127.0.0.1 \
29
+ --nproc_per_node=${GPUS} \
30
+ --master_port=${MASTER_PORT} \
31
+ internvl/train/internvl_chat_finetune.py \
32
+ --model_name_or_path "./pretrained/InternVL2-8B" \
33
+ --conv_style "internlm2-chat" \
34
+ --output_dir ${OUTPUT_DIR} \
35
+ --meta_path "./shell/data/internvl_1_2_finetune_custom.json" \
36
+ --overwrite_output_dir True \
37
+ --force_image_size 448 \
38
+ --max_dynamic_patch 6 \
39
+ --down_sample_ratio 0.5 \
40
+ --drop_path_rate 0.0 \
41
+ --freeze_llm True \
42
+ --freeze_mlp True \
43
+ --freeze_backbone True \
44
+ --use_llm_lora 16 \
45
+ --vision_select_layer -1 \
46
+ --dataloader_num_workers 4 \
47
+ --bf16 True \
48
+ --num_train_epochs 1 \
49
+ --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \
50
+ --gradient_accumulation_steps ${GRADIENT_ACC} \
51
+ --evaluation_strategy "no" \
52
+ --save_strategy "steps" \
53
+ --save_steps 200 \
54
+ --save_total_limit 1 \
55
+ --learning_rate 4e-5 \
56
+ --weight_decay 0.05 \
57
+ --warmup_ratio 0.03 \
58
+ --lr_scheduler_type "cosine" \
59
+ --logging_steps 1 \
60
+ --max_seq_length 4096 \
61
+ --do_train True \
62
+ --grad_checkpoint True \
63
+ --group_by_length True \
64
+ --dynamic_image_size True \
65
+ --use_thumbnail True \
66
+ --ps_version 'v2' \
67
+ --deepspeed "zero_stage1_config.json" \
68
+ --report_to "tensorboard" \
69
+ 2>&1 | tee -a "${OUTPUT_DIR}/training_log.txt"
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.0_mpo/README.md ADDED
@@ -0,0 +1,150 @@
1
+ # Enhancing the Reasoning Ability of Multimodal Large Language Models via Mixed Preference Optimization
2
+
3
+ [\[📂 GitHub\]](https://github.com/OpenGVLab/InternVL/tree/main/internvl_chat/shell/internvl2.0_mpo) [\[🆕 Blog\]](https://internvl.github.io/blog/2024-11-14-InternVL-2.0-MPO/) [\[📜 Paper\]](https://arxiv.org/abs/2411.10442) [\[📖 Documents\]](https://internvl.readthedocs.io/en/latest/internvl2.0/preference_optimization.html)
4
+
5
+ ## Introduction
6
+
7
+ Existing open-source multimodal large language models (MLLMs) generally follow a training process involving pre-training and supervised fine-tuning. However, these models suffer from distribution shifts, which limit their multimodal reasoning, particularly their Chain-of-Thought (CoT) performance.
8
+
9
+ To address this, we introduce a preference optimization (PO) process to enhance the multimodal reasoning capabilities of MLLMs. Specifically, (1) on the data side, we design an automated preference data construction pipeline to create [MMPR](https://huggingface.co/datasets/OpenGVLab/MMPR), a high-quality, large-scale multimodal reasoning preference dataset; and (2) on the model side, we explore integrating PO with MLLMs, developing a simple yet effective method, termed Mixed Preference Optimization (MPO), which boosts multimodal CoT performance.
10
+
11
+ Our approach demonstrates improved performance across multiple benchmarks, particularly in multimodal reasoning tasks. Notably, our model, [InternVL2-8B-MPO](https://huggingface.co/OpenGVLab/InternVL2-8B-MPO), achieves an accuracy of 67.0 on MathVista, outperforming InternVL2-8B by 8.7 points and achieving performance comparable to the 10$`\times`$ larger InternVL2-76B. We hope this study could inspire further advancements in MLLMs.
12
+
13
+ ![image/jpeg](https://cdn-uploads.huggingface.co/production/uploads/619507e7b74b6c591f794340/sy8aVC1Y5wtAjG-OQzrDI.jpeg)
14
+
15
+ ## MMPR Dataset
16
+
17
+ MMPR is a large-scale and high-quality multimodal reasoning preference dataset. This dataset includes about 3 million samples.
18
+
19
+ ![image/jpeg](https://cdn-uploads.huggingface.co/production/uploads/619507e7b74b6c591f794340/mmXL47UPDFwYOWdn9Z6j5.jpeg)
20
+ ![image/jpeg](https://cdn-uploads.huggingface.co/production/uploads/619507e7b74b6c591f794340/6fnvI_wCd9JXAs6vYthaG.jpeg)
21
+
22
+ To construct this dataset, we propose an efficient data construction pipeline. Specifically, we categorize the multimodal data into **samples with clear ground truths** and **samples without clear ground truths**.
23
+
24
+ - **For samples with clear ground truths:**
25
+ the model is prompted to first provide the reasoning process and then give the final answer in the format like `Final Answer: ***`.
26
+ Responses matching the ground truth answer constitute the positive set $\\mathcal{Y}\_p$, while those that do not match make up the negative set $\\mathcal{Y}\_n$. Additionally, responses that fail to provide a clear final answer are also merged into $\\mathcal{Y}\_n$.
27
+ Given these responses labeled as positive or negative, we build the preference pairs by selecting a chosen response $y_c$ from $\\mathcal{Y}\_p$ and a negative response $y_r$ from $\\mathcal{Y}\_n$.
28
+
29
+ - **For samples without clear ground truths:**
30
+ we propose a simple yet effective method: Dropout Next-Token Prediction (Dropout NTP).
31
+ Specifically, we use the responses generated by InternVL2-8B as chosen answers.
32
+ Given the chosen answer, we truncate it by half and then prompt InternVL2-8B to complete the remaining
33
+ portion of the truncated answer without access to the image input.
34
+ This generated completion serves as the rejected answer for the paired sample.
35
+ It is worth noting that while the responses generated by InternVL2-8B may not be perfect,
36
+ the completions generated without the image input will introduce more hallucinations than those
37
+ generated with the image input.
38
+ Therefore, the partial order relationship between the chosen and rejected responses holds true.
39
+
40
+ The data construction pipeline is open-sourced, see more details in our [document](https://internvl.readthedocs.io/en/latest/internvl2.0/preference_optimization.html#generate-additional-preference-data).
41
+
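+ As an illustration of the two pairing strategies described above, here is a minimal sketch in Python. The helper callables `generate` (samples a response from the model, optionally with the image) and `extract_final_answer` (parses the `Final Answer: ***` line), as well as the sampling count, are hypothetical stand-ins for this sketch and are not part of the released pipeline.
+
+ ```python
+ from typing import Callable, Optional, Tuple
+
+ def pair_with_ground_truth(
+     generate: Callable[..., str],
+     extract_final_answer: Callable[[str], Optional[str]],
+     prompt: str,
+     image: object,
+     ground_truth: str,
+     num_samples: int = 8,
+ ) -> Optional[Tuple[str, str]]:
+     """Split sampled responses into positive / negative sets by matching the final answer."""
+     positives, negatives = [], []
+     for _ in range(num_samples):
+         response = generate(prompt, image=image)
+         answer = extract_final_answer(response)
+         # Responses with a wrong final answer, or no clear final answer, go to the negative set.
+         (positives if answer is not None and answer == ground_truth else negatives).append(response)
+     if positives and negatives:
+         return positives[0], negatives[0]  # (chosen y_c, rejected y_r)
+     return None
+
+ def pair_dropout_ntp(generate: Callable[..., str], prompt: str, image: object) -> Tuple[str, str]:
+     """Dropout NTP: the rejected answer completes a truncated response without the image."""
+     chosen = generate(prompt, image=image)
+     truncated = chosen[: len(chosen) // 2]
+     rejected = truncated + generate(prompt + truncated, image=None)
+     return chosen, rejected
+ ```
+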
42
+ ## Mixed Preference Optimization
43
+
44
+ The key insight behind MPO is that *an effective PO process should enable the model to learn the relative preference between pairs of responses, the absolute quality of individual responses, and the process for generating preferred responses.* We define the training objective as a combination of
45
+ preference loss $`\mathcal{L}_{\text{p}}`$,
46
+ quality loss $`\mathcal{L}_{\text{q}}`$,
47
+ and generation loss $`\mathcal{L}_{\text{g}}`$,
48
+ referred to as Mixed Preference Optimization:
49
+
50
+ ```math
51
+ \mathcal{L}=w_{p}\cdot\mathcal{L}_{\text{p}} + w_{q}\cdot\mathcal{L}_{\text{q}} + w_{g}\cdot\mathcal{L}_{\text{g}},
52
+ ```
53
+
54
+ where $w\_{\*}$ represents the weight assigned to each loss component.
55
+ In this work, we empirically compare different variants of preference loss.
56
+ Based on the experimental results, we use DPO as our preference loss and BCO as our quality loss.
57
+
58
+ Specifically, the DPO serves as the preference loss to enable the model to learn the
59
+ relative preference between chosen and rejected responses.
60
+ This algorithm optimizes the following loss function:
61
+
62
+ ```math
63
+ \mathcal{L}_{\text{p}}=-\log \sigma\left(\beta \log \frac{\pi_\theta\left(y_c \mid x\right)}{\pi_0\left(y_c \mid x\right)}-\beta \log \frac{\pi_\theta\left(y_r \mid x\right)}{\pi_0\left(y_r \mid x\right)}\right),
64
+ ```
65
+
66
+ where $\\beta$ is the KL penalty coefficient, and $x$, $y_c$, and $y_r$ are user query, chosen response, and rejected response, respectively.
67
+ The policy model $\\pi\_\\theta$ is initialized from model $\\pi_0$.
68
+
69
+ Additionally, the BCO loss is employed as the quality loss, which helps the model to understand the absolute quality of individual responses.
70
+ The loss function is defined as:
71
+
72
+ ```math
73
+ \mathcal{L}_{\text{q}}=\mathcal{L}_{\text{q}}^+ + \mathcal{L}_{\text{q}}^-,
74
+ ```
75
+
76
+ where $`\mathcal{L}_{\text{q}}^{+}`$ and $`\mathcal{L}_{\text{q}}^{-}`$ represent the loss for chosen and rejected responses, respectively.
77
+ Each response type's loss is calculated independently, requiring the model to differentiate the absolute quality of individual responses. The loss terms are given by:
78
+
79
+ ```math
80
+ \mathcal{L}_{\text{q}}^+=-\log \sigma\left(\beta \log \frac{\pi_\theta\left(y_c \mid x\right)}{\pi_0\left(y_c \mid x\right)} - \delta\right),
81
+ ```
82
+
83
+ ```math
84
+ \mathcal{L}_{\text{q}}^-=-\log \sigma\left(-\left(\beta \log \frac{\pi_\theta\left(y_r \mid x\right)}{\pi_0\left(y_r \mid x\right)} - \delta\right) \right),
85
+ ```
86
+
87
+ where $\\delta$ represents the reward shift, calculated as the moving average of previous rewards to stabilize training.
88
+
89
+ Finally, the SFT loss is used as the generation loss to help the model learn the generation process of preferred responses.
90
+ The loss function is defined as:
91
+
92
+ ```math
93
+ \mathcal{L}_{\text{g}}=-\frac{\log\pi_\theta\left(y_c \mid x\right)}{\left| y_c \right|}.
94
+ ```
95
+
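+ To make the combined objective concrete, below is a minimal PyTorch-style sketch of the three terms, assuming the per-response log-probabilities under the policy and reference models have already been computed; the function name `mpo_loss` and its argument names are illustrative, not the actual training API. In the released MPO training script, the flags `--sigmoid_loss_weight 0.8`, `--bco_pair_loss_weight 0.2`, and `--rpo_alpha 1` appear to play the roles of the weights $w\_p$, $w\_q$, and $w\_g$, respectively.
+
+ ```python
+ import torch.nn.functional as F
+
+ def mpo_loss(policy_chosen_logps, policy_rejected_logps,
+              ref_chosen_logps, ref_rejected_logps,
+              chosen_nll, delta, beta=0.1,
+              w_p=0.8, w_q=0.2, w_g=1.0):
+     """Sketch of the MPO objective: preference (DPO) + quality (BCO) + generation (SFT).
+
+     *_logps: summed log-probabilities of each response under the policy / reference model.
+     chosen_nll: length-normalized negative log-likelihood of the chosen response.
+     delta: moving-average reward shift used by the quality loss.
+     """
+     chosen_reward = beta * (policy_chosen_logps - ref_chosen_logps)
+     rejected_reward = beta * (policy_rejected_logps - ref_rejected_logps)
+
+     # Preference loss (DPO, sigmoid variant): relative ordering within each pair.
+     loss_p = -F.logsigmoid(chosen_reward - rejected_reward)
+
+     # Quality loss (BCO): absolute quality of each response relative to the reward shift.
+     loss_q = -F.logsigmoid(chosen_reward - delta) - F.logsigmoid(-(rejected_reward - delta))
+
+     # Generation loss (SFT): learn to produce the chosen response.
+     loss_g = chosen_nll
+
+     return (w_p * loss_p + w_q * loss_q + w_g * loss_g).mean()
+ ```
+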
96
+ ## Models and Performance
97
+
98
+ Our [InternVL2-8B-MPO](https://huggingface.co/OpenGVLab/InternVL2-8B-MPO) achieves superior performance across 8 benchmarks, particularly excelling in multimodal reasoning tasks.
99
+ **On the MathVista benchmark, our model achieves an accuracy of 67.0%**, outperforming InternVL2-8B by 8.7 points and achieving performance comparable to the 10$`\times`$ larger InternVL2-76B.
100
+ **On the MathVision benchmark, our model achieves an accuracy of 25.7%**, establishing a new state-of-the-art performance among open-source models.
101
+ These results demonstrate the effectiveness of our preference optimization approach in enhancing multimodal reasoning capabilities.
102
+
103
+ Additionally, on the POPE benchmark, our model exhibits a 1.2-point improvement over InternVL2-8B, demonstrating that the perception data contained in our MMPR dataset helps mitigate hallucinations.
104
+
105
+ Furthermore, our model also shows superior performance compared to InternVL2-8B on complex VQA benchmarks, indicating that its general abilities also improve, benefiting from enhanced reasoning and mitigated hallucinations.
106
+
107
+ | Model Name | M3CoT | MathVista | MathVision MINI | MMVet (GPT4-Turbo) | LLaVA-Bench | POPE | CRPE | MMHalBench |
108
+ | ----------------------- | :---: | :-------: | :-------------: | :----------------: | :---------: | :--: | :--: | :--------: |
109
+ | Gemini-1.5-Pro | - | 63.9 | 19.2 | - | - | - | - | - |
110
+ | GPT-4o | 64.3 | 63.8 | 30.4 | 69.1 | 97.6 | 86.9 | 76.6 | 4.0 |
111
+ | GPT-4o-Mini | 61.9 | 52.4 | 27.3 | 66.9 | 95.4 | 85.1 | 73.1 | 3.6 |
112
+ | LLaVA-1.5-13B | 39.5 | 27.6 | 11.1 | 36.3 | 70.7 | 85.9 | 55.6 | 2.4 |
113
+ | Qwen2-VL-7B | 57.8 | 58.2 | 21.1 | 60.6 | 67.7 | 88.1 | 74.4 | 3.4 |
114
+ | MiniCPM-V-2-6-8B | 56.0 | 60.6 | 23.4 | 57.4 | 83.4 | 87.3 | 75.2 | 3.6 |
115
+ | LLaVA-OneVision-7B | 52.3 | 63.2 | 18.4 | 51.4 | 79.9 | 88.4 | 73.7 | 3.1 |
116
+ | InternVL2-26B | 58.2 | 59.4 | 23.4 | 62.1 | 92.3 | 88.0 | 75.6 | 3.7 |
117
+ | InternVL2-40B | 63.6 | 63.7 | 21.4 | 65.5 | 100.5 | 88.4 | 77.3 | 3.9 |
118
+ | InternVL2-76B | 65.4 | 67.5 | 23.7 | 65.7 | 99.3 | 89.0 | 77.8 | 3.8 |
119
+ | InternVL2-Pro | 65.6 | 66.3 | 18.8 | 69.4 | 99.5 | 88.2 | 77.6 | 3.7 |
120
+ | InternVL2-8B | 59.3 | 58.3 | 20.4 | 54.2 | 73.2 | 86.9 | 75.5 | 3.3 |
121
+ | InternVL2-8B-MPO (ours) | 79.2 | 67.0 | 25.7 | 56.2 | 76.7 | 88.1 | 75.4 | 3.5 |
122
+
123
+ ## Train
124
+
125
+ Please refer to [our document](https://internvl.readthedocs.io/en/latest/internvl2.0/preference_optimization.html) for more details about how to train with our data.
126
+
127
+ ## Citation
128
+
129
+ If you find this project useful in your research, please consider citing:
130
+
131
+ ```BibTeX
132
+ @article{wang2024mpo,
133
+ title={Enhancing the Reasoning Ability of Multimodal Large Language Models via Mixed Preference Optimization},
134
+ author={Wang, Weiyun and Chen, Zhe and Wang, Wenhai and Cao, Yue and Liu, Yangzhou and Gao, Zhangwei and Zhu, Jinguo and Zhu, Xizhou and Lu, Lewei and Qiao, Yu and Dai, Jifeng},
135
+ journal={arXiv preprint arXiv:2411.10442},
136
+ year={2024}
137
+ }
138
+ @article{chen2023internvl,
139
+ title={InternVL: Scaling up Vision Foundation Models and Aligning for Generic Visual-Linguistic Tasks},
140
+ author={Chen, Zhe and Wu, Jiannan and Wang, Wenhai and Su, Weijie and Chen, Guo and Xing, Sen and Zhong, Muyan and Zhang, Qinglong and Zhu, Xizhou and Lu, Lewei and Li, Bin and Luo, Ping and Lu, Tong and Qiao, Yu and Dai, Jifeng},
141
+ journal={arXiv preprint arXiv:2312.14238},
142
+ year={2023}
143
+ }
144
+ @article{chen2024far,
145
+ title={How Far Are We to GPT-4V? Closing the Gap to Commercial Multimodal Models with Open-Source Suites},
146
+ author={Chen, Zhe and Wang, Weiyun and Tian, Hao and Ye, Shenglong and Gao, Zhangwei and Cui, Erfei and Tong, Wenwen and Hu, Kongzhi and Luo, Jiapeng and Ma, Zheng and others},
147
+ journal={arXiv preprint arXiv:2404.16821},
148
+ year={2024}
149
+ }
150
+ ```
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.0_mpo/preference_optimization/internvl2_8b_internlm2_7b_dynamic_res_mpo_full.sh ADDED
@@ -0,0 +1,81 @@
1
+ set -x
2
+
3
+ PARTITION=${PARTITION:-"Intern5"}
4
+ GPUS=${GPUS:-256}
5
+ GPUS_PER_NODE=${GPUS_PER_NODE:-8}
6
+ QUOTA_TYPE=${QUOTA_TYPE:-"reserved"}
7
+ NODES=$((GPUS / GPUS_PER_NODE))
8
+ CPUS_PER_TASK=${CPUS_PER_TASK:-10}
9
+ SRUN_ARGS=${SRUN_ARGS:-""}
10
+ BATCH_SIZE=${BATCH_SIZE:-256}
11
+ PER_DEVICE_BATCH_SIZE=${PER_DEVICE_BATCH_SIZE:-1}
12
+ GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))
13
+
14
+ export PYTHONPATH="/mnt/petrelfs/wangweiyun/workspace_cz/InternVL/internvl_chat_dev/petrel-oss-python-sdk"
15
+ export PYTHONPATH="${PYTHONPATH}:$(pwd)"
16
+ export MASTER_PORT=34229
17
+ export TF_CPP_MIN_LOG_LEVEL=3
18
+
19
+ OUTPUT_DIR='work_dirs/internvl_chat_v2_0/internvl2_8b_internlm2_7b_dynamic_res_mpo_full'
20
+
21
+ if [ ! -d "$OUTPUT_DIR" ]; then
22
+ mkdir -p "$OUTPUT_DIR"
23
+ fi
24
+
25
+ # number of gpus: 256
26
+ # batch size per gpu: 1
27
+ # gradient accumulation steps: 1
28
+ # total batch size: 256
29
+ # epoch: 1
30
+ srun -p ${PARTITION} \
31
+ --gres=gpu:${GPUS_PER_NODE} \
32
+ --nodes=${NODES} \
33
+ --ntasks=${GPUS} \
34
+ --ntasks-per-node=${GPUS_PER_NODE} \
35
+ --cpus-per-task=${CPUS_PER_TASK} \
36
+ --kill-on-bad-exit=1 \
37
+ --quotatype=${QUOTA_TYPE} \
38
+ ${SRUN_ARGS} \
39
+ python -u internvl/train/internvl_chat_dpo.py \
40
+ --model_name_or_path "ckpt/OpenGVLab/InternVL2-8B" \
41
+ --conv_style "internlm2-chat" \
42
+ --output_dir ${OUTPUT_DIR} \
43
+ --meta_path "MMPR/meta.json" \
44
+ --overwrite_output_dir True \
45
+ --force_image_size 448 \
46
+ --down_sample_ratio 0.5 \
47
+ --drop_path_rate 0.1 \
48
+ --pad2square False \
49
+ --freeze_llm False \
50
+ --freeze_mlp False \
51
+ --freeze_backbone False \
52
+ --vision_select_layer -1 \
53
+ --use_data_resampling False \
54
+ --dataloader_num_workers 8 \
55
+ --bf16 True \
56
+ --num_train_epochs 1 \
57
+ --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \
58
+ --gradient_accumulation_steps ${GRADIENT_ACC} \
59
+ --evaluation_strategy "no" \
60
+ --save_strategy "no" \
61
+ --save_steps 100 \
62
+ --save_total_limit 100 \
63
+ --learning_rate 5e-6 \
64
+ --weight_decay 0.05 \
65
+ --warmup_ratio 0.03 \
66
+ --lr_scheduler_type "cosine" \
67
+ --logging_steps 1 \
68
+ --max_seq_length 6144 \
69
+ --do_train True \
70
+ --grad_checkpoint True \
71
+ --group_by_length False \
72
+ --dynamic_image_size True \
73
+ --use_thumbnail True \
74
+ --ps_version 'v2' \
75
+ --deepspeed "zero_stage1_config.json" \
76
+ --report_to "tensorboard" \
77
+ --loss_type sigmoid,bco_pair \
78
+ --sigmoid_loss_weight 0.8 \
79
+ --bco_pair_loss_weight 0.2 \
80
+ --rpo_alpha 1 \
81
+ 2>&1 | tee -a "${OUTPUT_DIR}/training_log.txt"
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.5/2nd_finetune/internvl2_5_1b_dynamic_res_2nd_finetune_full.sh ADDED
@@ -0,0 +1,69 @@
1
+ set -x
2
+
3
+ GPUS=${GPUS:-8}
4
+ BATCH_SIZE=${BATCH_SIZE:-128}
5
+ PER_DEVICE_BATCH_SIZE=${PER_DEVICE_BATCH_SIZE:-4}
6
+ GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))
7
+
8
+
9
+ export PYTHONPATH="${PYTHONPATH}:$(pwd)"
10
+ export MASTER_PORT=34229
11
+ export TF_CPP_MIN_LOG_LEVEL=3
12
+ export LAUNCHER=pytorch
13
+
14
+ OUTPUT_DIR='work_dirs/internvl_chat_v2_5/internvl2_5_1b_dynamic_res_2nd_finetune_full'
15
+
16
+ if [ ! -d "$OUTPUT_DIR" ]; then
17
+ mkdir -p "$OUTPUT_DIR"
18
+ fi
19
+
20
+ # number of gpus: 8
21
+ # batch size per gpu: 4
22
+ # gradient accumulation steps: 4
23
+ # total batch size: 128
24
+ # epoch: 1
25
+ torchrun \
26
+ --nnodes=1 \
27
+ --node_rank=0 \
28
+ --master_addr=127.0.0.1 \
29
+ --nproc_per_node=${GPUS} \
30
+ --master_port=${MASTER_PORT} \
31
+ internvl/train/internvl_chat_finetune.py \
32
+ --model_name_or_path "OpenGVLab/InternVL2_5-1B" \
33
+ --conv_style "internvl2_5" \
34
+ --use_fast_tokenizer False \
35
+ --output_dir ${OUTPUT_DIR} \
36
+ --meta_path "./shell/data/internvl_1_2_finetune_custom.json" \
37
+ --overwrite_output_dir True \
38
+ --force_image_size 448 \
39
+ --max_dynamic_patch 6 \
40
+ --down_sample_ratio 0.5 \
41
+ --drop_path_rate 0.1 \
42
+ --freeze_llm False \
43
+ --freeze_mlp False \
44
+ --freeze_backbone True \
45
+ --vision_select_layer -1 \
46
+ --dataloader_num_workers 4 \
47
+ --bf16 True \
48
+ --num_train_epochs 1 \
49
+ --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \
50
+ --gradient_accumulation_steps ${GRADIENT_ACC} \
51
+ --evaluation_strategy "no" \
52
+ --save_strategy "steps" \
53
+ --save_steps 200 \
54
+ --save_total_limit 1 \
55
+ --learning_rate 4e-5 \
56
+ --weight_decay 0.01 \
57
+ --warmup_ratio 0.03 \
58
+ --lr_scheduler_type "cosine" \
59
+ --logging_steps 1 \
60
+ --max_seq_length 8192 \
61
+ --do_train True \
62
+ --grad_checkpoint True \
63
+ --group_by_length True \
64
+ --dynamic_image_size True \
65
+ --use_thumbnail True \
66
+ --ps_version 'v2' \
67
+ --deepspeed "zero_stage1_config.json" \
68
+ --report_to "tensorboard" \
69
+ 2>&1 | tee -a "${OUTPUT_DIR}/training_log.txt"
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.5/2nd_finetune/internvl2_5_1b_dynamic_res_2nd_finetune_lora.sh ADDED
@@ -0,0 +1,70 @@
1
+ set -x
2
+
3
+ GPUS=${GPUS:-2}
4
+ BATCH_SIZE=${BATCH_SIZE:-16}
5
+ PER_DEVICE_BATCH_SIZE=${PER_DEVICE_BATCH_SIZE:-4}
6
+ GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))
7
+
8
+
9
+ export PYTHONPATH="${PYTHONPATH}:$(pwd)"
10
+ export MASTER_PORT=34229
11
+ export TF_CPP_MIN_LOG_LEVEL=3
12
+ export LAUNCHER=pytorch
13
+
14
+ OUTPUT_DIR='work_dirs/internvl_chat_v2_5/internvl2_5_1b_dynamic_res_2nd_finetune_lora'
15
+
16
+ if [ ! -d "$OUTPUT_DIR" ]; then
17
+ mkdir -p "$OUTPUT_DIR"
18
+ fi
19
+
20
+ # number of gpus: 2
21
+ # batch size per gpu: 4
22
+ # gradient accumulation steps: 2
23
+ # total batch size: 16
24
+ # epoch: 1
25
+ torchrun \
26
+ --nnodes=1 \
27
+ --node_rank=0 \
28
+ --master_addr=127.0.0.1 \
29
+ --nproc_per_node=${GPUS} \
30
+ --master_port=${MASTER_PORT} \
31
+ internvl/train/internvl_chat_finetune.py \
32
+ --model_name_or_path "OpenGVLab/InternVL2_5-1B" \
33
+ --conv_style "internvl2_5" \
34
+ --use_fast_tokenizer False \
35
+ --output_dir ${OUTPUT_DIR} \
36
+ --meta_path "./shell/data/internvl_1_2_finetune_custom.json" \
37
+ --overwrite_output_dir True \
38
+ --force_image_size 448 \
39
+ --max_dynamic_patch 6 \
40
+ --down_sample_ratio 0.5 \
41
+ --drop_path_rate 0.0 \
42
+ --freeze_llm True \
43
+ --freeze_mlp True \
44
+ --freeze_backbone True \
45
+ --use_llm_lora 16 \
46
+ --vision_select_layer -1 \
47
+ --dataloader_num_workers 4 \
48
+ --bf16 True \
49
+ --num_train_epochs 1 \
50
+ --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \
51
+ --gradient_accumulation_steps ${GRADIENT_ACC} \
52
+ --evaluation_strategy "no" \
53
+ --save_strategy "steps" \
54
+ --save_steps 200 \
55
+ --save_total_limit 1 \
56
+ --learning_rate 4e-5 \
57
+ --weight_decay 0.01 \
58
+ --warmup_ratio 0.03 \
59
+ --lr_scheduler_type "cosine" \
60
+ --logging_steps 1 \
61
+ --max_seq_length 8192 \
62
+ --do_train True \
63
+ --grad_checkpoint True \
64
+ --group_by_length True \
65
+ --dynamic_image_size True \
66
+ --use_thumbnail True \
67
+ --ps_version 'v2' \
68
+ --deepspeed "zero_stage1_config.json" \
69
+ --report_to "tensorboard" \
70
+ 2>&1 | tee -a "${OUTPUT_DIR}/training_log.txt"
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.5/2nd_finetune/internvl2_5_26b_dynamic_res_2nd_finetune_full.sh ADDED
@@ -0,0 +1,69 @@
1
+ set -x
2
+
3
+ GPUS=${GPUS:-8}
4
+ BATCH_SIZE=${BATCH_SIZE:-128}
5
+ PER_DEVICE_BATCH_SIZE=${PER_DEVICE_BATCH_SIZE:-2}
6
+ GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))
7
+
8
+
9
+ export PYTHONPATH="${PYTHONPATH}:$(pwd)"
10
+ export MASTER_PORT=34229
11
+ export TF_CPP_MIN_LOG_LEVEL=3
12
+ export LAUNCHER=pytorch
13
+
14
+ OUTPUT_DIR='work_dirs/internvl_chat_v2_5/internvl2_5_26b_dynamic_res_2nd_finetune_full'
15
+
16
+ if [ ! -d "$OUTPUT_DIR" ]; then
17
+ mkdir -p "$OUTPUT_DIR"
18
+ fi
19
+
20
+ # number of gpus: 8
21
+ # batch size per gpu: 2
22
+ # gradient accumulation steps: 8
23
+ # total batch size: 128
24
+ # epoch: 1
25
+ torchrun \
26
+ --nnodes=1 \
27
+ --node_rank=0 \
28
+ --master_addr=127.0.0.1 \
29
+ --nproc_per_node=${GPUS} \
30
+ --master_port=${MASTER_PORT} \
31
+ internvl/train/internvl_chat_finetune.py \
32
+ --model_name_or_path "OpenGVLab/InternVL2_5-26B" \
33
+ --conv_style "internvl2_5" \
34
+ --use_fast_tokenizer False \
35
+ --output_dir ${OUTPUT_DIR} \
36
+ --meta_path "./shell/data/internvl_1_2_finetune_custom.json" \
37
+ --overwrite_output_dir True \
38
+ --force_image_size 448 \
39
+ --max_dynamic_patch 6 \
40
+ --down_sample_ratio 0.5 \
41
+ --drop_path_rate 0.4 \
42
+ --freeze_llm False \
43
+ --freeze_mlp False \
44
+ --freeze_backbone True \
45
+ --vision_select_layer -1 \
46
+ --dataloader_num_workers 4 \
47
+ --bf16 True \
48
+ --num_train_epochs 1 \
49
+ --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \
50
+ --gradient_accumulation_steps ${GRADIENT_ACC} \
51
+ --evaluation_strategy "no" \
52
+ --save_strategy "steps" \
53
+ --save_steps 200 \
54
+ --save_total_limit 1 \
55
+ --learning_rate 2e-5 \
56
+ --weight_decay 0.05 \
57
+ --warmup_ratio 0.03 \
58
+ --lr_scheduler_type "cosine" \
59
+ --logging_steps 1 \
60
+ --max_seq_length 8192 \
61
+ --do_train True \
62
+ --grad_checkpoint True \
63
+ --group_by_length True \
64
+ --dynamic_image_size True \
65
+ --use_thumbnail True \
66
+ --ps_version 'v2' \
67
+ --deepspeed "zero_stage3_config.json" \
68
+ --report_to "tensorboard" \
69
+ 2>&1 | tee -a "${OUTPUT_DIR}/training_log.txt"
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.5/2nd_finetune/internvl2_5_26b_dynamic_res_2nd_finetune_lora.sh ADDED
@@ -0,0 +1,70 @@
1
+ set -x
2
+
3
+ GPUS=${GPUS:-2}
4
+ BATCH_SIZE=${BATCH_SIZE:-16}
5
+ PER_DEVICE_BATCH_SIZE=${PER_DEVICE_BATCH_SIZE:-2}
6
+ GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))
7
+
8
+
9
+ export PYTHONPATH="${PYTHONPATH}:$(pwd)"
10
+ export MASTER_PORT=34229
11
+ export TF_CPP_MIN_LOG_LEVEL=3
12
+ export LAUNCHER=pytorch
13
+
14
+ OUTPUT_DIR='work_dirs/internvl_chat_v2_5/internvl2_5_26b_dynamic_res_2nd_finetune_lora'
15
+
16
+ if [ ! -d "$OUTPUT_DIR" ]; then
17
+ mkdir -p "$OUTPUT_DIR"
18
+ fi
19
+
20
+ # number of gpus: 2
21
+ # batch size per gpu: 2
22
+ # gradient accumulation steps: 4
23
+ # total batch size: 16
24
+ # epoch: 1
25
+ torchrun \
26
+ --nnodes=1 \
27
+ --node_rank=0 \
28
+ --master_addr=127.0.0.1 \
29
+ --nproc_per_node=${GPUS} \
30
+ --master_port=${MASTER_PORT} \
31
+ internvl/train/internvl_chat_finetune.py \
32
+ --model_name_or_path "OpenGVLab/InternVL2_5-26B" \
33
+ --conv_style "internvl2_5" \
34
+ --use_fast_tokenizer False \
35
+ --output_dir ${OUTPUT_DIR} \
36
+ --meta_path "./shell/data/internvl_1_2_finetune_custom.json" \
37
+ --overwrite_output_dir True \
38
+ --force_image_size 448 \
39
+ --max_dynamic_patch 6 \
40
+ --down_sample_ratio 0.5 \
41
+ --drop_path_rate 0.0 \
42
+ --freeze_llm True \
43
+ --freeze_mlp True \
44
+ --freeze_backbone True \
45
+ --use_llm_lora 16 \
46
+ --vision_select_layer -1 \
47
+ --dataloader_num_workers 4 \
48
+ --bf16 True \
49
+ --num_train_epochs 1 \
50
+ --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \
51
+ --gradient_accumulation_steps ${GRADIENT_ACC} \
52
+ --evaluation_strategy "no" \
53
+ --save_strategy "steps" \
54
+ --save_steps 200 \
55
+ --save_total_limit 1 \
56
+ --learning_rate 2e-5 \
57
+ --weight_decay 0.05 \
58
+ --warmup_ratio 0.03 \
59
+ --lr_scheduler_type "cosine" \
60
+ --logging_steps 1 \
61
+ --max_seq_length 8192 \
62
+ --do_train True \
63
+ --grad_checkpoint True \
64
+ --group_by_length True \
65
+ --dynamic_image_size True \
66
+ --use_thumbnail True \
67
+ --ps_version 'v2' \
68
+ --deepspeed "zero_stage3_config.json" \
69
+ --report_to "tensorboard" \
70
+ 2>&1 | tee -a "${OUTPUT_DIR}/training_log.txt"
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.5/2nd_finetune/internvl2_5_2b_dynamic_res_2nd_finetune_full.sh ADDED
@@ -0,0 +1,69 @@
1
+ set -x
2
+
3
+ GPUS=${GPUS:-8}
4
+ BATCH_SIZE=${BATCH_SIZE:-128}
5
+ PER_DEVICE_BATCH_SIZE=${PER_DEVICE_BATCH_SIZE:-4}
6
+ GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))
7
+
8
+
9
+ export PYTHONPATH="${PYTHONPATH}:$(pwd)"
10
+ export MASTER_PORT=34229
11
+ export TF_CPP_MIN_LOG_LEVEL=3
12
+ export LAUNCHER=pytorch
13
+
14
+ OUTPUT_DIR='work_dirs/internvl_chat_v2_5/internvl2_5_2b_dynamic_res_2nd_finetune_full'
15
+
16
+ if [ ! -d "$OUTPUT_DIR" ]; then
17
+ mkdir -p "$OUTPUT_DIR"
18
+ fi
19
+
20
+ # number of gpus: 8
21
+ # batch size per gpu: 4
22
+ # gradient accumulation steps: 4
23
+ # total batch size: 128
24
+ # epoch: 1
25
+ torchrun \
26
+ --nnodes=1 \
27
+ --node_rank=0 \
28
+ --master_addr=127.0.0.1 \
29
+ --nproc_per_node=${GPUS} \
30
+ --master_port=${MASTER_PORT} \
31
+ internvl/train/internvl_chat_finetune.py \
32
+ --model_name_or_path "OpenGVLab/InternVL2_5-2B" \
33
+ --conv_style "internvl2_5" \
34
+ --use_fast_tokenizer False \
35
+ --output_dir ${OUTPUT_DIR} \
36
+ --meta_path "./shell/data/internvl_1_2_finetune_custom.json" \
37
+ --overwrite_output_dir True \
38
+ --force_image_size 448 \
39
+ --max_dynamic_patch 6 \
40
+ --down_sample_ratio 0.5 \
41
+ --drop_path_rate 0.1 \
42
+ --freeze_llm False \
43
+ --freeze_mlp False \
44
+ --freeze_backbone True \
45
+ --vision_select_layer -1 \
46
+ --dataloader_num_workers 4 \
47
+ --bf16 True \
48
+ --num_train_epochs 1 \
49
+ --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \
50
+ --gradient_accumulation_steps ${GRADIENT_ACC} \
51
+ --evaluation_strategy "no" \
52
+ --save_strategy "steps" \
53
+ --save_steps 200 \
54
+ --save_total_limit 1 \
55
+ --learning_rate 4e-5 \
56
+ --weight_decay 0.01 \
57
+ --warmup_ratio 0.03 \
58
+ --lr_scheduler_type "cosine" \
59
+ --logging_steps 1 \
60
+ --max_seq_length 8192 \
61
+ --do_train True \
62
+ --grad_checkpoint True \
63
+ --group_by_length True \
64
+ --dynamic_image_size True \
65
+ --use_thumbnail True \
66
+ --ps_version 'v2' \
67
+ --deepspeed "zero_stage1_config.json" \
68
+ --report_to "tensorboard" \
69
+ 2>&1 | tee -a "${OUTPUT_DIR}/training_log.txt"
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.5/2nd_finetune/internvl2_5_2b_dynamic_res_2nd_finetune_lora.sh ADDED
@@ -0,0 +1,70 @@
1
+ set -x
2
+
3
+ GPUS=${GPUS:-2}
4
+ BATCH_SIZE=${BATCH_SIZE:-16}
5
+ PER_DEVICE_BATCH_SIZE=${PER_DEVICE_BATCH_SIZE:-4}
6
+ GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))
7
+
8
+
9
+ export PYTHONPATH="${PYTHONPATH}:$(pwd)"
10
+ export MASTER_PORT=34229
11
+ export TF_CPP_MIN_LOG_LEVEL=3
12
+ export LAUNCHER=pytorch
13
+
14
+ OUTPUT_DIR='work_dirs/internvl_chat_v2_5/internvl2_5_2b_dynamic_res_2nd_finetune_lora'
15
+
16
+ if [ ! -d "$OUTPUT_DIR" ]; then
17
+ mkdir -p "$OUTPUT_DIR"
18
+ fi
19
+
20
+ # number of gpus: 2
21
+ # batch size per gpu: 4
22
+ # gradient accumulation steps: 2
23
+ # total batch size: 16
24
+ # epoch: 1
25
+ torchrun \
26
+ --nnodes=1 \
27
+ --node_rank=0 \
28
+ --master_addr=127.0.0.1 \
29
+ --nproc_per_node=${GPUS} \
30
+ --master_port=${MASTER_PORT} \
31
+ internvl/train/internvl_chat_finetune.py \
32
+ --model_name_or_path "OpenGVLab/InternVL2_5-2B" \
33
+ --conv_style "internvl2_5" \
34
+ --use_fast_tokenizer False \
35
+ --output_dir ${OUTPUT_DIR} \
36
+ --meta_path "./shell/data/internvl_1_2_finetune_custom.json" \
37
+ --overwrite_output_dir True \
38
+ --force_image_size 448 \
39
+ --max_dynamic_patch 6 \
40
+ --down_sample_ratio 0.5 \
41
+ --drop_path_rate 0.0 \
42
+ --freeze_llm True \
43
+ --freeze_mlp True \
44
+ --freeze_backbone True \
45
+ --use_llm_lora 16 \
46
+ --vision_select_layer -1 \
47
+ --dataloader_num_workers 4 \
48
+ --bf16 True \
49
+ --num_train_epochs 1 \
50
+ --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \
51
+ --gradient_accumulation_steps ${GRADIENT_ACC} \
52
+ --evaluation_strategy "no" \
53
+ --save_strategy "steps" \
54
+ --save_steps 200 \
55
+ --save_total_limit 1 \
56
+ --learning_rate 4e-5 \
57
+ --weight_decay 0.01 \
58
+ --warmup_ratio 0.03 \
59
+ --lr_scheduler_type "cosine" \
60
+ --logging_steps 1 \
61
+ --max_seq_length 8192 \
62
+ --do_train True \
63
+ --grad_checkpoint True \
64
+ --group_by_length True \
65
+ --dynamic_image_size True \
66
+ --use_thumbnail True \
67
+ --ps_version 'v2' \
68
+ --deepspeed "zero_stage1_config.json" \
69
+ --report_to "tensorboard" \
70
+ 2>&1 | tee -a "${OUTPUT_DIR}/training_log.txt"
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.5/2nd_finetune/internvl2_5_2b_dynamic_res_2nd_finetune_lora_coco.sh ADDED
@@ -0,0 +1,70 @@
1
+ set -x
2
+
3
+ GPUS=${GPUS:-8}
4
+ BATCH_SIZE=${BATCH_SIZE:-512}
5
+ PER_DEVICE_BATCH_SIZE=${PER_DEVICE_BATCH_SIZE:-4}
6
+ GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))
7
+
8
+
9
+ export PYTHONPATH="${PYTHONPATH}:$(pwd)"
10
+ export MASTER_PORT=34229
11
+ export TF_CPP_MIN_LOG_LEVEL=3
12
+ export LAUNCHER=pytorch
13
+
14
+ OUTPUT_DIR='work_dirs/internvl_chat_v2_5/internvl2_5_2b_dynamic_res_2nd_finetune_lora_coco'
15
+
16
+ if [ ! -d "$OUTPUT_DIR" ]; then
17
+ mkdir -p "$OUTPUT_DIR"
18
+ fi
19
+
20
+ # number of gpus: 8
21
+ # batch size per gpu: 4
22
+ # gradient accumulation steps: 16
23
+ # total batch size: 512
24
+ # epoch: 1
25
+ torchrun \
26
+ --nnodes=1 \
27
+ --node_rank=0 \
28
+ --master_addr=127.0.0.1 \
29
+ --nproc_per_node=${GPUS} \
30
+ --master_port=${MASTER_PORT} \
31
+ internvl/train/internvl_chat_finetune.py \
32
+ --model_name_or_path "OpenGVLab/InternVL2_5-2B" \
33
+ --conv_style "internvl2_5" \
34
+ --use_fast_tokenizer False \
35
+ --output_dir ${OUTPUT_DIR} \
36
+ --meta_path "./shell/data/coco_caption.json" \
37
+ --overwrite_output_dir True \
38
+ --force_image_size 448 \
39
+ --max_dynamic_patch 6 \
40
+ --down_sample_ratio 0.5 \
41
+ --drop_path_rate 0.0 \
42
+ --freeze_llm True \
43
+ --freeze_mlp True \
44
+ --freeze_backbone True \
45
+ --use_llm_lora 128 \
46
+ --vision_select_layer -1 \
47
+ --dataloader_num_workers 4 \
48
+ --bf16 True \
49
+ --num_train_epochs 1 \
50
+ --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \
51
+ --gradient_accumulation_steps ${GRADIENT_ACC} \
52
+ --evaluation_strategy "no" \
53
+ --save_strategy "steps" \
54
+ --save_steps 200 \
55
+ --save_total_limit 1 \
56
+ --learning_rate 4e-5 \
57
+ --weight_decay 0.01 \
58
+ --warmup_ratio 0.03 \
59
+ --lr_scheduler_type "cosine" \
60
+ --logging_steps 1 \
61
+ --max_seq_length 8192 \
62
+ --do_train True \
63
+ --grad_checkpoint True \
64
+ --group_by_length True \
65
+ --dynamic_image_size True \
66
+ --use_thumbnail True \
67
+ --ps_version 'v2' \
68
+ --deepspeed "zero_stage1_config.json" \
69
+ --report_to "tensorboard" \
70
+ 2>&1 | tee -a "${OUTPUT_DIR}/training_log.txt"
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.5/2nd_finetune/internvl2_5_38b_dynamic_res_2nd_finetune_full.sh ADDED
@@ -0,0 +1,77 @@
1
+ set -x
2
+
3
+ PARTITION=${PARTITION:-"VC5"}
4
+ GPUS=${GPUS:-16}
5
+ GPUS_PER_NODE=${GPUS_PER_NODE:-8}
6
+ QUOTA_TYPE=${QUOTA_TYPE:-"reserved"}
7
+ NODES=$((GPUS / GPUS_PER_NODE))
8
+ CPUS_PER_TASK=${CPUS_PER_TASK:-10}
9
+ SRUN_ARGS=${SRUN_ARGS:-""}
10
+ BATCH_SIZE=${BATCH_SIZE:-128}
11
+ PER_DEVICE_BATCH_SIZE=${PER_DEVICE_BATCH_SIZE:-2}
12
+ GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))
13
+
14
+
15
+ export PYTHONPATH="${PYTHONPATH}:$(pwd)"
16
+ export MASTER_PORT=34229
17
+ export TF_CPP_MIN_LOG_LEVEL=3
18
+
19
+ OUTPUT_DIR='work_dirs/internvl_chat_v2_5/internvl2_5_38b_dynamic_res_2nd_finetune_full'
20
+
21
+ if [ ! -d "$OUTPUT_DIR" ]; then
22
+ mkdir -p "$OUTPUT_DIR"
23
+ fi
24
+
25
+ # number of gpus: 16
26
+ # batch size per gpu: 2
27
+ # gradient accumulation steps: 4
28
+ # total batch size: 128
29
+ # epoch: 1
30
+ srun -p ${PARTITION} \
31
+ --gres=gpu:${GPUS_PER_NODE} \
32
+ --nodes=${NODES} \
33
+ --ntasks=${GPUS} \
34
+ --ntasks-per-node=${GPUS_PER_NODE} \
35
+ --cpus-per-task=${CPUS_PER_TASK} \
36
+ --kill-on-bad-exit=1 \
37
+ --quotatype=${QUOTA_TYPE} \
38
+ ${SRUN_ARGS} \
39
+ python -u internvl/train/internvl_chat_finetune.py \
40
+ --model_name_or_path "OpenGVLab/InternVL2_5-38B" \
41
+ --conv_style "internvl2_5" \
42
+ --use_fast_tokenizer False \
43
+ --output_dir ${OUTPUT_DIR} \
44
+ --meta_path "./shell/data/internvl_1_2_finetune_custom.json" \
45
+ --overwrite_output_dir True \
46
+ --force_image_size 448 \
47
+ --max_dynamic_patch 6 \
48
+ --down_sample_ratio 0.5 \
49
+ --drop_path_rate 0.4 \
50
+ --freeze_llm False \
51
+ --freeze_mlp False \
52
+ --freeze_backbone True \
53
+ --vision_select_layer -1 \
54
+ --dataloader_num_workers 4 \
55
+ --bf16 True \
56
+ --num_train_epochs 1 \
57
+ --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \
58
+ --gradient_accumulation_steps ${GRADIENT_ACC} \
59
+ --evaluation_strategy "no" \
60
+ --save_strategy "steps" \
61
+ --save_steps 200 \
62
+ --save_total_limit 1 \
63
+ --learning_rate 2e-5 \
64
+ --weight_decay 0.05 \
65
+ --warmup_ratio 0.03 \
66
+ --lr_scheduler_type "cosine" \
67
+ --logging_steps 1 \
68
+ --max_seq_length 8192 \
69
+ --do_train True \
70
+ --grad_checkpoint True \
71
+ --group_by_length True \
72
+ --dynamic_image_size True \
73
+ --use_thumbnail True \
74
+ --ps_version 'v2' \
75
+ --deepspeed "zero_stage3_config_34b.json" \
76
+ --report_to "tensorboard" \
77
+ 2>&1 | tee -a "${OUTPUT_DIR}/training_log.txt"
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.5/2nd_finetune/internvl2_5_38b_dynamic_res_2nd_finetune_lora.sh ADDED
@@ -0,0 +1,70 @@
1
+ set -x
2
+
3
+ GPUS=${GPUS:-2}
4
+ BATCH_SIZE=${BATCH_SIZE:-16}
5
+ PER_DEVICE_BATCH_SIZE=${PER_DEVICE_BATCH_SIZE:-2}
6
+ GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))
7
+
8
+
9
+ export PYTHONPATH="${PYTHONPATH}:$(pwd)"
10
+ export MASTER_PORT=34229
11
+ export TF_CPP_MIN_LOG_LEVEL=3
12
+ export LAUNCHER=pytorch
13
+
14
+ OUTPUT_DIR='work_dirs/internvl_chat_v2_5/internvl2_5_38b_dynamic_res_2nd_finetune_lora'
15
+
16
+ if [ ! -d "$OUTPUT_DIR" ]; then
17
+ mkdir -p "$OUTPUT_DIR"
18
+ fi
19
+
20
+ # number of gpus: 2
21
+ # batch size per gpu: 2
22
+ # gradient accumulation steps: 4
23
+ # total batch size: 16
24
+ # epoch: 1
25
+ torchrun \
26
+ --nnodes=1 \
27
+ --node_rank=0 \
28
+ --master_addr=127.0.0.1 \
29
+ --nproc_per_node=${GPUS} \
30
+ --master_port=${MASTER_PORT} \
31
+ internvl/train/internvl_chat_finetune.py \
32
+ --model_name_or_path "OpenGVLab/InternVL2_5-38B" \
33
+ --conv_style "internvl2_5" \
34
+ --use_fast_tokenizer False \
35
+ --output_dir ${OUTPUT_DIR} \
36
+ --meta_path "./shell/data/internvl_1_2_finetune_custom.json" \
37
+ --overwrite_output_dir True \
38
+ --force_image_size 448 \
39
+ --max_dynamic_patch 6 \
40
+ --down_sample_ratio 0.5 \
41
+ --drop_path_rate 0.0 \
42
+ --freeze_llm True \
43
+ --freeze_mlp True \
44
+ --freeze_backbone True \
45
+ --use_llm_lora 16 \
46
+ --vision_select_layer -1 \
47
+ --dataloader_num_workers 4 \
48
+ --bf16 True \
49
+ --num_train_epochs 1 \
50
+ --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \
51
+ --gradient_accumulation_steps ${GRADIENT_ACC} \
52
+ --evaluation_strategy "no" \
53
+ --save_strategy "steps" \
54
+ --save_steps 200 \
55
+ --save_total_limit 1 \
56
+ --learning_rate 2e-5 \
57
+ --weight_decay 0.05 \
58
+ --warmup_ratio 0.03 \
59
+ --lr_scheduler_type "cosine" \
60
+ --logging_steps 1 \
61
+ --max_seq_length 8192 \
62
+ --do_train True \
63
+ --grad_checkpoint True \
64
+ --group_by_length True \
65
+ --dynamic_image_size True \
66
+ --use_thumbnail True \
67
+ --ps_version 'v2' \
68
+ --deepspeed "zero_stage3_config_34b.json" \
69
+ --report_to "tensorboard" \
70
+ 2>&1 | tee -a "${OUTPUT_DIR}/training_log.txt"
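As a sanity check on the comment block in the script above, here is a minimal sketch of the batch-size arithmetic it relies on, using this LoRA script's defaults (GPUS=2, PER_DEVICE_BATCH_SIZE=2, BATCH_SIZE=16); the echo line is illustrative only and is not part of the committed script:

    GPUS=2; BATCH_SIZE=16; PER_DEVICE_BATCH_SIZE=2
    GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))              # 16 / 2 / 2 = 4
    echo "global batch = $((PER_DEVICE_BATCH_SIZE * GPUS * GRADIENT_ACC))"   # prints 16

The same relation (global batch = per-device batch x GPUs x accumulation steps) holds for every script in this directory, so overriding any one of the three environment variables adjusts the accumulation steps automatically.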
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.5/2nd_finetune/internvl2_5_4b_dynamic_res_2nd_finetune_full.sh ADDED
@@ -0,0 +1,69 @@
+ set -x
+
+ GPUS=${GPUS:-8}
+ BATCH_SIZE=${BATCH_SIZE:-128}
+ PER_DEVICE_BATCH_SIZE=${PER_DEVICE_BATCH_SIZE:-4}
+ GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))
+
+
+ export PYTHONPATH="${PYTHONPATH}:$(pwd)"
+ export MASTER_PORT=34229
+ export TF_CPP_MIN_LOG_LEVEL=3
+ export LAUNCHER=pytorch
+
+ OUTPUT_DIR='work_dirs/internvl_chat_v2_5/internvl2_5_4b_dynamic_res_2nd_finetune_full'
+
+ if [ ! -d "$OUTPUT_DIR" ]; then
+ mkdir -p "$OUTPUT_DIR"
+ fi
+
+ # number of gpus: 8
+ # batch size per gpu: 4
+ # gradient accumulation steps: 4
+ # total batch size: 128
+ # epoch: 1
+ torchrun \
+ --nnodes=1 \
+ --node_rank=0 \
+ --master_addr=127.0.0.1 \
+ --nproc_per_node=${GPUS} \
+ --master_port=${MASTER_PORT} \
+ internvl/train/internvl_chat_finetune.py \
+ --model_name_or_path "OpenGVLab/InternVL2_5-4B" \
+ --conv_style "internvl2_5" \
+ --use_fast_tokenizer False \
+ --output_dir ${OUTPUT_DIR} \
+ --meta_path "./shell/data/internvl_1_2_finetune_custom.json" \
+ --overwrite_output_dir True \
+ --force_image_size 448 \
+ --max_dynamic_patch 6 \
+ --down_sample_ratio 0.5 \
+ --drop_path_rate 0.1 \
+ --freeze_llm False \
+ --freeze_mlp False \
+ --freeze_backbone True \
+ --vision_select_layer -1 \
+ --dataloader_num_workers 4 \
+ --bf16 True \
+ --num_train_epochs 1 \
+ --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \
+ --gradient_accumulation_steps ${GRADIENT_ACC} \
+ --evaluation_strategy "no" \
+ --save_strategy "steps" \
+ --save_steps 200 \
+ --save_total_limit 1 \
+ --learning_rate 4e-5 \
+ --weight_decay 0.01 \
+ --warmup_ratio 0.03 \
+ --lr_scheduler_type "cosine" \
+ --logging_steps 1 \
+ --max_seq_length 8192 \
+ --do_train True \
+ --grad_checkpoint True \
+ --group_by_length True \
+ --dynamic_image_size True \
+ --use_thumbnail True \
+ --ps_version 'v2' \
+ --deepspeed "zero_stage1_config.json" \
+ --report_to "tensorboard" \
+ 2>&1 | tee -a "${OUTPUT_DIR}/training_log.txt"
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.5/2nd_finetune/internvl2_5_4b_dynamic_res_2nd_finetune_lora.sh ADDED
@@ -0,0 +1,70 @@
+ set -x
+
+ GPUS=${GPUS:-2}
+ BATCH_SIZE=${BATCH_SIZE:-16}
+ PER_DEVICE_BATCH_SIZE=${PER_DEVICE_BATCH_SIZE:-4}
+ GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))
+
+
+ export PYTHONPATH="${PYTHONPATH}:$(pwd)"
+ export MASTER_PORT=34229
+ export TF_CPP_MIN_LOG_LEVEL=3
+ export LAUNCHER=pytorch
+
+ OUTPUT_DIR='work_dirs/internvl_chat_v2_5/internvl2_5_4b_dynamic_res_2nd_finetune_lora'
+
+ if [ ! -d "$OUTPUT_DIR" ]; then
+ mkdir -p "$OUTPUT_DIR"
+ fi
+
+ # number of gpus: 2
+ # batch size per gpu: 4
+ # gradient accumulation steps: 2
+ # total batch size: 16
+ # epoch: 1
+ torchrun \
+ --nnodes=1 \
+ --node_rank=0 \
+ --master_addr=127.0.0.1 \
+ --nproc_per_node=${GPUS} \
+ --master_port=${MASTER_PORT} \
+ internvl/train/internvl_chat_finetune.py \
+ --model_name_or_path "OpenGVLab/InternVL2_5-4B" \
+ --conv_style "internvl2_5" \
+ --use_fast_tokenizer False \
+ --output_dir ${OUTPUT_DIR} \
+ --meta_path "./shell/data/internvl_1_2_finetune_custom.json" \
+ --overwrite_output_dir True \
+ --force_image_size 448 \
+ --max_dynamic_patch 6 \
+ --down_sample_ratio 0.5 \
+ --drop_path_rate 0.0 \
+ --freeze_llm True \
+ --freeze_mlp True \
+ --freeze_backbone True \
+ --use_llm_lora 16 \
+ --vision_select_layer -1 \
+ --dataloader_num_workers 4 \
+ --bf16 True \
+ --num_train_epochs 1 \
+ --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \
+ --gradient_accumulation_steps ${GRADIENT_ACC} \
+ --evaluation_strategy "no" \
+ --save_strategy "steps" \
+ --save_steps 200 \
+ --save_total_limit 1 \
+ --learning_rate 4e-5 \
+ --weight_decay 0.01 \
+ --warmup_ratio 0.03 \
+ --lr_scheduler_type "cosine" \
+ --logging_steps 1 \
+ --max_seq_length 8192 \
+ --do_train True \
+ --grad_checkpoint True \
+ --group_by_length True \
+ --dynamic_image_size True \
+ --use_thumbnail True \
+ --ps_version 'v2' \
+ --deepspeed "zero_stage1_config.json" \
+ --report_to "tensorboard" \
+ 2>&1 | tee -a "${OUTPUT_DIR}/training_log.txt"
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.5/2nd_finetune/internvl2_5_78b_dynamic_res_2nd_finetune_full.sh ADDED
@@ -0,0 +1,77 @@
+ set -x
+
+ PARTITION=${PARTITION:-"VC5"}
+ GPUS=${GPUS:-32}
+ GPUS_PER_NODE=${GPUS_PER_NODE:-8}
+ QUOTA_TYPE=${QUOTA_TYPE:-"reserved"}
+ NODES=$((GPUS / GPUS_PER_NODE))
+ CPUS_PER_TASK=${CPUS_PER_TASK:-10}
+ SRUN_ARGS=${SRUN_ARGS:-""}
+ BATCH_SIZE=${BATCH_SIZE:-128}
+ PER_DEVICE_BATCH_SIZE=${PER_DEVICE_BATCH_SIZE:-1}
+ GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))
+
+
+ export PYTHONPATH="${PYTHONPATH}:$(pwd)"
+ export MASTER_PORT=34229
+ export TF_CPP_MIN_LOG_LEVEL=3
+
+ OUTPUT_DIR='work_dirs/internvl_chat_v2_5/internvl2_5_78b_dynamic_res_2nd_finetune_full'
+
+ if [ ! -d "$OUTPUT_DIR" ]; then
+ mkdir -p "$OUTPUT_DIR"
+ fi
+
+ # number of gpus: 32
+ # batch size per gpu: 1
+ # gradient accumulation steps: 4
+ # total batch size: 128
+ # epoch: 1
+ srun -p ${PARTITION} \
+ --gres=gpu:${GPUS_PER_NODE} \
+ --nodes=${NODES} \
+ --ntasks=${GPUS} \
+ --ntasks-per-node=${GPUS_PER_NODE} \
+ --cpus-per-task=${CPUS_PER_TASK} \
+ --kill-on-bad-exit=1 \
+ --quotatype=${QUOTA_TYPE} \
+ ${SRUN_ARGS} \
+ python -u internvl/train/internvl_chat_finetune.py \
+ --model_name_or_path "OpenGVLab/InternVL2_5-78B" \
+ --conv_style "internvl2_5" \
+ --use_fast_tokenizer False \
+ --output_dir ${OUTPUT_DIR} \
+ --meta_path "./shell/data/internvl_1_2_finetune_custom.json" \
+ --overwrite_output_dir True \
+ --force_image_size 448 \
+ --max_dynamic_patch 6 \
+ --down_sample_ratio 0.5 \
+ --drop_path_rate 0.4 \
+ --freeze_llm False \
+ --freeze_mlp False \
+ --freeze_backbone True \
+ --vision_select_layer -1 \
+ --dataloader_num_workers 4 \
+ --bf16 True \
+ --num_train_epochs 1 \
+ --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \
+ --gradient_accumulation_steps ${GRADIENT_ACC} \
+ --evaluation_strategy "no" \
+ --save_strategy "steps" \
+ --save_steps 200 \
+ --save_total_limit 1 \
+ --learning_rate 2e-5 \
+ --weight_decay 0.05 \
+ --warmup_ratio 0.03 \
+ --lr_scheduler_type "cosine" \
+ --logging_steps 1 \
+ --max_seq_length 8192 \
+ --do_train True \
+ --grad_checkpoint True \
+ --group_by_length True \
+ --dynamic_image_size True \
+ --use_thumbnail True \
+ --ps_version 'v2' \
+ --deepspeed "zero_stage3_config_100b.json" \
+ --report_to "tensorboard" \
+ 2>&1 | tee -a "${OUTPUT_DIR}/training_log.txt"
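The Slurm-based variants derive their launch topology from GPUS and GPUS_PER_NODE rather than from torchrun flags. A minimal sketch of the derived values for the 78B full-finetune defaults above (GPUS=32, GPUS_PER_NODE=8, BATCH_SIZE=128, PER_DEVICE_BATCH_SIZE=1); the comments restate arithmetic already present in the script:

    GPUS=32; GPUS_PER_NODE=8; BATCH_SIZE=128; PER_DEVICE_BATCH_SIZE=1
    NODES=$((GPUS / GPUS_PER_NODE))                               # 32 / 8 = 4 nodes
    GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))   # 128 / 1 / 32 = 4
    # srun then starts ${GPUS} tasks in total, ${GPUS_PER_NODE} per node,
    # which matches the "number of gpus / total batch size" comment block.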
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.5/2nd_finetune/internvl2_5_78b_dynamic_res_2nd_finetune_lora.sh ADDED
@@ -0,0 +1,70 @@
+ set -x
+
+ GPUS=${GPUS:-8}
+ BATCH_SIZE=${BATCH_SIZE:-16}
+ PER_DEVICE_BATCH_SIZE=${PER_DEVICE_BATCH_SIZE:-1}
+ GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))
+
+
+ export PYTHONPATH="${PYTHONPATH}:$(pwd)"
+ export MASTER_PORT=34229
+ export TF_CPP_MIN_LOG_LEVEL=3
+ export LAUNCHER=pytorch
+
+ OUTPUT_DIR='work_dirs/internvl_chat_v2_5/internvl2_5_78b_dynamic_res_2nd_finetune_lora'
+
+ if [ ! -d "$OUTPUT_DIR" ]; then
+ mkdir -p "$OUTPUT_DIR"
+ fi
+
+ # number of gpus: 8
+ # batch size per gpu: 1
+ # gradient accumulation steps: 2
+ # total batch size: 16
+ # epoch: 1
+ torchrun \
+ --nnodes=1 \
+ --node_rank=0 \
+ --master_addr=127.0.0.1 \
+ --nproc_per_node=${GPUS} \
+ --master_port=${MASTER_PORT} \
+ internvl/train/internvl_chat_finetune.py \
+ --model_name_or_path "OpenGVLab/InternVL2_5-78B" \
+ --conv_style "internvl2_5" \
+ --use_fast_tokenizer False \
+ --output_dir ${OUTPUT_DIR} \
+ --meta_path "./shell/data/internvl_1_2_finetune_custom.json" \
+ --overwrite_output_dir True \
+ --force_image_size 448 \
+ --max_dynamic_patch 6 \
+ --down_sample_ratio 0.5 \
+ --drop_path_rate 0.0 \
+ --freeze_llm True \
+ --freeze_mlp True \
+ --freeze_backbone True \
+ --use_llm_lora 16 \
+ --vision_select_layer -1 \
+ --dataloader_num_workers 4 \
+ --bf16 True \
+ --num_train_epochs 1 \
+ --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \
+ --gradient_accumulation_steps ${GRADIENT_ACC} \
+ --evaluation_strategy "no" \
+ --save_strategy "steps" \
+ --save_steps 200 \
+ --save_total_limit 1 \
+ --learning_rate 2e-5 \
+ --weight_decay 0.05 \
+ --warmup_ratio 0.03 \
+ --lr_scheduler_type "cosine" \
+ --logging_steps 1 \
+ --max_seq_length 8192 \
+ --do_train True \
+ --grad_checkpoint True \
+ --group_by_length True \
+ --dynamic_image_size True \
+ --use_thumbnail True \
+ --ps_version 'v2' \
+ --deepspeed "zero_stage3_config_100b.json" \
+ --report_to "tensorboard" \
+ 2>&1 | tee -a "${OUTPUT_DIR}/training_log.txt"
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.5/2nd_finetune/internvl2_5_8b_dynamic_res_2nd_finetune_full.sh ADDED
@@ -0,0 +1,69 @@
+ set -x
+
+ GPUS=${GPUS:-8}
+ BATCH_SIZE=${BATCH_SIZE:-128}
+ PER_DEVICE_BATCH_SIZE=${PER_DEVICE_BATCH_SIZE:-4}
+ GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))
+
+
+ export PYTHONPATH="${PYTHONPATH}:$(pwd)"
+ export MASTER_PORT=34229
+ export TF_CPP_MIN_LOG_LEVEL=3
+ export LAUNCHER=pytorch
+
+ OUTPUT_DIR='work_dirs/internvl_chat_v2_5/internvl2_5_8b_dynamic_res_2nd_finetune_full'
+
+ if [ ! -d "$OUTPUT_DIR" ]; then
+ mkdir -p "$OUTPUT_DIR"
+ fi
+
+ # number of gpus: 8
+ # batch size per gpu: 4
+ # gradient accumulation steps: 4
+ # total batch size: 128
+ # epoch: 1
+ torchrun \
+ --nnodes=1 \
+ --node_rank=0 \
+ --master_addr=127.0.0.1 \
+ --nproc_per_node=${GPUS} \
+ --master_port=${MASTER_PORT} \
+ internvl/train/internvl_chat_finetune.py \
+ --model_name_or_path "OpenGVLab/InternVL2_5-8B" \
+ --conv_style "internvl2_5" \
+ --use_fast_tokenizer False \
+ --output_dir ${OUTPUT_DIR} \
+ --meta_path "./shell/data/internvl_1_2_finetune_custom.json" \
+ --overwrite_output_dir True \
+ --force_image_size 448 \
+ --max_dynamic_patch 6 \
+ --down_sample_ratio 0.5 \
+ --drop_path_rate 0.1 \
+ --freeze_llm False \
+ --freeze_mlp False \
+ --freeze_backbone True \
+ --vision_select_layer -1 \
+ --dataloader_num_workers 4 \
+ --bf16 True \
+ --num_train_epochs 1 \
+ --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \
+ --gradient_accumulation_steps ${GRADIENT_ACC} \
+ --evaluation_strategy "no" \
+ --save_strategy "steps" \
+ --save_steps 200 \
+ --save_total_limit 1 \
+ --learning_rate 4e-5 \
+ --weight_decay 0.05 \
+ --warmup_ratio 0.03 \
+ --lr_scheduler_type "cosine" \
+ --logging_steps 1 \
+ --max_seq_length 8192 \
+ --do_train True \
+ --grad_checkpoint True \
+ --group_by_length True \
+ --dynamic_image_size True \
+ --use_thumbnail True \
+ --ps_version 'v2' \
+ --deepspeed "zero_stage1_config.json" \
+ --report_to "tensorboard" \
+ 2>&1 | tee -a "${OUTPUT_DIR}/training_log.txt"
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.5/2nd_finetune/internvl2_5_8b_dynamic_res_2nd_finetune_lora.sh ADDED
@@ -0,0 +1,70 @@
+ set -x
+
+ GPUS=${GPUS:-2}
+ BATCH_SIZE=${BATCH_SIZE:-16}
+ PER_DEVICE_BATCH_SIZE=${PER_DEVICE_BATCH_SIZE:-4}
+ GRADIENT_ACC=$((BATCH_SIZE / PER_DEVICE_BATCH_SIZE / GPUS))
+
+
+ export PYTHONPATH="${PYTHONPATH}:$(pwd)"
+ export MASTER_PORT=34229
+ export TF_CPP_MIN_LOG_LEVEL=3
+ export LAUNCHER=pytorch
+
+ OUTPUT_DIR='work_dirs/internvl_chat_v2_5/internvl2_5_8b_dynamic_res_2nd_finetune_lora'
+
+ if [ ! -d "$OUTPUT_DIR" ]; then
+ mkdir -p "$OUTPUT_DIR"
+ fi
+
+ # number of gpus: 2
+ # batch size per gpu: 4
+ # gradient accumulation steps: 2
+ # total batch size: 16
+ # epoch: 1
+ torchrun \
+ --nnodes=1 \
+ --node_rank=0 \
+ --master_addr=127.0.0.1 \
+ --nproc_per_node=${GPUS} \
+ --master_port=${MASTER_PORT} \
+ internvl/train/internvl_chat_finetune.py \
+ --model_name_or_path "OpenGVLab/InternVL2_5-8B" \
+ --conv_style "internvl2_5" \
+ --use_fast_tokenizer False \
+ --output_dir ${OUTPUT_DIR} \
+ --meta_path "./shell/data/internvl_1_2_finetune_custom.json" \
+ --overwrite_output_dir True \
+ --force_image_size 448 \
+ --max_dynamic_patch 6 \
+ --down_sample_ratio 0.5 \
+ --drop_path_rate 0.0 \
+ --freeze_llm True \
+ --freeze_mlp True \
+ --freeze_backbone True \
+ --use_llm_lora 16 \
+ --vision_select_layer -1 \
+ --dataloader_num_workers 4 \
+ --bf16 True \
+ --num_train_epochs 1 \
+ --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \
+ --gradient_accumulation_steps ${GRADIENT_ACC} \
+ --evaluation_strategy "no" \
+ --save_strategy "steps" \
+ --save_steps 200 \
+ --save_total_limit 1 \
+ --learning_rate 4e-5 \
+ --weight_decay 0.05 \
+ --warmup_ratio 0.03 \
+ --lr_scheduler_type "cosine" \
+ --logging_steps 1 \
+ --max_seq_length 8192 \
+ --do_train True \
+ --grad_checkpoint True \
+ --group_by_length True \
+ --dynamic_image_size True \
+ --use_thumbnail True \
+ --ps_version 'v2' \
+ --deepspeed "zero_stage1_config.json" \
+ --report_to "tensorboard" \
+ 2>&1 | tee -a "${OUTPUT_DIR}/training_log.txt"
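All of the torchrun-based 2nd-finetune scripts in this directory read GPUS, BATCH_SIZE and PER_DEVICE_BATCH_SIZE through ${VAR:-default}, so they can be retargeted without editing the file. A hedged usage sketch (the working directory is assumed to be internvl_chat/, and the concrete values are chosen only for illustration; BATCH_SIZE must stay divisible by PER_DEVICE_BATCH_SIZE x GPUS):

    # run the 8B LoRA recipe on 4 GPUs with a global batch size of 32
    GPUS=4 BATCH_SIZE=32 PER_DEVICE_BATCH_SIZE=4 \
      bash shell/internvl2.5/2nd_finetune/internvl2_5_8b_dynamic_res_2nd_finetune_lora.sh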
VLMEvalKit_old/InternVL/internvl_chat/shell/internvl2.5/stage1.5/internvl2_5_26b_internlm2_5_20b_dynamic_res_stage1_5.sh ADDED
@@ -0,0 +1,91 @@
+ set -x
+
+ PARTITION=${PARTITION:-"VC5"}
+ GPUS=${GPUS:-512}
+ GPUS_PER_NODE=${GPUS_PER_NODE:-8}
+ QUOTA_TYPE=${QUOTA_TYPE:-"reserved"}
+ NODES=$((GPUS / GPUS_PER_NODE))
+ CPUS_PER_TASK=${CPUS_PER_TASK:-10}
+ SRUN_ARGS=${SRUN_ARGS:-""}
+
+ export PYTHONPATH="${PYTHONPATH}:$(pwd)"
+ export MASTER_PORT=34229
+ export TF_CPP_MIN_LOG_LEVEL=3
+
+ OUTPUT_DIR='work_dirs/internvl_chat_v2_5/internvl2_5_26b_internlm2_5_20b_dynamic_res_stage1_5'
+
+ if [ ! -d "$OUTPUT_DIR" ]; then
+ mkdir -p "$OUTPUT_DIR"
+ fi
+
+ # Stage: Stage 1.5 (ViT Incremental Learning)
+ # Architecture: InternViT-6B-448px-V1-5 + MLP + internlm2_5-20b-chat
+ # Trainable Components: ViT + MLP
+ # Number of GPUs: 512
+ # Packed Batch Size: 1024
+ # Learning Rate: 1e-5
+ # Context Length: 16384
+ # Image Tile Threshold: 48
+ # ViT Drop Path: 0.4
+ # Weight Decay: 0.05
+ # Epoch: None
+ srun -p ${PARTITION} \
+ --gres=gpu:${GPUS_PER_NODE} \
+ --nodes=${NODES} \
+ --ntasks=${GPUS} \
+ --ntasks-per-node=${GPUS_PER_NODE} \
+ --cpus-per-task=${CPUS_PER_TASK} \
+ --kill-on-bad-exit=1 \
+ --quotatype=${QUOTA_TYPE} \
+ ${SRUN_ARGS} \
+ python -u internvl/train/internvl_chat_pretrain.py \
+ --model_name_or_path "./work_dirs/internvl_chat_v2_5/internvl2_5_26b_internlm2_5_20b_dynamic_res_stage1/" \
+ --conv_style "internvl2_5" \
+ --use_fast_tokenizer False \
+ --output_dir ${OUTPUT_DIR} \
+ --meta_path "./path/to/pretrain/data/mixture.json" \
+ --overwrite_output_dir True \
+ --force_image_size 448 \
+ --down_sample_ratio 0.5 \
+ --drop_path_rate 0.4 \
+ --min_num_frame 8 \
+ --max_num_frame 32 \
+ --freeze_llm True \
+ --freeze_mlp False \
+ --freeze_backbone False \
+ --vision_select_layer -1 \
+ --dataloader_num_workers 8 \
+ --bf16 True \
+ --max_steps 100000 \
+ --per_device_train_batch_size 1 \
+ --gradient_accumulation_steps 2 \
+ --evaluation_strategy "no" \
+ --save_strategy "steps" \
+ --save_steps 100 \
+ --save_total_limit 3 \
+ --learning_rate 1e-5 \
+ --weight_decay 0.05 \
+ --warmup_steps 100 \
+ --lr_scheduler_type "cosine" \
+ --logging_steps 1 \
+ --max_seq_length 16384 \
+ --do_train True \
+ --grad_checkpoint True \
+ --group_by_length False \
+ --dynamic_image_size True \
+ --use_thumbnail True \
+ --ps_version 'v2' \
+ --deepspeed "zero_stage3_config.json" \
+ --report_to "tensorboard" \
+ --use_packed_ds True \
+ --num_images_expected 48 \
+ --max_packed_tokens 16384 \
+ --max_buffer_size 20 \
+ --log_freq 1000 \
+ --strict_mode False \
+ --replacement False \
+ --allow_overflow False \
+ --remove_unused_columns False \
+ --loss_reduction "square" \
+ --loss_reduction_all_gather True \
+ 2>&1 | tee -a "${OUTPUT_DIR}/training_log.txt"
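The stage-1.5 script is Slurm-only and likewise takes its cluster settings from environment variables (PARTITION, GPUS, GPUS_PER_NODE, QUOTA_TYPE, CPUS_PER_TASK, SRUN_ARGS), while the packed-dataset limits in the command line (--num_images_expected 48, --max_packed_tokens 16384) correspond to the "Image Tile Threshold" and "Context Length" figures in its comment header. A minimal launch sketch with a hypothetical partition and GPU count, assuming the "./path/to/pretrain/data/mixture.json" placeholder in the script has first been replaced with a real data mixture:

    # hypothetical partition and scale; GPUS must be a multiple of GPUS_PER_NODE
    PARTITION=my_partition GPUS=64 GPUS_PER_NODE=8 QUOTA_TYPE=reserved \
      bash shell/internvl2.5/stage1.5/internvl2_5_26b_internlm2_5_20b_dynamic_res_stage1_5.sh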