Upload qwen2_5_vl_full_sft.yaml
Browse files
sft_related/qwen2_5_vl_full_sft.yaml
CHANGED
@@ -1,6 +1,6 @@
 ### model
 model_name_or_path: Qwen/Qwen2.5-VL-3B-Instruct
-image_max_pixels:
+image_max_pixels: 12845056
 video_max_pixels: 16384
 trust_remote_code: true
 
@@ -8,16 +8,16 @@ trust_remote_code: true
 stage: sft
 do_train: true
 finetuning_type: full
-freeze_vision_tower:
-freeze_multi_modal_projector:
+freeze_vision_tower: false # choices: [true, false]
+freeze_multi_modal_projector: false # choices: [true, false]
 train_mm_proj_only: false # choices: [true, false]
 deepspeed: examples/deepspeed/ds_z2_config.json # choices: [ds_z0_config.json, ds_z2_config.json, ds_z3_config.json]
 
 ### dataset
 dataset: mllm_rec_json
 template: qwen2_vl
-cutoff_len:
-max_samples:
+cutoff_len: 131072
+max_samples: 10000000
 overwrite_cache: true
 preprocessing_num_workers: 16
 
@@ -31,8 +31,8 @@ overwrite_output_dir: true
 ### train
 per_device_train_batch_size: 1
 gradient_accumulation_steps: 2
-learning_rate:
-num_train_epochs:
+learning_rate: 1.0e-6
+num_train_epochs: 2.0
 lr_scheduler_type: cosine
 warmup_ratio: 0.1
 bf16: true