Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes. See raw diff.
- .gitattributes +1 -0
- ood/ivl-8b-instruct-full_sft_ood/v0-20251004-170240/checkpoint-228/model-00001-of-00004.safetensors +3 -0
- ood/ivl-8b-instruct-full_sft_ood/v0-20251004-170240/checkpoint-228/model-00002-of-00004.safetensors +3 -0
- ood/ivl-8b-instruct-full_sft_ood/v0-20251004-170240/checkpoint-228/model-00003-of-00004.safetensors +3 -0
- ood/ivl-8b-instruct-full_sft_ood/v0-20251004-170240/checkpoint-228/model-00004-of-00004.safetensors +3 -0
- ood/ivl-8b-instruct-full_sft_ood/v0-20251004-170240/checkpoint-228/rng_state_0.pth +3 -0
- ood/ivl-8b-instruct-full_sft_ood/v0-20251004-170240/checkpoint-228/rng_state_1.pth +3 -0
- ood/ivl-8b-instruct-full_sft_ood/v0-20251004-170240/checkpoint-228/rng_state_2.pth +3 -0
- ood/ivl-8b-instruct-full_sft_ood/v0-20251004-170240/checkpoint-228/rng_state_3.pth +3 -0
- ood/ivl-8b-instruct-full_sft_ood/v0-20251004-170240/checkpoint-228/scheduler.pt +3 -0
- ood/ivl-8b-instruct-full_sft_ood/v0-20251004-170240/checkpoint-228/tokenizer.json +3 -0
- ood/ivl-8b-instruct-full_sft_ood/v0-20251004-170240/checkpoint-228/training_args.bin +3 -0
- ood/ivl-8b-instruct-full_sft_ood/v0-20251004-170240/checkpoint-228/zero_to_fp32.py +760 -0
- ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/args.json +385 -0
- ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/added_tokens.json +33 -0
- ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/args.json +385 -0
- ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/chat_template.jinja +54 -0
- ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/config.json +144 -0
- ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/configuration_intern_vit.py +120 -0
- ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/configuration_internvl_chat.py +97 -0
- ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/conversation.py +391 -0
- ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/generation_config.json +5 -0
- ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/latest +1 -0
- ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/merges.txt +0 -0
- ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/model-00001-of-00004.safetensors +3 -0
- ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/model-00004-of-00004.safetensors +3 -0
- ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/model.safetensors.index.json +693 -0
- ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/modeling_intern_vit.py +431 -0
- ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/modeling_internvl_chat.py +359 -0
- ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/preprocessor_config.json +19 -0
- ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/rng_state_0.pth +3 -0
- ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/rng_state_1.pth +3 -0
- ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/rng_state_2.pth +3 -0
- ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/rng_state_3.pth +3 -0
- ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/scheduler.pt +3 -0
- ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/special_tokens_map.json +31 -0
- ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/tokenizer_config.json +280 -0
- ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/trainer_state.json +429 -0
- ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/vocab.json +0 -0
- ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/zero_to_fp32.py +760 -0
- ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/logging.jsonl +82 -0
- ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/val_dataset.jsonl +0 -0
- ood/qwen2.5vl-7b-full_sft_ood/v1-20251004-154120/args.json +384 -0
- ood/qwen2.5vl-7b-full_sft_ood/v1-20251004-154120/checkpoint-304/added_tokens.json +24 -0
- ood/qwen2.5vl-7b-full_sft_ood/v1-20251004-154120/checkpoint-304/args.json +384 -0
- ood/qwen2.5vl-7b-full_sft_ood/v1-20251004-154120/checkpoint-304/chat_template.jinja +7 -0
- ood/qwen2.5vl-7b-full_sft_ood/v1-20251004-154120/checkpoint-304/config.json +138 -0
- ood/qwen2.5vl-7b-full_sft_ood/v1-20251004-154120/checkpoint-304/generation_config.json +12 -0
- ood/qwen2.5vl-7b-full_sft_ood/v1-20251004-154120/checkpoint-304/latest +1 -0
- ood/qwen2.5vl-7b-full_sft_ood/v1-20251004-154120/logging.jsonl +166 -0
.gitattributes
CHANGED
@@ -61,3 +61,4 @@ llava-ov-lora/tokenizer.json filter=lfs diff=lfs merge=lfs -text
 internvl3-8b-instruct-lora_epoch10_5e-6/tokenizer.json filter=lfs diff=lfs merge=lfs -text
 qwen2.5vl-7b-qvq_thinking_full_v2/v0-20250823-125422/checkpoint-280/tokenizer.json filter=lfs diff=lfs merge=lfs -text
 ood/qwen2.5vl-7b-thinking_full_v3_ood_wd001_e10-checkpoint-228/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ood/ivl-8b-instruct-full_sft_ood/v0-20251004-170240/checkpoint-228/tokenizer.json filter=lfs diff=lfs merge=lfs -text
ood/ivl-8b-instruct-full_sft_ood/v0-20251004-170240/checkpoint-228/model-00001-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a6120fda4d24a89d0c6727f21dbb5c7d912949ff73fec9215b417d888088ea61
+size 4991123960
ood/ivl-8b-instruct-full_sft_ood/v0-20251004-170240/checkpoint-228/model-00002-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:741ec72cbb1ca29f64e84aa34c896bddf28ba23f1aa7823ec58044987fe77ea8
+size 4958443072
ood/ivl-8b-instruct-full_sft_ood/v0-20251004-170240/checkpoint-228/model-00003-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1e708763ce6944322a4c0c839cb165c40e99d2aa5f7307fc090c97d221673813
+size 4796984024
ood/ivl-8b-instruct-full_sft_ood/v0-20251004-170240/checkpoint-228/model-00004-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f5f2ddcefe8097dd39857e3e07157c35edde43d603301f4223590892315a11fa
+size 1142280864
ood/ivl-8b-instruct-full_sft_ood/v0-20251004-170240/checkpoint-228/rng_state_0.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:08c830b56eaefdd9b372fae4488e1ba93148ecf58aa2ed05c8103ab7afe964ee
+size 15365
ood/ivl-8b-instruct-full_sft_ood/v0-20251004-170240/checkpoint-228/rng_state_1.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b6f3949e2b25ad05905da16bc2d58aeadc9a9f1db0b0e97a04ceb63467acff1d
+size 15429
ood/ivl-8b-instruct-full_sft_ood/v0-20251004-170240/checkpoint-228/rng_state_2.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5c3812c5e7f13c9d32f97d83cd625428061b1c9486e869caffd453d83ac07ceb
+size 15429
ood/ivl-8b-instruct-full_sft_ood/v0-20251004-170240/checkpoint-228/rng_state_3.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6eda756b33768639c1e1932ae00c5d43269de5705c09162bc09acdf71d3533c0
+size 15429
ood/ivl-8b-instruct-full_sft_ood/v0-20251004-170240/checkpoint-228/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:384610a93cdb0dbf9f7ae8a31314cca71b4e2080e5883139a239afbb81a12680
+size 1465
ood/ivl-8b-instruct-full_sft_ood/v0-20251004-170240/checkpoint-228/tokenizer.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6f9ba4b4a6625b5047a1356f6081b641c3e4e6a4a198facbd4bef217747d1685
+size 11423548
ood/ivl-8b-instruct-full_sft_ood/v0-20251004-170240/checkpoint-228/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2ed47cc7ce6963bb954a625c84c64cbecc03cd4252442ae976f419c066288820
+size 9105
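Each of the files above is stored as a three-line Git LFS pointer (spec version, sha256 oid, payload size in bytes) rather than as the payload itself; the first safetensors shard, for example, should hash to the oid shown and weigh in at 4991123960 bytes (about 4.65 GiB). Below is a minimal sketch for checking a locally downloaded object against its pointer. It assumes exactly the key/value layout shown above, and the helper names are illustrative, not part of any LFS tooling:

import hashlib
import os

def parse_lfs_pointer(pointer_path):
    # pointer files are plain text: "version <spec-url>", "oid sha256:<hex>", "size <bytes>"
    fields = {}
    with open(pointer_path) as f:
        for line in f:
            if line.strip():
                key, value = line.strip().split(" ", 1)
                fields[key] = value
    return fields["oid"].split(":", 1)[1], int(fields["size"])

def verify_lfs_object(pointer_path, payload_path, chunk_size=1 << 20):
    # stream the payload so multi-GB safetensors shards don't have to fit in RAM
    expected_oid, expected_size = parse_lfs_pointer(pointer_path)
    if os.path.getsize(payload_path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(payload_path, "rb") as f:
        while chunk := f.read(chunk_size):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid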
ood/ivl-8b-instruct-full_sft_ood/v0-20251004-170240/checkpoint-228/zero_to_fp32.py
ADDED
@@ -0,0 +1,760 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from ZeRO 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example:
+# python zero_to_fp32.py . output_dir/
+# or
+# python zero_to_fp32.py . output_dir/ --safe_serialization
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+import gc
+import json
+import numpy as np
+from tqdm import tqdm
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
+# DeepSpeed data structures it has to be available in the current python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+                                            FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+                                            FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
+@dataclass
+class zero_model_state:
+    buffers: dict()
+    param_shapes: dict()
+    shared_params: list
+    ds_version: int
+    frozen_param_shapes: dict()
+    frozen_param_fragments: dict()
+
+
+debug = 0
+
+# load to cpu
+device = torch.device('cpu')
+
+
+def atoi(text):
+    return int(text) if text.isdigit() else text
+
+
+def natural_keys(text):
+    '''
+    alist.sort(key=natural_keys) sorts in human order
+    http://nedbatchelder.com/blog/200712/human_sorting.html
+    (See Toothy's implementation in the comments)
+    '''
+    return [atoi(c) for c in re.split(r'(\d+)', text)]
+
+
+def get_model_state_file(checkpoint_dir, zero_stage):
+    if not os.path.isdir(checkpoint_dir):
+        raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
+
+    # there should be only one file
+    if zero_stage <= 2:
+        file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
+    elif zero_stage == 3:
+        file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
+
+    if not os.path.exists(file):
+        raise FileNotFoundError(f"can't find model states file at '{file}'")
+
+    return file
+
+
+def get_checkpoint_files(checkpoint_dir, glob_pattern):
+    # XXX: need to test that this simple glob rule works for multi-node setup too
+    ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
+
+    if len(ckpt_files) == 0:
+        raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
+
+    return ckpt_files
+
+
+def get_optim_files(checkpoint_dir):
+    return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
+
+
+def get_model_state_files(checkpoint_dir):
+    return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
+
+
+def parse_model_states(files):
+    zero_model_states = []
+    for file in files:
+        state_dict = torch.load(file, map_location=device, weights_only=False)
+
+        if BUFFER_NAMES not in state_dict:
+            raise ValueError(f"{file} is not a model state checkpoint")
+        buffer_names = state_dict[BUFFER_NAMES]
+        if debug:
+            print("Found buffers:", buffer_names)
+
+        # recover just the buffers while restoring them to fp32 if they were saved in fp16
+        buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
+        param_shapes = state_dict[PARAM_SHAPES]
+
+        # collect parameters that are included in param_shapes
+        param_names = []
+        for s in param_shapes:
+            for name in s.keys():
+                param_names.append(name)
+
+        # update with frozen parameters
+        frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
+        if frozen_param_shapes is not None:
+            if debug:
+                print(f"Found frozen_param_shapes: {frozen_param_shapes}")
+            param_names += list(frozen_param_shapes.keys())
+
+        # handle shared params
+        shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
+
+        ds_version = state_dict.get(DS_VERSION, None)
+
+        frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
+
+        z_model_state = zero_model_state(buffers=buffers,
+                                         param_shapes=param_shapes,
+                                         shared_params=shared_params,
+                                         ds_version=ds_version,
+                                         frozen_param_shapes=frozen_param_shapes,
+                                         frozen_param_fragments=frozen_param_fragments)
+        zero_model_states.append(z_model_state)
+
+    return zero_model_states
+
+
+def parse_optim_states(files, ds_checkpoint_dir):
+    total_files = len(files)
+    state_dicts = []
+    for f in tqdm(files, desc='Loading checkpoint shards'):
+        state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False)
+        # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
+        # and also handle the case where it was already removed by another helper script
+        state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
+        state_dicts.append(state_dict)
+
+    if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
+        raise ValueError(f"{files[0]} is not a zero checkpoint")
+    zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
+    world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
+
+    # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
+    # parameters can be different from data parallelism for non-expert parameters. So we can just
+    # use the max of the partition_count to get the dp world_size.
+
+    if type(world_size) is list:
+        world_size = max(world_size)
+
+    if world_size != total_files:
+        raise ValueError(
+            f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
+            "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
+        )
+
+    # the groups are named differently in each stage
+    if zero_stage <= 2:
+        fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
+    elif zero_stage == 3:
+        fp32_groups_key = FP32_FLAT_GROUPS
+    else:
+        raise ValueError(f"unknown zero stage {zero_stage}")
+
+    fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
+    return zero_stage, world_size, fp32_flat_groups
+
+
+def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
+    """
+    Returns fp32 state_dict reconstructed from ds checkpoint
+
+    Args:
+        - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
+
+    """
+    print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
+
+    optim_files = get_optim_files(ds_checkpoint_dir)
+    zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
+    print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
+
+    model_files = get_model_state_files(ds_checkpoint_dir)
+
+    zero_model_states = parse_model_states(model_files)
+    print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
+
+    if zero_stage <= 2:
+        return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+                                                          exclude_frozen_parameters)
+    elif zero_stage == 3:
+        return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+                                                          exclude_frozen_parameters)
+
+
+def _zero2_merge_frozen_params(state_dict, zero_model_states):
+    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+        return
+
+    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+    frozen_param_fragments = zero_model_states[0].frozen_param_fragments
+
+    if debug:
+        num_elem = sum(s.numel() for s in frozen_param_shapes.values())
+        print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+    wanted_params = len(frozen_param_shapes)
+    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+    avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
+    print(f'Frozen params: Have {avail_numel} numels to process.')
+    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+    total_params = 0
+    total_numel = 0
+    for name, shape in frozen_param_shapes.items():
+        total_params += 1
+        unpartitioned_numel = shape.numel()
+        total_numel += unpartitioned_numel
+
+        state_dict[name] = frozen_param_fragments[name]
+
+        if debug:
+            print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+
+    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _has_callable(obj, fn):
+    attr = getattr(obj, fn, None)
+    return callable(attr)
+
+
+def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+    param_shapes = zero_model_states[0].param_shapes
+
+    # Reconstruction protocol:
+    #
+    # XXX: document this
+
+    if debug:
+        for i in range(world_size):
+            for j in range(len(fp32_flat_groups[0])):
+                print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
+
+    # XXX: memory usage doubles here (zero2)
+    num_param_groups = len(fp32_flat_groups[0])
+    merged_single_partition_of_fp32_groups = []
+    for i in range(num_param_groups):
+        merged_partitions = [sd[i] for sd in fp32_flat_groups]
+        full_single_fp32_vector = torch.cat(merged_partitions, 0)
+        merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
+    avail_numel = sum(
+        [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
+
+    if debug:
+        wanted_params = sum([len(shapes) for shapes in param_shapes])
+        wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
+        # not asserting if there is a mismatch due to possible padding
+        print(f"Have {avail_numel} numels to process.")
+        print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+    # params
+    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+    # out-of-core computing solution
+    total_numel = 0
+    total_params = 0
+    for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
+        offset = 0
+        avail_numel = full_single_fp32_vector.numel()
+        for name, shape in shapes.items():
+
+            unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
+            total_numel += unpartitioned_numel
+            total_params += 1
+
+            if debug:
+                print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+            state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
+            offset += unpartitioned_numel
+
+        # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
+        # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
+        # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
+        # live optimizer object, so we are checking that the numbers are within the right range
+        align_to = 2 * world_size
+
+        def zero2_align(x):
+            return align_to * math.ceil(x / align_to)
+
+        if debug:
+            print(f"original offset={offset}, avail_numel={avail_numel}")
+
+        offset = zero2_align(offset)
+        avail_numel = zero2_align(avail_numel)
+
+        if debug:
+            print(f"aligned offset={offset}, avail_numel={avail_numel}")
+
+        # Sanity check
+        if offset != avail_numel:
+            raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+    print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+                                               exclude_frozen_parameters):
+    state_dict = OrderedDict()
+
+    # buffers
+    buffers = zero_model_states[0].buffers
+    state_dict.update(buffers)
+    if debug:
+        print(f"added {len(buffers)} buffers")
+
+    if not exclude_frozen_parameters:
+        _zero2_merge_frozen_params(state_dict, zero_model_states)
+
+    _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+    # recover shared parameters
+    for pair in zero_model_states[0].shared_params:
+        if pair[1] in state_dict:
+            state_dict[pair[0]] = state_dict[pair[1]]
+
+    return state_dict
+
+
+def zero3_partitioned_param_info(unpartitioned_numel, world_size):
+    remainder = unpartitioned_numel % world_size
+    padding_numel = (world_size - remainder) if remainder else 0
+    partitioned_numel = math.ceil(unpartitioned_numel / world_size)
+    return partitioned_numel, padding_numel
+
+
+def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
+    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+        return
+
+    if debug:
+        for i in range(world_size):
+            num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
+            print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+        frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+        wanted_params = len(frozen_param_shapes)
+        wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+        avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
+        print(f'Frozen params: Have {avail_numel} numels to process.')
+        print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+    total_params = 0
+    total_numel = 0
+    for name, shape in zero_model_states[0].frozen_param_shapes.items():
+        total_params += 1
+        unpartitioned_numel = shape.numel()
+        total_numel += unpartitioned_numel
+
+        param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
+        state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
+
+        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+        if debug:
+            print(
+                f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+            )
+
+    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+class GatheredTensor:
+    """
+    A pseudo tensor that collects partitioned weights.
+    It is more memory efficient when there are multiple groups.
+    """
+
+    def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape):
+        self.flat_groups = flat_groups
+        self.flat_groups_offset = flat_groups_offset
+        self.offset = offset
+        self.partitioned_numel = partitioned_numel
+        self.shape = shape
+        self.dtype = self.flat_groups[0][0].dtype
+
+    def contiguous(self):
+        """
+        Merge partitioned weights from flat_groups into a single tensor.
+        """
+        end_idx = self.offset + self.partitioned_numel
+        world_size = len(self.flat_groups)
+        pad_flat_param_chunks = []
+
+        for rank_i in range(world_size):
+            # for each rank, we need to collect weights from related group/groups
+            flat_groups_at_rank_i = self.flat_groups[rank_i]
+            start_group_id = None
+            end_group_id = None
+            for group_id in range(len(self.flat_groups_offset)):
+                if self.flat_groups_offset[group_id] <= self.offset < self.flat_groups_offset[group_id + 1]:
+                    start_group_id = group_id
+                if self.flat_groups_offset[group_id] < end_idx <= self.flat_groups_offset[group_id + 1]:
+                    end_group_id = group_id
+                    break
+            # collect weights from related group/groups
+            for group_id in range(start_group_id, end_group_id + 1):
+                flat_tensor = flat_groups_at_rank_i[group_id]
+                start_offset = self.offset - self.flat_groups_offset[group_id]
+                end_offset = min(end_idx, self.flat_groups_offset[group_id + 1]) - self.flat_groups_offset[group_id]
+                pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset])
+
+        # collect weights from all ranks
+        pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0)
+        param = pad_flat_param[:self.shape.numel()].view(self.shape).contiguous()
+        return param
+
+
+def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+    param_shapes = zero_model_states[0].param_shapes
+    avail_numel = sum([flat_group.numel() for flat_group in fp32_flat_groups[0]]) * world_size
+
+    # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
+    # param, re-consolidating each param, while dealing with padding if any
+
+    # merge list of dicts, preserving order
+    param_shapes = {k: v for d in param_shapes for k, v in d.items()}
+
+    if debug:
+        for i in range(world_size):
+            print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
+
+        wanted_params = len(param_shapes)
+        wanted_numel = sum(shape.numel() for shape in param_shapes.values())
+        # not asserting if there is a mismatch due to possible padding
+        avail_numel = fp32_flat_groups[0].numel() * world_size
+        print(f"Trainable params: Have {avail_numel} numels to process.")
+        print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
+
+    # params
+    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+    # out-of-core computing solution
+    offset = 0
+    total_numel = 0
+    total_params = 0
+    flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]]))
+    for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'):
+        unpartitioned_numel = shape.numel()
+        total_numel += unpartitioned_numel
+        total_params += 1
+        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+        if debug:
+            print(
+                f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+            )
+
+        # memory efficient tensor
+        tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape)
+        state_dict[name] = tensor
+        offset += partitioned_numel
+
+    offset *= world_size
+
+    # Sanity check
+    if offset != avail_numel:
+        raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+    print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+                                               exclude_frozen_parameters):
+    state_dict = OrderedDict()
+
+    # buffers
+    buffers = zero_model_states[0].buffers
+    state_dict.update(buffers)
+    if debug:
+        print(f"added {len(buffers)} buffers")
+
+    if not exclude_frozen_parameters:
+        _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+    _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+    # recover shared parameters
+    for pair in zero_model_states[0].shared_params:
+        if pair[1] in state_dict:
+            state_dict[pair[0]] = state_dict[pair[1]]
+
+    return state_dict
+
+
+def to_torch_tensor(state_dict, return_empty_tensor=False):
+    """
+    Convert state_dict of GatheredTensor to torch tensor
+    """
+    torch_state_dict = {}
+    converted_tensors = {}
+    for name, tensor in state_dict.items():
+        tensor_id = id(tensor)
+        if tensor_id in converted_tensors:  # shared tensors
+            shared_tensor = torch_state_dict[converted_tensors[tensor_id]]
+            torch_state_dict[name] = shared_tensor
+        else:
+            converted_tensors[tensor_id] = name
+            if return_empty_tensor:
+                torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype)
+            else:
+                torch_state_dict[name] = tensor.contiguous()
+    return torch_state_dict
+
+
+def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
+                                             tag=None,
+                                             exclude_frozen_parameters=False,
+                                             lazy_mode=False):
+    """
+    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+    ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+    via a model hub.
+
+    Args:
+        - ``checkpoint_dir``: path to the desired checkpoint folder
+        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
+        - ``exclude_frozen_parameters``: exclude frozen parameters
+        - ``lazy_mode``: get state_dict in lazy mode. It returns a dict of pseudo tensors instead of torch tensors, which is more memory efficient.
+          Convert a pseudo tensor to a torch tensor by ``.contiguous()``
+
+    Returns:
+        - pytorch ``state_dict``
+
+    A typical usage might be ::
+
+        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+        # do the training and checkpoint saving
+        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+        model = model.cpu() # move to cpu
+        model.load_state_dict(state_dict)
+        # submit to model hub or save the model to share with others
+
+    In this example the ``model`` will no longer be usable in the deepspeed context of the same
+    application. i.e. you will need to re-initialize the deepspeed engine, since
+    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+    If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+    Note: the above usage may not work if your application doesn't have sufficient free CPU memory.
+    You may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
+    the checkpoint. Or you can load the state_dict in lazy mode ::
+
+        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True) # not on cpu
+        for name, lazy_tensor in state_dict.items():
+            tensor = lazy_tensor.contiguous()  # to cpu
+            print(name, tensor)
+            # del tensor to release memory if it is no longer in use
+    """
+    if tag is None:
+        latest_path = os.path.join(checkpoint_dir, 'latest')
+        if os.path.isfile(latest_path):
+            with open(latest_path, 'r') as fd:
+                tag = fd.read().strip()
+        else:
+            raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+    if not os.path.isdir(ds_checkpoint_dir):
+        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+    state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
+    if lazy_mode:
+        return state_dict
+    else:
+        return to_torch_tensor(state_dict)
+
+
+def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
+                                               output_dir,
+                                               max_shard_size="5GB",
+                                               safe_serialization=False,
+                                               tag=None,
+                                               exclude_frozen_parameters=False):
+    """
+    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+    loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+    Args:
+        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+        - ``output_dir``: directory to the pytorch fp32 state_dict output files
+        - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
+        - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
+        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+        - ``exclude_frozen_parameters``: exclude frozen parameters
+    """
+
+    # Dependency pre-check
+    if safe_serialization:
+        try:
+            from safetensors.torch import save_file
+        except ImportError:
+            print('If you want to use `safe_serialization`, please `pip install safetensors`')
+            raise
+    if max_shard_size is not None:
+        try:
+            from huggingface_hub import split_torch_state_dict_into_shards
+        except ImportError:
+            print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
+            raise
+
+    # Convert zero checkpoint to state_dict
+    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
+                                                          tag,
+                                                          exclude_frozen_parameters,
+                                                          lazy_mode=True)
+
+    # Shard the model if it is too big.
+    weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
+    if max_shard_size is not None:
+        filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
+        # a memory-efficient approach for sharding
+        empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True)
+        state_dict_split = split_torch_state_dict_into_shards(empty_state_dict,
+                                                              filename_pattern=filename_pattern,
+                                                              max_shard_size=max_shard_size)
+    else:
+        from collections import namedtuple
+        StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
+        state_dict_split = StateDictSplit(is_sharded=False,
+                                          filename_to_tensors={weights_name: list(state_dict.keys())})
+
+    # Save the model by shard
+    os.makedirs(output_dir, exist_ok=True)
+    filename_to_tensors = state_dict_split.filename_to_tensors.items()
+    for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
+        shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors}
+        shard_state_dict = to_torch_tensor(shard_state_dict)
+        output_path = os.path.join(output_dir, shard_file)
+        if safe_serialization:
+            save_file(shard_state_dict, output_path, metadata={"format": "pt"})
+        else:
+            torch.save(shard_state_dict, output_path)
+        # release the memory of current shard
+        for tensor_name in list(shard_state_dict.keys()):
+            del state_dict[tensor_name]
+            del shard_state_dict[tensor_name]
+        del shard_state_dict
+        gc.collect()
+
+    # Save index if sharded
+    if state_dict_split.is_sharded:
+        index = {
+            "metadata": state_dict_split.metadata,
+            "weight_map": state_dict_split.tensor_to_filename,
+        }
+        save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
+        save_index_file = os.path.join(output_dir, save_index_file)
+        with open(save_index_file, "w", encoding="utf-8") as f:
+            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
+            f.write(content)
+
+
+def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+    """
+    1. Put the provided model to cpu
+    2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+    3. Load it into the provided model
+
+    Args:
+        - ``model``: the model object to update
+        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+    Returns:
+        - ``model``: modified model
+
+    Make sure you have plenty of CPU memory available before you call this function. If you don't
+    have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+    conveniently placed for you in the checkpoint folder.
+
+    A typical usage might be ::
+
+        from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+        model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+        # submit to model hub or save the model to share with others
+
+    Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
+    of the same application. i.e. you will need to re-initialize the deepspeed engine, since
+    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+    """
+    logger.info("Extracting fp32 weights")
+    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+    logger.info("Overwriting model with fp32 weights")
+    model = model.cpu()
+    model.load_state_dict(state_dict, strict=False)
+
+    return model
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("checkpoint_dir",
+                        type=str,
+                        help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+    parser.add_argument("output_dir",
+                        type=str,
+                        help="directory to the pytorch fp32 state_dict output files "
+                        "(e.g. path/checkpoint-12-output/)")
+    parser.add_argument(
+        "--max_shard_size",
+        type=str,
+        default="5GB",
+        help="The maximum size for a checkpoint before being sharded. Checkpoint shards will then each be of a size "
+        "lower than this. If expressed as a string, it needs to be digits followed by a unit (like `5MB`). "
+        "We default it to 5GB in order for models to be able to run easily on free-tier google colab instances "
+        "without CPU OOM issues.")
+    parser.add_argument(
+        "--safe_serialization",
+        default=False,
+        action='store_true',
+        help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
+    parser.add_argument("-t",
+                        "--tag",
+                        type=str,
+                        default=None,
+                        help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
+    parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
+    parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+    args = parser.parse_args()
+
+    debug = args.debug
+
+    convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
+                                               args.output_dir,
+                                               max_shard_size=args.max_shard_size,
+                                               safe_serialization=args.safe_serialization,
+                                               tag=args.tag,
+                                               exclude_frozen_parameters=args.exclude_frozen_parameters)
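The header comment explains that this script is copied into every checkpoint directory so the ZeRO shards can be consolidated later without the DeepSpeed training setup. Below is a minimal sketch of applying it to the checkpoint added in this commit, calling the module's own convert_zero_checkpoint_to_fp32_state_dict. The output directory is illustrative; deepspeed must be installed, since the pickled shards reference its data structures, and safetensors plus huggingface_hub are needed for the options used here:

import sys

# make the copy of zero_to_fp32.py that ships inside the checkpoint importable
ckpt = "ood/ivl-8b-instruct-full_sft_ood/v0-20251004-170240/checkpoint-228"
sys.path.insert(0, ckpt)
from zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict

# resolves the tag from the checkpoint's `latest` file, gathers the ZeRO-3 shards
# lazily, and writes sharded fp32 safetensors plus model.safetensors.index.json
convert_zero_checkpoint_to_fp32_state_dict(
    ckpt,
    "fp32_output/",        # illustrative output directory
    max_shard_size="5GB",
    safe_serialization=True,
)

The equivalent CLI invocation, per the script's own header comment, is `python zero_to_fp32.py . fp32_output/ --safe_serialization`, run from inside the checkpoint directory.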
ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/args.json
ADDED
@@ -0,0 +1,385 @@
+{
+  "output_dir": "/mnt/data/users/liamding/data/MMMT/lora/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639",
+  "overwrite_output_dir": false,
+  "do_train": false,
+  "do_eval": false,
+  "do_predict": false,
+  "eval_strategy": "epoch",
+  "prediction_loss_only": false,
+  "per_device_train_batch_size": 2,
+  "per_device_eval_batch_size": 2,
+  "per_gpu_train_batch_size": null,
+  "per_gpu_eval_batch_size": null,
+  "gradient_accumulation_steps": 2,
+  "eval_accumulation_steps": null,
+  "eval_delay": 0,
+  "torch_empty_cache_steps": null,
+  "learning_rate": 2e-06,
+  "weight_decay": 0.01,
+  "adam_beta1": 0.9,
+  "adam_beta2": 0.95,
+  "adam_epsilon": 1e-08,
+  "max_grad_norm": 1.0,
+  "num_train_epochs": 5.0,
+  "max_steps": -1,
+  "lr_scheduler_type": "cosine",
+  "lr_scheduler_kwargs": null,
+  "warmup_ratio": 0.1,
+  "warmup_steps": 0,
+  "log_level": "passive",
+  "log_level_replica": "warning",
+  "log_on_each_node": true,
+  "logging_dir": "/mnt/data/users/liamding/data/MMMT/lora/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/runs",
+  "logging_strategy": "steps",
+  "logging_first_step": true,
+  "logging_steps": 5,
+  "logging_nan_inf_filter": true,
+  "save_strategy": "epoch",
+  "save_steps": 500,
+  "save_total_limit": 10,
+  "save_safetensors": true,
+  "save_on_each_node": false,
+  "save_only_model": false,
+  "restore_callback_states_from_checkpoint": false,
+  "no_cuda": false,
+  "use_cpu": false,
+  "use_mps_device": false,
+  "seed": 42,
+  "data_seed": 42,
+  "jit_mode_eval": false,
+  "use_ipex": false,
+  "bf16": true,
+  "fp16": false,
+  "fp16_opt_level": "O1",
+  "half_precision_backend": "auto",
+  "bf16_full_eval": false,
+  "fp16_full_eval": false,
+  "tf32": null,
+  "local_rank": 0,
+  "ddp_backend": null,
+  "tpu_num_cores": null,
+  "tpu_metrics_debug": false,
+  "debug": null,
+  "dataloader_drop_last": false,
+  "eval_steps": null,
+  "dataloader_num_workers": 4,
+  "dataloader_prefetch_factor": null,
+  "past_index": -1,
+  "run_name": "/mnt/data/users/liamding/data/MMMT/lora/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639",
+  "disable_tqdm": null,
+  "remove_unused_columns": true,
+  "label_names": null,
+  "load_best_model_at_end": true,
+  "metric_for_best_model": "eval_loss",
+  "greater_is_better": false,
+  "ignore_data_skip": false,
+  "fsdp": "",
+  "fsdp_min_num_params": 0,
+  "fsdp_config": null,
+  "fsdp_transformer_layer_cls_to_wrap": null,
+  "accelerator_config": {
+    "dispatch_batches": false
+  },
+  "deepspeed": {
+    "fp16": {
+      "enabled": "auto",
+      "loss_scale": 0,
+      "loss_scale_window": 1000,
+      "initial_scale_power": 16,
+      "hysteresis": 2,
+      "min_loss_scale": 1
+    },
+    "bf16": {
+      "enabled": "auto"
+    },
+    "zero_optimization": {
+      "stage": 3,
+      "offload_optimizer": {
+        "device": "none",
+        "pin_memory": true
+      },
+      "offload_param": {
+        "device": "none",
+        "pin_memory": true
+      },
+      "overlap_comm": false,
+      "contiguous_gradients": true,
+      "sub_group_size": 1000000000.0,
+      "reduce_bucket_size": "auto",
+      "zero_quantized_weights": false,
+      "zero_quantized_gradients": false,
+      "stage3_prefetch_bucket_size": "auto",
+      "stage3_param_persistence_threshold": "auto",
+      "stage3_max_live_parameters": 1000000000.0,
+      "stage3_max_reuse_distance": 1000000000.0,
+      "stage3_gather_16bit_weights_on_model_save": true
+    },
+    "gradient_accumulation_steps": "auto",
+    "gradient_clipping": "auto",
+    "steps_per_print": 2000,
+    "train_batch_size": "auto",
+    "train_micro_batch_size_per_gpu": "auto",
+    "wall_clock_breakdown": false
+  },
+  "label_smoothing_factor": 0.0,
+  "optim": "adamw_torch",
+  "optim_args": null,
+  "adafactor": false,
+  "group_by_length": false,
+  "length_column_name": "length",
+  "report_to": [
+    "swanlab"
+  ],
+  "ddp_find_unused_parameters": null,
+  "ddp_bucket_cap_mb": null,
+  "ddp_broadcast_buffers": null,
+  "dataloader_pin_memory": true,
+  "dataloader_persistent_workers": false,
+  "skip_memory_metrics": true,
+  "use_legacy_prediction_loop": false,
+  "push_to_hub": false,
+  "resume_from_checkpoint": null,
+  "hub_model_id": null,
+  "hub_strategy": "every_save",
+  "hub_token": null,
+  "hub_private_repo": null,
+  "hub_always_push": false,
+  "hub_revision": null,
+  "gradient_checkpointing": true,
+  "gradient_checkpointing_kwargs": null,
+  "include_inputs_for_metrics": false,
+  "include_for_metrics": [],
+  "eval_do_concat_batches": true,
+  "fp16_backend": "auto",
+  "push_to_hub_model_id": null,
+  "push_to_hub_organization": null,
+  "push_to_hub_token": null,
+  "mp_parameters": "",
+  "auto_find_batch_size": false,
+  "full_determinism": false,
+  "torchdynamo": null,
+  "ray_scope": "last",
+  "ddp_timeout": 18000000,
+  "torch_compile": false,
+  "torch_compile_backend": null,
+  "torch_compile_mode": null,
+  "include_tokens_per_second": false,
+  "include_num_input_tokens_seen": false,
+  "neftune_noise_alpha": null,
+  "optim_target_modules": null,
+  "batch_eval_metrics": false,
+  "eval_on_start": false,
+  "use_liger_kernel": false,
+  "liger_kernel_config": null,
+  "eval_use_gather_object": false,
+  "average_tokens_across_devices": true,
+  "sortish_sampler": false,
+  "predict_with_generate": false,
+  "generation_max_length": null,
+  "generation_num_beams": null,
+  "generation_config": null,
+  "tuner_backend": "peft",
+  "vit_gradient_checkpointing": null,
+  "router_aux_loss_coef": 0.0,
+  "enable_dft_loss": false,
+  "enable_channel_loss": false,
+  "check_model": true,
+
"acc_strategy": "token",
|
| 188 |
+
"train_dataloader_shuffle": true,
|
| 189 |
+
"max_epochs": null,
|
| 190 |
+
"aligner_lr": null,
|
| 191 |
+
"vit_lr": null,
|
| 192 |
+
"use_logits_to_keep": null,
|
| 193 |
+
"ds3_gather_for_generation": true,
|
| 194 |
+
"resume_only_model": false,
|
| 195 |
+
"optimizer": null,
|
| 196 |
+
"loss_type": null,
|
| 197 |
+
"metric": null,
|
| 198 |
+
"eval_use_evalscope": false,
|
| 199 |
+
"eval_dataset": [],
|
| 200 |
+
"eval_dataset_args": null,
|
| 201 |
+
"eval_limit": null,
|
| 202 |
+
"eval_generation_config": null,
|
| 203 |
+
"extra_eval_args": null,
|
| 204 |
+
"use_flash_ckpt": false,
|
| 205 |
+
"model": "/mnt/data/users/liamding/data/models/InternVL3-8B-Instruct",
|
| 206 |
+
"model_type": "internvl3",
|
| 207 |
+
"model_revision": null,
|
| 208 |
+
"task_type": "causal_lm",
|
| 209 |
+
"torch_dtype": "bfloat16",
|
| 210 |
+
"attn_impl": null,
|
| 211 |
+
"new_special_tokens": [],
|
| 212 |
+
"num_labels": null,
|
| 213 |
+
"problem_type": null,
|
| 214 |
+
"rope_scaling": null,
|
| 215 |
+
"device_map": null,
|
| 216 |
+
"max_memory": {},
|
| 217 |
+
"max_model_len": null,
|
| 218 |
+
"local_repo_path": null,
|
| 219 |
+
"init_strategy": null,
|
| 220 |
+
"template": "internvl2_5",
|
| 221 |
+
"system": null,
|
| 222 |
+
"max_length": 32768,
|
| 223 |
+
"truncation_strategy": "delete",
|
| 224 |
+
"max_pixels": null,
|
| 225 |
+
"agent_template": null,
|
| 226 |
+
"norm_bbox": null,
|
| 227 |
+
"use_chat_template": true,
|
| 228 |
+
"padding_free": false,
|
| 229 |
+
"padding_side": "right",
|
| 230 |
+
"loss_scale": "default",
|
| 231 |
+
"sequence_parallel_size": 1,
|
| 232 |
+
"response_prefix": null,
|
| 233 |
+
"template_backend": "swift",
|
| 234 |
+
"dataset": [
|
| 235 |
+
"/mnt/data/users/liamding/data/3AM_Plus/final/training/qvq-thinking_answer/ambi_normal_train_thinking_772.json",
|
| 236 |
+
"/mnt/data/users/liamding/data/3AM_Plus/final/training/qvq-thinking_answer/ambi_normal_aug_575.json"
|
| 237 |
+
],
|
| 238 |
+
"val_dataset": [],
|
| 239 |
+
"split_dataset_ratio": 0.1,
|
| 240 |
+
"dataset_num_proc": 1,
|
| 241 |
+
"load_from_cache_file": true,
|
| 242 |
+
"dataset_shuffle": true,
|
| 243 |
+
"val_dataset_shuffle": false,
|
| 244 |
+
"streaming": false,
|
| 245 |
+
"interleave_prob": null,
|
| 246 |
+
"stopping_strategy": "first_exhausted",
|
| 247 |
+
"shuffle_buffer_size": 1000,
|
| 248 |
+
"download_mode": "reuse_dataset_if_exists",
|
| 249 |
+
"columns": {},
|
| 250 |
+
"strict": false,
|
| 251 |
+
"model_name": null,
|
| 252 |
+
"model_author": null,
|
| 253 |
+
"custom_dataset_info": [],
|
| 254 |
+
"quant_method": null,
|
| 255 |
+
"quant_bits": null,
|
| 256 |
+
"hqq_axis": null,
|
| 257 |
+
"bnb_4bit_compute_dtype": "bfloat16",
|
| 258 |
+
"bnb_4bit_quant_type": "nf4",
|
| 259 |
+
"bnb_4bit_use_double_quant": true,
|
| 260 |
+
"bnb_4bit_quant_storage": null,
|
| 261 |
+
"max_new_tokens": 64,
|
| 262 |
+
"temperature": 0.0,
|
| 263 |
+
"top_k": null,
|
| 264 |
+
"top_p": null,
|
| 265 |
+
"repetition_penalty": null,
|
| 266 |
+
"num_beams": 1,
|
| 267 |
+
"stream": false,
|
| 268 |
+
"stop_words": [],
|
| 269 |
+
"logprobs": false,
|
| 270 |
+
"top_logprobs": null,
|
| 271 |
+
"ckpt_dir": null,
|
| 272 |
+
"lora_modules": [],
|
| 273 |
+
"train_type": "full",
|
| 274 |
+
"adapters": [],
|
| 275 |
+
"external_plugins": [],
|
| 276 |
+
"model_kwargs": {},
|
| 277 |
+
"load_args": false,
|
| 278 |
+
"load_data_args": false,
|
| 279 |
+
"packing": false,
|
| 280 |
+
"packing_length": null,
|
| 281 |
+
"lazy_tokenize": true,
|
| 282 |
+
"cached_dataset": [],
|
| 283 |
+
"custom_register_path": [],
|
| 284 |
+
"use_hf": false,
|
| 285 |
+
"ignore_args_error": false,
|
| 286 |
+
"use_swift_lora": false,
|
| 287 |
+
"freeze_parameters": [
|
| 288 |
+
"vision_model",
|
| 289 |
+
"mlp1"
|
| 290 |
+
],
|
| 291 |
+
"freeze_parameters_regex": null,
|
| 292 |
+
"freeze_parameters_ratio": 0.0,
|
| 293 |
+
"trainable_parameters": [],
|
| 294 |
+
"trainable_parameters_regex": null,
|
| 295 |
+
"freeze_llm": false,
|
| 296 |
+
"freeze_vit": true,
|
| 297 |
+
"freeze_aligner": true,
|
| 298 |
+
"target_modules": [
|
| 299 |
+
"all-linear"
|
| 300 |
+
],
|
| 301 |
+
"target_regex": null,
|
| 302 |
+
"target_parameters": null,
|
| 303 |
+
"modules_to_save": [],
|
| 304 |
+
"lora_rank": 8,
|
| 305 |
+
"lora_alpha": 32,
|
| 306 |
+
"lora_dropout": 0.05,
|
| 307 |
+
"lora_bias": "none",
|
| 308 |
+
"lora_dtype": null,
|
| 309 |
+
"lorap_lr_ratio": null,
|
| 310 |
+
"use_rslora": false,
|
| 311 |
+
"use_dora": false,
|
| 312 |
+
"lora_ga_batch_size": 2,
|
| 313 |
+
"lora_ga_iters": 2,
|
| 314 |
+
"lora_ga_max_length": 1024,
|
| 315 |
+
"lora_ga_direction": "ArB2r",
|
| 316 |
+
"lora_ga_scale": "stable",
|
| 317 |
+
"lora_ga_stable_gamma": 16,
|
| 318 |
+
"init_weights": true,
|
| 319 |
+
"fourier_n_frequency": 2000,
|
| 320 |
+
"fourier_scaling": 300.0,
|
| 321 |
+
"boft_block_size": 4,
|
| 322 |
+
"boft_block_num": 0,
|
| 323 |
+
"boft_n_butterfly_factor": 1,
|
| 324 |
+
"boft_dropout": 0.0,
|
| 325 |
+
"vera_rank": 256,
|
| 326 |
+
"vera_projection_prng_key": 0,
|
| 327 |
+
"vera_dropout": 0.0,
|
| 328 |
+
"vera_d_initial": 0.1,
|
| 329 |
+
"adapter_act": "gelu",
|
| 330 |
+
"adapter_length": 128,
|
| 331 |
+
"use_galore": false,
|
| 332 |
+
"galore_target_modules": null,
|
| 333 |
+
"galore_rank": 128,
|
| 334 |
+
"galore_update_proj_gap": 50,
|
| 335 |
+
"galore_scale": 1.0,
|
| 336 |
+
"galore_proj_type": "std",
|
| 337 |
+
"galore_optim_per_parameter": false,
|
| 338 |
+
"galore_with_embedding": false,
|
| 339 |
+
"galore_quantization": false,
|
| 340 |
+
"galore_proj_quant": false,
|
| 341 |
+
"galore_proj_bits": 4,
|
| 342 |
+
"galore_proj_group_size": 256,
|
| 343 |
+
"galore_cos_threshold": 0.4,
|
| 344 |
+
"galore_gamma_proj": 2,
|
| 345 |
+
"galore_queue_size": 5,
|
| 346 |
+
"adalora_target_r": 8,
|
| 347 |
+
"adalora_init_r": 12,
|
| 348 |
+
"adalora_tinit": 0,
|
| 349 |
+
"adalora_tfinal": 0,
|
| 350 |
+
"adalora_deltaT": 1,
|
| 351 |
+
"adalora_beta1": 0.85,
|
| 352 |
+
"adalora_beta2": 0.85,
|
| 353 |
+
"adalora_orth_reg_weight": 0.5,
|
| 354 |
+
"llamapro_num_new_blocks": 4,
|
| 355 |
+
"llamapro_num_groups": null,
|
| 356 |
+
"lisa_activated_layers": 0,
|
| 357 |
+
"lisa_step_interval": 20,
|
| 358 |
+
"reft_layer_key": null,
|
| 359 |
+
"reft_layers": null,
|
| 360 |
+
"reft_rank": 4,
|
| 361 |
+
"reft_intervention_type": "LoreftIntervention",
|
| 362 |
+
"reft_args": null,
|
| 363 |
+
"swanlab_token": null,
|
| 364 |
+
"swanlab_project": null,
|
| 365 |
+
"swanlab_workspace": null,
|
| 366 |
+
"swanlab_exp_name": "/mnt/data/users/liamding/data/MMMT/lora/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639",
|
| 367 |
+
"swanlab_lark_webhook_url": null,
|
| 368 |
+
"swanlab_lark_secret": null,
|
| 369 |
+
"swanlab_mode": "cloud",
|
| 370 |
+
"add_version": true,
|
| 371 |
+
"create_checkpoint_symlink": false,
|
| 372 |
+
"zero_hpz_partition_size": null,
|
| 373 |
+
"deepspeed_autotp_size": null,
|
| 374 |
+
"early_stop_interval": 200,
|
| 375 |
+
"rank": 0,
|
| 376 |
+
"global_world_size": 4,
|
| 377 |
+
"local_world_size": 4,
|
| 378 |
+
"model_suffix": "InternVL3-8B-Instruct",
|
| 379 |
+
"model_info": "ModelInfo(model_type='internvl3', model_dir='/mnt/data/users/liamding/data/models/InternVL3-8B-Instruct', torch_dtype=torch.bfloat16, max_model_len=32768, quant_method=None, quant_bits=None, rope_scaling={'factor': 2.0, 'rope_type': 'dynamic', 'type': 'dynamic'}, is_moe_model=False, config=None, task_type='causal_lm', num_labels=None)",
|
| 380 |
+
"model_meta": "ModelMeta(model_type='internvl3', model_groups=[ModelGroup(models=[Model(ms_model_id='OpenGVLab/InternVL3-1B-Pretrained', hf_model_id='OpenGVLab/InternVL3-1B-Pretrained', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-2B-Pretrained', hf_model_id='OpenGVLab/InternVL3-2B-Pretrained', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-8B-Pretrained', hf_model_id='OpenGVLab/InternVL3-8B-Pretrained', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-9B-Pretrained', hf_model_id='OpenGVLab/InternVL3-9B-Pretrained', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-14B-Pretrained', hf_model_id='OpenGVLab/InternVL3-14B-Pretrained', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-38B-Pretrained', hf_model_id='OpenGVLab/InternVL3-38B-Pretrained', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-78B-Pretrained', hf_model_id='OpenGVLab/InternVL3-78B-Pretrained', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[]), ModelGroup(models=[Model(ms_model_id='OpenGVLab/InternVL3-1B-Instruct', hf_model_id='OpenGVLab/InternVL3-1B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-2B-Instruct', hf_model_id='OpenGVLab/InternVL3-2B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-8B-Instruct', hf_model_id='OpenGVLab/InternVL3-8B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-9B-Instruct', hf_model_id='OpenGVLab/InternVL3-9B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-14B-Instruct', hf_model_id='OpenGVLab/InternVL3-14B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-38B-Instruct', hf_model_id='OpenGVLab/InternVL3-38B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-78B-Instruct', hf_model_id='OpenGVLab/InternVL3-78B-Instruct', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[]), ModelGroup(models=[Model(ms_model_id='OpenGVLab/InternVL3-1B', hf_model_id='OpenGVLab/InternVL3-1B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-2B', hf_model_id='OpenGVLab/InternVL3-2B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-8B', hf_model_id='OpenGVLab/InternVL3-8B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-9B', hf_model_id='OpenGVLab/InternVL3-9B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-14B', hf_model_id='OpenGVLab/InternVL3-14B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-38B', hf_model_id='OpenGVLab/InternVL3-38B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-78B', hf_model_id='OpenGVLab/InternVL3-78B', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[]), ModelGroup(models=[Model(ms_model_id='OpenGVLab/InternVL3-1B-AWQ', hf_model_id='OpenGVLab/InternVL3-1B-AWQ', model_path=None, 
ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-2B-AWQ', hf_model_id='OpenGVLab/InternVL3-2B-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-8B-AWQ', hf_model_id='OpenGVLab/InternVL3-8B-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-9B-AWQ', hf_model_id='OpenGVLab/InternVL3-9B-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-14B-AWQ', hf_model_id='OpenGVLab/InternVL3-14B-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-38B-AWQ', hf_model_id='OpenGVLab/InternVL3-38B-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-78B-AWQ', hf_model_id='OpenGVLab/InternVL3-78B-AWQ', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[])], template='internvl2_5', get_function=<function get_model_tokenizer_internvl at 0x7f4e7f5f2950>, model_arch=MultiModelKeys(arch_name='internvl', embedding=None, module_list=None, lm_head=None, q_proj=None, k_proj=None, v_proj=None, o_proj=None, attention=None, mlp=None, down_proj=None, qkv_proj=None, qk_proj=None, qa_proj=None, qb_proj=None, kv_proj=None, kva_proj=None, kvb_proj=None, language_model=['language_model'], aligner=['mlp1'], vision_tower=['vision_model'], generator=[]), architectures=['InternVLChatModel'], additional_saved_files=[], torch_dtype=None, is_multimodal=True, is_reward=False, task_type=None, ignore_patterns=None, requires=['transformers>=4.37.2', 'timm'], tags=['vision', 'video'])",
|
| 381 |
+
"model_dir": "/mnt/data/users/liamding/data/models/InternVL3-8B-Instruct",
|
| 382 |
+
"hub": "<class 'swift.hub.hub.MSHub'>",
|
| 383 |
+
"evaluation_strategy": "epoch",
|
| 384 |
+
"training_args": "Seq2SeqTrainingArguments(output_dir='/mnt/data/users/liamding/data/MMMT/lora/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639', overwrite_output_dir=False, do_train=False, do_eval=True, do_predict=False, eval_strategy=<IntervalStrategy.EPOCH: 'epoch'>, prediction_loss_only=False, per_device_train_batch_size=2, per_device_eval_batch_size=2, per_gpu_train_batch_size=None, per_gpu_eval_batch_size=None, gradient_accumulation_steps=2, eval_accumulation_steps=None, eval_delay=0, torch_empty_cache_steps=None, learning_rate=2e-06, weight_decay=0.01, adam_beta1=0.9, adam_beta2=0.95, adam_epsilon=1e-08, max_grad_norm=1.0, num_train_epochs=5.0, max_steps=-1, lr_scheduler_type=<SchedulerType.COSINE: 'cosine'>, lr_scheduler_kwargs=None, warmup_ratio=0.1, warmup_steps=0, log_level='passive', log_level_replica='warning', log_on_each_node=True, logging_dir='/mnt/data/users/liamding/data/MMMT/lora/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/runs', logging_strategy=<IntervalStrategy.STEPS: 'steps'>, logging_first_step=True, logging_steps=5, logging_nan_inf_filter=True, save_strategy=<SaveStrategy.EPOCH: 'epoch'>, save_steps=500, save_total_limit=10, save_safetensors=True, save_on_each_node=False, save_only_model=False, restore_callback_states_from_checkpoint=False, no_cuda=False, use_cpu=False, use_mps_device=False, seed=42, data_seed=42, jit_mode_eval=False, use_ipex=False, bf16=True, fp16=False, fp16_opt_level='O1', half_precision_backend='auto', bf16_full_eval=False, fp16_full_eval=False, tf32=None, local_rank=0, ddp_backend=None, tpu_num_cores=None, tpu_metrics_debug=False, debug=[], dataloader_drop_last=False, eval_steps=None, dataloader_num_workers=4, dataloader_prefetch_factor=10, past_index=-1, run_name='/mnt/data/users/liamding/data/MMMT/lora/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639', disable_tqdm=False, remove_unused_columns=False, label_names=None, load_best_model_at_end=True, metric_for_best_model='eval_loss', greater_is_better=False, ignore_data_skip=False, fsdp=[], fsdp_min_num_params=0, fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, fsdp_transformer_layer_cls_to_wrap=None, accelerator_config=AcceleratorConfig(split_batches=False, dispatch_batches=False, even_batches=True, use_seedable_sampler=True, non_blocking=False, gradient_accumulation_kwargs=None, use_configured_state=False), deepspeed={'fp16': {'enabled': 'auto', 'loss_scale': 0, 'loss_scale_window': 1000, 'initial_scale_power': 16, 'hysteresis': 2, 'min_loss_scale': 1}, 'bf16': {'enabled': 'auto'}, 'zero_optimization': {'stage': 3, 'offload_optimizer': {'device': 'none', 'pin_memory': True}, 'offload_param': {'device': 'none', 'pin_memory': True}, 'overlap_comm': False, 'contiguous_gradients': True, 'sub_group_size': 1000000000.0, 'reduce_bucket_size': 'auto', 'zero_quantized_weights': False, 'zero_quantized_gradients': False, 'stage3_prefetch_bucket_size': 'auto', 'stage3_param_persistence_threshold': 'auto', 'stage3_max_live_parameters': 1000000000.0, 'stage3_max_reuse_distance': 1000000000.0, 'stage3_gather_16bit_weights_on_model_save': True}, 'gradient_accumulation_steps': 'auto', 'gradient_clipping': 'auto', 'steps_per_print': 2000, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'wall_clock_breakdown': False}, label_smoothing_factor=0.0, optim=<OptimizerNames.ADAMW_TORCH: 'adamw_torch'>, optim_args=None, adafactor=False, group_by_length=False, length_column_name='length', 
report_to=['swanlab'], ddp_find_unused_parameters=None, ddp_bucket_cap_mb=None, ddp_broadcast_buffers=None, dataloader_pin_memory=True, dataloader_persistent_workers=False, skip_memory_metrics=True, use_legacy_prediction_loop=False, push_to_hub=False, resume_from_checkpoint=None, hub_model_id=None, hub_strategy=<HubStrategy.EVERY_SAVE: 'every_save'>, hub_token=None, hub_private_repo=None, hub_always_push=False, hub_revision=None, gradient_checkpointing=True, gradient_checkpointing_kwargs=None, include_inputs_for_metrics=False, include_for_metrics=[], eval_do_concat_batches=True, fp16_backend='auto', push_to_hub_model_id=None, push_to_hub_organization=None, push_to_hub_token=None, mp_parameters='', auto_find_batch_size=False, full_determinism=False, torchdynamo=None, ray_scope='last', ddp_timeout=18000000, torch_compile=False, torch_compile_backend=None, torch_compile_mode=None, include_tokens_per_second=None, include_num_input_tokens_seen=None, neftune_noise_alpha=None, optim_target_modules=None, batch_eval_metrics=False, eval_on_start=False, use_liger_kernel=False, liger_kernel_config=None, eval_use_gather_object=False, average_tokens_across_devices=None, sortish_sampler=False, predict_with_generate=False, generation_max_length=None, generation_num_beams=None, generation_config=None, tuner_backend='peft', vit_gradient_checkpointing=True, router_aux_loss_coef=0.0, enable_dft_loss=False, enable_channel_loss=False, check_model=True, acc_strategy='token', train_dataloader_shuffle=True, max_epochs=None, aligner_lr=None, vit_lr=None, use_logits_to_keep=None, ds3_gather_for_generation=True, resume_only_model=False, optimizer=None, loss_type=None, metric=None, eval_use_evalscope=False, eval_dataset=[], eval_dataset_args=None, eval_limit=None, eval_generation_config=None, extra_eval_args=None, use_flash_ckpt=False, sft_alpha=0, train_type='full', local_repo_path=None, galore_config=None)"
|
| 385 |
+
}
|
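One derived quantity worth noting from the arguments above: with 4 ranks, a per-device batch of 2 and 2 gradient-accumulation steps, the effective global batch size is 16. A minimal sketch that recomputes it from a local copy of the file (the `args.json` path is illustrative, not part of the repo layout):

```python
import json

# Recompute the effective global batch size from the run's args.json.
# "args.json" is an illustrative local path.
with open("args.json") as f:
    args = json.load(f)

effective_batch = (
    args["per_device_train_batch_size"]    # 2
    * args["gradient_accumulation_steps"]  # 2
    * args["global_world_size"]            # 4 ranks
)
print(f"effective global batch size: {effective_batch}")  # -> 16
```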
ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/added_tokens.json
ADDED
@@ -0,0 +1,33 @@
+{
+  "</box>": 151673,
+  "</img>": 151666,
+  "</quad>": 151669,
+  "</ref>": 151671,
+  "</tool_call>": 151658,
+  "<IMG_CONTEXT>": 151667,
+  "<box>": 151672,
+  "<img>": 151665,
+  "<quad>": 151668,
+  "<ref>": 151670,
+  "<tool_call>": 151657,
+  "<|box_end|>": 151649,
+  "<|box_start|>": 151648,
+  "<|endoftext|>": 151643,
+  "<|file_sep|>": 151664,
+  "<|fim_middle|>": 151660,
+  "<|fim_pad|>": 151662,
+  "<|fim_prefix|>": 151659,
+  "<|fim_suffix|>": 151661,
+  "<|im_end|>": 151645,
+  "<|im_start|>": 151644,
+  "<|image_pad|>": 151655,
+  "<|object_ref_end|>": 151647,
+  "<|object_ref_start|>": 151646,
+  "<|quad_end|>": 151651,
+  "<|quad_start|>": 151650,
+  "<|repo_name|>": 151663,
+  "<|video_pad|>": 151656,
+  "<|vision_end|>": 151653,
+  "<|vision_pad|>": 151654,
+  "<|vision_start|>": 151652
+}
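A quick consistency check on the mapping above: the 31 added tokens occupy one contiguous ID block that stays just below the LLM `vocab_size` of 151674 reported in `config.json` below. A minimal sketch, assuming a local copy of the file:

```python
import json

# Sanity-check the added-token mapping: IDs should form one contiguous block
# below vocab_size (151674). "added_tokens.json" is an illustrative local path.
with open("added_tokens.json") as f:
    added = json.load(f)

ids = sorted(added.values())
assert ids == list(range(ids[0], ids[-1] + 1)), "IDs should be contiguous"
assert ids[-1] < 151674, "IDs must stay below vocab_size"
print(f"{len(added)} added tokens, IDs {ids[0]}..{ids[-1]}")  # 31, 151643..151673
```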
ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/args.json
ADDED
@@ -0,0 +1,385 @@
+{
+  "output_dir": "/mnt/data/users/liamding/data/MMMT/lora/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639",
+  "overwrite_output_dir": false,
+  "do_train": false,
+  "do_eval": false,
+  "do_predict": false,
+  "eval_strategy": "epoch",
+  "prediction_loss_only": false,
+  "per_device_train_batch_size": 2,
+  "per_device_eval_batch_size": 2,
+  "per_gpu_train_batch_size": null,
+  "per_gpu_eval_batch_size": null,
+  "gradient_accumulation_steps": 2,
+  "eval_accumulation_steps": null,
+  "eval_delay": 0,
+  "torch_empty_cache_steps": null,
+  "learning_rate": 2e-06,
+  "weight_decay": 0.01,
+  "adam_beta1": 0.9,
+  "adam_beta2": 0.95,
+  "adam_epsilon": 1e-08,
+  "max_grad_norm": 1.0,
+  "num_train_epochs": 5.0,
+  "max_steps": -1,
+  "lr_scheduler_type": "cosine",
+  "lr_scheduler_kwargs": null,
+  "warmup_ratio": 0.1,
+  "warmup_steps": 0,
+  "log_level": "passive",
+  "log_level_replica": "warning",
+  "log_on_each_node": true,
+  "logging_dir": "/mnt/data/users/liamding/data/MMMT/lora/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/runs",
+  "logging_strategy": "steps",
+  "logging_first_step": true,
+  "logging_steps": 5,
+  "logging_nan_inf_filter": true,
+  "save_strategy": "epoch",
+  "save_steps": 500,
+  "save_total_limit": 10,
+  "save_safetensors": true,
+  "save_on_each_node": false,
+  "save_only_model": false,
+  "restore_callback_states_from_checkpoint": false,
+  "no_cuda": false,
+  "use_cpu": false,
+  "use_mps_device": false,
+  "seed": 42,
+  "data_seed": 42,
+  "jit_mode_eval": false,
+  "use_ipex": false,
+  "bf16": true,
+  "fp16": false,
+  "fp16_opt_level": "O1",
+  "half_precision_backend": "auto",
+  "bf16_full_eval": false,
+  "fp16_full_eval": false,
+  "tf32": null,
+  "local_rank": 0,
+  "ddp_backend": null,
+  "tpu_num_cores": null,
+  "tpu_metrics_debug": false,
+  "debug": null,
+  "dataloader_drop_last": false,
+  "eval_steps": null,
+  "dataloader_num_workers": 4,
+  "dataloader_prefetch_factor": null,
+  "past_index": -1,
+  "run_name": "/mnt/data/users/liamding/data/MMMT/lora/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639",
+  "disable_tqdm": null,
+  "remove_unused_columns": true,
+  "label_names": null,
+  "load_best_model_at_end": true,
+  "metric_for_best_model": "eval_loss",
+  "greater_is_better": false,
+  "ignore_data_skip": false,
+  "fsdp": "",
+  "fsdp_min_num_params": 0,
+  "fsdp_config": null,
+  "fsdp_transformer_layer_cls_to_wrap": null,
+  "accelerator_config": {
+    "dispatch_batches": false
+  },
+  "deepspeed": {
+    "fp16": {
+      "enabled": "auto",
+      "loss_scale": 0,
+      "loss_scale_window": 1000,
+      "initial_scale_power": 16,
+      "hysteresis": 2,
+      "min_loss_scale": 1
+    },
+    "bf16": {
+      "enabled": "auto"
+    },
+    "zero_optimization": {
+      "stage": 3,
+      "offload_optimizer": {
+        "device": "none",
+        "pin_memory": true
+      },
+      "offload_param": {
+        "device": "none",
+        "pin_memory": true
+      },
+      "overlap_comm": false,
+      "contiguous_gradients": true,
+      "sub_group_size": 1000000000.0,
+      "reduce_bucket_size": "auto",
+      "zero_quantized_weights": false,
+      "zero_quantized_gradients": false,
+      "stage3_prefetch_bucket_size": "auto",
+      "stage3_param_persistence_threshold": "auto",
+      "stage3_max_live_parameters": 1000000000.0,
+      "stage3_max_reuse_distance": 1000000000.0,
+      "stage3_gather_16bit_weights_on_model_save": true
+    },
+    "gradient_accumulation_steps": "auto",
+    "gradient_clipping": "auto",
+    "steps_per_print": 2000,
+    "train_batch_size": "auto",
+    "train_micro_batch_size_per_gpu": "auto",
+    "wall_clock_breakdown": false
+  },
+  "label_smoothing_factor": 0.0,
+  "optim": "adamw_torch",
+  "optim_args": null,
+  "adafactor": false,
+  "group_by_length": false,
+  "length_column_name": "length",
+  "report_to": [
+    "swanlab"
+  ],
+  "ddp_find_unused_parameters": null,
+  "ddp_bucket_cap_mb": null,
+  "ddp_broadcast_buffers": null,
+  "dataloader_pin_memory": true,
+  "dataloader_persistent_workers": false,
+  "skip_memory_metrics": true,
+  "use_legacy_prediction_loop": false,
+  "push_to_hub": false,
+  "resume_from_checkpoint": null,
+  "hub_model_id": null,
+  "hub_strategy": "every_save",
+  "hub_token": null,
+  "hub_private_repo": null,
+  "hub_always_push": false,
+  "hub_revision": null,
+  "gradient_checkpointing": true,
+  "gradient_checkpointing_kwargs": null,
+  "include_inputs_for_metrics": false,
+  "include_for_metrics": [],
+  "eval_do_concat_batches": true,
+  "fp16_backend": "auto",
+  "push_to_hub_model_id": null,
+  "push_to_hub_organization": null,
+  "push_to_hub_token": null,
+  "mp_parameters": "",
+  "auto_find_batch_size": false,
+  "full_determinism": false,
+  "torchdynamo": null,
+  "ray_scope": "last",
+  "ddp_timeout": 18000000,
+  "torch_compile": false,
+  "torch_compile_backend": null,
+  "torch_compile_mode": null,
+  "include_tokens_per_second": false,
+  "include_num_input_tokens_seen": false,
+  "neftune_noise_alpha": null,
+  "optim_target_modules": null,
+  "batch_eval_metrics": false,
+  "eval_on_start": false,
+  "use_liger_kernel": false,
+  "liger_kernel_config": null,
+  "eval_use_gather_object": false,
+  "average_tokens_across_devices": true,
+  "sortish_sampler": false,
+  "predict_with_generate": false,
+  "generation_max_length": null,
+  "generation_num_beams": null,
+  "generation_config": null,
+  "tuner_backend": "peft",
+  "vit_gradient_checkpointing": null,
+  "router_aux_loss_coef": 0.0,
+  "enable_dft_loss": false,
+  "enable_channel_loss": false,
+  "check_model": true,
+  "acc_strategy": "token",
+  "train_dataloader_shuffle": true,
+  "max_epochs": null,
+  "aligner_lr": null,
+  "vit_lr": null,
+  "use_logits_to_keep": null,
+  "ds3_gather_for_generation": true,
+  "resume_only_model": false,
+  "optimizer": null,
+  "loss_type": null,
+  "metric": null,
+  "eval_use_evalscope": false,
+  "eval_dataset": [],
+  "eval_dataset_args": null,
+  "eval_limit": null,
+  "eval_generation_config": null,
+  "extra_eval_args": null,
+  "use_flash_ckpt": false,
+  "model": "/mnt/data/users/liamding/data/models/InternVL3-8B-Instruct",
+  "model_type": "internvl3",
+  "model_revision": null,
+  "task_type": "causal_lm",
+  "torch_dtype": "bfloat16",
+  "attn_impl": null,
+  "new_special_tokens": [],
+  "num_labels": null,
+  "problem_type": null,
+  "rope_scaling": null,
+  "device_map": null,
+  "max_memory": {},
+  "max_model_len": null,
+  "local_repo_path": null,
+  "init_strategy": null,
+  "template": "internvl2_5",
+  "system": null,
+  "max_length": 32768,
+  "truncation_strategy": "delete",
+  "max_pixels": null,
+  "agent_template": null,
+  "norm_bbox": null,
+  "use_chat_template": true,
+  "padding_free": false,
+  "padding_side": "right",
+  "loss_scale": "default",
+  "sequence_parallel_size": 1,
+  "response_prefix": null,
+  "template_backend": "swift",
+  "dataset": [
+    "/mnt/data/users/liamding/data/3AM_Plus/final/training/qvq-thinking_answer/ambi_normal_train_thinking_772.json",
+    "/mnt/data/users/liamding/data/3AM_Plus/final/training/qvq-thinking_answer/ambi_normal_aug_575.json"
+  ],
+  "val_dataset": [],
+  "split_dataset_ratio": 0.1,
+  "dataset_num_proc": 1,
+  "load_from_cache_file": true,
+  "dataset_shuffle": true,
+  "val_dataset_shuffle": false,
+  "streaming": false,
+  "interleave_prob": null,
+  "stopping_strategy": "first_exhausted",
+  "shuffle_buffer_size": 1000,
+  "download_mode": "reuse_dataset_if_exists",
+  "columns": {},
+  "strict": false,
+  "model_name": null,
+  "model_author": null,
+  "custom_dataset_info": [],
+  "quant_method": null,
+  "quant_bits": null,
+  "hqq_axis": null,
+  "bnb_4bit_compute_dtype": "bfloat16",
+  "bnb_4bit_quant_type": "nf4",
+  "bnb_4bit_use_double_quant": true,
+  "bnb_4bit_quant_storage": null,
+  "max_new_tokens": 64,
+  "temperature": 0.0,
+  "top_k": null,
+  "top_p": null,
+  "repetition_penalty": null,
+  "num_beams": 1,
+  "stream": false,
+  "stop_words": [],
+  "logprobs": false,
+  "top_logprobs": null,
+  "ckpt_dir": null,
+  "lora_modules": [],
+  "train_type": "full",
+  "adapters": [],
+  "external_plugins": [],
+  "model_kwargs": {},
+  "load_args": false,
+  "load_data_args": false,
+  "packing": false,
+  "packing_length": null,
+  "lazy_tokenize": true,
+  "cached_dataset": [],
+  "custom_register_path": [],
+  "use_hf": false,
+  "ignore_args_error": false,
+  "use_swift_lora": false,
+  "freeze_parameters": [
+    "vision_model",
+    "mlp1"
+  ],
+  "freeze_parameters_regex": null,
+  "freeze_parameters_ratio": 0.0,
+  "trainable_parameters": [],
+  "trainable_parameters_regex": null,
+  "freeze_llm": false,
+  "freeze_vit": true,
+  "freeze_aligner": true,
+  "target_modules": [
+    "all-linear"
+  ],
+  "target_regex": null,
+  "target_parameters": null,
+  "modules_to_save": [],
+  "lora_rank": 8,
+  "lora_alpha": 32,
+  "lora_dropout": 0.05,
+  "lora_bias": "none",
+  "lora_dtype": null,
+  "lorap_lr_ratio": null,
+  "use_rslora": false,
+  "use_dora": false,
+  "lora_ga_batch_size": 2,
+  "lora_ga_iters": 2,
+  "lora_ga_max_length": 1024,
+  "lora_ga_direction": "ArB2r",
+  "lora_ga_scale": "stable",
+  "lora_ga_stable_gamma": 16,
+  "init_weights": true,
+  "fourier_n_frequency": 2000,
+  "fourier_scaling": 300.0,
+  "boft_block_size": 4,
+  "boft_block_num": 0,
+  "boft_n_butterfly_factor": 1,
+  "boft_dropout": 0.0,
+  "vera_rank": 256,
+  "vera_projection_prng_key": 0,
+  "vera_dropout": 0.0,
+  "vera_d_initial": 0.1,
+  "adapter_act": "gelu",
+  "adapter_length": 128,
+  "use_galore": false,
+  "galore_target_modules": null,
+  "galore_rank": 128,
+  "galore_update_proj_gap": 50,
+  "galore_scale": 1.0,
+  "galore_proj_type": "std",
+  "galore_optim_per_parameter": false,
+  "galore_with_embedding": false,
+  "galore_quantization": false,
+  "galore_proj_quant": false,
+  "galore_proj_bits": 4,
+  "galore_proj_group_size": 256,
+  "galore_cos_threshold": 0.4,
+  "galore_gamma_proj": 2,
+  "galore_queue_size": 5,
+  "adalora_target_r": 8,
+  "adalora_init_r": 12,
+  "adalora_tinit": 0,
+  "adalora_tfinal": 0,
+  "adalora_deltaT": 1,
+  "adalora_beta1": 0.85,
+  "adalora_beta2": 0.85,
+  "adalora_orth_reg_weight": 0.5,
+  "llamapro_num_new_blocks": 4,
+  "llamapro_num_groups": null,
+  "lisa_activated_layers": 0,
+  "lisa_step_interval": 20,
+  "reft_layer_key": null,
+  "reft_layers": null,
+  "reft_rank": 4,
+  "reft_intervention_type": "LoreftIntervention",
+  "reft_args": null,
+  "swanlab_token": null,
+  "swanlab_project": null,
+  "swanlab_workspace": null,
+  "swanlab_exp_name": "/mnt/data/users/liamding/data/MMMT/lora/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639",
+  "swanlab_lark_webhook_url": null,
+  "swanlab_lark_secret": null,
+  "swanlab_mode": "cloud",
+  "add_version": true,
+  "create_checkpoint_symlink": false,
+  "zero_hpz_partition_size": null,
+  "deepspeed_autotp_size": null,
+  "early_stop_interval": 200,
+  "rank": 0,
+  "global_world_size": 4,
+  "local_world_size": 4,
+  "model_suffix": "InternVL3-8B-Instruct",
+  "model_info": "ModelInfo(model_type='internvl3', model_dir='/mnt/data/users/liamding/data/models/InternVL3-8B-Instruct', torch_dtype=torch.bfloat16, max_model_len=32768, quant_method=None, quant_bits=None, rope_scaling={'factor': 2.0, 'rope_type': 'dynamic', 'type': 'dynamic'}, is_moe_model=False, config=None, task_type='causal_lm', num_labels=None)",
+  "model_meta": "ModelMeta(model_type='internvl3', model_groups=[ModelGroup(models=[Model(ms_model_id='OpenGVLab/InternVL3-1B-Pretrained', hf_model_id='OpenGVLab/InternVL3-1B-Pretrained', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-2B-Pretrained', hf_model_id='OpenGVLab/InternVL3-2B-Pretrained', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-8B-Pretrained', hf_model_id='OpenGVLab/InternVL3-8B-Pretrained', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-9B-Pretrained', hf_model_id='OpenGVLab/InternVL3-9B-Pretrained', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-14B-Pretrained', hf_model_id='OpenGVLab/InternVL3-14B-Pretrained', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-38B-Pretrained', hf_model_id='OpenGVLab/InternVL3-38B-Pretrained', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-78B-Pretrained', hf_model_id='OpenGVLab/InternVL3-78B-Pretrained', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[]), ModelGroup(models=[Model(ms_model_id='OpenGVLab/InternVL3-1B-Instruct', hf_model_id='OpenGVLab/InternVL3-1B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-2B-Instruct', hf_model_id='OpenGVLab/InternVL3-2B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-8B-Instruct', hf_model_id='OpenGVLab/InternVL3-8B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-9B-Instruct', hf_model_id='OpenGVLab/InternVL3-9B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-14B-Instruct', hf_model_id='OpenGVLab/InternVL3-14B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-38B-Instruct', hf_model_id='OpenGVLab/InternVL3-38B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-78B-Instruct', hf_model_id='OpenGVLab/InternVL3-78B-Instruct', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[]), ModelGroup(models=[Model(ms_model_id='OpenGVLab/InternVL3-1B', hf_model_id='OpenGVLab/InternVL3-1B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-2B', hf_model_id='OpenGVLab/InternVL3-2B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-8B', hf_model_id='OpenGVLab/InternVL3-8B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-9B', hf_model_id='OpenGVLab/InternVL3-9B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-14B', hf_model_id='OpenGVLab/InternVL3-14B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-38B', hf_model_id='OpenGVLab/InternVL3-38B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-78B', hf_model_id='OpenGVLab/InternVL3-78B', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[]), ModelGroup(models=[Model(ms_model_id='OpenGVLab/InternVL3-1B-AWQ', hf_model_id='OpenGVLab/InternVL3-1B-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-2B-AWQ', hf_model_id='OpenGVLab/InternVL3-2B-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-8B-AWQ', hf_model_id='OpenGVLab/InternVL3-8B-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-9B-AWQ', hf_model_id='OpenGVLab/InternVL3-9B-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-14B-AWQ', hf_model_id='OpenGVLab/InternVL3-14B-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-38B-AWQ', hf_model_id='OpenGVLab/InternVL3-38B-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL3-78B-AWQ', hf_model_id='OpenGVLab/InternVL3-78B-AWQ', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[])], template='internvl2_5', get_function=<function get_model_tokenizer_internvl at 0x7f4e7f5f2950>, model_arch=MultiModelKeys(arch_name='internvl', embedding=None, module_list=None, lm_head=None, q_proj=None, k_proj=None, v_proj=None, o_proj=None, attention=None, mlp=None, down_proj=None, qkv_proj=None, qk_proj=None, qa_proj=None, qb_proj=None, kv_proj=None, kva_proj=None, kvb_proj=None, language_model=['language_model'], aligner=['mlp1'], vision_tower=['vision_model'], generator=[]), architectures=['InternVLChatModel'], additional_saved_files=[], torch_dtype=None, is_multimodal=True, is_reward=False, task_type=None, ignore_patterns=None, requires=['transformers>=4.37.2', 'timm'], tags=['vision', 'video'])",
+  "model_dir": "/mnt/data/users/liamding/data/models/InternVL3-8B-Instruct",
+  "hub": "<class 'swift.hub.hub.MSHub'>",
+  "evaluation_strategy": "epoch",
+  "training_args": "Seq2SeqTrainingArguments(output_dir='/mnt/data/users/liamding/data/MMMT/lora/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639', overwrite_output_dir=False, do_train=False, do_eval=True, do_predict=False, eval_strategy=<IntervalStrategy.EPOCH: 'epoch'>, prediction_loss_only=False, per_device_train_batch_size=2, per_device_eval_batch_size=2, per_gpu_train_batch_size=None, per_gpu_eval_batch_size=None, gradient_accumulation_steps=2, eval_accumulation_steps=None, eval_delay=0, torch_empty_cache_steps=None, learning_rate=2e-06, weight_decay=0.01, adam_beta1=0.9, adam_beta2=0.95, adam_epsilon=1e-08, max_grad_norm=1.0, num_train_epochs=5.0, max_steps=-1, lr_scheduler_type=<SchedulerType.COSINE: 'cosine'>, lr_scheduler_kwargs=None, warmup_ratio=0.1, warmup_steps=0, log_level='passive', log_level_replica='warning', log_on_each_node=True, logging_dir='/mnt/data/users/liamding/data/MMMT/lora/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/runs', logging_strategy=<IntervalStrategy.STEPS: 'steps'>, logging_first_step=True, logging_steps=5, logging_nan_inf_filter=True, save_strategy=<SaveStrategy.EPOCH: 'epoch'>, save_steps=500, save_total_limit=10, save_safetensors=True, save_on_each_node=False, save_only_model=False, restore_callback_states_from_checkpoint=False, no_cuda=False, use_cpu=False, use_mps_device=False, seed=42, data_seed=42, jit_mode_eval=False, use_ipex=False, bf16=True, fp16=False, fp16_opt_level='O1', half_precision_backend='auto', bf16_full_eval=False, fp16_full_eval=False, tf32=None, local_rank=0, ddp_backend=None, tpu_num_cores=None, tpu_metrics_debug=False, debug=[], dataloader_drop_last=False, eval_steps=None, dataloader_num_workers=4, dataloader_prefetch_factor=10, past_index=-1, run_name='/mnt/data/users/liamding/data/MMMT/lora/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639', disable_tqdm=False, remove_unused_columns=False, label_names=None, load_best_model_at_end=True, metric_for_best_model='eval_loss', greater_is_better=False, ignore_data_skip=False, fsdp=[], fsdp_min_num_params=0, fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, fsdp_transformer_layer_cls_to_wrap=None, accelerator_config=AcceleratorConfig(split_batches=False, dispatch_batches=False, even_batches=True, use_seedable_sampler=True, non_blocking=False, gradient_accumulation_kwargs=None, use_configured_state=False), deepspeed={'fp16': {'enabled': 'auto', 'loss_scale': 0, 'loss_scale_window': 1000, 'initial_scale_power': 16, 'hysteresis': 2, 'min_loss_scale': 1}, 'bf16': {'enabled': 'auto'}, 'zero_optimization': {'stage': 3, 'offload_optimizer': {'device': 'none', 'pin_memory': True}, 'offload_param': {'device': 'none', 'pin_memory': True}, 'overlap_comm': False, 'contiguous_gradients': True, 'sub_group_size': 1000000000.0, 'reduce_bucket_size': 'auto', 'zero_quantized_weights': False, 'zero_quantized_gradients': False, 'stage3_prefetch_bucket_size': 'auto', 'stage3_param_persistence_threshold': 'auto', 'stage3_max_live_parameters': 1000000000.0, 'stage3_max_reuse_distance': 1000000000.0, 'stage3_gather_16bit_weights_on_model_save': True}, 'gradient_accumulation_steps': 'auto', 'gradient_clipping': 'auto', 'steps_per_print': 2000, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'wall_clock_breakdown': False}, label_smoothing_factor=0.0, optim=<OptimizerNames.ADAMW_TORCH: 'adamw_torch'>, optim_args=None, adafactor=False, group_by_length=False, length_column_name='length', report_to=['swanlab'], ddp_find_unused_parameters=None, ddp_bucket_cap_mb=None, ddp_broadcast_buffers=None, dataloader_pin_memory=True, dataloader_persistent_workers=False, skip_memory_metrics=True, use_legacy_prediction_loop=False, push_to_hub=False, resume_from_checkpoint=None, hub_model_id=None, hub_strategy=<HubStrategy.EVERY_SAVE: 'every_save'>, hub_token=None, hub_private_repo=None, hub_always_push=False, hub_revision=None, gradient_checkpointing=True, gradient_checkpointing_kwargs=None, include_inputs_for_metrics=False, include_for_metrics=[], eval_do_concat_batches=True, fp16_backend='auto', push_to_hub_model_id=None, push_to_hub_organization=None, push_to_hub_token=None, mp_parameters='', auto_find_batch_size=False, full_determinism=False, torchdynamo=None, ray_scope='last', ddp_timeout=18000000, torch_compile=False, torch_compile_backend=None, torch_compile_mode=None, include_tokens_per_second=None, include_num_input_tokens_seen=None, neftune_noise_alpha=None, optim_target_modules=None, batch_eval_metrics=False, eval_on_start=False, use_liger_kernel=False, liger_kernel_config=None, eval_use_gather_object=False, average_tokens_across_devices=None, sortish_sampler=False, predict_with_generate=False, generation_max_length=None, generation_num_beams=None, generation_config=None, tuner_backend='peft', vit_gradient_checkpointing=True, router_aux_loss_coef=0.0, enable_dft_loss=False, enable_channel_loss=False, check_model=True, acc_strategy='token', train_dataloader_shuffle=True, max_epochs=None, aligner_lr=None, vit_lr=None, use_logits_to_keep=None, ds3_gather_for_generation=True, resume_only_model=False, optimizer=None, loss_type=None, metric=None, eval_use_evalscope=False, eval_dataset=[], eval_dataset_args=None, eval_limit=None, eval_generation_config=None, extra_eval_args=None, use_flash_ckpt=False, sft_alpha=0, train_type='full', local_repo_path=None, galore_config=None)"
+}
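The `freeze_parameters` list above pins the ViT (`vision_model`) and the aligner (`mlp1`) while the language model stays trainable (`freeze_llm: false`). An illustrative sketch of that name-prefix logic on a toy module; this is not ms-swift's actual implementation:

```python
import torch.nn as nn

# Illustrative sketch of the freezing implied by
# "freeze_parameters": ["vision_model", "mlp1"]: parameters whose names start
# with those prefixes get requires_grad=False; everything else stays trainable.
def freeze_by_prefix(model: nn.Module, prefixes=("vision_model", "mlp1")) -> None:
    for name, param in model.named_parameters():
        if name.startswith(prefixes):
            param.requires_grad = False

# Toy stand-in for the InternVL module layout (hypothetical shapes).
toy = nn.ModuleDict({
    "vision_model": nn.Linear(4, 4),
    "mlp1": nn.Linear(4, 4),
    "language_model": nn.Linear(4, 4),
})
freeze_by_prefix(toy)
print(sorted(n for n, p in toy.named_parameters() if p.requires_grad))
# ['language_model.bias', 'language_model.weight']
```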
ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/chat_template.jinja
ADDED
@@ -0,0 +1,54 @@
+{%- if tools %}
+    {{- '<|im_start|>system\n' }}
+    {%- if messages[0]['role'] == 'system' %}
+        {{- messages[0]['content'] }}
+    {%- else %}
+        {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}
+    {%- endif %}
+    {{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
+    {%- for tool in tools %}
+        {{- "\n" }}
+        {{- tool | tojson }}
+    {%- endfor %}
+    {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
+{%- else %}
+    {%- if messages[0]['role'] == 'system' %}
+        {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}
+    {%- else %}
+        {{- '<|im_start|>system\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\n' }}
+    {%- endif %}
+{%- endif %}
+{%- for message in messages %}
+    {%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %}
+        {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
+    {%- elif message.role == "assistant" %}
+        {{- '<|im_start|>' + message.role }}
+        {%- if message.content %}
+            {{- '\n' + message.content }}
+        {%- endif %}
+        {%- for tool_call in message.tool_calls %}
+            {%- if tool_call.function is defined %}
+                {%- set tool_call = tool_call.function %}
+            {%- endif %}
+            {{- '\n<tool_call>\n{"name": "' }}
+            {{- tool_call.name }}
+            {{- '", "arguments": ' }}
+            {{- tool_call.arguments | tojson }}
+            {{- '}\n</tool_call>' }}
+        {%- endfor %}
+        {{- '<|im_end|>\n' }}
+    {%- elif message.role == "tool" %}
+        {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
+            {{- '<|im_start|>user' }}
+        {%- endif %}
+        {{- '\n<tool_response>\n' }}
+        {{- message.content }}
+        {{- '\n</tool_response>' }}
+        {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
+            {{- '<|im_end|>\n' }}
+        {%- endif %}
+    {%- endif %}
+{%- endfor %}
+{%- if add_generation_prompt %}
+    {{- '<|im_start|>assistant\n' }}
+{%- endif %}
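The template above matches the stock Qwen2.5 chat template. A minimal sketch that renders it with plain `jinja2` against a toy conversation, assuming the file was saved locally as `chat_template.jinja` (with no tools, the tool-call branches are skipped):

```python
from jinja2 import Template

# Render the checkpoint's chat template to see the <|im_start|>/<|im_end|>
# framing it produces. "chat_template.jinja" is an illustrative local path.
with open("chat_template.jinja") as f:
    template = Template(f.read())

text = template.render(
    messages=[{"role": "user", "content": "Describe the image."}],
    add_generation_prompt=True,
)
print(text)
```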
ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/config.json
ADDED
@@ -0,0 +1,144 @@
{
  "architectures": [
    "InternVLChatModel"
  ],
  "auto_map": {
    "AutoConfig": "configuration_internvl_chat.InternVLChatConfig",
    "AutoModel": "modeling_internvl_chat.InternVLChatModel",
    "AutoModelForCausalLM": "modeling_internvl_chat.InternVLChatModel"
  },
  "downsample_ratio": 0.5,
  "dynamic_image_size": true,
  "force_image_size": 448,
  "hidden_size": 3584,
  "image_fold": null,
  "keys_to_ignore_at_inference": [
    "past_key_values"
  ],
  "llm_config": {
    "_name_or_path": "./pretrained/Qwen2.5-32B-Instruct",
    "architectures": [
      "Qwen2ForCausalLM"
    ],
    "attention_dropout": 0.0,
    "bos_token_id": 151643,
    "eos_token_id": 151643,
    "hidden_act": "silu",
    "hidden_size": 3584,
    "initializer_range": 0.02,
    "intermediate_size": 18944,
    "layer_types": [
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention"
    ],
    "max_position_embeddings": 32768,
    "max_window_layers": 70,
    "model_type": "qwen2",
    "moe_config": null,
    "num_attention_heads": 28,
    "num_hidden_layers": 28,
    "num_key_value_heads": 4,
    "pad_token_id": 151643,
    "rms_norm_eps": 1e-06,
    "rope_scaling": {
      "factor": 2.0,
      "rope_type": "dynamic",
      "type": "dynamic"
    },
    "rope_theta": 1000000.0,
    "sliding_window": null,
    "torch_dtype": "bfloat16",
    "use_bfloat16": true,
    "use_cache": false,
    "use_sliding_window": false,
    "vocab_size": 151674
  },
  "max_dynamic_patch": 12,
  "min_dynamic_patch": 1,
  "model_type": "internvl_chat",
  "output_attentions": false,
  "pad2square": false,
  "pad_token_id": 151643,
  "ps_version": "v2",
  "select_layer": -1,
  "template": "internvl2_5",
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": null,
  "use_backbone_lora": 0,
  "use_llm_lora": 0,
  "use_thumbnail": true,
  "vision_config": {
    "_name_or_path": "OpenGVLab/InternViT-6B-448px-V1-5",
    "architectures": [
      "InternVisionModel"
    ],
    "attention_dropout": 0.0,
    "auto_map": {
      "AutoConfig": "configuration_intern_vit.InternVisionConfig",
      "AutoModel": "modeling_intern_vit.InternVisionModel"
    },
    "capacity_factor": 1.2,
    "drop_path_rate": 0.0,
    "dropout": 0.0,
    "eval_capacity_factor": 1.4,
    "hidden_act": "gelu",
    "hidden_size": 1024,
    "image_size": 448,
    "initializer_factor": 0.1,
    "initializer_range": 1e-10,
    "intermediate_size": 4096,
    "laux_allreduce": "all_nodes",
    "layer_norm_eps": 1e-06,
    "model_type": "intern_vit_6b",
    "moe_coeff_ratio": 0.5,
    "moe_intermediate_size": 768,
    "moe_output_scale": 4.0,
    "noisy_gate_policy": "RSample_before",
    "norm_type": "layer_norm",
    "num_attention_heads": 16,
    "num_channels": 3,
    "num_experts": 8,
    "num_hidden_layers": 24,
    "num_routed_experts": 4,
    "num_shared_experts": 4,
    "pad_token_id": 151643,
    "patch_size": 14,
    "qk_normalization": false,
    "qkv_bias": true,
    "shared_expert_intermediate_size": 3072,
    "torch_dtype": "bfloat16",
    "use_bfloat16": true,
    "use_flash_attn": true,
    "use_moe": false,
    "use_residual": true,
    "use_rts": false,
    "use_weighted_residual": false
  }
}
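Because `auto_map` points at the bundled `configuration_internvl_chat.py` and `modeling_internvl_chat.py`, loading this checkpoint goes through `trust_remote_code`. A minimal sketch, assuming a hypothetical local checkpoint directory:

```python
import torch
from transformers import AutoModel, AutoTokenizer

path = "./checkpoint-228"  # hypothetical local directory holding this config.json
tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
model = AutoModel.from_pretrained(
    path,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16" above
    trust_remote_code=True,
).eval()
```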
ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/configuration_intern_vit.py
ADDED
@@ -0,0 +1,120 @@
# --------------------------------------------------------
# InternVL
# Copyright (c) 2024 OpenGVLab
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------

import os
from typing import Union

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging

logger = logging.get_logger(__name__)


class InternVisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`InternVisionModel`]. It is used to
    instantiate a vision encoder according to the specified arguments, defining the model architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_channels (`int`, *optional*, defaults to 3):
            Number of color channels in the input images (e.g., 3 for RGB).
        patch_size (`int`, *optional*, defaults to 14):
            The size (resolution) of each patch.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        qkv_bias (`bool`, *optional*, defaults to `False`):
            Whether to add a bias to the queries, keys and values in the self-attention layers.
        hidden_size (`int`, *optional*, defaults to 3200):
            Dimensionality of the encoder layers and the pooler layer.
        num_attention_heads (`int`, *optional*, defaults to 25):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 12800):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        qk_normalization (`bool`, *optional*, defaults to `True`):
            Whether to normalize the queries and keys in the self-attention layers.
        num_hidden_layers (`int`, *optional*, defaults to 48):
            Number of hidden layers in the Transformer encoder.
        use_flash_attn (`bool`, *optional*, defaults to `True`):
            Whether to use the flash attention mechanism.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-6):
            The epsilon used by the layer normalization layers.
        dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        drop_path_rate (`float`, *optional*, defaults to 0.0):
            Dropout rate for stochastic depth.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_factor (`float`, *optional*, defaults to 0.1):
            A factor for layer scale.
    """

    model_type = 'intern_vit_6b'

    def __init__(
            self,
            num_channels=3,
            patch_size=14,
            image_size=224,
            qkv_bias=False,
            hidden_size=3200,
            num_attention_heads=25,
            intermediate_size=12800,
            qk_normalization=True,
            num_hidden_layers=48,
            use_flash_attn=True,
            hidden_act='gelu',
            norm_type='rms_norm',
            layer_norm_eps=1e-6,
            dropout=0.0,
            drop_path_rate=0.0,
            attention_dropout=0.0,
            initializer_range=0.02,
            initializer_factor=0.1,
            **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.drop_path_rate = drop_path_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.norm_type = norm_type
        self.qkv_bias = qkv_bias
        self.qk_normalization = qk_normalization
        self.use_flash_attn = use_flash_attn

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig':
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if 'vision_config' in config_dict:
            config_dict = config_dict['vision_config']

        if 'model_type' in config_dict and hasattr(cls, 'model_type') and config_dict['model_type'] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
            )

        return cls.from_dict(config_dict, **kwargs)
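The `from_pretrained` override above descends into the `vision_config` key of a composite config, so the vision config can be read straight from the chat checkpoint. A short sketch, assuming the module is importable and reusing the hypothetical path from earlier:

```python
from configuration_intern_vit import InternVisionConfig

# get_config_dict() reads config.json; the override then narrows it to the
# nested "vision_config" block before building the config object.
vision_cfg = InternVisionConfig.from_pretrained("./checkpoint-228")
print(vision_cfg.image_size, vision_cfg.patch_size)  # 448 14 for this checkpoint
```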
ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/configuration_internvl_chat.py
ADDED
@@ -0,0 +1,97 @@
# --------------------------------------------------------
# InternVL
# Copyright (c) 2024 OpenGVLab
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------

import copy

from transformers import AutoConfig, LlamaConfig, Qwen2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging

from .configuration_intern_vit import InternVisionConfig

logger = logging.get_logger(__name__)


class InternVLChatConfig(PretrainedConfig):
    model_type = 'internvl_chat'
    is_composition = True

    def __init__(
            self,
            vision_config=None,
            llm_config=None,
            use_backbone_lora=0,
            use_llm_lora=0,
            select_layer=-1,
            force_image_size=None,
            downsample_ratio=0.5,
            template=None,
            dynamic_image_size=False,
            use_thumbnail=False,
            ps_version='v1',
            min_dynamic_patch=1,
            max_dynamic_patch=6,
            **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {'architectures': ['InternVisionModel']}
            logger.info('vision_config is None. Initializing the InternVisionConfig with default values.')

        if llm_config is None:
            llm_config = {'architectures': ['Qwen2ForCausalLM']}
            logger.info('llm_config is None. Initializing the llm_config with default values (`Qwen2Config`).')

        self.vision_config = InternVisionConfig(**vision_config)
        if llm_config.get('architectures')[0] == 'LlamaForCausalLM':
            self.llm_config = LlamaConfig(**llm_config)
        elif llm_config.get('architectures')[0] == 'Qwen2ForCausalLM':
            self.llm_config = Qwen2Config(**llm_config)
        else:
            raise ValueError('Unsupported architecture: {}'.format(llm_config.get('architectures')[0]))
        self.use_backbone_lora = use_backbone_lora
        self.use_llm_lora = use_llm_lora
        self.select_layer = select_layer
        self.force_image_size = force_image_size
        self.downsample_ratio = downsample_ratio
        self.template = template
        self.dynamic_image_size = dynamic_image_size
        self.use_thumbnail = use_thumbnail
        self.ps_version = ps_version  # pixel shuffle version
        self.min_dynamic_patch = min_dynamic_patch
        self.max_dynamic_patch = max_dynamic_patch
        # By default, we use tie_word_embeddings=False for models of all sizes.
        self.tie_word_embeddings = self.llm_config.tie_word_embeddings

        logger.info(f'vision_select_layer: {self.select_layer}')
        logger.info(f'ps_version: {self.ps_version}')
        logger.info(f'min_dynamic_patch: {self.min_dynamic_patch}')
        logger.info(f'max_dynamic_patch: {self.max_dynamic_patch}')

    def to_dict(self):
        """
        Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].

        Returns:
            `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
        """
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.to_dict()
        output['llm_config'] = self.llm_config.to_dict()
        output['model_type'] = self.__class__.model_type
        output['use_backbone_lora'] = self.use_backbone_lora
        output['use_llm_lora'] = self.use_llm_lora
        output['select_layer'] = self.select_layer
        output['force_image_size'] = self.force_image_size
        output['downsample_ratio'] = self.downsample_ratio
        output['template'] = self.template
        output['dynamic_image_size'] = self.dynamic_image_size
        output['use_thumbnail'] = self.use_thumbnail
        output['ps_version'] = self.ps_version
        output['min_dynamic_patch'] = self.min_dynamic_patch
        output['max_dynamic_patch'] = self.max_dynamic_patch

        return output
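A sketch of how the composite config round-trips, assuming the checkpoint's modules are importable as a package; the nested values here are illustrative, taken from the config.json above:

```python
from configuration_internvl_chat import InternVLChatConfig

cfg = InternVLChatConfig(
    vision_config={'architectures': ['InternVisionModel'], 'image_size': 448},
    llm_config={'architectures': ['Qwen2ForCausalLM'], 'hidden_size': 3584},
    template='internvl2_5',
    dynamic_image_size=True,
)
d = cfg.to_dict()  # re-expands the nested vision/llm configs as plain dicts
assert d['vision_config']['image_size'] == 448
assert d['llm_config']['hidden_size'] == 3584
```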
ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/conversation.py
ADDED
@@ -0,0 +1,391 @@
"""
Conversation prompt templates.

We kindly request that you import fastchat instead of copying this file if you wish to use it.
If you have changes in mind, please contribute back so the community can benefit collectively and continue to maintain these valuable templates.

Modified from https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py
"""

import dataclasses
from enum import IntEnum, auto
from typing import Dict, List, Tuple, Union


class SeparatorStyle(IntEnum):
    """Separator styles."""

    ADD_COLON_SINGLE = auto()
    ADD_COLON_TWO = auto()
    ADD_COLON_SPACE_SINGLE = auto()
    NO_COLON_SINGLE = auto()
    NO_COLON_TWO = auto()
    ADD_NEW_LINE_SINGLE = auto()
    LLAMA2 = auto()
    CHATGLM = auto()
    CHATML = auto()
    CHATINTERN = auto()
    DOLLY = auto()
    RWKV = auto()
    PHOENIX = auto()
    ROBIN = auto()
    FALCON_CHAT = auto()
    CHATGLM3 = auto()
    INTERNVL_ZH = auto()
    MPT = auto()


@dataclasses.dataclass
class Conversation:
    """A class that manages prompt templates and keeps all conversation history."""

    # The name of this template
    name: str
    # The template of the system prompt
    system_template: str = '{system_message}'
    # The system message
    system_message: str = ''
    # The names of two roles
    roles: Tuple[str] = ('USER', 'ASSISTANT')
    # All messages. Each item is (role, message).
    messages: List[List[str]] = ()
    # The number of few shot examples
    offset: int = 0
    # The separator style and configurations
    sep_style: SeparatorStyle = SeparatorStyle.ADD_COLON_SINGLE
    sep: str = '\n'
    sep2: str = None
    # Stop criteria (the default one is EOS token)
    stop_str: Union[str, List[str]] = None
    # Stops generation if meeting any token in this list
    stop_token_ids: List[int] = None

    def get_prompt(self) -> str:
        """Get the prompt for generation."""
        system_prompt = self.system_template.format(system_message=self.system_message)
        if self.sep_style == SeparatorStyle.ADD_COLON_SINGLE:
            ret = system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + ': ' + message + self.sep
                else:
                    ret += role + ':'
            return ret
        elif self.sep_style == SeparatorStyle.ADD_COLON_TWO:
            seps = [self.sep, self.sep2]
            ret = system_prompt + seps[0]
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + ': ' + message + seps[i % 2]
                else:
                    ret += role + ':'
            return ret
        elif self.sep_style == SeparatorStyle.ADD_COLON_SPACE_SINGLE:
            ret = system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + ': ' + message + self.sep
                else:
                    ret += role + ': '  # must end with a space
            return ret
        elif self.sep_style == SeparatorStyle.ADD_NEW_LINE_SINGLE:
            ret = '' if system_prompt == '' else system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + '\n' + message + self.sep
                else:
                    ret += role + '\n'
            return ret
        elif self.sep_style == SeparatorStyle.NO_COLON_SINGLE:
            ret = system_prompt
            for role, message in self.messages:
                if message:
                    ret += role + message + self.sep
                else:
                    ret += role
            return ret
        elif self.sep_style == SeparatorStyle.NO_COLON_TWO:
            seps = [self.sep, self.sep2]
            ret = system_prompt
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + message + seps[i % 2]
                else:
                    ret += role
            return ret
        elif self.sep_style == SeparatorStyle.RWKV:
            ret = system_prompt
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += (
                        role
                        + ': '
                        + message.replace('\r\n', '\n').replace('\n\n', '\n')
                    )
                    ret += '\n\n'
                else:
                    ret += role + ':'
            return ret
        elif self.sep_style == SeparatorStyle.LLAMA2:
            seps = [self.sep, self.sep2]
            if self.system_message:
                ret = system_prompt
            else:
                ret = '[INST] '
            for i, (role, message) in enumerate(self.messages):
                tag = self.roles[i % 2]
                if message:
                    if i == 0:
                        ret += message + ' '
                    else:
                        ret += tag + ' ' + message + seps[i % 2]
                else:
                    ret += tag
            return ret
        elif self.sep_style == SeparatorStyle.CHATGLM:
            # source: https://huggingface.co/THUDM/chatglm-6b/blob/1d240ba371910e9282298d4592532d7f0f3e9f3e/modeling_chatglm.py#L1302-L1308
            # source2: https://huggingface.co/THUDM/chatglm2-6b/blob/e186c891cf64310ac66ef10a87e6635fa6c2a579/modeling_chatglm.py#L926
            round_add_n = 1 if self.name == 'chatglm2' else 0
            if system_prompt:
                ret = system_prompt + self.sep
            else:
                ret = ''

            for i, (role, message) in enumerate(self.messages):
                if i % 2 == 0:
                    ret += f'[Round {i//2 + round_add_n}]{self.sep}'

                if message:
                    ret += f'{role}:{message}{self.sep}'
                else:
                    ret += f'{role}:'
            return ret
        elif self.sep_style == SeparatorStyle.CHATML:
            ret = '' if system_prompt == '' else system_prompt + self.sep + '\n'
            for role, message in self.messages:
                if message:
                    ret += role + '\n' + message + self.sep + '\n'
                else:
                    ret += role + '\n'
            return ret
        elif self.sep_style == SeparatorStyle.CHATGLM3:
            ret = ''
            if self.system_message:
                ret += system_prompt
            for role, message in self.messages:
                if message:
                    ret += role + '\n' + ' ' + message
                else:
                    ret += role
            return ret
        elif self.sep_style == SeparatorStyle.CHATINTERN:
            # source: https://huggingface.co/internlm/internlm-chat-7b-8k/blob/bd546fa984b4b0b86958f56bf37f94aa75ab8831/modeling_internlm.py#L771
            seps = [self.sep, self.sep2]
            ret = system_prompt
            for i, (role, message) in enumerate(self.messages):
                # if i % 2 == 0:
                #     ret += "<s>"
                if message:
                    ret += role + ':' + message + seps[i % 2] + '\n'
                else:
                    ret += role + ':'
            return ret
        elif self.sep_style == SeparatorStyle.DOLLY:
            seps = [self.sep, self.sep2]
            ret = system_prompt
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + ':\n' + message + seps[i % 2]
                    if i % 2 == 1:
                        ret += '\n\n'
                else:
                    ret += role + ':\n'
            return ret
        elif self.sep_style == SeparatorStyle.PHOENIX:
            ret = system_prompt
            for role, message in self.messages:
                if message:
                    ret += role + ': ' + '<s>' + message + '</s>'
                else:
                    ret += role + ': ' + '<s>'
            return ret
        elif self.sep_style == SeparatorStyle.ROBIN:
            ret = system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + ':\n' + message + self.sep
                else:
                    ret += role + ':\n'
            return ret
        elif self.sep_style == SeparatorStyle.FALCON_CHAT:
            ret = ''
            if self.system_message:
                ret += system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + ': ' + message + self.sep
                else:
                    ret += role + ':'

            return ret
        elif self.sep_style == SeparatorStyle.INTERNVL_ZH:
            seps = [self.sep, self.sep2]
            ret = self.system_message + seps[0]
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + ': ' + message + seps[i % 2]
                else:
                    ret += role + ':'
            return ret
        elif self.sep_style == SeparatorStyle.MPT:
            ret = system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    if type(message) is tuple:
                        message, _, _ = message
                    ret += role + message + self.sep
                else:
                    ret += role
            return ret
        else:
            raise ValueError(f'Invalid style: {self.sep_style}')

    def set_system_message(self, system_message: str):
        """Set the system message."""
        self.system_message = system_message

    def append_message(self, role: str, message: str):
        """Append a new message."""
        self.messages.append([role, message])

    def update_last_message(self, message: str):
        """Update the last output.

        The last message is typically set to be None when constructing the prompt,
        so we need to update it in-place after getting the response from a model.
        """
        self.messages[-1][1] = message

    def to_gradio_chatbot(self):
        """Convert the conversation to gradio chatbot format."""
        ret = []
        for i, (role, msg) in enumerate(self.messages[self.offset:]):
            if i % 2 == 0:
                ret.append([msg, None])
            else:
                ret[-1][-1] = msg
        return ret

    def to_openai_api_messages(self):
        """Convert the conversation to OpenAI chat completion format."""
        ret = [{'role': 'system', 'content': self.system_message}]

        for i, (_, msg) in enumerate(self.messages[self.offset:]):
            if i % 2 == 0:
                ret.append({'role': 'user', 'content': msg})
            else:
                if msg is not None:
                    ret.append({'role': 'assistant', 'content': msg})
        return ret

    def copy(self):
        return Conversation(
            name=self.name,
            system_template=self.system_template,
            system_message=self.system_message,
            roles=self.roles,
            messages=[[x, y] for x, y in self.messages],
            offset=self.offset,
            sep_style=self.sep_style,
            sep=self.sep,
            sep2=self.sep2,
            stop_str=self.stop_str,
            stop_token_ids=self.stop_token_ids,
        )

    def dict(self):
        return {
            'template_name': self.name,
            'system_message': self.system_message,
            'roles': self.roles,
            'messages': self.messages,
            'offset': self.offset,
        }


# A global registry for all conversation templates
conv_templates: Dict[str, Conversation] = {}


def register_conv_template(template: Conversation, override: bool = False):
    """Register a new conversation template."""
    if not override:
        assert (
            template.name not in conv_templates
        ), f'{template.name} has been registered.'

    conv_templates[template.name] = template


def get_conv_template(name: str) -> Conversation:
    """Get a conversation template."""
    return conv_templates[name].copy()


# Both Hermes-2 and internlm2-chat are chatml-format conversation templates. The difference
# is that during training, the preprocessing function for the Hermes-2 template doesn't add
# <s> at the beginning of the tokenized sequence, while the internlm2-chat template does.
# Therefore, they are completely equivalent during inference.
register_conv_template(
    Conversation(
        name='Hermes-2',
        system_template='<|im_start|>system\n{system_message}',
        # note: The new system prompt was not used here to avoid changes in benchmark performance.
        # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。',
        system_message='你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
        roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
        sep_style=SeparatorStyle.MPT,
        sep='<|im_end|>',
        stop_str='<|endoftext|>',
    )
)


register_conv_template(
    Conversation(
        name='internlm2-chat',
        system_template='<|im_start|>system\n{system_message}',
        # note: The new system prompt was not used here to avoid changes in benchmark performance.
        # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。',
        system_message='你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
        roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
        sep_style=SeparatorStyle.MPT,
        sep='<|im_end|>',
    )
)


register_conv_template(
    Conversation(
        name='phi3-chat',
        system_template='<|system|>\n{system_message}',
        # note: The new system prompt was not used here to avoid changes in benchmark performance.
        # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。',
        system_message='你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
        roles=('<|user|>\n', '<|assistant|>\n'),
        sep_style=SeparatorStyle.MPT,
        sep='<|end|>',
    )
)


register_conv_template(
    Conversation(
        name='internvl2_5',
        system_template='<|im_start|>system\n{system_message}',
        system_message='你是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。',
        roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
        sep_style=SeparatorStyle.MPT,
        sep='<|im_end|>\n',
    )
)
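A short sketch of building a prompt from the registry above, using the `internvl2_5` template registered last; `get_conv_template` returns a copy, so the registered template itself is never mutated:

```python
from conversation import get_conv_template

conv = get_conv_template('internvl2_5')
conv.append_message(conv.roles[0], 'Describe the image in detail.')
conv.append_message(conv.roles[1], None)  # None leaves the assistant turn open
prompt = conv.get_prompt()
# MPT style: system prompt + '<|im_end|>\n', then
# '<|im_start|>user\nDescribe the image in detail.<|im_end|>\n',
# then the open '<|im_start|>assistant\n' header.
```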
ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/generation_config.json
ADDED
@@ -0,0 +1,5 @@
{
  "_from_model_config": true,
  "eos_token_id": 151645,
  "transformers_version": "4.55.4"
}
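Note that the `eos_token_id` here (151645) differs from the one in `llm_config` above (151643): generation stops on the ChatML end-of-turn token rather than the base end-of-text token (in Qwen2-style tokenizers, 151643 is `<|endoftext|>` and 151645 is `<|im_end|>`). A sketch of inspecting it, with a placeholder path:

```python
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("./checkpoint-228")  # hypothetical path
print(gen_cfg.eos_token_id)  # 151645
```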
ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/latest
ADDED
@@ -0,0 +1 @@
global_step228
ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/merges.txt
ADDED
The diff for this file is too large to render.
ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/model-00001-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6fe38424d305b0e19c17661cbe165f0411248b1dd0c1bf3ff7e6aa9251287747
size 4991123960
ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/model-00004-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2da8427b89bf7fa298faaf21f388d4479c15ec668ee8900059c1d72e7ab3b9bc
size 1142280864
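The two entries above are Git LFS pointer files, not the weights themselves: each records the LFS spec version, the SHA-256 of the real shard, and its size in bytes. A minimal sketch of parsing one (Python 3.9+ for `str.removeprefix`):

```python
def parse_lfs_pointer(text: str) -> dict:
    """Split a Git LFS pointer into its version / oid / size fields."""
    fields = dict(line.split(' ', 1) for line in text.strip().splitlines())
    return {
        'version': fields['version'],
        'sha256': fields['oid'].removeprefix('sha256:'),
        'size': int(fields['size']),
    }

ptr = parse_lfs_pointer(
    'version https://git-lfs.github.com/spec/v1\n'
    'oid sha256:2da8427b89bf7fa298faaf21f388d4479c15ec668ee8900059c1d72e7ab3b9bc\n'
    'size 1142280864\n'
)
assert ptr['size'] == 1142280864  # ~1.1 GB shard
```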
ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/model.safetensors.index.json
ADDED
@@ -0,0 +1,693 @@
{
  "metadata": {
    "total_parameters": 719360,
    "total_size": 15888747520
  },
  "weight_map": {
    "language_model.lm_head.weight": "model-00004-of-00004.safetensors",
    "language_model.model.embed_tokens.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.0.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
    "language_model.model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.0.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
    "language_model.model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.0.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
    "language_model.model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.1.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
    "language_model.model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.1.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
    "language_model.model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.1.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
    "language_model.model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.10.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "language_model.model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.10.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "language_model.model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.10.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
    "language_model.model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.11.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "language_model.model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.11.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "language_model.model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.11.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
    "language_model.model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.12.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "language_model.model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.12.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "language_model.model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.12.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
    "language_model.model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.13.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "language_model.model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.13.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "language_model.model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.13.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
    "language_model.model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.14.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "language_model.model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.14.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "language_model.model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.14.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
    "language_model.model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.15.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "language_model.model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.15.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "language_model.model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.15.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
    "language_model.model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.16.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "language_model.model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.16.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "language_model.model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.16.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
    "language_model.model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.17.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.17.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.17.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.17.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "language_model.model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.17.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "language_model.model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.17.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
    "language_model.model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "language_model.model.layers.18.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.18.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.18.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.18.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.18.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.18.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
    "language_model.model.layers.18.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.18.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.18.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
    "language_model.model.layers.18.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.18.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
    "language_model.model.layers.18.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.19.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.19.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.19.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.19.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
    "language_model.model.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.19.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.19.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
    "language_model.model.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.19.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
    "language_model.model.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.2.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
    "language_model.model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.2.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
    "language_model.model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.2.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
    "language_model.model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "language_model.model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.20.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.20.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
    "language_model.model.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.20.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.20.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
    "language_model.model.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.20.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
    "language_model.model.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.21.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
    "language_model.model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.21.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
    "language_model.model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.21.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
    "language_model.model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.22.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
    "language_model.model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.22.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
    "language_model.model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.22.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
    "language_model.model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.23.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
    "language_model.model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.23.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
    "language_model.model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "language_model.model.layers.23.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 212 |
+
"language_model.model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 213 |
+
"language_model.model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 214 |
+
"language_model.model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 215 |
+
"language_model.model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 216 |
+
"language_model.model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 217 |
+
"language_model.model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 218 |
+
"language_model.model.layers.24.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 219 |
+
"language_model.model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 220 |
+
"language_model.model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 221 |
+
"language_model.model.layers.24.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 222 |
+
"language_model.model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 223 |
+
"language_model.model.layers.24.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 224 |
+
"language_model.model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 225 |
+
"language_model.model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 226 |
+
"language_model.model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 227 |
+
"language_model.model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 228 |
+
"language_model.model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 229 |
+
"language_model.model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 230 |
+
"language_model.model.layers.25.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 231 |
+
"language_model.model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 232 |
+
"language_model.model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 233 |
+
"language_model.model.layers.25.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 234 |
+
"language_model.model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 235 |
+
"language_model.model.layers.25.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 236 |
+
"language_model.model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 237 |
+
"language_model.model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 238 |
+
"language_model.model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 239 |
+
"language_model.model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 240 |
+
"language_model.model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 241 |
+
"language_model.model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 242 |
+
"language_model.model.layers.26.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 243 |
+
"language_model.model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 244 |
+
"language_model.model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 245 |
+
"language_model.model.layers.26.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 246 |
+
"language_model.model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 247 |
+
"language_model.model.layers.26.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 248 |
+
"language_model.model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 249 |
+
"language_model.model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 250 |
+
"language_model.model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 251 |
+
"language_model.model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 252 |
+
"language_model.model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 253 |
+
"language_model.model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 254 |
+
"language_model.model.layers.27.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 255 |
+
"language_model.model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 256 |
+
"language_model.model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 257 |
+
"language_model.model.layers.27.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 258 |
+
"language_model.model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 259 |
+
"language_model.model.layers.27.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 260 |
+
"language_model.model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 261 |
+
"language_model.model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 262 |
+
"language_model.model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 263 |
+
"language_model.model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 264 |
+
"language_model.model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 265 |
+
"language_model.model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 266 |
+
"language_model.model.layers.3.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 267 |
+
"language_model.model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 268 |
+
"language_model.model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
| 269 |
+
"language_model.model.layers.3.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 270 |
+
"language_model.model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 271 |
+
"language_model.model.layers.3.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 272 |
+
"language_model.model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 273 |
+
"language_model.model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 274 |
+
"language_model.model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 275 |
+
"language_model.model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 276 |
+
"language_model.model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 277 |
+
"language_model.model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 278 |
+
"language_model.model.layers.4.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 279 |
+
"language_model.model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 280 |
+
"language_model.model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
| 281 |
+
"language_model.model.layers.4.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 282 |
+
"language_model.model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 283 |
+
"language_model.model.layers.4.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 284 |
+
"language_model.model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 285 |
+
"language_model.model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 286 |
+
"language_model.model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 287 |
+
"language_model.model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 288 |
+
"language_model.model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 289 |
+
"language_model.model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 290 |
+
"language_model.model.layers.5.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 291 |
+
"language_model.model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 292 |
+
"language_model.model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
| 293 |
+
"language_model.model.layers.5.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 294 |
+
"language_model.model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 295 |
+
"language_model.model.layers.5.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 296 |
+
"language_model.model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 297 |
+
"language_model.model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 298 |
+
"language_model.model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 299 |
+
"language_model.model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 300 |
+
"language_model.model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 301 |
+
"language_model.model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 302 |
+
"language_model.model.layers.6.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 303 |
+
"language_model.model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 304 |
+
"language_model.model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
| 305 |
+
"language_model.model.layers.6.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 306 |
+
"language_model.model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 307 |
+
"language_model.model.layers.6.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 308 |
+
"language_model.model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 309 |
+
"language_model.model.layers.7.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 310 |
+
"language_model.model.layers.7.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
| 311 |
+
"language_model.model.layers.7.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
| 312 |
+
"language_model.model.layers.7.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
| 313 |
+
"language_model.model.layers.7.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 314 |
+
"language_model.model.layers.7.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 315 |
+
"language_model.model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 316 |
+
"language_model.model.layers.7.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
| 317 |
+
"language_model.model.layers.7.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 318 |
+
"language_model.model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 319 |
+
"language_model.model.layers.7.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 320 |
+
"language_model.model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 321 |
+
"language_model.model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 322 |
+
"language_model.model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
| 323 |
+
"language_model.model.layers.8.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
| 324 |
+
"language_model.model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
| 325 |
+
"language_model.model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 326 |
+
"language_model.model.layers.8.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
|
| 327 |
+
"language_model.model.layers.8.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
| 328 |
+
"language_model.model.layers.8.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
| 329 |
+
"language_model.model.layers.8.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
|
| 330 |
+
"language_model.model.layers.8.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
| 331 |
+
"language_model.model.layers.8.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
| 332 |
+
"language_model.model.layers.8.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
| 333 |
+
"language_model.model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 334 |
+
"language_model.model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
| 335 |
+
"language_model.model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
| 336 |
+
"language_model.model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
| 337 |
+
"language_model.model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 338 |
+
"language_model.model.layers.9.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
|
| 339 |
+
"language_model.model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
| 340 |
+
"language_model.model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
| 341 |
+
"language_model.model.layers.9.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
|
| 342 |
+
"language_model.model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
| 343 |
+
"language_model.model.layers.9.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
| 344 |
+
"language_model.model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
| 345 |
+
"language_model.model.norm.weight": "model-00003-of-00004.safetensors",
|
| 346 |
+
"mlp1.0.bias": "model-00004-of-00004.safetensors",
|
| 347 |
+
"mlp1.0.weight": "model-00004-of-00004.safetensors",
|
| 348 |
+
"mlp1.1.bias": "model-00004-of-00004.safetensors",
|
| 349 |
+
"mlp1.1.weight": "model-00004-of-00004.safetensors",
|
| 350 |
+
"mlp1.3.bias": "model-00004-of-00004.safetensors",
|
| 351 |
+
"mlp1.3.weight": "model-00004-of-00004.safetensors",
|
| 352 |
+
"vision_model.embeddings.class_embedding": "model-00001-of-00004.safetensors",
|
| 353 |
+
"vision_model.embeddings.patch_embedding.bias": "model-00001-of-00004.safetensors",
|
| 354 |
+
"vision_model.embeddings.patch_embedding.weight": "model-00001-of-00004.safetensors",
|
| 355 |
+
"vision_model.embeddings.position_embedding": "model-00001-of-00004.safetensors",
|
| 356 |
+
"vision_model.encoder.layers.0.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 357 |
+
"vision_model.encoder.layers.0.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 358 |
+
"vision_model.encoder.layers.0.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 359 |
+
"vision_model.encoder.layers.0.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 360 |
+
"vision_model.encoder.layers.0.ls1": "model-00001-of-00004.safetensors",
|
| 361 |
+
"vision_model.encoder.layers.0.ls2": "model-00001-of-00004.safetensors",
|
| 362 |
+
"vision_model.encoder.layers.0.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 363 |
+
"vision_model.encoder.layers.0.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 364 |
+
"vision_model.encoder.layers.0.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 365 |
+
"vision_model.encoder.layers.0.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 366 |
+
"vision_model.encoder.layers.0.norm1.bias": "model-00001-of-00004.safetensors",
|
| 367 |
+
"vision_model.encoder.layers.0.norm1.weight": "model-00001-of-00004.safetensors",
|
| 368 |
+
"vision_model.encoder.layers.0.norm2.bias": "model-00001-of-00004.safetensors",
|
| 369 |
+
"vision_model.encoder.layers.0.norm2.weight": "model-00001-of-00004.safetensors",
|
| 370 |
+
"vision_model.encoder.layers.1.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 371 |
+
"vision_model.encoder.layers.1.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 372 |
+
"vision_model.encoder.layers.1.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 373 |
+
"vision_model.encoder.layers.1.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 374 |
+
"vision_model.encoder.layers.1.ls1": "model-00001-of-00004.safetensors",
|
| 375 |
+
"vision_model.encoder.layers.1.ls2": "model-00001-of-00004.safetensors",
|
| 376 |
+
"vision_model.encoder.layers.1.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 377 |
+
"vision_model.encoder.layers.1.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 378 |
+
"vision_model.encoder.layers.1.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 379 |
+
"vision_model.encoder.layers.1.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 380 |
+
"vision_model.encoder.layers.1.norm1.bias": "model-00001-of-00004.safetensors",
|
| 381 |
+
"vision_model.encoder.layers.1.norm1.weight": "model-00001-of-00004.safetensors",
|
| 382 |
+
"vision_model.encoder.layers.1.norm2.bias": "model-00001-of-00004.safetensors",
|
| 383 |
+
"vision_model.encoder.layers.1.norm2.weight": "model-00001-of-00004.safetensors",
|
| 384 |
+
"vision_model.encoder.layers.10.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 385 |
+
"vision_model.encoder.layers.10.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 386 |
+
"vision_model.encoder.layers.10.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 387 |
+
"vision_model.encoder.layers.10.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 388 |
+
"vision_model.encoder.layers.10.ls1": "model-00001-of-00004.safetensors",
|
| 389 |
+
"vision_model.encoder.layers.10.ls2": "model-00001-of-00004.safetensors",
|
| 390 |
+
"vision_model.encoder.layers.10.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 391 |
+
"vision_model.encoder.layers.10.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 392 |
+
"vision_model.encoder.layers.10.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 393 |
+
"vision_model.encoder.layers.10.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 394 |
+
"vision_model.encoder.layers.10.norm1.bias": "model-00001-of-00004.safetensors",
|
| 395 |
+
"vision_model.encoder.layers.10.norm1.weight": "model-00001-of-00004.safetensors",
|
| 396 |
+
"vision_model.encoder.layers.10.norm2.bias": "model-00001-of-00004.safetensors",
|
| 397 |
+
"vision_model.encoder.layers.10.norm2.weight": "model-00001-of-00004.safetensors",
|
| 398 |
+
"vision_model.encoder.layers.11.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 399 |
+
"vision_model.encoder.layers.11.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 400 |
+
"vision_model.encoder.layers.11.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 401 |
+
"vision_model.encoder.layers.11.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 402 |
+
"vision_model.encoder.layers.11.ls1": "model-00001-of-00004.safetensors",
|
| 403 |
+
"vision_model.encoder.layers.11.ls2": "model-00001-of-00004.safetensors",
|
| 404 |
+
"vision_model.encoder.layers.11.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 405 |
+
"vision_model.encoder.layers.11.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 406 |
+
"vision_model.encoder.layers.11.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 407 |
+
"vision_model.encoder.layers.11.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 408 |
+
"vision_model.encoder.layers.11.norm1.bias": "model-00001-of-00004.safetensors",
|
| 409 |
+
"vision_model.encoder.layers.11.norm1.weight": "model-00001-of-00004.safetensors",
|
| 410 |
+
"vision_model.encoder.layers.11.norm2.bias": "model-00001-of-00004.safetensors",
|
| 411 |
+
"vision_model.encoder.layers.11.norm2.weight": "model-00001-of-00004.safetensors",
|
| 412 |
+
"vision_model.encoder.layers.12.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 413 |
+
"vision_model.encoder.layers.12.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 414 |
+
"vision_model.encoder.layers.12.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 415 |
+
"vision_model.encoder.layers.12.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 416 |
+
"vision_model.encoder.layers.12.ls1": "model-00001-of-00004.safetensors",
|
| 417 |
+
"vision_model.encoder.layers.12.ls2": "model-00001-of-00004.safetensors",
|
| 418 |
+
"vision_model.encoder.layers.12.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 419 |
+
"vision_model.encoder.layers.12.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 420 |
+
"vision_model.encoder.layers.12.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 421 |
+
"vision_model.encoder.layers.12.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 422 |
+
"vision_model.encoder.layers.12.norm1.bias": "model-00001-of-00004.safetensors",
|
| 423 |
+
"vision_model.encoder.layers.12.norm1.weight": "model-00001-of-00004.safetensors",
|
| 424 |
+
"vision_model.encoder.layers.12.norm2.bias": "model-00001-of-00004.safetensors",
|
| 425 |
+
"vision_model.encoder.layers.12.norm2.weight": "model-00001-of-00004.safetensors",
|
| 426 |
+
"vision_model.encoder.layers.13.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 427 |
+
"vision_model.encoder.layers.13.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 428 |
+
"vision_model.encoder.layers.13.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 429 |
+
"vision_model.encoder.layers.13.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 430 |
+
"vision_model.encoder.layers.13.ls1": "model-00001-of-00004.safetensors",
|
| 431 |
+
"vision_model.encoder.layers.13.ls2": "model-00001-of-00004.safetensors",
|
| 432 |
+
"vision_model.encoder.layers.13.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 433 |
+
"vision_model.encoder.layers.13.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 434 |
+
"vision_model.encoder.layers.13.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 435 |
+
"vision_model.encoder.layers.13.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 436 |
+
"vision_model.encoder.layers.13.norm1.bias": "model-00001-of-00004.safetensors",
|
| 437 |
+
"vision_model.encoder.layers.13.norm1.weight": "model-00001-of-00004.safetensors",
|
| 438 |
+
"vision_model.encoder.layers.13.norm2.bias": "model-00001-of-00004.safetensors",
|
| 439 |
+
"vision_model.encoder.layers.13.norm2.weight": "model-00001-of-00004.safetensors",
|
| 440 |
+
"vision_model.encoder.layers.14.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 441 |
+
"vision_model.encoder.layers.14.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 442 |
+
"vision_model.encoder.layers.14.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 443 |
+
"vision_model.encoder.layers.14.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 444 |
+
"vision_model.encoder.layers.14.ls1": "model-00001-of-00004.safetensors",
|
| 445 |
+
"vision_model.encoder.layers.14.ls2": "model-00001-of-00004.safetensors",
|
| 446 |
+
"vision_model.encoder.layers.14.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 447 |
+
"vision_model.encoder.layers.14.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 448 |
+
"vision_model.encoder.layers.14.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 449 |
+
"vision_model.encoder.layers.14.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 450 |
+
"vision_model.encoder.layers.14.norm1.bias": "model-00001-of-00004.safetensors",
|
| 451 |
+
"vision_model.encoder.layers.14.norm1.weight": "model-00001-of-00004.safetensors",
|
| 452 |
+
"vision_model.encoder.layers.14.norm2.bias": "model-00001-of-00004.safetensors",
|
| 453 |
+
"vision_model.encoder.layers.14.norm2.weight": "model-00001-of-00004.safetensors",
|
| 454 |
+
"vision_model.encoder.layers.15.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 455 |
+
"vision_model.encoder.layers.15.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 456 |
+
"vision_model.encoder.layers.15.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 457 |
+
"vision_model.encoder.layers.15.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 458 |
+
"vision_model.encoder.layers.15.ls1": "model-00001-of-00004.safetensors",
|
| 459 |
+
"vision_model.encoder.layers.15.ls2": "model-00001-of-00004.safetensors",
|
| 460 |
+
"vision_model.encoder.layers.15.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 461 |
+
"vision_model.encoder.layers.15.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 462 |
+
"vision_model.encoder.layers.15.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 463 |
+
"vision_model.encoder.layers.15.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 464 |
+
"vision_model.encoder.layers.15.norm1.bias": "model-00001-of-00004.safetensors",
|
| 465 |
+
"vision_model.encoder.layers.15.norm1.weight": "model-00001-of-00004.safetensors",
|
| 466 |
+
"vision_model.encoder.layers.15.norm2.bias": "model-00001-of-00004.safetensors",
|
| 467 |
+
"vision_model.encoder.layers.15.norm2.weight": "model-00001-of-00004.safetensors",
|
| 468 |
+
"vision_model.encoder.layers.16.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 469 |
+
"vision_model.encoder.layers.16.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 470 |
+
"vision_model.encoder.layers.16.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 471 |
+
"vision_model.encoder.layers.16.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 472 |
+
"vision_model.encoder.layers.16.ls1": "model-00001-of-00004.safetensors",
|
| 473 |
+
"vision_model.encoder.layers.16.ls2": "model-00001-of-00004.safetensors",
|
| 474 |
+
"vision_model.encoder.layers.16.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 475 |
+
"vision_model.encoder.layers.16.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 476 |
+
"vision_model.encoder.layers.16.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 477 |
+
"vision_model.encoder.layers.16.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 478 |
+
"vision_model.encoder.layers.16.norm1.bias": "model-00001-of-00004.safetensors",
|
| 479 |
+
"vision_model.encoder.layers.16.norm1.weight": "model-00001-of-00004.safetensors",
|
| 480 |
+
"vision_model.encoder.layers.16.norm2.bias": "model-00001-of-00004.safetensors",
|
| 481 |
+
"vision_model.encoder.layers.16.norm2.weight": "model-00001-of-00004.safetensors",
|
| 482 |
+
"vision_model.encoder.layers.17.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 483 |
+
"vision_model.encoder.layers.17.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 484 |
+
"vision_model.encoder.layers.17.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 485 |
+
"vision_model.encoder.layers.17.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 486 |
+
"vision_model.encoder.layers.17.ls1": "model-00001-of-00004.safetensors",
|
| 487 |
+
"vision_model.encoder.layers.17.ls2": "model-00001-of-00004.safetensors",
|
| 488 |
+
"vision_model.encoder.layers.17.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 489 |
+
"vision_model.encoder.layers.17.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 490 |
+
"vision_model.encoder.layers.17.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 491 |
+
"vision_model.encoder.layers.17.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 492 |
+
"vision_model.encoder.layers.17.norm1.bias": "model-00001-of-00004.safetensors",
|
| 493 |
+
"vision_model.encoder.layers.17.norm1.weight": "model-00001-of-00004.safetensors",
|
| 494 |
+
"vision_model.encoder.layers.17.norm2.bias": "model-00001-of-00004.safetensors",
|
| 495 |
+
"vision_model.encoder.layers.17.norm2.weight": "model-00001-of-00004.safetensors",
|
| 496 |
+
"vision_model.encoder.layers.18.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 497 |
+
"vision_model.encoder.layers.18.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 498 |
+
"vision_model.encoder.layers.18.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 499 |
+
"vision_model.encoder.layers.18.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 500 |
+
"vision_model.encoder.layers.18.ls1": "model-00001-of-00004.safetensors",
|
| 501 |
+
"vision_model.encoder.layers.18.ls2": "model-00001-of-00004.safetensors",
|
| 502 |
+
"vision_model.encoder.layers.18.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 503 |
+
"vision_model.encoder.layers.18.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 504 |
+
"vision_model.encoder.layers.18.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 505 |
+
"vision_model.encoder.layers.18.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 506 |
+
"vision_model.encoder.layers.18.norm1.bias": "model-00001-of-00004.safetensors",
|
| 507 |
+
"vision_model.encoder.layers.18.norm1.weight": "model-00001-of-00004.safetensors",
|
| 508 |
+
"vision_model.encoder.layers.18.norm2.bias": "model-00001-of-00004.safetensors",
|
| 509 |
+
"vision_model.encoder.layers.18.norm2.weight": "model-00001-of-00004.safetensors",
|
| 510 |
+
"vision_model.encoder.layers.19.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 511 |
+
"vision_model.encoder.layers.19.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 512 |
+
"vision_model.encoder.layers.19.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 513 |
+
"vision_model.encoder.layers.19.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 514 |
+
"vision_model.encoder.layers.19.ls1": "model-00001-of-00004.safetensors",
|
| 515 |
+
"vision_model.encoder.layers.19.ls2": "model-00001-of-00004.safetensors",
|
| 516 |
+
"vision_model.encoder.layers.19.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 517 |
+
"vision_model.encoder.layers.19.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 518 |
+
"vision_model.encoder.layers.19.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 519 |
+
"vision_model.encoder.layers.19.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 520 |
+
"vision_model.encoder.layers.19.norm1.bias": "model-00001-of-00004.safetensors",
|
| 521 |
+
"vision_model.encoder.layers.19.norm1.weight": "model-00001-of-00004.safetensors",
|
| 522 |
+
"vision_model.encoder.layers.19.norm2.bias": "model-00001-of-00004.safetensors",
|
| 523 |
+
"vision_model.encoder.layers.19.norm2.weight": "model-00001-of-00004.safetensors",
|
| 524 |
+
"vision_model.encoder.layers.2.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 525 |
+
"vision_model.encoder.layers.2.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 526 |
+
"vision_model.encoder.layers.2.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 527 |
+
"vision_model.encoder.layers.2.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 528 |
+
"vision_model.encoder.layers.2.ls1": "model-00001-of-00004.safetensors",
|
| 529 |
+
"vision_model.encoder.layers.2.ls2": "model-00001-of-00004.safetensors",
|
| 530 |
+
"vision_model.encoder.layers.2.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 531 |
+
"vision_model.encoder.layers.2.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 532 |
+
"vision_model.encoder.layers.2.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 533 |
+
"vision_model.encoder.layers.2.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 534 |
+
"vision_model.encoder.layers.2.norm1.bias": "model-00001-of-00004.safetensors",
|
| 535 |
+
"vision_model.encoder.layers.2.norm1.weight": "model-00001-of-00004.safetensors",
|
| 536 |
+
"vision_model.encoder.layers.2.norm2.bias": "model-00001-of-00004.safetensors",
|
| 537 |
+
"vision_model.encoder.layers.2.norm2.weight": "model-00001-of-00004.safetensors",
|
| 538 |
+
"vision_model.encoder.layers.20.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 539 |
+
"vision_model.encoder.layers.20.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 540 |
+
"vision_model.encoder.layers.20.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 541 |
+
"vision_model.encoder.layers.20.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 542 |
+
"vision_model.encoder.layers.20.ls1": "model-00001-of-00004.safetensors",
|
| 543 |
+
"vision_model.encoder.layers.20.ls2": "model-00001-of-00004.safetensors",
|
| 544 |
+
"vision_model.encoder.layers.20.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 545 |
+
"vision_model.encoder.layers.20.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 546 |
+
"vision_model.encoder.layers.20.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 547 |
+
"vision_model.encoder.layers.20.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 548 |
+
"vision_model.encoder.layers.20.norm1.bias": "model-00001-of-00004.safetensors",
|
| 549 |
+
"vision_model.encoder.layers.20.norm1.weight": "model-00001-of-00004.safetensors",
|
| 550 |
+
"vision_model.encoder.layers.20.norm2.bias": "model-00001-of-00004.safetensors",
|
| 551 |
+
"vision_model.encoder.layers.20.norm2.weight": "model-00001-of-00004.safetensors",
|
| 552 |
+
"vision_model.encoder.layers.21.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 553 |
+
"vision_model.encoder.layers.21.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 554 |
+
"vision_model.encoder.layers.21.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 555 |
+
"vision_model.encoder.layers.21.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 556 |
+
"vision_model.encoder.layers.21.ls1": "model-00001-of-00004.safetensors",
|
| 557 |
+
"vision_model.encoder.layers.21.ls2": "model-00001-of-00004.safetensors",
|
| 558 |
+
"vision_model.encoder.layers.21.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 559 |
+
"vision_model.encoder.layers.21.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 560 |
+
"vision_model.encoder.layers.21.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 561 |
+
"vision_model.encoder.layers.21.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 562 |
+
"vision_model.encoder.layers.21.norm1.bias": "model-00001-of-00004.safetensors",
|
| 563 |
+
"vision_model.encoder.layers.21.norm1.weight": "model-00001-of-00004.safetensors",
|
| 564 |
+
"vision_model.encoder.layers.21.norm2.bias": "model-00001-of-00004.safetensors",
|
| 565 |
+
"vision_model.encoder.layers.21.norm2.weight": "model-00001-of-00004.safetensors",
|
| 566 |
+
"vision_model.encoder.layers.22.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 567 |
+
"vision_model.encoder.layers.22.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 568 |
+
"vision_model.encoder.layers.22.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 569 |
+
"vision_model.encoder.layers.22.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 570 |
+
"vision_model.encoder.layers.22.ls1": "model-00001-of-00004.safetensors",
|
| 571 |
+
"vision_model.encoder.layers.22.ls2": "model-00001-of-00004.safetensors",
|
| 572 |
+
"vision_model.encoder.layers.22.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 573 |
+
"vision_model.encoder.layers.22.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 574 |
+
"vision_model.encoder.layers.22.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 575 |
+
"vision_model.encoder.layers.22.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 576 |
+
"vision_model.encoder.layers.22.norm1.bias": "model-00001-of-00004.safetensors",
|
| 577 |
+
"vision_model.encoder.layers.22.norm1.weight": "model-00001-of-00004.safetensors",
|
| 578 |
+
"vision_model.encoder.layers.22.norm2.bias": "model-00001-of-00004.safetensors",
|
| 579 |
+
"vision_model.encoder.layers.22.norm2.weight": "model-00001-of-00004.safetensors",
|
| 580 |
+
"vision_model.encoder.layers.23.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 581 |
+
"vision_model.encoder.layers.23.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 582 |
+
"vision_model.encoder.layers.23.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 583 |
+
"vision_model.encoder.layers.23.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 584 |
+
"vision_model.encoder.layers.23.ls1": "model-00001-of-00004.safetensors",
|
| 585 |
+
"vision_model.encoder.layers.23.ls2": "model-00001-of-00004.safetensors",
|
| 586 |
+
"vision_model.encoder.layers.23.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 587 |
+
"vision_model.encoder.layers.23.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 588 |
+
"vision_model.encoder.layers.23.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 589 |
+
"vision_model.encoder.layers.23.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 590 |
+
"vision_model.encoder.layers.23.norm1.bias": "model-00001-of-00004.safetensors",
|
| 591 |
+
"vision_model.encoder.layers.23.norm1.weight": "model-00001-of-00004.safetensors",
|
| 592 |
+
"vision_model.encoder.layers.23.norm2.bias": "model-00001-of-00004.safetensors",
|
| 593 |
+
"vision_model.encoder.layers.23.norm2.weight": "model-00001-of-00004.safetensors",
|
| 594 |
+
"vision_model.encoder.layers.3.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 595 |
+
"vision_model.encoder.layers.3.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 596 |
+
"vision_model.encoder.layers.3.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 597 |
+
"vision_model.encoder.layers.3.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 598 |
+
"vision_model.encoder.layers.3.ls1": "model-00001-of-00004.safetensors",
|
| 599 |
+
"vision_model.encoder.layers.3.ls2": "model-00001-of-00004.safetensors",
|
| 600 |
+
"vision_model.encoder.layers.3.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 601 |
+
"vision_model.encoder.layers.3.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 602 |
+
"vision_model.encoder.layers.3.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 603 |
+
"vision_model.encoder.layers.3.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 604 |
+
"vision_model.encoder.layers.3.norm1.bias": "model-00001-of-00004.safetensors",
|
| 605 |
+
"vision_model.encoder.layers.3.norm1.weight": "model-00001-of-00004.safetensors",
|
| 606 |
+
"vision_model.encoder.layers.3.norm2.bias": "model-00001-of-00004.safetensors",
|
| 607 |
+
"vision_model.encoder.layers.3.norm2.weight": "model-00001-of-00004.safetensors",
|
| 608 |
+
"vision_model.encoder.layers.4.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 609 |
+
"vision_model.encoder.layers.4.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 610 |
+
"vision_model.encoder.layers.4.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 611 |
+
"vision_model.encoder.layers.4.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 612 |
+
"vision_model.encoder.layers.4.ls1": "model-00001-of-00004.safetensors",
|
| 613 |
+
"vision_model.encoder.layers.4.ls2": "model-00001-of-00004.safetensors",
|
| 614 |
+
"vision_model.encoder.layers.4.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 615 |
+
"vision_model.encoder.layers.4.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 616 |
+
"vision_model.encoder.layers.4.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 617 |
+
"vision_model.encoder.layers.4.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 618 |
+
"vision_model.encoder.layers.4.norm1.bias": "model-00001-of-00004.safetensors",
|
| 619 |
+
"vision_model.encoder.layers.4.norm1.weight": "model-00001-of-00004.safetensors",
|
| 620 |
+
"vision_model.encoder.layers.4.norm2.bias": "model-00001-of-00004.safetensors",
|
| 621 |
+
"vision_model.encoder.layers.4.norm2.weight": "model-00001-of-00004.safetensors",
|
| 622 |
+
"vision_model.encoder.layers.5.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 623 |
+
"vision_model.encoder.layers.5.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 624 |
+
"vision_model.encoder.layers.5.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 625 |
+
"vision_model.encoder.layers.5.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 626 |
+
"vision_model.encoder.layers.5.ls1": "model-00001-of-00004.safetensors",
|
| 627 |
+
"vision_model.encoder.layers.5.ls2": "model-00001-of-00004.safetensors",
|
| 628 |
+
"vision_model.encoder.layers.5.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 629 |
+
"vision_model.encoder.layers.5.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 630 |
+
"vision_model.encoder.layers.5.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 631 |
+
"vision_model.encoder.layers.5.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 632 |
+
"vision_model.encoder.layers.5.norm1.bias": "model-00001-of-00004.safetensors",
|
| 633 |
+
"vision_model.encoder.layers.5.norm1.weight": "model-00001-of-00004.safetensors",
|
| 634 |
+
"vision_model.encoder.layers.5.norm2.bias": "model-00001-of-00004.safetensors",
|
| 635 |
+
"vision_model.encoder.layers.5.norm2.weight": "model-00001-of-00004.safetensors",
|
| 636 |
+
"vision_model.encoder.layers.6.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 637 |
+
"vision_model.encoder.layers.6.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 638 |
+
"vision_model.encoder.layers.6.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 639 |
+
"vision_model.encoder.layers.6.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 640 |
+
"vision_model.encoder.layers.6.ls1": "model-00001-of-00004.safetensors",
|
| 641 |
+
"vision_model.encoder.layers.6.ls2": "model-00001-of-00004.safetensors",
|
| 642 |
+
"vision_model.encoder.layers.6.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 643 |
+
"vision_model.encoder.layers.6.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 644 |
+
"vision_model.encoder.layers.6.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 645 |
+
"vision_model.encoder.layers.6.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 646 |
+
"vision_model.encoder.layers.6.norm1.bias": "model-00001-of-00004.safetensors",
|
| 647 |
+
"vision_model.encoder.layers.6.norm1.weight": "model-00001-of-00004.safetensors",
|
| 648 |
+
"vision_model.encoder.layers.6.norm2.bias": "model-00001-of-00004.safetensors",
|
| 649 |
+
"vision_model.encoder.layers.6.norm2.weight": "model-00001-of-00004.safetensors",
|
| 650 |
+
"vision_model.encoder.layers.7.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 651 |
+
"vision_model.encoder.layers.7.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 652 |
+
"vision_model.encoder.layers.7.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 653 |
+
"vision_model.encoder.layers.7.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 654 |
+
"vision_model.encoder.layers.7.ls1": "model-00001-of-00004.safetensors",
|
| 655 |
+
"vision_model.encoder.layers.7.ls2": "model-00001-of-00004.safetensors",
|
| 656 |
+
"vision_model.encoder.layers.7.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 657 |
+
"vision_model.encoder.layers.7.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 658 |
+
"vision_model.encoder.layers.7.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 659 |
+
"vision_model.encoder.layers.7.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 660 |
+
"vision_model.encoder.layers.7.norm1.bias": "model-00001-of-00004.safetensors",
|
| 661 |
+
"vision_model.encoder.layers.7.norm1.weight": "model-00001-of-00004.safetensors",
|
| 662 |
+
"vision_model.encoder.layers.7.norm2.bias": "model-00001-of-00004.safetensors",
|
| 663 |
+
"vision_model.encoder.layers.7.norm2.weight": "model-00001-of-00004.safetensors",
|
| 664 |
+
"vision_model.encoder.layers.8.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 665 |
+
"vision_model.encoder.layers.8.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 666 |
+
"vision_model.encoder.layers.8.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 667 |
+
"vision_model.encoder.layers.8.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 668 |
+
"vision_model.encoder.layers.8.ls1": "model-00001-of-00004.safetensors",
|
| 669 |
+
"vision_model.encoder.layers.8.ls2": "model-00001-of-00004.safetensors",
|
| 670 |
+
"vision_model.encoder.layers.8.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 671 |
+
"vision_model.encoder.layers.8.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 672 |
+
"vision_model.encoder.layers.8.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 673 |
+
"vision_model.encoder.layers.8.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 674 |
+
"vision_model.encoder.layers.8.norm1.bias": "model-00001-of-00004.safetensors",
|
| 675 |
+
"vision_model.encoder.layers.8.norm1.weight": "model-00001-of-00004.safetensors",
|
| 676 |
+
"vision_model.encoder.layers.8.norm2.bias": "model-00001-of-00004.safetensors",
|
| 677 |
+
"vision_model.encoder.layers.8.norm2.weight": "model-00001-of-00004.safetensors",
|
| 678 |
+
"vision_model.encoder.layers.9.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 679 |
+
"vision_model.encoder.layers.9.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 680 |
+
"vision_model.encoder.layers.9.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 681 |
+
"vision_model.encoder.layers.9.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 682 |
+
"vision_model.encoder.layers.9.ls1": "model-00001-of-00004.safetensors",
|
| 683 |
+
"vision_model.encoder.layers.9.ls2": "model-00001-of-00004.safetensors",
|
| 684 |
+
"vision_model.encoder.layers.9.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 685 |
+
"vision_model.encoder.layers.9.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 686 |
+
"vision_model.encoder.layers.9.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 687 |
+
"vision_model.encoder.layers.9.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 688 |
+
"vision_model.encoder.layers.9.norm1.bias": "model-00001-of-00004.safetensors",
|
| 689 |
+
"vision_model.encoder.layers.9.norm1.weight": "model-00001-of-00004.safetensors",
|
| 690 |
+
"vision_model.encoder.layers.9.norm2.bias": "model-00001-of-00004.safetensors",
|
| 691 |
+
"vision_model.encoder.layers.9.norm2.weight": "model-00001-of-00004.safetensors"
|
| 692 |
+
}
|
| 693 |
+
}
|
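The weight_map above is what the sharded loader consults: every parameter name resolves to exactly one of the four safetensors shards. A minimal sketch of pulling a single tensor through the index, assuming the shards sit next to model.safetensors.index.json (the safetensors package and the example tensor name are the only other ingredients):

import json
import os

from safetensors import safe_open

ckpt_dir = 'ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228'
with open(os.path.join(ckpt_dir, 'model.safetensors.index.json')) as f:
    index = json.load(f)

# Look up the owning shard, then read only that tensor from it.
name = 'vision_model.embeddings.class_embedding'
shard = index['weight_map'][name]
with safe_open(os.path.join(ckpt_dir, shard), framework='pt') as f:
    tensor = f.get_tensor(name)
print(name, tuple(tensor.shape))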
ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/modeling_intern_vit.py
ADDED
@@ -0,0 +1,431 @@
# --------------------------------------------------------
# InternVL
# Copyright (c) 2024 OpenGVLab
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------

from typing import Optional, Tuple, Union

import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from einops import rearrange
from timm.layers import DropPath
from torch import nn
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (BaseModelOutput,
                                           BaseModelOutputWithPooling)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging

from .configuration_intern_vit import InternVisionConfig

try:
    from flash_attn.bert_padding import pad_input, unpad_input
    from flash_attn.flash_attn_interface import \
        flash_attn_varlen_qkvpacked_func
    has_flash_attn = True
except Exception:
    # Narrowed from a bare `except:` so ^C and system exits still propagate.
    print('FlashAttention2 is not installed.')
    has_flash_attn = False

logger = logging.get_logger(__name__)

class FlashAttention(nn.Module):
    """Implement the scaled dot product attention with softmax.

    Arguments
    ---------
        softmax_scale: The temperature to use for the softmax attention.
            (default: 1/sqrt(d_keys) where d_keys is computed at runtime)
        attention_dropout: The dropout rate to apply to the attention
            (default: 0.0)
    """

    def __init__(self, softmax_scale=None, attention_dropout=0.0, device=None, dtype=None):
        super().__init__()
        self.softmax_scale = softmax_scale
        self.dropout_p = attention_dropout

    def forward(self, qkv, key_padding_mask=None, causal=False, cu_seqlens=None,
                max_s=None, need_weights=False):
        """Implements the multihead softmax attention.

        Arguments
        ---------
            qkv: The tensor containing the query, key, and value.
                (B, S, 3, H, D) if key_padding_mask is None; (nnz, 3, H, D) if unpadded.
            key_padding_mask: a bool tensor of shape (B, S)
        """
        assert not need_weights
        assert qkv.dtype in [torch.float16, torch.bfloat16]
        assert qkv.is_cuda

        if cu_seqlens is None:
            batch_size = qkv.shape[0]
            seqlen = qkv.shape[1]
            if key_padding_mask is None:
                # Fixed-length sequences: flatten the batch and build uniform cu_seqlens.
                qkv = rearrange(qkv, 'b s ... -> (b s) ...')
                max_s = seqlen
                cu_seqlens = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32,
                                          device=qkv.device)
                output = flash_attn_varlen_qkvpacked_func(
                    qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
                    softmax_scale=self.softmax_scale, causal=causal
                )
                output = rearrange(output, '(b s) ... -> b s ...', b=batch_size)
            else:
                # Variable-length sequences: strip padding before the varlen kernel,
                # then scatter the outputs back into the padded (B, S, H, D) layout.
                nheads = qkv.shape[-2]
                x = rearrange(qkv, 'b s three h d -> b s (three h d)')
                x_unpad, indices, cu_seqlens, max_s = unpad_input(x, key_padding_mask)
                x_unpad = rearrange(x_unpad, 'nnz (three h d) -> nnz three h d', three=3, h=nheads)
                output_unpad = flash_attn_varlen_qkvpacked_func(
                    x_unpad, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
                    softmax_scale=self.softmax_scale, causal=causal
                )
                output = rearrange(pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'),
                                             indices, batch_size, seqlen),
                                   'b s (h d) -> b s h d', h=nheads)
        else:
            # Caller pre-packed the sequences; cu_seqlens and max_s must both be given.
            assert max_s is not None
            output = flash_attn_varlen_qkvpacked_func(
                qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
                softmax_scale=self.softmax_scale, causal=causal
            )

        return output, None

class InternRMSNorm(nn.Module):
|
| 100 |
+
def __init__(self, hidden_size, eps=1e-6):
|
| 101 |
+
super().__init__()
|
| 102 |
+
self.weight = nn.Parameter(torch.ones(hidden_size))
|
| 103 |
+
self.variance_epsilon = eps
|
| 104 |
+
|
| 105 |
+
def forward(self, hidden_states):
|
| 106 |
+
input_dtype = hidden_states.dtype
|
| 107 |
+
hidden_states = hidden_states.to(torch.float32)
|
| 108 |
+
variance = hidden_states.pow(2).mean(-1, keepdim=True)
|
| 109 |
+
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
|
| 110 |
+
return self.weight * hidden_states.to(input_dtype)
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
try:
|
| 114 |
+
from apex.normalization import FusedRMSNorm
|
| 115 |
+
|
| 116 |
+
InternRMSNorm = FusedRMSNorm # noqa
|
| 117 |
+
|
| 118 |
+
logger.info('Discovered apex.normalization.FusedRMSNorm - will use it instead of InternRMSNorm')
|
| 119 |
+
except ImportError:
|
| 120 |
+
# using the normal InternRMSNorm
|
| 121 |
+
pass
|
| 122 |
+
except Exception:
|
| 123 |
+
logger.warning('discovered apex but it failed to load, falling back to InternRMSNorm')
|
| 124 |
+
pass
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
NORM2FN = {
|
| 128 |
+
'rms_norm': InternRMSNorm,
|
| 129 |
+
'layer_norm': nn.LayerNorm,
|
| 130 |
+
}
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
class InternVisionEmbeddings(nn.Module):
|
| 134 |
+
def __init__(self, config: InternVisionConfig):
|
| 135 |
+
super().__init__()
|
| 136 |
+
self.config = config
|
| 137 |
+
self.embed_dim = config.hidden_size
|
| 138 |
+
self.image_size = config.image_size
|
| 139 |
+
self.patch_size = config.patch_size
|
| 140 |
+
|
| 141 |
+
self.class_embedding = nn.Parameter(
|
| 142 |
+
torch.randn(1, 1, self.embed_dim),
|
| 143 |
+
)
|
| 144 |
+
|
| 145 |
+
self.patch_embedding = nn.Conv2d(
|
| 146 |
+
in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size
|
| 147 |
+
)
|
| 148 |
+
|
| 149 |
+
self.num_patches = (self.image_size // self.patch_size) ** 2
|
| 150 |
+
self.num_positions = self.num_patches + 1
|
| 151 |
+
|
| 152 |
+
self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim))
|
| 153 |
+
|
| 154 |
+
def _get_pos_embed(self, pos_embed, H, W):
|
| 155 |
+
target_dtype = pos_embed.dtype
|
| 156 |
+
pos_embed = pos_embed.float().reshape(
|
| 157 |
+
1, self.image_size // self.patch_size, self.image_size // self.patch_size, -1).permute(0, 3, 1, 2)
|
| 158 |
+
pos_embed = F.interpolate(pos_embed, size=(H, W), mode='bicubic', align_corners=False). \
|
| 159 |
+
reshape(1, -1, H * W).permute(0, 2, 1).to(target_dtype)
|
| 160 |
+
return pos_embed
|
| 161 |
+
|
| 162 |
+
def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
|
| 163 |
+
target_dtype = self.patch_embedding.weight.dtype
|
| 164 |
+
patch_embeds = self.patch_embedding(pixel_values) # shape = [*, channel, width, height]
|
| 165 |
+
batch_size, _, height, width = patch_embeds.shape
|
| 166 |
+
patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
|
| 167 |
+
class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype)
|
| 168 |
+
embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
|
| 169 |
+
position_embedding = torch.cat([
|
| 170 |
+
self.position_embedding[:, :1, :],
|
| 171 |
+
self._get_pos_embed(self.position_embedding[:, 1:, :], height, width)
|
| 172 |
+
], dim=1)
|
| 173 |
+
embeddings = embeddings + position_embedding.to(target_dtype)
|
| 174 |
+
return embeddings
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
class InternAttention(nn.Module):
|
| 178 |
+
"""Multi-headed attention from 'Attention Is All You Need' paper"""
|
| 179 |
+
|
| 180 |
+
def __init__(self, config: InternVisionConfig):
|
| 181 |
+
super().__init__()
|
| 182 |
+
self.config = config
|
| 183 |
+
self.embed_dim = config.hidden_size
|
| 184 |
+
self.num_heads = config.num_attention_heads
|
| 185 |
+
self.use_flash_attn = config.use_flash_attn and has_flash_attn
|
| 186 |
+
if config.use_flash_attn and not has_flash_attn:
|
| 187 |
+
print('Warning: Flash Attention is not available, use_flash_attn is set to False.')
|
| 188 |
+
self.head_dim = self.embed_dim // self.num_heads
|
| 189 |
+
if self.head_dim * self.num_heads != self.embed_dim:
|
| 190 |
+
raise ValueError(
|
| 191 |
+
f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:'
|
| 192 |
+
f' {self.num_heads}).'
|
| 193 |
+
)
|
| 194 |
+
|
| 195 |
+
self.scale = self.head_dim ** -0.5
|
| 196 |
+
self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=config.qkv_bias)
|
| 197 |
+
self.attn_drop = nn.Dropout(config.attention_dropout)
|
| 198 |
+
self.proj_drop = nn.Dropout(config.dropout)
|
| 199 |
+
|
| 200 |
+
self.qk_normalization = config.qk_normalization
|
| 201 |
+
|
| 202 |
+
if self.qk_normalization:
|
| 203 |
+
self.q_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
|
| 204 |
+
self.k_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
|
| 205 |
+
|
| 206 |
+
if self.use_flash_attn:
|
| 207 |
+
self.inner_attn = FlashAttention(attention_dropout=config.attention_dropout)
|
| 208 |
+
self.proj = nn.Linear(self.embed_dim, self.embed_dim)
|
| 209 |
+
|
| 210 |
+
def _naive_attn(self, x):
|
| 211 |
+
B, N, C = x.shape
|
| 212 |
+
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
|
| 213 |
+
q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple)
|
| 214 |
+
|
| 215 |
+
if self.qk_normalization:
|
| 216 |
+
B_, H_, N_, D_ = q.shape
|
| 217 |
+
q = self.q_norm(q.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)
|
| 218 |
+
k = self.k_norm(k.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)
|
| 219 |
+
|
| 220 |
+
attn = ((q * self.scale) @ k.transpose(-2, -1))
|
| 221 |
+
attn = attn.softmax(dim=-1)
|
| 222 |
+
attn = self.attn_drop(attn)
|
| 223 |
+
|
| 224 |
+
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
|
| 225 |
+
x = self.proj(x)
|
| 226 |
+
x = self.proj_drop(x)
|
| 227 |
+
return x
|
| 228 |
+
|
| 229 |
+
def _flash_attn(self, x, key_padding_mask=None, need_weights=False):
|
| 230 |
+
qkv = self.qkv(x)
|
| 231 |
+
qkv = rearrange(qkv, 'b s (three h d) -> b s three h d', three=3, h=self.num_heads)
|
| 232 |
+
|
| 233 |
+
if self.qk_normalization:
|
| 234 |
+
q, k, v = qkv.unbind(2)
|
| 235 |
+
q = self.q_norm(q.flatten(-2, -1)).view(q.shape)
|
| 236 |
+
k = self.k_norm(k.flatten(-2, -1)).view(k.shape)
|
| 237 |
+
qkv = torch.stack([q, k, v], dim=2)
|
| 238 |
+
|
| 239 |
+
context, _ = self.inner_attn(
|
| 240 |
+
qkv, key_padding_mask=key_padding_mask, need_weights=need_weights, causal=False
|
| 241 |
+
)
|
| 242 |
+
outs = self.proj(rearrange(context, 'b s h d -> b s (h d)'))
|
| 243 |
+
outs = self.proj_drop(outs)
|
| 244 |
+
return outs
|
| 245 |
+
|
| 246 |
+
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
|
| 247 |
+
x = self._naive_attn(hidden_states) if not self.use_flash_attn else self._flash_attn(hidden_states)
|
| 248 |
+
return x
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
class InternMLP(nn.Module):
|
| 252 |
+
def __init__(self, config: InternVisionConfig):
|
| 253 |
+
super().__init__()
|
| 254 |
+
self.config = config
|
| 255 |
+
self.act = ACT2FN[config.hidden_act]
|
| 256 |
+
self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
|
| 257 |
+
self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
|
| 258 |
+
|
| 259 |
+
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
|
| 260 |
+
hidden_states = self.fc1(hidden_states)
|
| 261 |
+
hidden_states = self.act(hidden_states)
|
| 262 |
+
hidden_states = self.fc2(hidden_states)
|
| 263 |
+
return hidden_states
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
class InternVisionEncoderLayer(nn.Module):
|
| 267 |
+
def __init__(self, config: InternVisionConfig, drop_path_rate: float):
|
| 268 |
+
super().__init__()
|
| 269 |
+
self.embed_dim = config.hidden_size
|
| 270 |
+
self.intermediate_size = config.intermediate_size
|
| 271 |
+
self.norm_type = config.norm_type
|
| 272 |
+
|
| 273 |
+
self.attn = InternAttention(config)
|
| 274 |
+
self.mlp = InternMLP(config)
|
| 275 |
+
self.norm1 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)
|
| 276 |
+
self.norm2 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)
|
| 277 |
+
|
| 278 |
+
self.ls1 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
|
| 279 |
+
self.ls2 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
|
| 280 |
+
self.drop_path1 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
|
| 281 |
+
self.drop_path2 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
|
| 282 |
+
|
| 283 |
+
def forward(
|
| 284 |
+
self,
|
| 285 |
+
hidden_states: torch.Tensor,
|
| 286 |
+
) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor], Optional[Tuple[torch.FloatTensor]]]:
|
| 287 |
+
"""
|
| 288 |
+
Args:
|
| 289 |
+
hidden_states (`Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]`): input to the layer of shape `(batch, seq_len, embed_dim)`
|
| 290 |
+
"""
|
| 291 |
+
hidden_states = hidden_states + self.drop_path1(self.attn(self.norm1(hidden_states).to(hidden_states.dtype)) * self.ls1)
|
| 292 |
+
|
| 293 |
+
hidden_states = hidden_states + self.drop_path2(self.mlp(self.norm2(hidden_states).to(hidden_states.dtype)) * self.ls2)
|
| 294 |
+
|
| 295 |
+
return hidden_states
|
| 296 |
+
|
| 297 |
+
|
| 298 |
+
class InternVisionEncoder(nn.Module):
|
| 299 |
+
"""
|
| 300 |
+
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
|
| 301 |
+
[`InternEncoderLayer`].
|
| 302 |
+
|
| 303 |
+
Args:
|
| 304 |
+
config (`InternConfig`):
|
| 305 |
+
The corresponding vision configuration for the `InternEncoder`.
|
| 306 |
+
"""
|
| 307 |
+
|
| 308 |
+
def __init__(self, config: InternVisionConfig):
|
| 309 |
+
super().__init__()
|
| 310 |
+
self.config = config
|
| 311 |
+
# stochastic depth decay rule
|
| 312 |
+
dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers)]
|
| 313 |
+
self.layers = nn.ModuleList([
|
| 314 |
+
InternVisionEncoderLayer(config, dpr[idx]) for idx in range(config.num_hidden_layers)])
|
| 315 |
+
self.gradient_checkpointing = True
|
| 316 |
+
|
| 317 |
+
def forward(
|
| 318 |
+
self,
|
| 319 |
+
inputs_embeds,
|
| 320 |
+
output_hidden_states: Optional[bool] = None,
|
| 321 |
+
return_dict: Optional[bool] = None,
|
| 322 |
+
) -> Union[Tuple, BaseModelOutput]:
|
| 323 |
+
r"""
|
| 324 |
+
Args:
|
| 325 |
+
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
|
| 326 |
+
Embedded representation of the inputs. Should be float, not int tokens.
|
| 327 |
+
output_hidden_states (`bool`, *optional*):
|
| 328 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
|
| 329 |
+
for more detail.
|
| 330 |
+
return_dict (`bool`, *optional*):
|
| 331 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
| 332 |
+
"""
|
| 333 |
+
output_hidden_states = (
|
| 334 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 335 |
+
)
|
| 336 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 337 |
+
|
| 338 |
+
encoder_states = () if output_hidden_states else None
|
| 339 |
+
hidden_states = inputs_embeds
|
| 340 |
+
|
| 341 |
+
for idx, encoder_layer in enumerate(self.layers):
|
| 342 |
+
if output_hidden_states:
|
| 343 |
+
encoder_states = encoder_states + (hidden_states,)
|
| 344 |
+
if self.gradient_checkpointing and self.training:
|
| 345 |
+
layer_outputs = torch.utils.checkpoint.checkpoint(
|
| 346 |
+
encoder_layer,
|
| 347 |
+
hidden_states)
|
| 348 |
+
else:
|
| 349 |
+
layer_outputs = encoder_layer(
|
| 350 |
+
hidden_states,
|
| 351 |
+
)
|
| 352 |
+
hidden_states = layer_outputs
|
| 353 |
+
|
| 354 |
+
if output_hidden_states:
|
| 355 |
+
encoder_states = encoder_states + (hidden_states,)
|
| 356 |
+
|
| 357 |
+
if not return_dict:
|
| 358 |
+
return tuple(v for v in [hidden_states, encoder_states] if v is not None)
|
| 359 |
+
return BaseModelOutput(
|
| 360 |
+
last_hidden_state=hidden_states, hidden_states=encoder_states
|
| 361 |
+
)
|
| 362 |
+
|
| 363 |
+
|
| 364 |
+
class InternVisionModel(PreTrainedModel):
|
| 365 |
+
main_input_name = 'pixel_values'
|
| 366 |
+
_supports_flash_attn_2 = True
|
| 367 |
+
supports_gradient_checkpointing = True
|
| 368 |
+
config_class = InternVisionConfig
|
| 369 |
+
_no_split_modules = ['InternVisionEncoderLayer']
|
| 370 |
+
|
| 371 |
+
def __init__(self, config: InternVisionConfig):
|
| 372 |
+
super().__init__(config)
|
| 373 |
+
self.config = config
|
| 374 |
+
|
| 375 |
+
self.embeddings = InternVisionEmbeddings(config)
|
| 376 |
+
self.encoder = InternVisionEncoder(config)
|
| 377 |
+
|
| 378 |
+
def resize_pos_embeddings(self, old_size, new_size, patch_size):
|
| 379 |
+
pos_emb = self.embeddings.position_embedding
|
| 380 |
+
_, num_positions, embed_dim = pos_emb.shape
|
| 381 |
+
cls_emb = pos_emb[:, :1, :]
|
| 382 |
+
pos_emb = pos_emb[:, 1:, :].reshape(1, old_size // patch_size, old_size // patch_size, -1).permute(0, 3, 1, 2)
|
| 383 |
+
pos_emb = F.interpolate(pos_emb.float(), size=new_size // patch_size, mode='bicubic', align_corners=False)
|
| 384 |
+
pos_emb = pos_emb.to(cls_emb.dtype).reshape(1, embed_dim, -1).permute(0, 2, 1)
|
| 385 |
+
pos_emb = torch.cat([cls_emb, pos_emb], dim=1)
|
| 386 |
+
self.embeddings.position_embedding = nn.Parameter(pos_emb)
|
| 387 |
+
self.embeddings.image_size = new_size
|
| 388 |
+
logger.info('Resized position embeddings from {} to {}'.format(old_size, new_size))
|
| 389 |
+
|
| 390 |
+
def get_input_embeddings(self):
|
| 391 |
+
return self.embeddings
|
| 392 |
+
|
| 393 |
+
def forward(
|
| 394 |
+
self,
|
| 395 |
+
pixel_values: Optional[torch.FloatTensor] = None,
|
| 396 |
+
output_hidden_states: Optional[bool] = None,
|
| 397 |
+
return_dict: Optional[bool] = None,
|
| 398 |
+
pixel_embeds: Optional[torch.FloatTensor] = None,
|
| 399 |
+
) -> Union[Tuple, BaseModelOutputWithPooling]:
|
| 400 |
+
output_hidden_states = (
|
| 401 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 402 |
+
)
|
| 403 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 404 |
+
|
| 405 |
+
if pixel_values is None and pixel_embeds is None:
|
| 406 |
+
raise ValueError('You have to specify pixel_values or pixel_embeds')
|
| 407 |
+
|
| 408 |
+
if pixel_embeds is not None:
|
| 409 |
+
hidden_states = pixel_embeds
|
| 410 |
+
else:
|
| 411 |
+
if len(pixel_values.shape) == 4:
|
| 412 |
+
hidden_states = self.embeddings(pixel_values)
|
| 413 |
+
else:
|
| 414 |
+
raise ValueError(f'wrong pixel_values size: {pixel_values.shape}')
|
| 415 |
+
encoder_outputs = self.encoder(
|
| 416 |
+
inputs_embeds=hidden_states,
|
| 417 |
+
output_hidden_states=output_hidden_states,
|
| 418 |
+
return_dict=return_dict,
|
| 419 |
+
)
|
| 420 |
+
last_hidden_state = encoder_outputs.last_hidden_state
|
| 421 |
+
pooled_output = last_hidden_state[:, 0, :]
|
| 422 |
+
|
| 423 |
+
if not return_dict:
|
| 424 |
+
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
|
| 425 |
+
|
| 426 |
+
return BaseModelOutputWithPooling(
|
| 427 |
+
last_hidden_state=last_hidden_state,
|
| 428 |
+
pooler_output=pooled_output,
|
| 429 |
+
hidden_states=encoder_outputs.hidden_states,
|
| 430 |
+
attentions=encoder_outputs.attentions,
|
| 431 |
+
)
|
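One technique in the file above worth a quick illustration: InternVisionEmbeddings._get_pos_embed bicubically interpolates the learned patch-position grid to whatever grid the input produces, so the same pretrained weights serve multiple input resolutions. The sketch below is standalone and uses toy dimensions (embed_dim=64), not values read from this checkpoint:

# Minimal sketch of the position-embedding interpolation, assumed shapes only.
import torch
import torch.nn.functional as F

embed_dim, old_grid, new_grid = 64, 32, 16            # 448 // 14 = 32; e.g. a 224-px input gives 16
pos = torch.randn(1, old_grid * old_grid, embed_dim)  # per-patch embeddings (the CLS slot is handled separately)
pos = pos.reshape(1, old_grid, old_grid, embed_dim).permute(0, 3, 1, 2)       # (1, C, H, W) for interpolate
pos = F.interpolate(pos, size=(new_grid, new_grid), mode='bicubic', align_corners=False)
pos = pos.reshape(1, embed_dim, new_grid * new_grid).permute(0, 2, 1)         # back to (1, H*W, C)
print(pos.shape)  # torch.Size([1, 256, 64])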
ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/modeling_internvl_chat.py
ADDED
@@ -0,0 +1,359 @@
# --------------------------------------------------------
# InternVL
# Copyright (c) 2024 OpenGVLab
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------

import warnings
from typing import List, Optional, Tuple, Union

import torch.utils.checkpoint
import transformers
from torch import nn
from torch.nn import CrossEntropyLoss
from transformers import (AutoModel, GenerationConfig, LlamaForCausalLM,
                          Qwen2ForCausalLM)
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import ModelOutput, logging

from .configuration_internvl_chat import InternVLChatConfig
from .conversation import get_conv_template
from .modeling_intern_vit import InternVisionModel, has_flash_attn

logger = logging.get_logger(__name__)


def version_cmp(v1, v2, op='eq'):
    import operator

    from packaging import version
    op_func = getattr(operator, op)
    return op_func(version.parse(v1), version.parse(v2))


class InternVLChatModel(PreTrainedModel):
    config_class = InternVLChatConfig
    main_input_name = 'pixel_values'
    base_model_prefix = 'language_model'
    _supports_flash_attn_2 = True
    supports_gradient_checkpointing = True
    _no_split_modules = ['InternVisionModel', 'LlamaDecoderLayer', 'Qwen2DecoderLayer']

    def __init__(self, config: InternVLChatConfig, vision_model=None, language_model=None, use_flash_attn=True):
        super().__init__(config)

        assert version_cmp(transformers.__version__, '4.37.0', 'ge')
        image_size = config.force_image_size or config.vision_config.image_size
        patch_size = config.vision_config.patch_size
        self.patch_size = patch_size
        self.select_layer = config.select_layer
        self.template = config.template
        self.num_image_token = int((image_size // patch_size) ** 2 * (config.downsample_ratio ** 2))
        self.downsample_ratio = config.downsample_ratio
        self.ps_version = config.ps_version
        use_flash_attn = use_flash_attn if has_flash_attn else False
        config.vision_config.use_flash_attn = True if use_flash_attn else False
        config.llm_config._attn_implementation = 'flash_attention_2' if use_flash_attn else 'eager'

        logger.info(f'num_image_token: {self.num_image_token}')
        logger.info(f'ps_version: {self.ps_version}')
        if vision_model is not None:
            self.vision_model = vision_model
        else:
            self.vision_model = InternVisionModel(config.vision_config)
        if language_model is not None:
            self.language_model = language_model
        else:
            if config.llm_config.architectures[0] == 'LlamaForCausalLM':
                self.language_model = LlamaForCausalLM(config.llm_config)
            elif config.llm_config.architectures[0] == 'Qwen2ForCausalLM':
                self.language_model = Qwen2ForCausalLM(config.llm_config)
            else:
                raise NotImplementedError(f'{config.llm_config.architectures[0]} is not implemented.')

        vit_hidden_size = config.vision_config.hidden_size
        llm_hidden_size = config.llm_config.hidden_size

        self.mlp1 = nn.Sequential(
            nn.LayerNorm(vit_hidden_size * int(1 / self.downsample_ratio) ** 2),
            nn.Linear(vit_hidden_size * int(1 / self.downsample_ratio) ** 2, llm_hidden_size),
            nn.GELU(),
            nn.Linear(llm_hidden_size, llm_hidden_size)
        )

        self.img_context_token_id = None
        self.conv_template = get_conv_template(self.template)
        self.system_message = self.conv_template.system_message

    def forward(
            self,
            pixel_values: torch.FloatTensor,
            input_ids: torch.LongTensor = None,
            attention_mask: Optional[torch.Tensor] = None,
            position_ids: Optional[torch.LongTensor] = None,
            image_flags: Optional[torch.LongTensor] = None,
            past_key_values: Optional[List[torch.FloatTensor]] = None,
            labels: Optional[torch.LongTensor] = None,
            use_cache: Optional[bool] = None,
            output_attentions: Optional[bool] = None,
            output_hidden_states: Optional[bool] = None,
            return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        image_flags = image_flags.squeeze(-1)
        input_embeds = self.language_model.get_input_embeddings()(input_ids).clone()

        vit_embeds = self.extract_feature(pixel_values)
        vit_embeds = vit_embeds[image_flags == 1]
        vit_batch_size = pixel_values.shape[0]

        B, N, C = input_embeds.shape
        input_embeds = input_embeds.reshape(B * N, C)

        if torch.distributed.is_initialized() and torch.distributed.get_rank() == 0:
            print(f'dynamic ViT batch size: {vit_batch_size}, images per sample: {vit_batch_size / B}, dynamic token length: {N}')

        input_ids = input_ids.reshape(B * N)
        selected = (input_ids == self.img_context_token_id)
        try:
            input_embeds[selected] = input_embeds[selected] * 0.0 + vit_embeds.reshape(-1, C)
        except Exception as e:
            vit_embeds = vit_embeds.reshape(-1, C)
            print(f'warning: {e}, input_embeds[selected].shape={input_embeds[selected].shape}, '
                  f'vit_embeds.shape={vit_embeds.shape}')
            n_token = min(selected.sum(), vit_embeds.size(0))
            input_embeds[selected][:n_token] = input_embeds[selected][:n_token] * 0.0 + vit_embeds[:n_token]

        input_embeds = input_embeds.reshape(B, N, C)

        outputs = self.language_model(
            inputs_embeds=input_embeds,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        logits = outputs.logits

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.language_model.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def pixel_shuffle(self, x, scale_factor=0.5):
        n, w, h, c = x.size()
        # N, W, H, C --> N, W, H * scale, C // scale
        x = x.view(n, w, int(h * scale_factor), int(c / scale_factor))
        # N, W, H * scale, C // scale --> N, H * scale, W, C // scale
        x = x.permute(0, 2, 1, 3).contiguous()
        # N, H * scale, W, C // scale --> N, H * scale, W * scale, C // (scale ** 2)
        x = x.view(n, int(h * scale_factor), int(w * scale_factor),
                   int(c / (scale_factor * scale_factor)))
        if self.ps_version == 'v1':
            warnings.warn("In ps_version 'v1', the height and width have not been swapped back, "
                          'which results in a transposed image.')
        else:
            x = x.permute(0, 2, 1, 3).contiguous()
        return x

    def extract_feature(self, pixel_values):
        if self.select_layer == -1:
            vit_embeds = self.vision_model(
                pixel_values=pixel_values,
                output_hidden_states=False,
                return_dict=True).last_hidden_state
        else:
            vit_embeds = self.vision_model(
                pixel_values=pixel_values,
                output_hidden_states=True,
                return_dict=True).hidden_states[self.select_layer]
        vit_embeds = vit_embeds[:, 1:, :]

        h = w = int(vit_embeds.shape[1] ** 0.5)
        vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], h, w, -1)
        vit_embeds = self.pixel_shuffle(vit_embeds, scale_factor=self.downsample_ratio)
        vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], -1, vit_embeds.shape[-1])
        vit_embeds = self.mlp1(vit_embeds)
        return vit_embeds

    def batch_chat(self, tokenizer, pixel_values, questions, generation_config, num_patches_list=None,
                   history=None, return_history=False, IMG_START_TOKEN='<img>', IMG_END_TOKEN='</img>',
                   IMG_CONTEXT_TOKEN='<IMG_CONTEXT>', verbose=False, image_counts=None):
        if history is not None or return_history:
            print('Now multi-turn chat is not supported in batch_chat.')
            raise NotImplementedError

        if image_counts is not None:
            num_patches_list = image_counts
            print('Warning: `image_counts` is deprecated. Please use `num_patches_list` instead.')

        img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
        self.img_context_token_id = img_context_token_id

        if verbose and pixel_values is not None:
            image_bs = pixel_values.shape[0]
            print(f'dynamic ViT batch size: {image_bs}')

        queries = []
        for idx, num_patches in enumerate(num_patches_list):
            question = questions[idx]
            if pixel_values is not None and '<image>' not in question:
                question = '<image>\n' + question
            template = get_conv_template(self.template)
            template.system_message = self.system_message
            template.append_message(template.roles[0], question)
            template.append_message(template.roles[1], None)
            query = template.get_prompt()

            image_tokens = IMG_START_TOKEN + IMG_CONTEXT_TOKEN * self.num_image_token * num_patches + IMG_END_TOKEN
            query = query.replace('<image>', image_tokens, 1)
            queries.append(query)

        tokenizer.padding_side = 'left'
        model_inputs = tokenizer(queries, return_tensors='pt', padding=True)
        input_ids = model_inputs['input_ids'].to(self.device)
        attention_mask = model_inputs['attention_mask'].to(self.device)
        eos_token_id = tokenizer.convert_tokens_to_ids(template.sep.strip())
        generation_config['eos_token_id'] = eos_token_id
        generation_output = self.generate(
            pixel_values=pixel_values,
            input_ids=input_ids,
            attention_mask=attention_mask,
            **generation_config
        )
        responses = tokenizer.batch_decode(generation_output, skip_special_tokens=True)
        responses = [response.split(template.sep.strip())[0].strip() for response in responses]
        return responses

    def chat(self, tokenizer, pixel_values, question, generation_config, history=None, return_history=False,
             num_patches_list=None, IMG_START_TOKEN='<img>', IMG_END_TOKEN='</img>', IMG_CONTEXT_TOKEN='<IMG_CONTEXT>',
             verbose=False):

        if history is None and pixel_values is not None and '<image>' not in question:
            question = '<image>\n' + question

        if num_patches_list is None:
            num_patches_list = [pixel_values.shape[0]] if pixel_values is not None else []
        assert pixel_values is None or len(pixel_values) == sum(num_patches_list)

        img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
        self.img_context_token_id = img_context_token_id

        template = get_conv_template(self.template)
        template.system_message = self.system_message
        eos_token_id = tokenizer.convert_tokens_to_ids(template.sep.strip())

        history = [] if history is None else history
        for (old_question, old_answer) in history:
            template.append_message(template.roles[0], old_question)
            template.append_message(template.roles[1], old_answer)
        template.append_message(template.roles[0], question)
        template.append_message(template.roles[1], None)
        query = template.get_prompt()

        if verbose and pixel_values is not None:
            image_bs = pixel_values.shape[0]
            print(f'dynamic ViT batch size: {image_bs}')

        for num_patches in num_patches_list:
            image_tokens = IMG_START_TOKEN + IMG_CONTEXT_TOKEN * self.num_image_token * num_patches + IMG_END_TOKEN
            query = query.replace('<image>', image_tokens, 1)

        model_inputs = tokenizer(query, return_tensors='pt')
        input_ids = model_inputs['input_ids'].to(self.device)
        attention_mask = model_inputs['attention_mask'].to(self.device)
        generation_config['eos_token_id'] = eos_token_id
        generation_output = self.generate(
            pixel_values=pixel_values,
            input_ids=input_ids,
            attention_mask=attention_mask,
            **generation_config
        )
        response = tokenizer.batch_decode(generation_output, skip_special_tokens=True)[0]
        response = response.split(template.sep.strip())[0].strip()
        history.append((question, response))
        if return_history:
            return response, history
        else:
            query_to_print = query.replace(IMG_CONTEXT_TOKEN, '')
            query_to_print = query_to_print.replace(f'{IMG_START_TOKEN}{IMG_END_TOKEN}', '<image>')
            if verbose:
                print(query_to_print, response)
            return response

    @torch.no_grad()
    def generate(
            self,
            pixel_values: Optional[torch.FloatTensor] = None,
            input_ids: Optional[torch.FloatTensor] = None,
            attention_mask: Optional[torch.LongTensor] = None,
            visual_features: Optional[torch.FloatTensor] = None,
            generation_config: Optional[GenerationConfig] = None,
            output_hidden_states: Optional[bool] = None,
            **generate_kwargs,
    ) -> torch.LongTensor:

        assert self.img_context_token_id is not None
        if pixel_values is not None:
            if visual_features is not None:
                vit_embeds = visual_features
            else:
                vit_embeds = self.extract_feature(pixel_values)
            input_embeds = self.language_model.get_input_embeddings()(input_ids)
            B, N, C = input_embeds.shape
            input_embeds = input_embeds.reshape(B * N, C)

            input_ids = input_ids.reshape(B * N)
            selected = (input_ids == self.img_context_token_id)
            assert selected.sum() != 0
            input_embeds[selected] = vit_embeds.reshape(-1, C).to(input_embeds.device)

            input_embeds = input_embeds.reshape(B, N, C)
        else:
            input_embeds = self.language_model.get_input_embeddings()(input_ids)

        outputs = self.language_model.generate(
            inputs_embeds=input_embeds,
            attention_mask=attention_mask,
            generation_config=generation_config,
            output_hidden_states=output_hidden_states,
            use_cache=True,
            **generate_kwargs,
        )

        return outputs

    @property
    def lm_head(self):
        return self.language_model.get_output_embeddings()

    def get_input_embeddings(self):
        return self.language_model.get_input_embeddings()

    def get_output_embeddings(self):
        return self.language_model.get_output_embeddings()
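The pixel_shuffle step above is what makes num_image_token = int((image_size // patch_size) ** 2 * downsample_ratio ** 2): assuming the 448-px, patch-14 settings this checkpoint family uses, 32 * 32 = 1024 patch tokens with downsample_ratio 0.5 become 16 * 16 = 256 tokens, each 4x wider, before mlp1 projects them to the LLM width. A standalone sketch with toy tensors (shapes assumed, not read from the checkpoint):

# Minimal sketch of the pixel_shuffle downsampling in extract_feature above.
import torch

def pixel_shuffle(x: torch.Tensor, scale: float = 0.5) -> torch.Tensor:
    n, w, h, c = x.size()
    x = x.view(n, w, int(h * scale), int(c / scale))          # halve H, double C
    x = x.permute(0, 2, 1, 3).contiguous()                    # swap W and H*scale
    x = x.view(n, int(h * scale), int(w * scale), int(c / (scale * scale)))
    return x.permute(0, 2, 1, 3).contiguous()                 # ps_version 'v2': swap H/W back

feats = torch.randn(1, 32, 32, 1024)        # (N, H, W, C) patch grid from the ViT
out = pixel_shuffle(feats)
print(out.shape)                            # torch.Size([1, 16, 16, 4096])
print(out.flatten(1, 2).shape)              # torch.Size([1, 256, 4096]) -> into mlp1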
ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/preprocessor_config.json
ADDED
@@ -0,0 +1,19 @@
{
  "crop_size": 448,
  "do_center_crop": true,
  "do_normalize": true,
  "do_resize": true,
  "feature_extractor_type": "CLIPFeatureExtractor",
  "image_mean": [
    0.485,
    0.456,
    0.406
  ],
  "image_std": [
    0.229,
    0.224,
    0.225
  ],
  "resample": 3,
  "size": 448
}
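This config encodes standard CLIP-style preprocessing: resize and center-crop to 448 px with bicubic resampling (resample 3 is PIL's BICUBIC code) and ImageNet mean/std normalization. A rough torchvision equivalent, for illustration only; the checkpoint itself ships no torchvision code and InternVL's loaders build their own pipeline:

# Sketch of the transform this config describes, assuming torchvision is available.
from torchvision import transforms
from torchvision.transforms import InterpolationMode

preprocess = transforms.Compose([
    transforms.Resize(448, interpolation=InterpolationMode.BICUBIC),  # do_resize, resample=3
    transforms.CenterCrop(448),                                       # do_center_crop, crop_size=448
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],                  # image_mean
                         std=[0.229, 0.224, 0.225]),                  # image_std
])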
ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/rng_state_0.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:08c830b56eaefdd9b372fae4488e1ba93148ecf58aa2ed05c8103ab7afe964ee
size 15365
ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/rng_state_1.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b6f3949e2b25ad05905da16bc2d58aeadc9a9f1db0b0e97a04ceb63467acff1d
size 15429
ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/rng_state_2.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5c3812c5e7f13c9d32f97d83cd625428061b1c9486e869caffd453d83ac07ceb
size 15429
ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/rng_state_3.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6eda756b33768639c1e1932ae00c5d43269de5705c09162bc09acdf71d3533c0
size 15429
ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d4501fff0ee5438d0b9846a6dc44b07779ec2c539bccf765f336f11aa4a06ce6
size 1465
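Note that the rng_state_*.pth and scheduler.pt entries above are Git LFS pointer files, not the serialized tensors themselves: each records only the spec version, a sha256 object id, and the payload size in bytes, and the actual blob is fetched on checkout. A hypothetical parsing helper, for illustration only:

# parse_lfs_pointer is a made-up name; an LFS pointer is just three "key value" lines.
def parse_lfs_pointer(text: str) -> dict:
    return dict(line.split(' ', 1) for line in text.strip().splitlines())

ptr = parse_lfs_pointer(
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:d4501fff0ee5438d0b9846a6dc44b07779ec2c539bccf765f336f11aa4a06ce6\n"
    "size 1465\n"
)
print(ptr["size"])  # '1465' -> scheduler.pt is only 1465 bytes on the LFS server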
ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/special_tokens_map.json
ADDED
@@ -0,0 +1,31 @@
{
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>",
    "<|object_ref_start|>",
    "<|object_ref_end|>",
    "<|box_start|>",
    "<|box_end|>",
    "<|quad_start|>",
    "<|quad_end|>",
    "<|vision_start|>",
    "<|vision_end|>",
    "<|vision_pad|>",
    "<|image_pad|>",
    "<|video_pad|>"
  ],
  "eos_token": {
    "content": "<|im_end|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/tokenizer_config.json
ADDED
@@ -0,0 +1,280 @@
{
  "add_bos_token": false,
  "add_eos_token": false,
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "151643": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151644": {
      "content": "<|im_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151645": {
      "content": "<|im_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151646": {
      "content": "<|object_ref_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151647": {
      "content": "<|object_ref_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151648": {
      "content": "<|box_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151649": {
      "content": "<|box_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151650": {
      "content": "<|quad_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151651": {
      "content": "<|quad_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151652": {
      "content": "<|vision_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151653": {
      "content": "<|vision_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151654": {
      "content": "<|vision_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151655": {
      "content": "<|image_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151656": {
      "content": "<|video_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151657": {
      "content": "<tool_call>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151658": {
      "content": "</tool_call>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151659": {
      "content": "<|fim_prefix|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151660": {
      "content": "<|fim_middle|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151661": {
      "content": "<|fim_suffix|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151662": {
      "content": "<|fim_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151663": {
      "content": "<|repo_name|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151664": {
      "content": "<|file_sep|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151665": {
      "content": "<img>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151666": {
      "content": "</img>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151667": {
      "content": "<IMG_CONTEXT>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151668": {
      "content": "<quad>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151669": {
      "content": "</quad>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151670": {
      "content": "<ref>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151671": {
      "content": "</ref>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151672": {
      "content": "<box>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151673": {
      "content": "</box>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>",
    "<|object_ref_start|>",
    "<|object_ref_end|>",
    "<|box_start|>",
    "<|box_end|>",
    "<|quad_start|>",
    "<|quad_end|>",
    "<|vision_start|>",
    "<|vision_end|>",
    "<|vision_pad|>",
    "<|image_pad|>",
    "<|video_pad|>"
  ],
  "bos_token": null,
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|im_end|>",
  "errors": "replace",
  "extra_special_tokens": {},
  "model_max_length": 1000000,
  "pad_token": "<|endoftext|>",
  "split_special_tokens": false,
  "tokenizer_class": "Qwen2Tokenizer",
  "unk_token": null
}
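These tokenizer entries are what chat() and batch_chat() in modeling_internvl_chat.py rely on: '<IMG_CONTEXT>' (id 151667 here) is repeated num_image_token times per image tile between '<img>' and '</img>', and those positions are later overwritten with ViT embeddings inside generate(). A sketch of the expansion, with a placeholder checkpoint path and num_image_token assumed to be 256 for this model:

# Illustrative only; 'path/to/checkpoint-228' is a placeholder, not a real path.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained('path/to/checkpoint-228', trust_remote_code=True)
img_context_token_id = tok.convert_tokens_to_ids('<IMG_CONTEXT>')  # -> 151667 per the config above

num_image_token, num_patches = 256, 2  # assumed values; num_patches is the tile count of one image
image_tokens = '<img>' + '<IMG_CONTEXT>' * num_image_token * num_patches + '</img>'
prompt = '<image>\nDescribe the image.'.replace('<image>', image_tokens, 1)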
ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/trainer_state.json
ADDED
@@ -0,0 +1,429 @@
{
  "best_global_step": 152,
  "best_metric": 0.58733511,
  "best_model_checkpoint": "/mnt/data/users/liamding/data/MMMT/lora/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-152",
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 228,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.013157894736842105,
      "grad_norm": 7.443980742894238,
      "learning_rate": 5.2631578947368416e-08,
      "loss": 1.2054955959320068,
      "step": 1,
      "token_acc": 0.6650124069478908
    },
    {
      "epoch": 0.06578947368421052,
      "grad_norm": 6.994858478446768,
      "learning_rate": 2.631578947368421e-07,
      "loss": 1.1846562623977661,
      "step": 5,
      "token_acc": 0.6773799979084603
    },
    {
      "epoch": 0.13157894736842105,
      "grad_norm": 6.882946793485346,
      "learning_rate": 5.263157894736842e-07,
      "loss": 1.1630187034606934,
      "step": 10,
      "token_acc": 0.6727064875678765
    },
    {
      "epoch": 0.19736842105263158,
      "grad_norm": 6.56262502850702,
      "learning_rate": 7.894736842105263e-07,
      "loss": 1.1065165519714355,
      "step": 15,
      "token_acc": 0.6860647322634679
    },
    {
      "epoch": 0.2631578947368421,
      "grad_norm": 4.349898766527815,
      "learning_rate": 1.0526315789473683e-06,
      "loss": 1.0518027305603028,
      "step": 20,
      "token_acc": 0.6943658580629627
    },
    {
      "epoch": 0.32894736842105265,
      "grad_norm": 3.911631629267191,
      "learning_rate": 1.3157894736842106e-06,
      "loss": 0.9281719207763672,
      "step": 25,
      "token_acc": 0.7247400800580286
    },
    {
      "epoch": 0.39473684210526316,
      "grad_norm": 4.172022146440079,
      "learning_rate": 1.5789473684210526e-06,
      "loss": 0.8204439163208008,
      "step": 30,
      "token_acc": 0.7546454605699139
    },
    {
      "epoch": 0.4605263157894737,
      "grad_norm": 3.089806220840059,
      "learning_rate": 1.8421052631578946e-06,
      "loss": 0.7725029945373535,
      "step": 35,
      "token_acc": 0.7605701730882589
    },
    {
      "epoch": 0.5263157894736842,
      "grad_norm": 3.573307688479329,
      "learning_rate": 1.9998312416333223e-06,
      "loss": 0.736858606338501,
      "step": 40,
      "token_acc": 0.7723412646246878
    },
    {
      "epoch": 0.5921052631578947,
      "grad_norm": 3.005279630810127,
      "learning_rate": 1.9979333640833945e-06,
      "loss": 0.7091458797454834,
      "step": 45,
      "token_acc": 0.7757977404962559
    },
    {
      "epoch": 0.6578947368421053,
      "grad_norm": 2.6547906270389987,
      "learning_rate": 1.9939306773179494e-06,
      "loss": 0.6816033363342285,
      "step": 50,
      "token_acc": 0.7845786750315021
    },
    {
      "epoch": 0.7236842105263158,
      "grad_norm": 2.6397159135636863,
      "learning_rate": 1.9878316236762193e-06,
      "loss": 0.6458231925964355,
      "step": 55,
      "token_acc": 0.7937998091455394
    },
    {
      "epoch": 0.7894736842105263,
      "grad_norm": 2.854823622950123,
      "learning_rate": 1.9796490670875738e-06,
      "loss": 0.6452323913574218,
      "step": 60,
      "token_acc": 0.7941120452752156
    },
    {
      "epoch": 0.8552631578947368,
      "grad_norm": 2.893500925424196,
      "learning_rate": 1.9694002659393305e-06,
      "loss": 0.6606711387634278,
      "step": 65,
      "token_acc": 0.7889678924563938
    },
    {
      "epoch": 0.9210526315789473,
      "grad_norm": 2.783788162077395,
      "learning_rate": 1.957106836675914e-06,
      "loss": 0.6494266986846924,
      "step": 70,
      "token_acc": 0.7904519555235935
    },
    {
      "epoch": 0.9868421052631579,
      "grad_norm": 2.7988099493453547,
      "learning_rate": 1.942794708206143e-06,
      "loss": 0.6436448574066163,
      "step": 75,
      "token_acc": 0.7935710698141637
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.6168130040168762,
      "eval_runtime": 14.5426,
      "eval_samples_per_second": 9.214,
      "eval_steps_per_second": 1.169,
      "eval_token_acc": 0.8025923865012602,
      "step": 76
    },
    {
      "epoch": 1.0526315789473684,
      "grad_norm": 2.843676295839037,
      "learning_rate": 1.9264940672148015e-06,
      "loss": 0.5734551429748536,
      "step": 80,
      "token_acc": 0.8115053003533569
    },
    {
      "epoch": 1.118421052631579,
      "grad_norm": 2.7171783073156064,
      "learning_rate": 1.9082392944938463e-06,
      "loss": 0.5453813076019287,
      "step": 85,
      "token_acc": 0.817947946030269
    },
    {
      "epoch": 1.1842105263157894,
      "grad_norm": 3.0474411761804863,
      "learning_rate": 1.8880688924275375e-06,
      "loss": 0.5494410514831543,
      "step": 90,
      "token_acc": 0.8209292435277231
    },
    {
      "epoch": 1.25,
      "grad_norm": 2.6264194208633915,
      "learning_rate": 1.8660254037844386e-06,
      "loss": 0.538231086730957,
      "step": 95,
      "token_acc": 0.8209094297428252
    },
    {
      "epoch": 1.3157894736842106,
      "grad_norm": 2.729094045019327,
      "learning_rate": 1.8421553219875656e-06,
      "loss": 0.553615951538086,
      "step": 100,
      "token_acc": 0.8173360210885409
    },
    {
      "epoch": 1.381578947368421,
      "grad_norm": 2.7672404883247057,
      "learning_rate": 1.8165089930519428e-06,
      "loss": 0.5331531047821045,
      "step": 105,
      "token_acc": 0.8239764441951767
    },
    {
      "epoch": 1.4473684210526316,
      "grad_norm": 2.7388761924928016,
      "learning_rate": 1.7891405093963937e-06,
      "loss": 0.5318719387054444,
      "step": 110,
      "token_acc": 0.8226405746841714
    },
    {
      "epoch": 1.513157894736842,
      "grad_norm": 2.5418644828797468,
      "learning_rate": 1.7601075957535362e-06,
      "loss": 0.5143476486206054,
      "step": 115,
      "token_acc": 0.8280721235194586
    },
    {
      "epoch": 1.5789473684210527,
      "grad_norm": 2.7544656818664928,
      "learning_rate": 1.7294714874186208e-06,
      "loss": 0.5286956787109375,
      "step": 120,
      "token_acc": 0.8235277770058083
    },
    {
      "epoch": 1.6447368421052633,
      "grad_norm": 2.818526777429581,
      "learning_rate": 1.6972968010939952e-06,
      "loss": 0.538817024230957,
      "step": 125,
      "token_acc": 0.8173395294801046
    },
    {
      "epoch": 1.7105263157894737,
      "grad_norm": 2.570309846472994,
      "learning_rate": 1.6636513986016212e-06,
      "loss": 0.5275390148162842,
      "step": 130,
      "token_acc": 0.8256479119108365
    },
    {
      "epoch": 1.776315789473684,
      "grad_norm": 2.642287833986887,
      "learning_rate": 1.628606243751082e-06,
      "loss": 0.5242605209350586,
      "step": 135,
      "token_acc": 0.8259167355941549
    },
    {
      "epoch": 1.8421052631578947,
      "grad_norm": 2.8193490230655005,
      "learning_rate": 1.5922352526649801e-06,
      "loss": 0.5326420783996582,
      "step": 140,
      "token_acc": 0.8207143059771353
    },
    {
      "epoch": 1.9078947368421053,
      "grad_norm": 2.829235622860951,
      "learning_rate": 1.5546151378774087e-06,
      "loss": 0.5403931617736817,
      "step": 145,
      "token_acc": 0.821919714937921
    },
    {
      "epoch": 1.973684210526316,
      "grad_norm": 2.5244815201810953,
      "learning_rate": 1.515825246534324e-06,
      "loss": 0.5291405200958252,
      "step": 150,
      "token_acc": 0.8255817110586191
+
},
|
| 269 |
+
{
|
| 270 |
+
"epoch": 2.0,
|
| 271 |
+
"eval_loss": 0.5873351097106934,
|
| 272 |
+
"eval_runtime": 15.0089,
|
| 273 |
+
"eval_samples_per_second": 8.928,
|
| 274 |
+
"eval_steps_per_second": 1.133,
|
| 275 |
+
"eval_token_acc": 0.8101535137966024,
|
| 276 |
+
"step": 152
|
| 277 |
+
},
|
| 278 |
+
{
|
| 279 |
+
"epoch": 2.039473684210526,
|
| 280 |
+
"grad_norm": 2.634004731790707,
|
| 281 |
+
"learning_rate": 1.4759473930370736e-06,
|
| 282 |
+
"loss": 0.480256462097168,
|
| 283 |
+
"step": 155,
|
| 284 |
+
"token_acc": 0.8391755048235364
|
| 285 |
+
},
|
| 286 |
+
{
|
| 287 |
+
"epoch": 2.1052631578947367,
|
| 288 |
+
"grad_norm": 2.458163907263063,
|
| 289 |
+
"learning_rate": 1.4350656864820732e-06,
|
| 290 |
+
"loss": 0.4241488456726074,
|
| 291 |
+
"step": 160,
|
| 292 |
+
"token_acc": 0.8546429349009362
|
| 293 |
+
},
|
| 294 |
+
{
|
| 295 |
+
"epoch": 2.1710526315789473,
|
| 296 |
+
"grad_norm": 3.0072031878594743,
|
| 297 |
+
"learning_rate": 1.393266353260583e-06,
|
| 298 |
+
"loss": 0.4239327430725098,
|
| 299 |
+
"step": 165,
|
| 300 |
+
"token_acc": 0.8566516285211268
|
| 301 |
+
},
|
| 302 |
+
{
|
| 303 |
+
"epoch": 2.236842105263158,
|
| 304 |
+
"grad_norm": 2.765452730417083,
|
| 305 |
+
"learning_rate": 1.3506375551927544e-06,
|
| 306 |
+
"loss": 0.4281306266784668,
|
| 307 |
+
"step": 170,
|
| 308 |
+
"token_acc": 0.8572701074141996
|
| 309 |
+
},
|
| 310 |
+
{
|
| 311 |
+
"epoch": 2.3026315789473686,
|
| 312 |
+
"grad_norm": 2.851903657831883,
|
| 313 |
+
"learning_rate": 1.3072692035795304e-06,
|
| 314 |
+
"loss": 0.4317302703857422,
|
| 315 |
+
"step": 175,
|
| 316 |
+
"token_acc": 0.8539255373738526
|
| 317 |
+
},
|
| 318 |
+
{
|
| 319 |
+
"epoch": 2.3684210526315788,
|
| 320 |
+
"grad_norm": 2.9069316927957245,
|
| 321 |
+
"learning_rate": 1.263252769564599e-06,
|
| 322 |
+
"loss": 0.4294229507446289,
|
| 323 |
+
"step": 180,
|
| 324 |
+
"token_acc": 0.853015476639856
|
| 325 |
+
},
|
| 326 |
+
{
|
| 327 |
+
"epoch": 2.4342105263157894,
|
| 328 |
+
"grad_norm": 2.766993468219373,
|
| 329 |
+
"learning_rate": 1.2186810912063758e-06,
|
| 330 |
+
"loss": 0.4209444046020508,
|
| 331 |
+
"step": 185,
|
| 332 |
+
"token_acc": 0.8580880008502497
|
| 333 |
+
},
|
| 334 |
+
{
|
| 335 |
+
"epoch": 2.5,
|
| 336 |
+
"grad_norm": 2.910279148073952,
|
| 337 |
+
"learning_rate": 1.1736481776669305e-06,
|
| 338 |
+
"loss": 0.4108576774597168,
|
| 339 |
+
"step": 190,
|
| 340 |
+
"token_acc": 0.8612809434526235
|
| 341 |
+
},
|
| 342 |
+
{
|
| 343 |
+
"epoch": 2.5657894736842106,
|
| 344 |
+
"grad_norm": 2.70203438317374,
|
| 345 |
+
"learning_rate": 1.1282490109308631e-06,
|
| 346 |
+
"loss": 0.42939186096191406,
|
| 347 |
+
"step": 195,
|
| 348 |
+
"token_acc": 0.8561881120597943
|
| 349 |
+
},
|
| 350 |
+
{
|
| 351 |
+
"epoch": 2.6315789473684212,
|
| 352 |
+
"grad_norm": 2.6721363594078316,
|
| 353 |
+
"learning_rate": 1.0825793454723324e-06,
|
| 354 |
+
"loss": 0.4140902042388916,
|
| 355 |
+
"step": 200,
|
| 356 |
+
"token_acc": 0.8595277649173453
|
| 357 |
+
},
|
| 358 |
+
{
|
| 359 |
+
"epoch": 2.6973684210526314,
|
| 360 |
+
"grad_norm": 2.685613098664299,
|
| 361 |
+
"learning_rate": 1.0367355062927725e-06,
|
| 362 |
+
"loss": 0.4143805503845215,
|
| 363 |
+
"step": 205,
|
| 364 |
+
"token_acc": 0.8593918191603875
|
| 365 |
+
},
|
| 366 |
+
{
|
| 367 |
+
"epoch": 2.763157894736842,
|
| 368 |
+
"grad_norm": 2.678658171344208,
|
| 369 |
+
"learning_rate": 9.908141857552737e-07,
|
| 370 |
+
"loss": 0.3974578380584717,
|
| 371 |
+
"step": 210,
|
| 372 |
+
"token_acc": 0.8657520695579737
|
| 373 |
+
},
|
| 374 |
+
{
|
| 375 |
+
"epoch": 2.8289473684210527,
|
| 376 |
+
"grad_norm": 2.8574376892263107,
|
| 377 |
+
"learning_rate": 9.449122396441343e-07,
|
| 378 |
+
"loss": 0.40328640937805177,
|
| 379 |
+
"step": 215,
|
| 380 |
+
"token_acc": 0.8625554895806825
|
| 381 |
+
},
|
| 382 |
+
{
|
| 383 |
+
"epoch": 2.8947368421052633,
|
| 384 |
+
"grad_norm": 2.6535340329998482,
|
| 385 |
+
"learning_rate": 8.991264828797318e-07,
|
| 386 |
+
"loss": 0.40736236572265627,
|
| 387 |
+
"step": 220,
|
| 388 |
+
"token_acc": 0.8601765750705256
|
| 389 |
+
},
|
| 390 |
+
{
|
| 391 |
+
"epoch": 2.9605263157894735,
|
| 392 |
+
"grad_norm": 2.7012120688258126,
|
| 393 |
+
"learning_rate": 8.535534853195784e-07,
|
| 394 |
+
"loss": 0.4065969467163086,
|
| 395 |
+
"step": 225,
|
| 396 |
+
"token_acc": 0.8599431898079141
|
| 397 |
+
},
|
| 398 |
+
{
|
| 399 |
+
"epoch": 3.0,
|
| 400 |
+
"eval_loss": 0.5999683141708374,
|
| 401 |
+
"eval_runtime": 14.9348,
|
| 402 |
+
"eval_samples_per_second": 8.972,
|
| 403 |
+
"eval_steps_per_second": 1.138,
|
| 404 |
+
"eval_token_acc": 0.8112173087623973,
|
| 405 |
+
"step": 228
|
| 406 |
+
}
|
| 407 |
+
],
|
| 408 |
+
"logging_steps": 5,
|
| 409 |
+
"max_steps": 380,
|
| 410 |
+
"num_input_tokens_seen": 0,
|
| 411 |
+
"num_train_epochs": 5,
|
| 412 |
+
"save_steps": 500,
|
| 413 |
+
"stateful_callbacks": {
|
| 414 |
+
"TrainerControl": {
|
| 415 |
+
"args": {
|
| 416 |
+
"should_epoch_stop": false,
|
| 417 |
+
"should_evaluate": false,
|
| 418 |
+
"should_log": false,
|
| 419 |
+
"should_save": true,
|
| 420 |
+
"should_training_stop": false
|
| 421 |
+
},
|
| 422 |
+
"attributes": {}
|
| 423 |
+
}
|
| 424 |
+
},
|
| 425 |
+
"total_flos": 1.6813882256601907e+17,
|
| 426 |
+
"train_batch_size": 2,
|
| 427 |
+
"trial_name": null,
|
| 428 |
+
"trial_params": null
|
| 429 |
+
}
|
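The block above is the tail of a standard Hugging Face Trainer `trainer_state.json` (the `log_history` list followed by trainer bookkeeping). As a quick way to inspect it, here is a minimal sketch that pulls the train-loss and eval metrics out of the file; the path is assumed from this repo's layout and may need adjusting:

import json

# Path assumed from this repository's layout; adjust if the file lives elsewhere.
path = "ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/trainer_state.json"
with open(path) as f:
    state = json.load(f)

# Training steps carry "loss"; end-of-epoch evaluations carry "eval_loss".
train_steps = [e for e in state["log_history"] if "loss" in e]
eval_steps = [e for e in state["log_history"] if "eval_loss" in e]

print(f"last train loss: {train_steps[-1]['loss']:.4f} at step {train_steps[-1]['step']}")
for e in eval_steps:
    print(f"epoch {e['epoch']:g}: eval_loss={e['eval_loss']:.4f}, "
          f"eval_token_acc={e['eval_token_acc']:.4f}")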
ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/vocab.json ADDED (diff too large to render)
ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/checkpoint-228/zero_to_fp32.py ADDED
@@ -0,0 +1,760 @@
#!/usr/bin/env python

# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

# This script extracts fp32 consolidated weights from ZeRO 1, 2 and 3 DeepSpeed checkpoints. It gets
# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
# application.
#
# example:
# python zero_to_fp32.py . output_dir/
# or
# python zero_to_fp32.py . output_dir/ --safe_serialization

import argparse
import torch
import glob
import math
import os
import re
import gc
import json
import numpy as np
from tqdm import tqdm
from collections import OrderedDict
from dataclasses import dataclass

# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
# DeepSpeed data structures it has to be available in the current python environment.
from deepspeed.utils import logger
from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
                                            FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
                                            FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)


@dataclass
class zero_model_state:
    buffers: dict()
    param_shapes: dict()
    shared_params: list
    ds_version: int
    frozen_param_shapes: dict()
    frozen_param_fragments: dict()


debug = 0

# load to cpu
device = torch.device('cpu')


def atoi(text):
    return int(text) if text.isdigit() else text


def natural_keys(text):
    '''
    alist.sort(key=natural_keys) sorts in human order
    http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    '''
    return [atoi(c) for c in re.split(r'(\d+)', text)]


def get_model_state_file(checkpoint_dir, zero_stage):
    if not os.path.isdir(checkpoint_dir):
        raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")

    # there should be only one file
    if zero_stage <= 2:
        file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
    elif zero_stage == 3:
        file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")

    if not os.path.exists(file):
        raise FileNotFoundError(f"can't find model states file at '{file}'")

    return file


def get_checkpoint_files(checkpoint_dir, glob_pattern):
    # XXX: need to test that this simple glob rule works for multi-node setup too
    ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)

    if len(ckpt_files) == 0:
        raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")

    return ckpt_files


def get_optim_files(checkpoint_dir):
    return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")


def get_model_state_files(checkpoint_dir):
    return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")


def parse_model_states(files):
    zero_model_states = []
    for file in files:
        state_dict = torch.load(file, map_location=device, weights_only=False)

        if BUFFER_NAMES not in state_dict:
            raise ValueError(f"{file} is not a model state checkpoint")
        buffer_names = state_dict[BUFFER_NAMES]
        if debug:
            print("Found buffers:", buffer_names)

        # recover just the buffers while restoring them to fp32 if they were saved in fp16
        buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
        param_shapes = state_dict[PARAM_SHAPES]

        # collect parameters that are included in param_shapes
        param_names = []
        for s in param_shapes:
            for name in s.keys():
                param_names.append(name)

        # update with frozen parameters
        frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
        if frozen_param_shapes is not None:
            if debug:
                print(f"Found frozen_param_shapes: {frozen_param_shapes}")
            param_names += list(frozen_param_shapes.keys())

        # handle shared params
        shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]

        ds_version = state_dict.get(DS_VERSION, None)

        frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)

        z_model_state = zero_model_state(buffers=buffers,
                                         param_shapes=param_shapes,
                                         shared_params=shared_params,
                                         ds_version=ds_version,
                                         frozen_param_shapes=frozen_param_shapes,
                                         frozen_param_fragments=frozen_param_fragments)
        zero_model_states.append(z_model_state)

    return zero_model_states


def parse_optim_states(files, ds_checkpoint_dir):
    total_files = len(files)
    state_dicts = []
    for f in tqdm(files, desc='Loading checkpoint shards'):
        state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False)
        # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
        # and also handle the case where it was already removed by another helper script
        state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
        state_dicts.append(state_dict)

    if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
        raise ValueError(f"{files[0]} is not a zero checkpoint")
    zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
    world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]

    # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
    # parameters can be different from data parallelism for non-expert parameters. So we can just
    # use the max of the partition_count to get the dp world_size.

    if type(world_size) is list:
        world_size = max(world_size)

    if world_size != total_files:
        raise ValueError(
            f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
            "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
        )

    # the groups are named differently in each stage
    if zero_stage <= 2:
        fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
    elif zero_stage == 3:
        fp32_groups_key = FP32_FLAT_GROUPS
    else:
        raise ValueError(f"unknown zero stage {zero_stage}")

    fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
    return zero_stage, world_size, fp32_flat_groups


def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
    """
    Returns fp32 state_dict reconstructed from ds checkpoint

    Args:
        - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)

    """
    print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")

    optim_files = get_optim_files(ds_checkpoint_dir)
    zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
    print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")

    model_files = get_model_state_files(ds_checkpoint_dir)

    zero_model_states = parse_model_states(model_files)
    print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')

    if zero_stage <= 2:
        return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)
    elif zero_stage == 3:
        return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)


def _zero2_merge_frozen_params(state_dict, zero_model_states):
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    frozen_param_fragments = zero_model_states[0].frozen_param_fragments

    if debug:
        num_elem = sum(s.numel() for s in frozen_param_shapes.values())
        print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        state_dict[name] = frozen_param_fragments[name]

        if debug:
            print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")


def _has_callable(obj, fn):
    attr = getattr(obj, fn, None)
    return callable(attr)


def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    param_shapes = zero_model_states[0].param_shapes

    # Reconstruction protocol:
    #
    # XXX: document this

    if debug:
        for i in range(world_size):
            for j in range(len(fp32_flat_groups[0])):
                print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")

    # XXX: memory usage doubles here (zero2)
    num_param_groups = len(fp32_flat_groups[0])
    merged_single_partition_of_fp32_groups = []
    for i in range(num_param_groups):
        merged_partitions = [sd[i] for sd in fp32_flat_groups]
        full_single_fp32_vector = torch.cat(merged_partitions, 0)
        merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
    avail_numel = sum(
        [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])

    if debug:
        wanted_params = sum([len(shapes) for shapes in param_shapes])
        wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
        # not asserting if there is a mismatch due to possible padding
        print(f"Have {avail_numel} numels to process.")
        print(f"Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    total_numel = 0
    total_params = 0
    for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
        offset = 0
        avail_numel = full_single_fp32_vector.numel()
        for name, shape in shapes.items():

            unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
            total_numel += unpartitioned_numel
            total_params += 1

            if debug:
                print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
            state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
            offset += unpartitioned_numel

        # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
        # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
        # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
        # live optimizer object, so we are checking that the numbers are within the right range
        align_to = 2 * world_size

        def zero2_align(x):
            return align_to * math.ceil(x / align_to)

        if debug:
            print(f"original offset={offset}, avail_numel={avail_numel}")

        offset = zero2_align(offset)
        avail_numel = zero2_align(avail_numel)

        if debug:
            print(f"aligned offset={offset}, avail_numel={avail_numel}")

        # Sanity check
        if offset != avail_numel:
            raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")


def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero2_merge_frozen_params(state_dict, zero_model_states)

    _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters
    for pair in zero_model_states[0].shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict


def zero3_partitioned_param_info(unpartitioned_numel, world_size):
    remainder = unpartitioned_numel % world_size
    padding_numel = (world_size - remainder) if remainder else 0
    partitioned_numel = math.ceil(unpartitioned_numel / world_size)
    return partitioned_numel, padding_numel


def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    if debug:
        for i in range(world_size):
            num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
            print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in zero_model_states[0].frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
        state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")


class GatheredTensor:
    """
    A pseudo tensor that collects partitioned weights.
    It is more memory efficient when there are multiple groups.
    """

    def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape):
        self.flat_groups = flat_groups
        self.flat_groups_offset = flat_groups_offset
        self.offset = offset
        self.partitioned_numel = partitioned_numel
        self.shape = shape
        self.dtype = self.flat_groups[0][0].dtype

    def contiguous(self):
        """
        Merge partitioned weights from flat_groups into a single tensor.
        """
        end_idx = self.offset + self.partitioned_numel
        world_size = len(self.flat_groups)
        pad_flat_param_chunks = []

        for rank_i in range(world_size):
            # for each rank, we need to collect weights from related group/groups
            flat_groups_at_rank_i = self.flat_groups[rank_i]
            start_group_id = None
            end_group_id = None
            for group_id in range(len(self.flat_groups_offset)):
                if self.flat_groups_offset[group_id] <= self.offset < self.flat_groups_offset[group_id + 1]:
                    start_group_id = group_id
                if self.flat_groups_offset[group_id] < end_idx <= self.flat_groups_offset[group_id + 1]:
                    end_group_id = group_id
                    break
            # collect weights from related group/groups
            for group_id in range(start_group_id, end_group_id + 1):
                flat_tensor = flat_groups_at_rank_i[group_id]
                start_offset = self.offset - self.flat_groups_offset[group_id]
                end_offset = min(end_idx, self.flat_groups_offset[group_id + 1]) - self.flat_groups_offset[group_id]
                pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset])

        # collect weights from all ranks
        pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0)
        param = pad_flat_param[:self.shape.numel()].view(self.shape).contiguous()
        return param


def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    param_shapes = zero_model_states[0].param_shapes
    avail_numel = sum([flat_group.numel() for flat_group in fp32_flat_groups[0]]) * world_size

    # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
    # param, re-consolidating each param, while dealing with padding if any

    # merge list of dicts, preserving order
    param_shapes = {k: v for d in param_shapes for k, v in d.items()}

    if debug:
        for i in range(world_size):
            print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")

    wanted_params = len(param_shapes)
    wanted_numel = sum(shape.numel() for shape in param_shapes.values())
    # not asserting if there is a mismatch due to possible padding
    avail_numel = fp32_flat_groups[0].numel() * world_size
    print(f"Trainable params: Have {avail_numel} numels to process.")
    print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    offset = 0
    total_numel = 0
    total_params = 0
    flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]]))
    for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'):
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel
        total_params += 1
        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

        # memory efficient tensor
        tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape)
        state_dict[name] = tensor
        offset += partitioned_numel

    offset *= world_size

    # Sanity check
    if offset != avail_numel:
        raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")


def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)

    _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters
    for pair in zero_model_states[0].shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict


def to_torch_tensor(state_dict, return_empty_tensor=False):
    """
    Convert state_dict of GatheredTensor to torch tensor
    """
    torch_state_dict = {}
    converted_tensors = {}
    for name, tensor in state_dict.items():
        tensor_id = id(tensor)
        if tensor_id in converted_tensors:  # shared tensors
            shared_tensor = torch_state_dict[converted_tensors[tensor_id]]
            torch_state_dict[name] = shared_tensor
        else:
            converted_tensors[tensor_id] = name
            if return_empty_tensor:
                torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype)
            else:
                torch_state_dict[name] = tensor.contiguous()
    return torch_state_dict


def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
                                             tag=None,
                                             exclude_frozen_parameters=False,
                                             lazy_mode=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
    ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
    via a model hub.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters
        - ``lazy_mode``: get state_dict in lazy mode. It returns a dict of pseudo tensor instead of torch tensor, which is more memory efficient.
          Convert the pseudo tensor to torch tensor by ``.contiguous()``

    Returns:
        - pytorch ``state_dict``

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
        # do the training and checkpoint saving
        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
        model = model.cpu() # move to cpu
        model.load_state_dict(state_dict)
        # submit to model hub or save the model to share with others

    In this example the ``model`` will no longer be usable in the deepspeed context of the same
    application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.

    Note: the above usage may not work if your application doesn't have sufficient free CPU memory.
    You may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
    the checkpoint. Or you can load state_dict in lazy mode ::

        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True) # not on cpu
        for name, lazy_tensor in state_dict.items():
            tensor = lazy_tensor.contiguous()  # to cpu
            print(name, tensor)
            # del tensor to release memory if it no longer in use
    """
    if tag is None:
        latest_path = os.path.join(checkpoint_dir, 'latest')
        if os.path.isfile(latest_path):
            with open(latest_path, 'r') as fd:
                tag = fd.read().strip()
        else:
            raise ValueError(f"Unable to find 'latest' file at {latest_path}")

    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)

    if not os.path.isdir(ds_checkpoint_dir):
        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")

    state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
    if lazy_mode:
        return state_dict
    else:
        return to_torch_tensor(state_dict)


def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
                                               output_dir,
                                               max_shard_size="5GB",
                                               safe_serialization=False,
                                               tag=None,
                                               exclude_frozen_parameters=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
    loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``output_dir``: directory to the pytorch fp32 state_dict output files
        - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
        - ``safe_serialization``: whether to save the model using ``safetensors`` or the traditional PyTorch way (that uses ``pickle``).
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters
    """

    # Dependency pre-check
    if safe_serialization:
        try:
            from safetensors.torch import save_file
        except ImportError:
            print('If you want to use `safe_serialization`, please `pip install safetensors`')
            raise
    if max_shard_size is not None:
        try:
            from huggingface_hub import split_torch_state_dict_into_shards
        except ImportError:
            print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
            raise

    # Convert zero checkpoint to state_dict
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
                                                          tag,
                                                          exclude_frozen_parameters,
                                                          lazy_mode=True)

    # Shard the model if it is too big.
    weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
    if max_shard_size is not None:
        filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
        # a memory-efficient approach for sharding
        empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True)
        state_dict_split = split_torch_state_dict_into_shards(empty_state_dict,
                                                              filename_pattern=filename_pattern,
                                                              max_shard_size=max_shard_size)
    else:
        from collections import namedtuple
        StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
        state_dict_split = StateDictSplit(is_sharded=False,
                                          filename_to_tensors={weights_name: list(state_dict.keys())})

    # Save the model by shard
    os.makedirs(output_dir, exist_ok=True)
    filename_to_tensors = state_dict_split.filename_to_tensors.items()
    for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
        shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors}
        shard_state_dict = to_torch_tensor(shard_state_dict)
        output_path = os.path.join(output_dir, shard_file)
        if safe_serialization:
            save_file(shard_state_dict, output_path, metadata={"format": "pt"})
        else:
            torch.save(shard_state_dict, output_path)
        # release the memory of current shard
        for tensor_name in list(shard_state_dict.keys()):
            del state_dict[tensor_name]
            del shard_state_dict[tensor_name]
        del shard_state_dict
        gc.collect()

    # Save index if sharded
    if state_dict_split.is_sharded:
        index = {
            "metadata": state_dict_split.metadata,
            "weight_map": state_dict_split.tensor_to_filename,
        }
        save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
        save_index_file = os.path.join(output_dir, save_index_file)
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)


def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
    """
    1. Put the provided model to cpu
    2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
    3. Load it into the provided model

    Args:
        - ``model``: the model object to update
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``

    Returns:
        - ``model``: modified model

    Make sure you have plenty of CPU memory available before you call this function. If you don't
    have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
    conveniently placed for you in the checkpoint folder.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
        model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
        # submit to model hub or save the model to share with others

    Note that once this was run, the ``model`` will no longer be usable in the deepspeed context
    of the same application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    """
    logger.info("Extracting fp32 weights")
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)

    logger.info("Overwriting model with fp32 weights")
    model = model.cpu()
    model.load_state_dict(state_dict, strict=False)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("checkpoint_dir",
                        type=str,
                        help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
    parser.add_argument("output_dir",
                        type=str,
                        help="directory to the pytorch fp32 state_dict output files "
                        "(e.g. path/checkpoint-12-output/)")
    parser.add_argument(
        "--max_shard_size",
        type=str,
        default="5GB",
        help="The maximum size for a checkpoint before being sharded. Checkpoint shards will then each be of a size "
        "lower than this size. If expressed as a string, it needs to be digits followed by a unit (like `5MB`). "
        "We default it to 5GB in order for models to be able to run easily on free-tier google colab instances "
        "without CPU OOM issues.")
    parser.add_argument(
        "--safe_serialization",
        default=False,
        action='store_true',
        help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
    parser.add_argument("-t",
                        "--tag",
                        type=str,
                        default=None,
                        help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
    parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
    parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
    args = parser.parse_args()

    debug = args.debug

    convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
                                               args.output_dir,
                                               max_shard_size=args.max_shard_size,
                                               safe_serialization=args.safe_serialization,
                                               tag=args.tag,
                                               exclude_frozen_parameters=args.exclude_frozen_parameters)
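The script above is the stock DeepSpeed `zero_to_fp32.py` that gets copied into every checkpoint directory. Besides the CLI invocation shown in its header, the same conversion can be driven programmatically through `convert_zero_checkpoint_to_fp32_state_dict`; a minimal sketch follows, where the paths are illustrative and the checkpoint directory is assumed to contain the `latest` tag file next to the sharded `*_optim_states.pt`:

# Sketch: run the conversion from Python instead of the CLI.
# "checkpoint-228" and the output directory are illustrative paths.
from zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict

convert_zero_checkpoint_to_fp32_state_dict(
    "checkpoint-228",          # folder containing 'latest' and the tag subfolder
    "checkpoint-228-fp32/",    # where the consolidated fp32 weights are written
    max_shard_size="5GB",      # shard ceiling, forwarded to huggingface_hub
    safe_serialization=True,   # emit .safetensors plus model.safetensors.index.json
)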
ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/logging.jsonl ADDED
@@ -0,0 +1,82 @@
| 1 |
+
{"loss": 1.2054956, "grad_norm": 7.44398074, "learning_rate": 5e-08, "token_acc": 0.66501241, "epoch": 0.01315789, "global_step/max_steps": "1/380", "percentage": "0.26%", "elapsed_time": "13s", "remaining_time": "1h 23m 7s", "memory(GiB)": 48.39, "train_speed(iter/s)": 0.075998}
|
| 2 |
+
{"loss": 1.18465626, "grad_norm": 6.99485848, "learning_rate": 2.6e-07, "token_acc": 0.67738, "epoch": 0.06578947, "global_step/max_steps": "5/380", "percentage": "1.32%", "elapsed_time": "43s", "remaining_time": "54m 31s", "memory(GiB)": 74.81, "train_speed(iter/s)": 0.114616}
|
| 3 |
+
{"loss": 1.1630187, "grad_norm": 6.88294679, "learning_rate": 5.3e-07, "token_acc": 0.67270649, "epoch": 0.13157895, "global_step/max_steps": "10/380", "percentage": "2.63%", "elapsed_time": "1m 15s", "remaining_time": "46m 24s", "memory(GiB)": 74.81, "train_speed(iter/s)": 0.132869}
|
| 4 |
+
{"loss": 1.10651655, "grad_norm": 6.56262503, "learning_rate": 7.9e-07, "token_acc": 0.68606473, "epoch": 0.19736842, "global_step/max_steps": "15/380", "percentage": "3.95%", "elapsed_time": "1m 49s", "remaining_time": "44m 31s", "memory(GiB)": 74.81, "train_speed(iter/s)": 0.13664}
|
| 5 |
+
{"loss": 1.05180273, "grad_norm": 4.34989877, "learning_rate": 1.05e-06, "token_acc": 0.69436586, "epoch": 0.26315789, "global_step/max_steps": "20/380", "percentage": "5.26%", "elapsed_time": "2m 22s", "remaining_time": "42m 40s", "memory(GiB)": 74.81, "train_speed(iter/s)": 0.140586}
|
| 6 |
+
{"loss": 0.92817192, "grad_norm": 3.91163163, "learning_rate": 1.32e-06, "token_acc": 0.72474008, "epoch": 0.32894737, "global_step/max_steps": "25/380", "percentage": "6.58%", "elapsed_time": "2m 54s", "remaining_time": "41m 16s", "memory(GiB)": 74.81, "train_speed(iter/s)": 0.143333}
|
| 7 |
+
{"loss": 0.82044392, "grad_norm": 4.17202215, "learning_rate": 1.58e-06, "token_acc": 0.75464546, "epoch": 0.39473684, "global_step/max_steps": "30/380", "percentage": "7.89%", "elapsed_time": "3m 28s", "remaining_time": "40m 28s", "memory(GiB)": 74.81, "train_speed(iter/s)": 0.144127}
|
| 8 |
+
{"loss": 0.77250299, "grad_norm": 3.08980622, "learning_rate": 1.84e-06, "token_acc": 0.76057017, "epoch": 0.46052632, "global_step/max_steps": "35/380", "percentage": "9.21%", "elapsed_time": "4m 2s", "remaining_time": "39m 49s", "memory(GiB)": 74.81, "train_speed(iter/s)": 0.144385}
{"loss": 0.73685861, "grad_norm": 3.57330769, "learning_rate": 2e-06, "token_acc": 0.77234126, "epoch": 0.52631579, "global_step/max_steps": "40/380", "percentage": "10.53%", "elapsed_time": "4m 36s", "remaining_time": "39m 6s", "memory(GiB)": 76.97, "train_speed(iter/s)": 0.144868}
{"loss": 0.70914588, "grad_norm": 3.00527963, "learning_rate": 2e-06, "token_acc": 0.77579774, "epoch": 0.59210526, "global_step/max_steps": "45/380", "percentage": "11.84%", "elapsed_time": "5m 7s", "remaining_time": "38m 10s", "memory(GiB)": 76.97, "train_speed(iter/s)": 0.146257}
{"loss": 0.68160334, "grad_norm": 2.65479063, "learning_rate": 1.99e-06, "token_acc": 0.78457868, "epoch": 0.65789474, "global_step/max_steps": "50/380", "percentage": "13.16%", "elapsed_time": "5m 37s", "remaining_time": "37m 6s", "memory(GiB)": 76.97, "train_speed(iter/s)": 0.14823}
{"loss": 0.64582319, "grad_norm": 2.63971591, "learning_rate": 1.99e-06, "token_acc": 0.79379981, "epoch": 0.72368421, "global_step/max_steps": "55/380", "percentage": "14.47%", "elapsed_time": "6m 9s", "remaining_time": "36m 22s", "memory(GiB)": 76.97, "train_speed(iter/s)": 0.14893}
{"loss": 0.64523239, "grad_norm": 2.85482362, "learning_rate": 1.98e-06, "token_acc": 0.79411205, "epoch": 0.78947368, "global_step/max_steps": "60/380", "percentage": "15.79%", "elapsed_time": "6m 39s", "remaining_time": "35m 33s", "memory(GiB)": 76.97, "train_speed(iter/s)": 0.150011}
{"loss": 0.66067114, "grad_norm": 2.89350093, "learning_rate": 1.97e-06, "token_acc": 0.78896789, "epoch": 0.85526316, "global_step/max_steps": "65/380", "percentage": "17.11%", "elapsed_time": "7m 11s", "remaining_time": "34m 50s", "memory(GiB)": 76.97, "train_speed(iter/s)": 0.150698}
{"loss": 0.6494267, "grad_norm": 2.78378816, "learning_rate": 1.96e-06, "token_acc": 0.79045196, "epoch": 0.92105263, "global_step/max_steps": "70/380", "percentage": "18.42%", "elapsed_time": "7m 41s", "remaining_time": "34m 4s", "memory(GiB)": 76.97, "train_speed(iter/s)": 0.151656}
{"loss": 0.64364486, "grad_norm": 2.79880995, "learning_rate": 1.94e-06, "token_acc": 0.79357107, "epoch": 0.98684211, "global_step/max_steps": "75/380", "percentage": "19.74%", "elapsed_time": "8m 12s", "remaining_time": "33m 22s", "memory(GiB)": 76.97, "train_speed(iter/s)": 0.152293}
{"eval_loss": 0.616813, "eval_runtime": 14.5426, "eval_samples_per_second": 9.214, "eval_steps_per_second": 1.169, "eval_token_acc": 0.80259239, "epoch": 1.0, "global_step/max_steps": "76/380", "percentage": "20.00%", "elapsed_time": "8m 32s", "remaining_time": "34m 8s", "memory(GiB)": 76.97, "train_speed(iter/s)": 0.148396}
{"loss": 0.57345514, "grad_norm": 2.8436763, "learning_rate": 1.93e-06, "token_acc": 0.8115053, "epoch": 1.05263158, "global_step/max_steps": "80/380", "percentage": "21.05%", "elapsed_time": "10m 28s", "remaining_time": "39m 16s", "memory(GiB)": 76.97, "train_speed(iter/s)": 0.127324}
{"loss": 0.54538131, "grad_norm": 2.71717831, "learning_rate": 1.91e-06, "token_acc": 0.81794795, "epoch": 1.11842105, "global_step/max_steps": "85/380", "percentage": "22.37%", "elapsed_time": "10m 59s", "remaining_time": "38m 10s", "memory(GiB)": 76.97, "train_speed(iter/s)": 0.128807}
{"loss": 0.54944105, "grad_norm": 3.04744118, "learning_rate": 1.89e-06, "token_acc": 0.82092924, "epoch": 1.18421053, "global_step/max_steps": "90/380", "percentage": "23.68%", "elapsed_time": "11m 35s", "remaining_time": "37m 20s", "memory(GiB)": 76.97, "train_speed(iter/s)": 0.129454}
{"loss": 0.53823109, "grad_norm": 2.62641942, "learning_rate": 1.87e-06, "token_acc": 0.82090943, "epoch": 1.25, "global_step/max_steps": "95/380", "percentage": "25.00%", "elapsed_time": "12m 7s", "remaining_time": "36m 21s", "memory(GiB)": 76.97, "train_speed(iter/s)": 0.13065}
{"loss": 0.55361595, "grad_norm": 2.72909405, "learning_rate": 1.84e-06, "token_acc": 0.81733602, "epoch": 1.31578947, "global_step/max_steps": "100/380", "percentage": "26.32%", "elapsed_time": "12m 38s", "remaining_time": "35m 23s", "memory(GiB)": 76.97, "train_speed(iter/s)": 0.131865}
{"loss": 0.5331531, "grad_norm": 2.76724049, "learning_rate": 1.82e-06, "token_acc": 0.82397644, "epoch": 1.38157895, "global_step/max_steps": "105/380", "percentage": "27.63%", "elapsed_time": "13m 10s", "remaining_time": "34m 29s", "memory(GiB)": 76.97, "train_speed(iter/s)": 0.132881}
{"loss": 0.53187194, "grad_norm": 2.73887619, "learning_rate": 1.79e-06, "token_acc": 0.82264057, "epoch": 1.44736842, "global_step/max_steps": "110/380", "percentage": "28.95%", "elapsed_time": "13m 41s", "remaining_time": "33m 36s", "memory(GiB)": 76.97, "train_speed(iter/s)": 0.133877}
{"loss": 0.51434765, "grad_norm": 2.54186448, "learning_rate": 1.76e-06, "token_acc": 0.82807212, "epoch": 1.51315789, "global_step/max_steps": "115/380", "percentage": "30.26%", "elapsed_time": "14m 14s", "remaining_time": "32m 48s", "memory(GiB)": 76.97, "train_speed(iter/s)": 0.134596}
{"loss": 0.52869568, "grad_norm": 2.75446568, "learning_rate": 1.73e-06, "token_acc": 0.82352778, "epoch": 1.57894737, "global_step/max_steps": "120/380", "percentage": "31.58%", "elapsed_time": "14m 44s", "remaining_time": "31m 56s", "memory(GiB)": 76.97, "train_speed(iter/s)": 0.135635}
{"loss": 0.53881702, "grad_norm": 2.81852678, "learning_rate": 1.7e-06, "token_acc": 0.81733953, "epoch": 1.64473684, "global_step/max_steps": "125/380", "percentage": "32.89%", "elapsed_time": "15m 15s", "remaining_time": "31m 8s", "memory(GiB)": 76.97, "train_speed(iter/s)": 0.136503}
{"loss": 0.52753901, "grad_norm": 2.57030985, "learning_rate": 1.66e-06, "token_acc": 0.82564791, "epoch": 1.71052632, "global_step/max_steps": "130/380", "percentage": "34.21%", "elapsed_time": "15m 48s", "remaining_time": "30m 24s", "memory(GiB)": 76.97, "train_speed(iter/s)": 0.137018}
{"loss": 0.52426052, "grad_norm": 2.64228783, "learning_rate": 1.63e-06, "token_acc": 0.82591674, "epoch": 1.77631579, "global_step/max_steps": "135/380", "percentage": "35.53%", "elapsed_time": "16m 19s", "remaining_time": "29m 37s", "memory(GiB)": 76.97, "train_speed(iter/s)": 0.137812}
{"loss": 0.53264208, "grad_norm": 2.81934902, "learning_rate": 1.59e-06, "token_acc": 0.82071431, "epoch": 1.84210526, "global_step/max_steps": "140/380", "percentage": "36.84%", "elapsed_time": "16m 50s", "remaining_time": "28m 52s", "memory(GiB)": 76.97, "train_speed(iter/s)": 0.138497}
{"loss": 0.54039316, "grad_norm": 2.82923562, "learning_rate": 1.55e-06, "token_acc": 0.82191971, "epoch": 1.90789474, "global_step/max_steps": "145/380", "percentage": "38.16%", "elapsed_time": "17m 22s", "remaining_time": "28m 9s", "memory(GiB)": 76.97, "train_speed(iter/s)": 0.139098}
{"loss": 0.52914052, "grad_norm": 2.52448152, "learning_rate": 1.52e-06, "token_acc": 0.82558171, "epoch": 1.97368421, "global_step/max_steps": "150/380", "percentage": "39.47%", "elapsed_time": "17m 53s", "remaining_time": "27m 25s", "memory(GiB)": 76.97, "train_speed(iter/s)": 0.139766}
{"eval_loss": 0.58733511, "eval_runtime": 15.0089, "eval_samples_per_second": 8.928, "eval_steps_per_second": 1.133, "eval_token_acc": 0.81015351, "epoch": 2.0, "global_step/max_steps": "152/380", "percentage": "40.00%", "elapsed_time": "18m 19s", "remaining_time": "27m 28s", "memory(GiB)": 76.97, "train_speed(iter/s)": 0.13829}
{"loss": 0.48025646, "grad_norm": 2.63400473, "learning_rate": 1.48e-06, "token_acc": 0.8391755, "epoch": 2.03947368, "global_step/max_steps": "155/380", "percentage": "40.79%", "elapsed_time": "20m 16s", "remaining_time": "29m 25s", "memory(GiB)": 76.97, "train_speed(iter/s)": 0.127426}
{"loss": 0.42414885, "grad_norm": 2.45816391, "learning_rate": 1.44e-06, "token_acc": 0.85464293, "epoch": 2.10526316, "global_step/max_steps": "160/380", "percentage": "42.11%", "elapsed_time": "20m 50s", "remaining_time": "28m 39s", "memory(GiB)": 76.97, "train_speed(iter/s)": 0.127973}
{"loss": 0.42393274, "grad_norm": 3.00720319, "learning_rate": 1.39e-06, "token_acc": 0.85665163, "epoch": 2.17105263, "global_step/max_steps": "165/380", "percentage": "43.42%", "elapsed_time": "21m 22s", "remaining_time": "27m 50s", "memory(GiB)": 76.97, "train_speed(iter/s)": 0.128692}
{"loss": 0.42813063, "grad_norm": 2.76545273, "learning_rate": 1.35e-06, "token_acc": 0.85727011, "epoch": 2.23684211, "global_step/max_steps": "170/380", "percentage": "44.74%", "elapsed_time": "21m 54s", "remaining_time": "27m 3s", "memory(GiB)": 76.97, "train_speed(iter/s)": 0.129335}
{"loss": 0.43173027, "grad_norm": 2.85190366, "learning_rate": 1.31e-06, "token_acc": 0.85392554, "epoch": 2.30263158, "global_step/max_steps": "175/380", "percentage": "46.05%", "elapsed_time": "22m 26s", "remaining_time": "26m 17s", "memory(GiB)": 76.97, "train_speed(iter/s)": 0.129973}
{"loss": 0.42942295, "grad_norm": 2.90693169, "learning_rate": 1.26e-06, "token_acc": 0.85301548, "epoch": 2.36842105, "global_step/max_steps": "180/380", "percentage": "47.37%", "elapsed_time": "22m 57s", "remaining_time": "25m 30s", "memory(GiB)": 76.97, "train_speed(iter/s)": 0.130671}
{"loss": 0.4209444, "grad_norm": 2.76699347, "learning_rate": 1.22e-06, "token_acc": 0.858088, "epoch": 2.43421053, "global_step/max_steps": "185/380", "percentage": "48.68%", "elapsed_time": "23m 29s", "remaining_time": "24m 46s", "memory(GiB)": 76.97, "train_speed(iter/s)": 0.131218}
{"loss": 0.41085768, "grad_norm": 2.91027915, "learning_rate": 1.17e-06, "token_acc": 0.86128094, "epoch": 2.5, "global_step/max_steps": "190/380", "percentage": "50.00%", "elapsed_time": "24m 2s", "remaining_time": "24m 2s", "memory(GiB)": 76.97, "train_speed(iter/s)": 0.131735}
{"loss": 0.42939186, "grad_norm": 2.70203438, "learning_rate": 1.13e-06, "token_acc": 0.85618811, "epoch": 2.56578947, "global_step/max_steps": "195/380", "percentage": "51.32%", "elapsed_time": "24m 32s", "remaining_time": "23m 16s", "memory(GiB)": 76.97, "train_speed(iter/s)": 0.132443}
{"loss": 0.4140902, "grad_norm": 2.67213636, "learning_rate": 1.08e-06, "token_acc": 0.85952776, "epoch": 2.63157895, "global_step/max_steps": "200/380", "percentage": "52.63%", "elapsed_time": "25m 4s", "remaining_time": "22m 33s", "memory(GiB)": 76.97, "train_speed(iter/s)": 0.132957}
{"loss": 0.41438055, "grad_norm": 2.6856131, "learning_rate": 1.04e-06, "token_acc": 0.85939182, "epoch": 2.69736842, "global_step/max_steps": "205/380", "percentage": "53.95%", "elapsed_time": "25m 35s", "remaining_time": "21m 50s", "memory(GiB)": 76.97, "train_speed(iter/s)": 0.1335}
{"loss": 0.39745784, "grad_norm": 2.67865817, "learning_rate": 9.9e-07, "token_acc": 0.86575207, "epoch": 2.76315789, "global_step/max_steps": "210/380", "percentage": "55.26%", "elapsed_time": "26m 8s", "remaining_time": "21m 9s", "memory(GiB)": 76.97, "train_speed(iter/s)": 0.133884}
{"loss": 0.40328641, "grad_norm": 2.85743769, "learning_rate": 9.4e-07, "token_acc": 0.86255549, "epoch": 2.82894737, "global_step/max_steps": "215/380", "percentage": "56.58%", "elapsed_time": "26m 39s", "remaining_time": "20m 27s", "memory(GiB)": 76.97, "train_speed(iter/s)": 0.134413}
{"loss": 0.40736237, "grad_norm": 2.65353403, "learning_rate": 9e-07, "token_acc": 0.86017658, "epoch": 2.89473684, "global_step/max_steps": "220/380", "percentage": "57.89%", "elapsed_time": "27m 11s", "remaining_time": "19m 46s", "memory(GiB)": 77.29, "train_speed(iter/s)": 0.134867}
{"loss": 0.40659695, "grad_norm": 2.70121207, "learning_rate": 8.5e-07, "token_acc": 0.85994319, "epoch": 2.96052632, "global_step/max_steps": "225/380", "percentage": "59.21%", "elapsed_time": "27m 42s", "remaining_time": "19m 5s", "memory(GiB)": 77.29, "train_speed(iter/s)": 0.135358}
{"eval_loss": 0.59996831, "eval_runtime": 14.9348, "eval_samples_per_second": 8.972, "eval_steps_per_second": 1.138, "eval_token_acc": 0.81121731, "epoch": 3.0, "global_step/max_steps": "228/380", "percentage": "60.00%", "elapsed_time": "28m 13s", "remaining_time": "18m 49s", "memory(GiB)": 77.29, "train_speed(iter/s)": 0.134632}
{"loss": 0.37590837, "grad_norm": 2.34303951, "learning_rate": 8.1e-07, "token_acc": 0.87325984, "epoch": 3.02631579, "global_step/max_steps": "230/380", "percentage": "60.53%", "elapsed_time": "29m 59s", "remaining_time": "19m 33s", "memory(GiB)": 77.29, "train_speed(iter/s)": 0.127781}
{"loss": 0.34411049, "grad_norm": 2.64090227, "learning_rate": 7.6e-07, "token_acc": 0.88515155, "epoch": 3.09210526, "global_step/max_steps": "235/380", "percentage": "61.84%", "elapsed_time": "30m 34s", "remaining_time": "18m 52s", "memory(GiB)": 77.29, "train_speed(iter/s)": 0.128083}
{"loss": 0.33349562, "grad_norm": 2.73673123, "learning_rate": 7.2e-07, "token_acc": 0.88579021, "epoch": 3.15789474, "global_step/max_steps": "240/380", "percentage": "63.16%", "elapsed_time": "31m 6s", "remaining_time": "18m 8s", "memory(GiB)": 77.29, "train_speed(iter/s)": 0.128578}
{"loss": 0.33648844, "grad_norm": 2.79852526, "learning_rate": 6.8e-07, "token_acc": 0.88734995, "epoch": 3.22368421, "global_step/max_steps": "245/380", "percentage": "64.47%", "elapsed_time": "31m 38s", "remaining_time": "17m 26s", "memory(GiB)": 77.29, "train_speed(iter/s)": 0.129036}
{"loss": 0.32933283, "grad_norm": 2.83857994, "learning_rate": 6.3e-07, "token_acc": 0.88551507, "epoch": 3.28947368, "global_step/max_steps": "250/380", "percentage": "65.79%", "elapsed_time": "32m 12s", "remaining_time": "16m 44s", "memory(GiB)": 77.29, "train_speed(iter/s)": 0.129356}
{"loss": 0.33368144, "grad_norm": 2.91740683, "learning_rate": 5.9e-07, "token_acc": 0.88633695, "epoch": 3.35526316, "global_step/max_steps": "255/380", "percentage": "67.11%", "elapsed_time": "32m 46s", "remaining_time": "16m 3s", "memory(GiB)": 77.29, "train_speed(iter/s)": 0.12967}
{"loss": 0.31928144, "grad_norm": 2.71597386, "learning_rate": 5.5e-07, "token_acc": 0.89106513, "epoch": 3.42105263, "global_step/max_steps": "260/380", "percentage": "68.42%", "elapsed_time": "33m 19s", "remaining_time": "15m 22s", "memory(GiB)": 77.29, "train_speed(iter/s)": 0.130035}
{"loss": 0.32702599, "grad_norm": 2.63773504, "learning_rate": 5.1e-07, "token_acc": 0.8884918, "epoch": 3.48684211, "global_step/max_steps": "265/380", "percentage": "69.74%", "elapsed_time": "33m 50s", "remaining_time": "14m 41s", "memory(GiB)": 77.29, "train_speed(iter/s)": 0.130517}
{"loss": 0.33068037, "grad_norm": 2.77826519, "learning_rate": 4.7e-07, "token_acc": 0.88732108, "epoch": 3.55263158, "global_step/max_steps": "270/380", "percentage": "71.05%", "elapsed_time": "34m 21s", "remaining_time": "13m 59s", "memory(GiB)": 77.29, "train_speed(iter/s)": 0.130953}
{"loss": 0.34255319, "grad_norm": 2.82606344, "learning_rate": 4.3e-07, "token_acc": 0.88418658, "epoch": 3.61842105, "global_step/max_steps": "275/380", "percentage": "72.37%", "elapsed_time": "34m 54s", "remaining_time": "13m 19s", "memory(GiB)": 77.29, "train_speed(iter/s)": 0.131291}
{"loss": 0.33419976, "grad_norm": 2.7130524, "learning_rate": 3.9e-07, "token_acc": 0.88693176, "epoch": 3.68421053, "global_step/max_steps": "280/380", "percentage": "73.68%", "elapsed_time": "35m 25s", "remaining_time": "12m 39s", "memory(GiB)": 77.29, "train_speed(iter/s)": 0.131751}
{"loss": 0.33861413, "grad_norm": 2.86742049, "learning_rate": 3.6e-07, "token_acc": 0.8844872, "epoch": 3.75, "global_step/max_steps": "285/380", "percentage": "75.00%", "elapsed_time": "35m 56s", "remaining_time": "11m 58s", "memory(GiB)": 77.29, "train_speed(iter/s)": 0.13216}
{"loss": 0.34265239, "grad_norm": 2.61177458, "learning_rate": 3.2e-07, "token_acc": 0.88458715, "epoch": 3.81578947, "global_step/max_steps": "290/380", "percentage": "76.32%", "elapsed_time": "36m 28s", "remaining_time": "11m 19s", "memory(GiB)": 77.29, "train_speed(iter/s)": 0.132536}
{"loss": 0.32698245, "grad_norm": 2.78414977, "learning_rate": 2.9e-07, "token_acc": 0.88858972, "epoch": 3.88157895, "global_step/max_steps": "295/380", "percentage": "77.63%", "elapsed_time": "36m 59s", "remaining_time": "10m 39s", "memory(GiB)": 77.29, "train_speed(iter/s)": 0.132938}
{"loss": 0.33692307, "grad_norm": 2.49466861, "learning_rate": 2.6e-07, "token_acc": 0.88669291, "epoch": 3.94736842, "global_step/max_steps": "300/380", "percentage": "78.95%", "elapsed_time": "37m 29s", "remaining_time": "9m 59s", "memory(GiB)": 77.29, "train_speed(iter/s)": 0.133335}
{"eval_loss": 0.62609965, "eval_runtime": 15.7575, "eval_samples_per_second": 8.504, "eval_steps_per_second": 1.079, "eval_token_acc": 0.81034991, "epoch": 4.0, "global_step/max_steps": "304/380", "percentage": "80.00%", "elapsed_time": "38m 12s", "remaining_time": "9m 33s", "memory(GiB)": 77.29, "train_speed(iter/s)": 0.132633}
{"loss": 0.33888826, "grad_norm": 2.44170868, "learning_rate": 2.3e-07, "token_acc": 0.88497406, "epoch": 4.01315789, "global_step/max_steps": "305/380", "percentage": "80.26%", "elapsed_time": "39m 49s", "remaining_time": "9m 47s", "memory(GiB)": 77.29, "train_speed(iter/s)": 0.127645}
{"loss": 0.30261137, "grad_norm": 2.3470474, "learning_rate": 2e-07, "token_acc": 0.89888967, "epoch": 4.07894737, "global_step/max_steps": "310/380", "percentage": "81.58%", "elapsed_time": "40m 25s", "remaining_time": "9m 7s", "memory(GiB)": 77.29, "train_speed(iter/s)": 0.12782}
{"loss": 0.29790361, "grad_norm": 2.54135867, "learning_rate": 1.7e-07, "token_acc": 0.89942274, "epoch": 4.14473684, "global_step/max_steps": "315/380", "percentage": "82.89%", "elapsed_time": "41m 2s", "remaining_time": "8m 28s", "memory(GiB)": 77.29, "train_speed(iter/s)": 0.127939}
{"loss": 0.29341311, "grad_norm": 2.71093939, "learning_rate": 1.5e-07, "token_acc": 0.90098437, "epoch": 4.21052632, "global_step/max_steps": "320/380", "percentage": "84.21%", "elapsed_time": "41m 35s", "remaining_time": "7m 47s", "memory(GiB)": 77.29, "train_speed(iter/s)": 0.128234}
{"loss": 0.28581276, "grad_norm": 2.40393774, "learning_rate": 1.2e-07, "token_acc": 0.90298033, "epoch": 4.27631579, "global_step/max_steps": "325/380", "percentage": "85.53%", "elapsed_time": "42m 6s", "remaining_time": "7m 7s", "memory(GiB)": 77.29, "train_speed(iter/s)": 0.12862}
{"loss": 0.29638333, "grad_norm": 2.73543103, "learning_rate": 1e-07, "token_acc": 0.90127643, "epoch": 4.34210526, "global_step/max_steps": "330/380", "percentage": "86.84%", "elapsed_time": "42m 38s", "remaining_time": "6m 27s", "memory(GiB)": 77.29, "train_speed(iter/s)": 0.128998}
{"loss": 0.29314277, "grad_norm": 2.50176047, "learning_rate": 8e-08, "token_acc": 0.90104935, "epoch": 4.40789474, "global_step/max_steps": "335/380", "percentage": "88.16%", "elapsed_time": "43m 11s", "remaining_time": "5m 48s", "memory(GiB)": 77.29, "train_speed(iter/s)": 0.129292}
{"loss": 0.29729128, "grad_norm": 2.60715813, "learning_rate": 7e-08, "token_acc": 0.89942937, "epoch": 4.47368421, "global_step/max_steps": "340/380", "percentage": "89.47%", "elapsed_time": "43m 41s", "remaining_time": "5m 8s", "memory(GiB)": 77.29, "train_speed(iter/s)": 0.129676}
{"loss": 0.29389222, "grad_norm": 2.59512918, "learning_rate": 5e-08, "token_acc": 0.90163595, "epoch": 4.53947368, "global_step/max_steps": "345/380", "percentage": "90.79%", "elapsed_time": "44m 13s", "remaining_time": "4m 29s", "memory(GiB)": 77.29, "train_speed(iter/s)": 0.129997}
{"loss": 0.3116612, "grad_norm": 2.61069145, "learning_rate": 4e-08, "token_acc": 0.89649329, "epoch": 4.60526316, "global_step/max_steps": "350/380", "percentage": "92.11%", "elapsed_time": "44m 44s", "remaining_time": "3m 50s", "memory(GiB)": 77.29, "train_speed(iter/s)": 0.13038}
{"loss": 0.29344308, "grad_norm": 2.47391232, "learning_rate": 3e-08, "token_acc": 0.89970942, "epoch": 4.67105263, "global_step/max_steps": "355/380", "percentage": "93.42%", "elapsed_time": "45m 17s", "remaining_time": "3m 11s", "memory(GiB)": 77.29, "train_speed(iter/s)": 0.130626}
{"loss": 0.28615444, "grad_norm": 2.60012451, "learning_rate": 2e-08, "token_acc": 0.90323878, "epoch": 4.73684211, "global_step/max_steps": "360/380", "percentage": "94.74%", "elapsed_time": "45m 50s", "remaining_time": "2m 32s", "memory(GiB)": 77.29, "train_speed(iter/s)": 0.130877}
{"loss": 0.29576859, "grad_norm": 2.43552294, "learning_rate": 1e-08, "token_acc": 0.90143775, "epoch": 4.80263158, "global_step/max_steps": "365/380", "percentage": "96.05%", "elapsed_time": "46m 21s", "remaining_time": "1m 54s", "memory(GiB)": 77.29, "train_speed(iter/s)": 0.131228}
{"loss": 0.29920444, "grad_norm": 2.7377967, "learning_rate": 0.0, "token_acc": 0.90092493, "epoch": 4.86842105, "global_step/max_steps": "370/380", "percentage": "97.37%", "elapsed_time": "46m 53s", "remaining_time": "1m 16s", "memory(GiB)": 77.29, "train_speed(iter/s)": 0.131529}
{"loss": 0.30521078, "grad_norm": 2.77751398, "learning_rate": 0.0, "token_acc": 0.89559328, "epoch": 4.93421053, "global_step/max_steps": "375/380", "percentage": "98.68%", "elapsed_time": "47m 24s", "remaining_time": "37s", "memory(GiB)": 77.29, "train_speed(iter/s)": 0.131811}
{"loss": 0.3077724, "grad_norm": 3.11641431, "learning_rate": 0.0, "token_acc": 0.8958139, "epoch": 5.0, "global_step/max_steps": "380/380", "percentage": "100.00%", "elapsed_time": "47m 53s", "remaining_time": "0s", "memory(GiB)": 77.29, "train_speed(iter/s)": 0.132226}
{"eval_loss": 0.64305353, "eval_runtime": 15.4821, "eval_samples_per_second": 8.655, "eval_steps_per_second": 1.098, "eval_token_acc": 0.80995712, "epoch": 5.0, "global_step/max_steps": "380/380", "percentage": "100.00%", "elapsed_time": "48m 9s", "remaining_time": "0s", "memory(GiB)": 77.29, "train_speed(iter/s)": 0.131517}
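These records are the trainer's JSON-lines log: one object per logging step (every 5 optimizer steps) and one per end-of-epoch evaluation, so they can be loaded back for plotting or for picking the best checkpoint. A minimal sketch in Python, assuming the records above are saved one per line in a file named logging.jsonl (the file name is illustrative, not part of this commit):

import json

train_steps, train_loss, eval_steps, eval_loss = [], [], [], []
with open("logging.jsonl") as f:
    for line in f:
        rec = json.loads(line)
        # "global_step/max_steps" is logged as a string such as "75/380".
        step = int(rec["global_step/max_steps"].split("/")[0])
        if "eval_loss" in rec:
            eval_steps.append(step)
            eval_loss.append(rec["eval_loss"])
        elif "loss" in rec:
            train_steps.append(step)
            train_loss.append(rec["loss"])

print(f"{len(train_steps)} train records, {len(eval_steps)} eval records")
best_loss, best_step = min(zip(eval_loss, eval_steps))
print(f"best eval_loss {best_loss:.4f} at step {best_step}")

On the log above this reports step 152 (end of epoch 2, eval_loss 0.5873): eval loss rises at every later epoch while train loss keeps falling, the usual overfitting signature, which is exactly what load_best_model_at_end with metric_for_best_model "eval_loss" in the args.json files of this commit is there to catch.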
ood/ivl-8b-instruct-thinking_full_qvq_ood_e5/v0-20250928-190639/val_dataset.jsonl
ADDED
The diff for this file is too large to render.
See raw diff
ood/qwen2.5vl-7b-full_sft_ood/v1-20251004-154120/args.json
ADDED
@@ -0,0 +1,384 @@
{
    "output_dir": "/mnt/data/users/liamding/data/MMMT/lora/qwen2.5vl-7b-full_sft_ood/v1-20251004-154120",
    "overwrite_output_dir": false,
    "do_train": false,
    "do_eval": false,
    "do_predict": false,
    "eval_strategy": "epoch",
    "prediction_loss_only": false,
    "per_device_train_batch_size": 2,
    "per_device_eval_batch_size": 2,
    "per_gpu_train_batch_size": null,
    "per_gpu_eval_batch_size": null,
    "gradient_accumulation_steps": 2,
    "eval_accumulation_steps": null,
    "eval_delay": 0,
    "torch_empty_cache_steps": null,
    "learning_rate": 5e-07,
    "weight_decay": 0.1,
    "adam_beta1": 0.9,
    "adam_beta2": 0.95,
    "adam_epsilon": 1e-08,
    "max_grad_norm": 1.0,
    "num_train_epochs": 10.0,
    "max_steps": -1,
    "lr_scheduler_type": "cosine",
    "lr_scheduler_kwargs": null,
    "warmup_ratio": 0.1,
    "warmup_steps": 0,
    "log_level": "passive",
    "log_level_replica": "warning",
    "log_on_each_node": true,
    "logging_dir": "/mnt/data/users/liamding/data/MMMT/lora/qwen2.5vl-7b-full_sft_ood/v1-20251004-154120/runs",
    "logging_strategy": "steps",
    "logging_first_step": true,
    "logging_steps": 5,
    "logging_nan_inf_filter": true,
    "save_strategy": "epoch",
    "save_steps": 500,
    "save_total_limit": 5,
    "save_safetensors": true,
    "save_on_each_node": false,
    "save_only_model": false,
    "restore_callback_states_from_checkpoint": false,
    "no_cuda": false,
    "use_cpu": false,
    "use_mps_device": false,
    "seed": 42,
    "data_seed": 42,
    "jit_mode_eval": false,
    "use_ipex": false,
    "bf16": true,
    "fp16": false,
    "fp16_opt_level": "O1",
    "half_precision_backend": "auto",
    "bf16_full_eval": false,
    "fp16_full_eval": false,
    "tf32": null,
    "local_rank": 0,
    "ddp_backend": null,
    "tpu_num_cores": null,
    "tpu_metrics_debug": false,
    "debug": null,
    "dataloader_drop_last": false,
    "eval_steps": null,
    "dataloader_num_workers": 4,
    "dataloader_prefetch_factor": null,
    "past_index": -1,
    "run_name": "/mnt/data/users/liamding/data/MMMT/lora/qwen2.5vl-7b-full_sft_ood/v1-20251004-154120",
    "disable_tqdm": null,
    "remove_unused_columns": true,
    "label_names": null,
    "load_best_model_at_end": true,
    "metric_for_best_model": "eval_loss",
    "greater_is_better": false,
    "ignore_data_skip": false,
    "fsdp": "",
    "fsdp_min_num_params": 0,
    "fsdp_config": null,
    "fsdp_transformer_layer_cls_to_wrap": null,
    "accelerator_config": {
        "dispatch_batches": false
    },
    "deepspeed": {
        "fp16": {
            "enabled": "auto",
            "loss_scale": 0,
            "loss_scale_window": 1000,
            "initial_scale_power": 16,
            "hysteresis": 2,
            "min_loss_scale": 1
        },
        "bf16": {
            "enabled": "auto"
        },
        "zero_optimization": {
            "stage": 3,
            "offload_optimizer": {
                "device": "none",
                "pin_memory": true
            },
            "offload_param": {
                "device": "none",
                "pin_memory": true
            },
            "overlap_comm": false,
            "contiguous_gradients": true,
            "sub_group_size": 1000000000.0,
            "reduce_bucket_size": "auto",
            "zero_quantized_weights": false,
            "zero_quantized_gradients": false,
            "stage3_prefetch_bucket_size": "auto",
            "stage3_param_persistence_threshold": "auto",
            "stage3_max_live_parameters": 1000000000.0,
            "stage3_max_reuse_distance": 1000000000.0,
            "stage3_gather_16bit_weights_on_model_save": true
        },
        "gradient_accumulation_steps": "auto",
        "gradient_clipping": "auto",
        "steps_per_print": 2000,
        "train_batch_size": "auto",
        "train_micro_batch_size_per_gpu": "auto",
        "wall_clock_breakdown": false
    },
    "label_smoothing_factor": 0.0,
    "optim": "adamw_torch",
    "optim_args": null,
    "adafactor": false,
    "group_by_length": false,
    "length_column_name": "length",
    "report_to": [
        "swanlab"
    ],
    "ddp_find_unused_parameters": null,
    "ddp_bucket_cap_mb": null,
    "ddp_broadcast_buffers": null,
    "dataloader_pin_memory": true,
    "dataloader_persistent_workers": false,
    "skip_memory_metrics": true,
    "use_legacy_prediction_loop": false,
    "push_to_hub": false,
    "resume_from_checkpoint": null,
    "hub_model_id": null,
    "hub_strategy": "every_save",
    "hub_token": null,
    "hub_private_repo": null,
    "hub_always_push": false,
    "hub_revision": null,
    "gradient_checkpointing": true,
    "gradient_checkpointing_kwargs": null,
    "include_inputs_for_metrics": false,
    "include_for_metrics": [],
    "eval_do_concat_batches": true,
    "fp16_backend": "auto",
    "push_to_hub_model_id": null,
    "push_to_hub_organization": null,
    "push_to_hub_token": null,
    "mp_parameters": "",
    "auto_find_batch_size": false,
    "full_determinism": false,
    "torchdynamo": null,
    "ray_scope": "last",
    "ddp_timeout": 18000000,
    "torch_compile": false,
    "torch_compile_backend": null,
    "torch_compile_mode": null,
    "include_tokens_per_second": false,
    "include_num_input_tokens_seen": false,
    "neftune_noise_alpha": null,
    "optim_target_modules": null,
    "batch_eval_metrics": false,
    "eval_on_start": false,
    "use_liger_kernel": false,
    "liger_kernel_config": null,
    "eval_use_gather_object": false,
    "average_tokens_across_devices": true,
    "sortish_sampler": false,
    "predict_with_generate": false,
    "generation_max_length": null,
    "generation_num_beams": null,
    "generation_config": null,
    "tuner_backend": "peft",
    "vit_gradient_checkpointing": null,
    "router_aux_loss_coef": 0.0,
    "enable_dft_loss": false,
    "enable_channel_loss": false,
    "check_model": true,
    "acc_strategy": "token",
    "train_dataloader_shuffle": true,
    "max_epochs": null,
    "aligner_lr": null,
    "vit_lr": null,
    "use_logits_to_keep": null,
    "ds3_gather_for_generation": true,
    "resume_only_model": false,
    "optimizer": null,
    "loss_type": null,
    "metric": null,
    "eval_use_evalscope": false,
    "eval_dataset": [],
    "eval_dataset_args": null,
    "eval_limit": null,
    "eval_generation_config": null,
    "extra_eval_args": null,
    "use_flash_ckpt": false,
    "model": "/mnt/data/users/liamding/data/models/Qwen2.5-VL-7B-Instruct",
    "model_type": "qwen2_5_vl",
    "model_revision": null,
    "task_type": "causal_lm",
    "torch_dtype": "bfloat16",
    "attn_impl": null,
    "new_special_tokens": [],
    "num_labels": null,
    "problem_type": null,
    "rope_scaling": null,
    "device_map": null,
    "max_memory": {},
    "max_model_len": null,
    "local_repo_path": null,
    "init_strategy": null,
    "template": "qwen2_5_vl",
    "system": null,
    "max_length": 32768,
    "truncation_strategy": "delete",
    "max_pixels": null,
    "agent_template": null,
    "norm_bbox": null,
    "use_chat_template": true,
    "padding_free": false,
    "padding_side": "right",
    "loss_scale": "default",
    "sequence_parallel_size": 1,
    "response_prefix": null,
    "template_backend": "swift",
    "dataset": [
        "/mnt/data/users/liamding/data/3AM_Plus/final/ood_split/ambi_normal_train_aug_messages.json"
    ],
    "val_dataset": [],
    "split_dataset_ratio": 0.1,
    "dataset_num_proc": 1,
    "load_from_cache_file": true,
    "dataset_shuffle": true,
    "val_dataset_shuffle": false,
    "streaming": false,
    "interleave_prob": null,
    "stopping_strategy": "first_exhausted",
    "shuffle_buffer_size": 1000,
    "download_mode": "reuse_dataset_if_exists",
    "columns": {},
    "strict": false,
    "model_name": null,
    "model_author": null,
    "custom_dataset_info": [],
    "quant_method": null,
    "quant_bits": null,
    "hqq_axis": null,
    "bnb_4bit_compute_dtype": "bfloat16",
    "bnb_4bit_quant_type": "nf4",
    "bnb_4bit_use_double_quant": true,
    "bnb_4bit_quant_storage": null,
    "max_new_tokens": 64,
    "temperature": 0.0,
    "top_k": null,
    "top_p": null,
    "repetition_penalty": null,
    "num_beams": 1,
    "stream": false,
    "stop_words": [],
    "logprobs": false,
    "top_logprobs": null,
    "ckpt_dir": null,
    "lora_modules": [],
    "train_type": "full",
    "adapters": [],
    "external_plugins": [],
    "model_kwargs": {},
    "load_args": false,
    "load_data_args": false,
    "packing": false,
    "packing_length": null,
    "lazy_tokenize": true,
    "cached_dataset": [],
    "custom_register_path": [],
    "use_hf": false,
    "ignore_args_error": false,
    "use_swift_lora": false,
    "freeze_parameters": [
        "model.visual",
        "model.visual.merger"
    ],
    "freeze_parameters_regex": null,
    "freeze_parameters_ratio": 0.0,
    "trainable_parameters": [],
    "trainable_parameters_regex": null,
    "freeze_llm": false,
    "freeze_vit": true,
    "freeze_aligner": true,
    "target_modules": [
        "all-linear"
    ],
    "target_regex": null,
    "target_parameters": null,
    "modules_to_save": [],
    "lora_rank": 8,
    "lora_alpha": 32,
    "lora_dropout": 0.05,
    "lora_bias": "none",
    "lora_dtype": null,
    "lorap_lr_ratio": null,
    "use_rslora": false,
    "use_dora": false,
    "lora_ga_batch_size": 2,
    "lora_ga_iters": 2,
    "lora_ga_max_length": 1024,
    "lora_ga_direction": "ArB2r",
    "lora_ga_scale": "stable",
    "lora_ga_stable_gamma": 16,
    "init_weights": true,
    "fourier_n_frequency": 2000,
    "fourier_scaling": 300.0,
    "boft_block_size": 4,
    "boft_block_num": 0,
    "boft_n_butterfly_factor": 1,
    "boft_dropout": 0.0,
    "vera_rank": 256,
    "vera_projection_prng_key": 0,
    "vera_dropout": 0.0,
    "vera_d_initial": 0.1,
    "adapter_act": "gelu",
    "adapter_length": 128,
    "use_galore": false,
    "galore_target_modules": null,
    "galore_rank": 128,
    "galore_update_proj_gap": 50,
    "galore_scale": 1.0,
    "galore_proj_type": "std",
    "galore_optim_per_parameter": false,
    "galore_with_embedding": false,
    "galore_quantization": false,
    "galore_proj_quant": false,
    "galore_proj_bits": 4,
    "galore_proj_group_size": 256,
    "galore_cos_threshold": 0.4,
    "galore_gamma_proj": 2,
    "galore_queue_size": 5,
    "adalora_target_r": 8,
    "adalora_init_r": 12,
    "adalora_tinit": 0,
    "adalora_tfinal": 0,
    "adalora_deltaT": 1,
    "adalora_beta1": 0.85,
    "adalora_beta2": 0.85,
    "adalora_orth_reg_weight": 0.5,
    "llamapro_num_new_blocks": 4,
    "llamapro_num_groups": null,
    "lisa_activated_layers": 0,
    "lisa_step_interval": 20,
    "reft_layer_key": null,
    "reft_layers": null,
    "reft_rank": 4,
    "reft_intervention_type": "LoreftIntervention",
    "reft_args": null,
    "swanlab_token": null,
    "swanlab_project": null,
    "swanlab_workspace": null,
    "swanlab_exp_name": "/mnt/data/users/liamding/data/MMMT/lora/qwen2.5vl-7b-full_sft_ood/v1-20251004-154120",
    "swanlab_lark_webhook_url": null,
    "swanlab_lark_secret": null,
    "swanlab_mode": "cloud",
    "add_version": true,
    "create_checkpoint_symlink": false,
    "zero_hpz_partition_size": null,
    "deepspeed_autotp_size": null,
    "early_stop_interval": null,
    "rank": 0,
    "global_world_size": 4,
    "local_world_size": 4,
    "model_suffix": "Qwen2.5-VL-7B-Instruct",
    "model_info": "ModelInfo(model_type='qwen2_5_vl', model_dir='/mnt/data/users/liamding/data/models/Qwen2.5-VL-7B-Instruct', torch_dtype=torch.bfloat16, max_model_len=128000, quant_method=None, quant_bits=None, rope_scaling={'type': 'default', 'mrope_section': [16, 24, 24], 'rope_type': 'default'}, is_moe_model=False, config=None, task_type='causal_lm', num_labels=None)",
    "model_meta": "ModelMeta(model_type='qwen2_5_vl', model_groups=[ModelGroup(models=[Model(ms_model_id='Qwen/Qwen2.5-VL-3B-Instruct', hf_model_id='Qwen/Qwen2.5-VL-3B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-7B-Instruct', hf_model_id='Qwen/Qwen2.5-VL-7B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-32B-Instruct', hf_model_id='Qwen/Qwen2.5-VL-32B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-72B-Instruct', hf_model_id='Qwen/Qwen2.5-VL-72B-Instruct', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[]), ModelGroup(models=[Model(ms_model_id='Qwen/Qwen2.5-VL-3B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-VL-3B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-7B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-VL-7B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-32B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-VL-32B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-72B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-VL-72B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[])], template='qwen2_5_vl', get_function=<function get_model_tokenizer_qwen2_5_vl at 0x7f64da1f1cf0>, model_arch=MultiModelKeys(arch_name='qwen2_vl', embedding=None, module_list=None, lm_head=None, q_proj=None, k_proj=None, v_proj=None, o_proj=None, attention=None, mlp=None, down_proj=None, qkv_proj=None, qk_proj=None, qa_proj=None, qb_proj=None, kv_proj=None, kva_proj=None, kvb_proj=None, language_model=['model.language_model'], aligner=['model.visual.merger'], vision_tower=['model.visual'], generator=[]), architectures=['Qwen2_5_VLForConditionalGeneration'], additional_saved_files=[], torch_dtype=None, is_multimodal=True, is_reward=False, task_type=None, ignore_patterns=None, requires=['transformers>=4.49', 'qwen_vl_utils>=0.0.6', 'decord'], tags=['vision', 'video'])",
    "model_dir": "/mnt/data/users/liamding/data/models/Qwen2.5-VL-7B-Instruct",
    "hub": "<class 'swift.hub.hub.MSHub'>",
    "evaluation_strategy": "epoch",
    "training_args": "Seq2SeqTrainingArguments(output_dir='/mnt/data/users/liamding/data/MMMT/lora/qwen2.5vl-7b-full_sft_ood/v1-20251004-154120', overwrite_output_dir=False, do_train=False, do_eval=True, do_predict=False, eval_strategy=<IntervalStrategy.EPOCH: 'epoch'>, prediction_loss_only=False, per_device_train_batch_size=2, per_device_eval_batch_size=2, per_gpu_train_batch_size=None, per_gpu_eval_batch_size=None, gradient_accumulation_steps=2, eval_accumulation_steps=None, eval_delay=0, torch_empty_cache_steps=None, learning_rate=5e-07, weight_decay=0.1, adam_beta1=0.9, adam_beta2=0.95, adam_epsilon=1e-08, max_grad_norm=1.0, num_train_epochs=10.0, max_steps=-1, lr_scheduler_type=<SchedulerType.COSINE: 'cosine'>, lr_scheduler_kwargs=None, warmup_ratio=0.1, warmup_steps=0, log_level='passive', log_level_replica='warning', log_on_each_node=True, logging_dir='/mnt/data/users/liamding/data/MMMT/lora/qwen2.5vl-7b-full_sft_ood/v1-20251004-154120/runs', logging_strategy=<IntervalStrategy.STEPS: 'steps'>, logging_first_step=True, logging_steps=5, logging_nan_inf_filter=True, save_strategy=<SaveStrategy.EPOCH: 'epoch'>, save_steps=500, save_total_limit=5, save_safetensors=True, save_on_each_node=False, save_only_model=False, restore_callback_states_from_checkpoint=False, no_cuda=False, use_cpu=False, use_mps_device=False, seed=42, data_seed=42, jit_mode_eval=False, use_ipex=False, bf16=True, fp16=False, fp16_opt_level='O1', half_precision_backend='auto', bf16_full_eval=False, fp16_full_eval=False, tf32=None, local_rank=0, ddp_backend=None, tpu_num_cores=None, tpu_metrics_debug=False, debug=[], dataloader_drop_last=False, eval_steps=None, dataloader_num_workers=4, dataloader_prefetch_factor=10, past_index=-1, run_name='/mnt/data/users/liamding/data/MMMT/lora/qwen2.5vl-7b-full_sft_ood/v1-20251004-154120', disable_tqdm=False, remove_unused_columns=False, label_names=None, load_best_model_at_end=True, metric_for_best_model='eval_loss', greater_is_better=False, ignore_data_skip=False, fsdp=[], fsdp_min_num_params=0, fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, fsdp_transformer_layer_cls_to_wrap=None, accelerator_config=AcceleratorConfig(split_batches=False, dispatch_batches=False, even_batches=True, use_seedable_sampler=True, non_blocking=False, gradient_accumulation_kwargs=None, use_configured_state=False), deepspeed={'fp16': {'enabled': 'auto', 'loss_scale': 0, 'loss_scale_window': 1000, 'initial_scale_power': 16, 'hysteresis': 2, 'min_loss_scale': 1}, 'bf16': {'enabled': 'auto'}, 'zero_optimization': {'stage': 3, 'offload_optimizer': {'device': 'none', 'pin_memory': True}, 'offload_param': {'device': 'none', 'pin_memory': True}, 'overlap_comm': False, 'contiguous_gradients': True, 'sub_group_size': 1000000000.0, 'reduce_bucket_size': 'auto', 'zero_quantized_weights': False, 'zero_quantized_gradients': False, 'stage3_prefetch_bucket_size': 'auto', 'stage3_param_persistence_threshold': 'auto', 'stage3_max_live_parameters': 1000000000.0, 'stage3_max_reuse_distance': 1000000000.0, 'stage3_gather_16bit_weights_on_model_save': True}, 'gradient_accumulation_steps': 'auto', 'gradient_clipping': 'auto', 'steps_per_print': 2000, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'wall_clock_breakdown': False}, label_smoothing_factor=0.0, optim=<OptimizerNames.ADAMW_TORCH: 'adamw_torch'>, optim_args=None, adafactor=False, group_by_length=False, length_column_name='length', report_to=['swanlab'], ddp_find_unused_parameters=None, ddp_bucket_cap_mb=None, ddp_broadcast_buffers=None, dataloader_pin_memory=True, dataloader_persistent_workers=False, skip_memory_metrics=True, use_legacy_prediction_loop=False, push_to_hub=False, resume_from_checkpoint=None, hub_model_id=None, hub_strategy=<HubStrategy.EVERY_SAVE: 'every_save'>, hub_token=None, hub_private_repo=None, hub_always_push=False, hub_revision=None, gradient_checkpointing=True, gradient_checkpointing_kwargs=None, include_inputs_for_metrics=False, include_for_metrics=[], eval_do_concat_batches=True, fp16_backend='auto', push_to_hub_model_id=None, push_to_hub_organization=None, push_to_hub_token=None, mp_parameters='', auto_find_batch_size=False, full_determinism=False, torchdynamo=None, ray_scope='last', ddp_timeout=18000000, torch_compile=False, torch_compile_backend=None, torch_compile_mode=None, include_tokens_per_second=None, include_num_input_tokens_seen=None, neftune_noise_alpha=None, optim_target_modules=None, batch_eval_metrics=False, eval_on_start=False, use_liger_kernel=False, liger_kernel_config=None, eval_use_gather_object=False, average_tokens_across_devices=None, sortish_sampler=False, predict_with_generate=False, generation_max_length=None, generation_num_beams=None, generation_config=None, tuner_backend='peft', vit_gradient_checkpointing=True, router_aux_loss_coef=0.0, enable_dft_loss=False, enable_channel_loss=False, check_model=True, acc_strategy='token', train_dataloader_shuffle=True, max_epochs=None, aligner_lr=None, vit_lr=None, use_logits_to_keep=None, ds3_gather_for_generation=True, resume_only_model=False, optimizer=None, loss_type=None, metric=None, eval_use_evalscope=False, eval_dataset=[], eval_dataset_args=None, eval_limit=None, eval_generation_config=None, extra_eval_args=None, use_flash_ckpt=False, sft_alpha=0, train_type='full', local_repo_path=None, galore_config=None)"
}
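Everything the launcher needs to reproduce the run is recorded in this one file, and a few quantities are easier to derive than to eyeball. A minimal sketch in Python, assuming the file has been downloaded locally as args.json (the path is illustrative):

import json

with open("args.json") as f:
    args = json.load(f)

# Global batch size = micro-batch x gradient accumulation x data-parallel ranks.
effective_batch = (args["per_device_train_batch_size"]
                   * args["gradient_accumulation_steps"]
                   * args["global_world_size"])
print(f"effective batch size: {effective_batch}")  # 2 * 2 * 4 = 16

# warmup_steps is 0, so the cosine schedule takes its warmup length from
# warmup_ratio: the first 10% of optimizer steps ramp up to the 5e-07 peak.
print(f"peak lr {args['learning_rate']}, warmup ratio {args['warmup_ratio']}")

Note also that ZeRO stage 3 is enabled with stage3_gather_16bit_weights_on_model_save set to true, so full 16-bit weights are gathered at save time and the checkpoints are loadable without a separate consolidation step.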
ood/qwen2.5vl-7b-full_sft_ood/v1-20251004-154120/checkpoint-304/added_tokens.json
ADDED
@@ -0,0 +1,24 @@
{
    "</tool_call>": 151658,
    "<tool_call>": 151657,
    "<|box_end|>": 151649,
    "<|box_start|>": 151648,
    "<|endoftext|>": 151643,
    "<|file_sep|>": 151664,
    "<|fim_middle|>": 151660,
    "<|fim_pad|>": 151662,
    "<|fim_prefix|>": 151659,
    "<|fim_suffix|>": 151661,
    "<|im_end|>": 151645,
    "<|im_start|>": 151644,
    "<|image_pad|>": 151655,
    "<|object_ref_end|>": 151647,
    "<|object_ref_start|>": 151646,
    "<|quad_end|>": 151651,
    "<|quad_start|>": 151650,
    "<|repo_name|>": 151663,
    "<|video_pad|>": 151656,
    "<|vision_end|>": 151653,
    "<|vision_pad|>": 151654,
    "<|vision_start|>": 151652
}
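This map lists only the special tokens appended beyond the base vocabulary, so its IDs must agree with the tokenizer files saved in the same checkpoint. A minimal consistency check in Python, assuming transformers is installed and the checkpoint directory has been downloaded locally:

import json
from transformers import AutoTokenizer

ckpt = "ood/qwen2.5vl-7b-full_sft_ood/v1-20251004-154120/checkpoint-304"
tokenizer = AutoTokenizer.from_pretrained(ckpt)

with open(f"{ckpt}/added_tokens.json") as f:
    added = json.load(f)

# Every entry in added_tokens.json should map to the same ID the tokenizer uses.
for token, expected_id in added.items():
    assert tokenizer.convert_tokens_to_ids(token) == expected_id, token
print(f"verified {len(added)} special tokens")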
ood/qwen2.5vl-7b-full_sft_ood/v1-20251004-154120/checkpoint-304/args.json
ADDED
@@ -0,0 +1,384 @@
|
| 354 |
+
"llamapro_num_groups": null,
|
| 355 |
+
"lisa_activated_layers": 0,
|
| 356 |
+
"lisa_step_interval": 20,
|
| 357 |
+
"reft_layer_key": null,
|
| 358 |
+
"reft_layers": null,
|
| 359 |
+
"reft_rank": 4,
|
| 360 |
+
"reft_intervention_type": "LoreftIntervention",
|
| 361 |
+
"reft_args": null,
|
| 362 |
+
"swanlab_token": null,
|
| 363 |
+
"swanlab_project": null,
|
| 364 |
+
"swanlab_workspace": null,
|
| 365 |
+
"swanlab_exp_name": "/mnt/data/users/liamding/data/MMMT/lora/qwen2.5vl-7b-full_sft_ood/v1-20251004-154120",
|
| 366 |
+
"swanlab_lark_webhook_url": null,
|
| 367 |
+
"swanlab_lark_secret": null,
|
| 368 |
+
"swanlab_mode": "cloud",
|
| 369 |
+
"add_version": true,
|
| 370 |
+
"create_checkpoint_symlink": false,
|
| 371 |
+
"zero_hpz_partition_size": null,
|
| 372 |
+
"deepspeed_autotp_size": null,
|
| 373 |
+
"early_stop_interval": null,
|
| 374 |
+
"rank": 0,
|
| 375 |
+
"global_world_size": 4,
|
| 376 |
+
"local_world_size": 4,
|
| 377 |
+
"model_suffix": "Qwen2.5-VL-7B-Instruct",
|
| 378 |
+
"model_info": "ModelInfo(model_type='qwen2_5_vl', model_dir='/mnt/data/users/liamding/data/models/Qwen2.5-VL-7B-Instruct', torch_dtype=torch.bfloat16, max_model_len=128000, quant_method=None, quant_bits=None, rope_scaling={'type': 'default', 'mrope_section': [16, 24, 24], 'rope_type': 'default'}, is_moe_model=False, config=None, task_type='causal_lm', num_labels=None)",
|
| 379 |
+
"model_meta": "ModelMeta(model_type='qwen2_5_vl', model_groups=[ModelGroup(models=[Model(ms_model_id='Qwen/Qwen2.5-VL-3B-Instruct', hf_model_id='Qwen/Qwen2.5-VL-3B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-7B-Instruct', hf_model_id='Qwen/Qwen2.5-VL-7B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-32B-Instruct', hf_model_id='Qwen/Qwen2.5-VL-32B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-72B-Instruct', hf_model_id='Qwen/Qwen2.5-VL-72B-Instruct', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[]), ModelGroup(models=[Model(ms_model_id='Qwen/Qwen2.5-VL-3B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-VL-3B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-7B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-VL-7B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-32B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-VL-32B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-72B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-VL-72B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[])], template='qwen2_5_vl', get_function=<function get_model_tokenizer_qwen2_5_vl at 0x7f64da1f1cf0>, model_arch=MultiModelKeys(arch_name='qwen2_vl', embedding=None, module_list=None, lm_head=None, q_proj=None, k_proj=None, v_proj=None, o_proj=None, attention=None, mlp=None, down_proj=None, qkv_proj=None, qk_proj=None, qa_proj=None, qb_proj=None, kv_proj=None, kva_proj=None, kvb_proj=None, language_model=['model.language_model'], aligner=['model.visual.merger'], vision_tower=['model.visual'], generator=[]), architectures=['Qwen2_5_VLForConditionalGeneration'], additional_saved_files=[], torch_dtype=None, is_multimodal=True, is_reward=False, task_type=None, ignore_patterns=None, requires=['transformers>=4.49', 'qwen_vl_utils>=0.0.6', 'decord'], tags=['vision', 'video'])",
|
| 380 |
+
"model_dir": "/mnt/data/users/liamding/data/models/Qwen2.5-VL-7B-Instruct",
|
| 381 |
+
"hub": "<class 'swift.hub.hub.MSHub'>",
|
| 382 |
+
"evaluation_strategy": "epoch",
|
| 383 |
+
"training_args": "Seq2SeqTrainingArguments(output_dir='/mnt/data/users/liamding/data/MMMT/lora/qwen2.5vl-7b-full_sft_ood/v1-20251004-154120', overwrite_output_dir=False, do_train=False, do_eval=True, do_predict=False, eval_strategy=<IntervalStrategy.EPOCH: 'epoch'>, prediction_loss_only=False, per_device_train_batch_size=2, per_device_eval_batch_size=2, per_gpu_train_batch_size=None, per_gpu_eval_batch_size=None, gradient_accumulation_steps=2, eval_accumulation_steps=None, eval_delay=0, torch_empty_cache_steps=None, learning_rate=5e-07, weight_decay=0.1, adam_beta1=0.9, adam_beta2=0.95, adam_epsilon=1e-08, max_grad_norm=1.0, num_train_epochs=10.0, max_steps=-1, lr_scheduler_type=<SchedulerType.COSINE: 'cosine'>, lr_scheduler_kwargs=None, warmup_ratio=0.1, warmup_steps=0, log_level='passive', log_level_replica='warning', log_on_each_node=True, logging_dir='/mnt/data/users/liamding/data/MMMT/lora/qwen2.5vl-7b-full_sft_ood/v1-20251004-154120/runs', logging_strategy=<IntervalStrategy.STEPS: 'steps'>, logging_first_step=True, logging_steps=5, logging_nan_inf_filter=True, save_strategy=<SaveStrategy.EPOCH: 'epoch'>, save_steps=500, save_total_limit=5, save_safetensors=True, save_on_each_node=False, save_only_model=False, restore_callback_states_from_checkpoint=False, no_cuda=False, use_cpu=False, use_mps_device=False, seed=42, data_seed=42, jit_mode_eval=False, use_ipex=False, bf16=True, fp16=False, fp16_opt_level='O1', half_precision_backend='auto', bf16_full_eval=False, fp16_full_eval=False, tf32=None, local_rank=0, ddp_backend=None, tpu_num_cores=None, tpu_metrics_debug=False, debug=[], dataloader_drop_last=False, eval_steps=None, dataloader_num_workers=4, dataloader_prefetch_factor=10, past_index=-1, run_name='/mnt/data/users/liamding/data/MMMT/lora/qwen2.5vl-7b-full_sft_ood/v1-20251004-154120', disable_tqdm=False, remove_unused_columns=False, label_names=None, load_best_model_at_end=True, metric_for_best_model='eval_loss', greater_is_better=False, ignore_data_skip=False, fsdp=[], fsdp_min_num_params=0, fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, fsdp_transformer_layer_cls_to_wrap=None, accelerator_config=AcceleratorConfig(split_batches=False, dispatch_batches=False, even_batches=True, use_seedable_sampler=True, non_blocking=False, gradient_accumulation_kwargs=None, use_configured_state=False), deepspeed={'fp16': {'enabled': 'auto', 'loss_scale': 0, 'loss_scale_window': 1000, 'initial_scale_power': 16, 'hysteresis': 2, 'min_loss_scale': 1}, 'bf16': {'enabled': 'auto'}, 'zero_optimization': {'stage': 3, 'offload_optimizer': {'device': 'none', 'pin_memory': True}, 'offload_param': {'device': 'none', 'pin_memory': True}, 'overlap_comm': False, 'contiguous_gradients': True, 'sub_group_size': 1000000000.0, 'reduce_bucket_size': 'auto', 'zero_quantized_weights': False, 'zero_quantized_gradients': False, 'stage3_prefetch_bucket_size': 'auto', 'stage3_param_persistence_threshold': 'auto', 'stage3_max_live_parameters': 1000000000.0, 'stage3_max_reuse_distance': 1000000000.0, 'stage3_gather_16bit_weights_on_model_save': True}, 'gradient_accumulation_steps': 'auto', 'gradient_clipping': 'auto', 'steps_per_print': 2000, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'wall_clock_breakdown': False}, label_smoothing_factor=0.0, optim=<OptimizerNames.ADAMW_TORCH: 'adamw_torch'>, optim_args=None, adafactor=False, group_by_length=False, length_column_name='length', report_to=['swanlab'], ddp_find_unused_parameters=None, 
ddp_bucket_cap_mb=None, ddp_broadcast_buffers=None, dataloader_pin_memory=True, dataloader_persistent_workers=False, skip_memory_metrics=True, use_legacy_prediction_loop=False, push_to_hub=False, resume_from_checkpoint=None, hub_model_id=None, hub_strategy=<HubStrategy.EVERY_SAVE: 'every_save'>, hub_token=None, hub_private_repo=None, hub_always_push=False, hub_revision=None, gradient_checkpointing=True, gradient_checkpointing_kwargs=None, include_inputs_for_metrics=False, include_for_metrics=[], eval_do_concat_batches=True, fp16_backend='auto', push_to_hub_model_id=None, push_to_hub_organization=None, push_to_hub_token=None, mp_parameters='', auto_find_batch_size=False, full_determinism=False, torchdynamo=None, ray_scope='last', ddp_timeout=18000000, torch_compile=False, torch_compile_backend=None, torch_compile_mode=None, include_tokens_per_second=None, include_num_input_tokens_seen=None, neftune_noise_alpha=None, optim_target_modules=None, batch_eval_metrics=False, eval_on_start=False, use_liger_kernel=False, liger_kernel_config=None, eval_use_gather_object=False, average_tokens_across_devices=None, sortish_sampler=False, predict_with_generate=False, generation_max_length=None, generation_num_beams=None, generation_config=None, tuner_backend='peft', vit_gradient_checkpointing=True, router_aux_loss_coef=0.0, enable_dft_loss=False, enable_channel_loss=False, check_model=True, acc_strategy='token', train_dataloader_shuffle=True, max_epochs=None, aligner_lr=None, vit_lr=None, use_logits_to_keep=None, ds3_gather_for_generation=True, resume_only_model=False, optimizer=None, loss_type=None, metric=None, eval_use_evalscope=False, eval_dataset=[], eval_dataset_args=None, eval_limit=None, eval_generation_config=None, extra_eval_args=None, use_flash_ckpt=False, sft_alpha=0, train_type='full', local_repo_path=None, galore_config=None)"
|
| 384 |
+
}
|
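The run above is full-parameter SFT (`train_type: "full"`) with the vision tower and aligner frozen; the LoRA/GaLore/AdaLoRA keys are recorded defaults that stay inert in this mode. A minimal sketch of reading the dump back, assuming a local copy of this args.json (the path below is hypothetical):

```python
import json

# Hypothetical local path; substitute wherever this args.json was downloaded.
args_path = "ood/qwen2.5vl-7b-full_sft_ood/v1-20251004-154120/checkpoint-304/args.json"

with open(args_path) as f:
    args = json.load(f)

# Full-parameter SFT: only the LLM trains, vision tower and merger are frozen.
assert args["train_type"] == "full"
print(args["freeze_vit"], args["freeze_aligner"], args["freeze_parameters"])
print(args["max_length"], args["torch_dtype"], args["dataset"])
```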
ood/qwen2.5vl-7b-full_sft_ood/v1-20251004-154120/checkpoint-304/chat_template.jinja
ADDED
@@ -0,0 +1,7 @@
{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system
You are a helpful assistant.<|im_end|>
{% endif %}<|im_start|>{{ message['role'] }}
{% if message['content'] is string %}{{ message['content'] }}<|im_end|>
{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>
{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant
{% endif %}
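This is the stock Qwen2.5-VL chat format: it injects a default system prompt when none is given, numbers pictures/videos only when `add_vision_id` is set, and replaces each image or video item with `<|vision_start|><|image_pad|><|vision_end|>` (or the video equivalent). A minimal rendering sketch with raw jinja2; in practice the checkpoint's tokenizer `apply_chat_template` would be used, and the local file path is hypothetical:

```python
from jinja2 import Template

with open("chat_template.jinja") as f:  # hypothetical local copy of the file above
    template = Template(f.read())

messages = [
    {"role": "user", "content": [
        {"type": "image", "image": "demo.jpg"},
        {"type": "text", "text": "Describe the image."},
    ]},
]
# add_vision_id is left undefined, so the "Picture N:" prefix is skipped.
prompt = template.render(messages=messages, add_generation_prompt=True)
print(prompt)  # ...<|vision_start|><|image_pad|><|vision_end|>Describe the image.<|im_end|>...
```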
ood/qwen2.5vl-7b-full_sft_ood/v1-20251004-154120/checkpoint-304/config.json
ADDED
@@ -0,0 +1,138 @@
{
  "architectures": [
    "Qwen2_5_VLForConditionalGeneration"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 151643,
  "eos_token_id": 151645,
  "hidden_act": "silu",
  "hidden_size": 3584,
  "image_token_id": 151655,
  "initializer_range": 0.02,
  "intermediate_size": 18944,
  "max_position_embeddings": 128000,
  "max_window_layers": 28,
  "model_type": "qwen2_5_vl",
  "num_attention_heads": 28,
  "num_hidden_layers": 28,
  "num_key_value_heads": 4,
  "pad_token_id": 151643,
  "rms_norm_eps": 1e-06,
  "rope_scaling": {
    "mrope_section": [
      16,
      24,
      24
    ],
    "rope_type": "default",
    "type": "default"
  },
  "rope_theta": 1000000.0,
  "sliding_window": 32768,
  "text_config": {
    "architectures": [
      "Qwen2_5_VLForConditionalGeneration"
    ],
    "attention_dropout": 0.0,
    "bos_token_id": 151643,
    "eos_token_id": 151645,
    "hidden_act": "silu",
    "hidden_size": 3584,
    "image_token_id": null,
    "initializer_range": 0.02,
    "intermediate_size": 18944,
    "layer_types": [
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention"
    ],
    "max_position_embeddings": 128000,
    "max_window_layers": 28,
    "model_type": "qwen2_5_vl_text",
    "num_attention_heads": 28,
    "num_hidden_layers": 28,
    "num_key_value_heads": 4,
    "pad_token_id": 151643,
    "rms_norm_eps": 1e-06,
    "rope_scaling": {
      "mrope_section": [
        16,
        24,
        24
      ],
      "rope_type": "default",
      "type": "default"
    },
    "rope_theta": 1000000.0,
    "sliding_window": null,
    "torch_dtype": "bfloat16",
    "use_cache": false,
    "use_sliding_window": false,
    "video_token_id": null,
    "vision_end_token_id": 151653,
    "vision_start_token_id": 151652,
    "vision_token_id": 151654,
    "vocab_size": 152064
  },
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.55.4",
  "use_cache": false,
  "use_sliding_window": false,
  "video_token_id": 151656,
  "vision_config": {
    "depth": 32,
    "fullatt_block_indexes": [
      7,
      15,
      23,
      31
    ],
    "hidden_act": "silu",
    "hidden_size": 1280,
    "in_channels": 3,
    "in_chans": 3,
    "initializer_range": 0.02,
    "intermediate_size": 3420,
    "model_type": "qwen2_5_vl",
    "num_heads": 16,
    "out_hidden_size": 3584,
    "pad_token_id": 151643,
    "patch_size": 14,
    "spatial_merge_size": 2,
    "spatial_patch_size": 14,
    "temporal_patch_size": 2,
    "tokens_per_second": 2,
    "torch_dtype": "bfloat16",
    "window_size": 112
  },
  "vision_end_token_id": 151653,
  "vision_start_token_id": 151652,
  "vision_token_id": 151654,
  "vocab_size": 152064
}
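From `vision_config` above, the image-token budget can be estimated: a frame is cut into 14x14 patches (`patch_size`), and a 2x2 spatial merge (`spatial_merge_size`) folds every four patches into one LLM-side token. A back-of-envelope sketch derived from those values (not code from this repo):

```python
# How many LLM-side image tokens a H x W frame produces for this model,
# using patch_size=14 and spatial_merge_size=2 from the config above.
def image_tokens(height: int, width: int, patch_size: int = 14, merge: int = 2) -> int:
    patches = (height // patch_size) * (width // patch_size)
    return patches // (merge * merge)  # 2x2 spatial merge before the LM

print(image_tokens(448, 448))  # 32*32 = 1024 patches -> 256 tokens
```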
ood/qwen2.5vl-7b-full_sft_ood/v1-20251004-154120/checkpoint-304/generation_config.json
ADDED
@@ -0,0 +1,12 @@
{
  "bos_token_id": 151643,
  "do_sample": true,
  "eos_token_id": [
    151645,
    151643
  ],
  "pad_token_id": 151643,
  "repetition_penalty": 1.05,
  "temperature": 1e-06,
  "transformers_version": "4.55.4"
}
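Note that `do_sample: true` combined with `temperature: 1e-06` is effectively greedy decoding, softened only enough to keep the sampling code path active. A sketch of loading these defaults with transformers (checkpoint path hypothetical):

```python
from transformers import GenerationConfig

# Hypothetical local path to the checkpoint directory from this diff.
gen_cfg = GenerationConfig.from_pretrained(
    "ood/qwen2.5vl-7b-full_sft_ood/v1-20251004-154120/checkpoint-304"
)
print(gen_cfg.do_sample, gen_cfg.temperature, gen_cfg.repetition_penalty)
```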
ood/qwen2.5vl-7b-full_sft_ood/v1-20251004-154120/checkpoint-304/latest
ADDED
@@ -0,0 +1 @@
global_step304
ood/qwen2.5vl-7b-full_sft_ood/v1-20251004-154120/logging.jsonl
ADDED
|
@@ -0,0 +1,166 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{"loss": 1.02941561, "grad_norm": 17.3159181, "learning_rate": 1e-08, "token_acc": 0.6969697, "epoch": 0.01315789, "global_step/max_steps": "1/760", "percentage": "0.13%", "elapsed_time": "12s", "remaining_time": "2h 40m 37s", "memory(GiB)": 50.34, "train_speed(iter/s)": 0.078752}
|
| 2 |
+
{"loss": 0.90563303, "grad_norm": 16.54827763, "learning_rate": 3e-08, "token_acc": 0.75214724, "epoch": 0.06578947, "global_step/max_steps": "5/760", "percentage": "0.66%", "elapsed_time": "29s", "remaining_time": "1h 14m 5s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.169836}
|
| 3 |
+
{"loss": 1.022719, "grad_norm": 15.42344434, "learning_rate": 7e-08, "token_acc": 0.72828096, "epoch": 0.13157895, "global_step/max_steps": "10/760", "percentage": "1.32%", "elapsed_time": "49s", "remaining_time": "1h 1m 48s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.20226}
|
| 4 |
+
{"loss": 0.92649784, "grad_norm": 16.17717353, "learning_rate": 1e-07, "token_acc": 0.76236749, "epoch": 0.19736842, "global_step/max_steps": "15/760", "percentage": "1.97%", "elapsed_time": "1m 9s", "remaining_time": "57m 20s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.216527}
|
| 5 |
+
{"loss": 0.94114761, "grad_norm": 16.91098064, "learning_rate": 1.3e-07, "token_acc": 0.74158326, "epoch": 0.26315789, "global_step/max_steps": "20/760", "percentage": "2.63%", "elapsed_time": "1m 29s", "remaining_time": "55m 6s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.22377}
|
| 6 |
+
{"loss": 0.897752, "grad_norm": 17.10921014, "learning_rate": 1.6e-07, "token_acc": 0.76012146, "epoch": 0.32894737, "global_step/max_steps": "25/760", "percentage": "3.29%", "elapsed_time": "1m 48s", "remaining_time": "53m 10s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.230368}
|
| 7 |
+
{"loss": 0.9137682, "grad_norm": 17.10710801, "learning_rate": 2e-07, "token_acc": 0.74127126, "epoch": 0.39473684, "global_step/max_steps": "30/760", "percentage": "3.95%", "elapsed_time": "2m 8s", "remaining_time": "52m 12s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.233052}
|
| 8 |
+
{"loss": 0.84548731, "grad_norm": 19.22796394, "learning_rate": 2.3e-07, "token_acc": 0.76445211, "epoch": 0.46052632, "global_step/max_steps": "35/760", "percentage": "4.61%", "elapsed_time": "2m 28s", "remaining_time": "51m 19s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.235414}
|
| 9 |
+
{"loss": 0.78939257, "grad_norm": 16.20044305, "learning_rate": 2.6e-07, "token_acc": 0.76708633, "epoch": 0.52631579, "global_step/max_steps": "40/760", "percentage": "5.26%", "elapsed_time": "2m 48s", "remaining_time": "50m 40s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.236828}
|
| 10 |
+
{"loss": 0.87271767, "grad_norm": 12.81356622, "learning_rate": 3e-07, "token_acc": 0.7574171, "epoch": 0.59210526, "global_step/max_steps": "45/760", "percentage": "5.92%", "elapsed_time": "3m 8s", "remaining_time": "50m 2s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.238149}
|
| 11 |
+
{"loss": 0.86138668, "grad_norm": 23.73855973, "learning_rate": 3.3e-07, "token_acc": 0.76438849, "epoch": 0.65789474, "global_step/max_steps": "50/760", "percentage": "6.58%", "elapsed_time": "3m 28s", "remaining_time": "49m 26s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.239373}
|
| 12 |
+
{"loss": 0.91995277, "grad_norm": 18.20488127, "learning_rate": 3.6e-07, "token_acc": 0.73500448, "epoch": 0.72368421, "global_step/max_steps": "55/760", "percentage": "7.24%", "elapsed_time": "3m 48s", "remaining_time": "48m 43s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.241188}
|
| 13 |
+
{"loss": 0.81555462, "grad_norm": 16.53984575, "learning_rate": 3.9e-07, "token_acc": 0.76759966, "epoch": 0.78947368, "global_step/max_steps": "60/760", "percentage": "7.89%", "elapsed_time": "4m 7s", "remaining_time": "48m 12s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.241968}
|
| 14 |
+
{"loss": 0.87123632, "grad_norm": 15.36130527, "learning_rate": 4.3e-07, "token_acc": 0.75, "epoch": 0.85526316, "global_step/max_steps": "65/760", "percentage": "8.55%", "elapsed_time": "4m 26s", "remaining_time": "47m 31s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.243726}
|
| 15 |
+
{"loss": 0.78044271, "grad_norm": 14.5636902, "learning_rate": 4.6e-07, "token_acc": 0.78176796, "epoch": 0.92105263, "global_step/max_steps": "70/760", "percentage": "9.21%", "elapsed_time": "4m 45s", "remaining_time": "46m 54s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.245171}
|
| 16 |
+
{"loss": 0.85664997, "grad_norm": 13.72730304, "learning_rate": 4.9e-07, "token_acc": 0.7480916, "epoch": 0.98684211, "global_step/max_steps": "75/760", "percentage": "9.87%", "elapsed_time": "5m 4s", "remaining_time": "46m 20s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.246347}
|
| 17 |
+
{"eval_loss": 0.82916719, "eval_runtime": 10.7249, "eval_samples_per_second": 12.587, "eval_steps_per_second": 1.585, "eval_token_acc": 0.75931842, "epoch": 1.0, "global_step/max_steps": "76/760", "percentage": "10.00%", "elapsed_time": "5m 19s", "remaining_time": "47m 52s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.238079}
|
| 18 |
+
{"loss": 0.84156723, "grad_norm": 13.78746715, "learning_rate": 5e-07, "token_acc": 0.75711575, "epoch": 1.05263158, "global_step/max_steps": "80/760", "percentage": "10.53%", "elapsed_time": "7m 18s", "remaining_time": "1h 2m 10s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.182271}
|
| 19 |
+
{"loss": 0.92336359, "grad_norm": 13.70548477, "learning_rate": 5e-07, "token_acc": 0.73505798, "epoch": 1.11842105, "global_step/max_steps": "85/760", "percentage": "11.18%", "elapsed_time": "7m 39s", "remaining_time": "1h 0m 45s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.185183}
|
| 20 |
+
{"loss": 0.83351564, "grad_norm": 13.82552424, "learning_rate": 5e-07, "token_acc": 0.76068376, "epoch": 1.18421053, "global_step/max_steps": "90/760", "percentage": "11.84%", "elapsed_time": "8m 0s", "remaining_time": "59m 35s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.187369}
|
| 21 |
+
{"loss": 0.74494662, "grad_norm": 13.81295197, "learning_rate": 5e-07, "token_acc": 0.77972028, "epoch": 1.25, "global_step/max_steps": "95/760", "percentage": "12.50%", "elapsed_time": "8m 19s", "remaining_time": "58m 18s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.190055}
|
| 22 |
+
{"loss": 0.78937058, "grad_norm": 12.97976986, "learning_rate": 5e-07, "token_acc": 0.76831683, "epoch": 1.31578947, "global_step/max_steps": "100/760", "percentage": "13.16%", "elapsed_time": "8m 39s", "remaining_time": "57m 8s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.192501}
|
| 23 |
+
{"loss": 0.78472328, "grad_norm": 11.91064209, "learning_rate": 5e-07, "token_acc": 0.76811594, "epoch": 1.38157895, "global_step/max_steps": "105/760", "percentage": "13.82%", "elapsed_time": "8m 59s", "remaining_time": "56m 4s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.194687}
|
| 24 |
+
{"loss": 0.73886271, "grad_norm": 15.20236435, "learning_rate": 5e-07, "token_acc": 0.77837838, "epoch": 1.44736842, "global_step/max_steps": "110/760", "percentage": "14.47%", "elapsed_time": "9m 19s", "remaining_time": "55m 3s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.196777}
|
| 25 |
+
{"loss": 0.78518505, "grad_norm": 13.92003311, "learning_rate": 5e-07, "token_acc": 0.76475478, "epoch": 1.51315789, "global_step/max_steps": "115/760", "percentage": "15.13%", "elapsed_time": "9m 38s", "remaining_time": "54m 3s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.198856}
|
| 26 |
+
{"loss": 0.76190891, "grad_norm": 12.38341881, "learning_rate": 4.9e-07, "token_acc": 0.76888489, "epoch": 1.57894737, "global_step/max_steps": "120/760", "percentage": "15.79%", "elapsed_time": "9m 57s", "remaining_time": "53m 7s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.200796}
|
| 27 |
+
{"loss": 0.74719434, "grad_norm": 15.64056532, "learning_rate": 4.9e-07, "token_acc": 0.78358209, "epoch": 1.64473684, "global_step/max_steps": "125/760", "percentage": "16.45%", "elapsed_time": "10m 16s", "remaining_time": "52m 14s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.202603}
|
| 28 |
+
{"loss": 0.79086919, "grad_norm": 13.49205976, "learning_rate": 4.9e-07, "token_acc": 0.78280961, "epoch": 1.71052632, "global_step/max_steps": "130/760", "percentage": "17.11%", "elapsed_time": "10m 36s", "remaining_time": "51m 23s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.204315}
|
| 29 |
+
{"loss": 0.78124485, "grad_norm": 14.52953203, "learning_rate": 4.9e-07, "token_acc": 0.77809798, "epoch": 1.77631579, "global_step/max_steps": "135/760", "percentage": "17.76%", "elapsed_time": "10m 55s", "remaining_time": "50m 34s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.205987}
|
| 30 |
+
{"loss": 0.76170378, "grad_norm": 13.77716196, "learning_rate": 4.9e-07, "token_acc": 0.78044444, "epoch": 1.84210526, "global_step/max_steps": "140/760", "percentage": "18.42%", "elapsed_time": "11m 15s", "remaining_time": "49m 53s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.207103}
|
| 31 |
+
{"loss": 0.80421877, "grad_norm": 11.94680249, "learning_rate": 4.9e-07, "token_acc": 0.77209302, "epoch": 1.90789474, "global_step/max_steps": "145/760", "percentage": "19.08%", "elapsed_time": "11m 35s", "remaining_time": "49m 7s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.208619}
|
| 32 |
+
{"loss": 0.78462038, "grad_norm": 14.27554967, "learning_rate": 4.9e-07, "token_acc": 0.78860294, "epoch": 1.97368421, "global_step/max_steps": "150/760", "percentage": "19.74%", "elapsed_time": "11m 54s", "remaining_time": "48m 25s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.20994}
|
| 33 |
+
{"eval_loss": 0.79742974, "eval_runtime": 11.3855, "eval_samples_per_second": 11.857, "eval_steps_per_second": 1.493, "eval_token_acc": 0.76996805, "epoch": 2.0, "global_step/max_steps": "152/760", "percentage": "20.00%", "elapsed_time": "12m 13s", "remaining_time": "48m 54s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.207198}
|
| 34 |
+
{"loss": 0.70446115, "grad_norm": 12.20828183, "learning_rate": 4.8e-07, "token_acc": 0.77549272, "epoch": 2.03947368, "global_step/max_steps": "155/760", "percentage": "20.39%", "elapsed_time": "14m 8s", "remaining_time": "55m 12s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.182634}
|
| 35 |
+
{"loss": 0.74434495, "grad_norm": 12.86816501, "learning_rate": 4.8e-07, "token_acc": 0.78288868, "epoch": 2.10526316, "global_step/max_steps": "160/760", "percentage": "21.05%", "elapsed_time": "14m 29s", "remaining_time": "54m 19s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.184089}
|
| 36 |
+
{"loss": 0.69011607, "grad_norm": 13.15553517, "learning_rate": 4.8e-07, "token_acc": 0.78975741, "epoch": 2.17105263, "global_step/max_steps": "165/760", "percentage": "21.71%", "elapsed_time": "14m 48s", "remaining_time": "53m 24s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.185681}
|
| 37 |
+
{"loss": 0.71562028, "grad_norm": 11.69440391, "learning_rate": 4.8e-07, "token_acc": 0.8040153, "epoch": 2.23684211, "global_step/max_steps": "170/760", "percentage": "22.37%", "elapsed_time": "15m 7s", "remaining_time": "52m 30s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.187272}
|
| 38 |
+
{"loss": 0.71393661, "grad_norm": 15.77797348, "learning_rate": 4.7e-07, "token_acc": 0.79822222, "epoch": 2.30263158, "global_step/max_steps": "175/760", "percentage": "23.03%", "elapsed_time": "15m 27s", "remaining_time": "51m 40s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.188693}
|
| 39 |
+
{"loss": 0.65476294, "grad_norm": 11.31427353, "learning_rate": 4.7e-07, "token_acc": 0.79822835, "epoch": 2.36842105, "global_step/max_steps": "180/760", "percentage": "23.68%", "elapsed_time": "15m 46s", "remaining_time": "50m 50s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.190163}
|
| 40 |
+
{"loss": 0.66260881, "grad_norm": 12.04881686, "learning_rate": 4.7e-07, "token_acc": 0.82744702, "epoch": 2.43421053, "global_step/max_steps": "185/760", "percentage": "24.34%", "elapsed_time": "16m 5s", "remaining_time": "50m 1s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.191577}
|
| 41 |
+
{"loss": 0.77575922, "grad_norm": 13.59218371, "learning_rate": 4.7e-07, "token_acc": 0.7822374, "epoch": 2.5, "global_step/max_steps": "190/760", "percentage": "25.00%", "elapsed_time": "16m 25s", "remaining_time": "49m 15s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.192878}
|
| 42 |
+
{"loss": 0.69286623, "grad_norm": 13.28972823, "learning_rate": 4.6e-07, "token_acc": 0.78975265, "epoch": 2.56578947, "global_step/max_steps": "195/760", "percentage": "25.66%", "elapsed_time": "16m 44s", "remaining_time": "48m 30s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.194104}
|
| 43 |
+
{"loss": 0.6376442, "grad_norm": 16.08999576, "learning_rate": 4.6e-07, "token_acc": 0.81484876, "epoch": 2.63157895, "global_step/max_steps": "200/760", "percentage": "26.32%", "elapsed_time": "17m 3s", "remaining_time": "47m 45s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.195422}
|
| 44 |
+
{"loss": 0.68135052, "grad_norm": 14.32753154, "learning_rate": 4.6e-07, "token_acc": 0.79595427, "epoch": 2.69736842, "global_step/max_steps": "205/760", "percentage": "26.97%", "elapsed_time": "17m 22s", "remaining_time": "47m 3s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.196564}
|
| 45 |
+
{"loss": 0.68764391, "grad_norm": 14.97718993, "learning_rate": 4.5e-07, "token_acc": 0.80453258, "epoch": 2.76315789, "global_step/max_steps": "210/760", "percentage": "27.63%", "elapsed_time": "17m 42s", "remaining_time": "46m 23s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.197612}
|
| 46 |
+
{"loss": 0.7278162, "grad_norm": 13.09284339, "learning_rate": 4.5e-07, "token_acc": 0.781491, "epoch": 2.82894737, "global_step/max_steps": "215/760", "percentage": "28.29%", "elapsed_time": "18m 2s", "remaining_time": "45m 44s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.198614}
|
| 47 |
+
{"loss": 0.73977337, "grad_norm": 14.42389646, "learning_rate": 4.5e-07, "token_acc": 0.78738116, "epoch": 2.89473684, "global_step/max_steps": "220/760", "percentage": "28.95%", "elapsed_time": "18m 21s", "remaining_time": "45m 4s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.199684}
|
| 48 |
+
{"loss": 0.74037175, "grad_norm": 14.00628898, "learning_rate": 4.4e-07, "token_acc": 0.77495463, "epoch": 2.96052632, "global_step/max_steps": "225/760", "percentage": "29.61%", "elapsed_time": "18m 40s", "remaining_time": "44m 25s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.200741}
|
| 49 |
+
{"eval_loss": 0.78277975, "eval_runtime": 11.2217, "eval_samples_per_second": 12.03, "eval_steps_per_second": 1.515, "eval_token_acc": 0.7715655, "epoch": 3.0, "global_step/max_steps": "228/760", "percentage": "30.00%", "elapsed_time": "19m 3s", "remaining_time": "44m 29s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.19931}
|
| 50 |
+
{"loss": 0.64765458, "grad_norm": 12.11796553, "learning_rate": 4.4e-07, "token_acc": 0.80246914, "epoch": 3.02631579, "global_step/max_steps": "230/760", "percentage": "30.26%", "elapsed_time": "21m 1s", "remaining_time": "48m 27s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.182256}
|
| 51 |
+
{"loss": 0.58833332, "grad_norm": 10.63458162, "learning_rate": 4.4e-07, "token_acc": 0.82829181, "epoch": 3.09210526, "global_step/max_steps": "235/760", "percentage": "30.92%", "elapsed_time": "21m 25s", "remaining_time": "47m 52s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.182795}
|
| 52 |
+
{"loss": 0.62636294, "grad_norm": 13.97241496, "learning_rate": 4.3e-07, "token_acc": 0.8032345, "epoch": 3.15789474, "global_step/max_steps": "240/760", "percentage": "31.58%", "elapsed_time": "21m 45s", "remaining_time": "47m 7s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.183883}
|
| 53 |
+
{"loss": 0.59209342, "grad_norm": 11.77574647, "learning_rate": 4.3e-07, "token_acc": 0.82899306, "epoch": 3.22368421, "global_step/max_steps": "245/760", "percentage": "32.24%", "elapsed_time": "22m 4s", "remaining_time": "46m 24s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.184953}
|
| 54 |
+
{"loss": 0.61780977, "grad_norm": 12.85864133, "learning_rate": 4.2e-07, "token_acc": 0.80993897, "epoch": 3.28947368, "global_step/max_steps": "250/760", "percentage": "32.89%", "elapsed_time": "22m 25s", "remaining_time": "45m 45s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.185746}
|
| 55 |
+
{"loss": 0.62229028, "grad_norm": 11.99203912, "learning_rate": 4.2e-07, "token_acc": 0.81783681, "epoch": 3.35526316, "global_step/max_steps": "255/760", "percentage": "33.55%", "elapsed_time": "22m 45s", "remaining_time": "45m 4s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.186754}
|
| 56 |
+
{"loss": 0.60951276, "grad_norm": 14.32377195, "learning_rate": 4.2e-07, "token_acc": 0.81622678, "epoch": 3.42105263, "global_step/max_steps": "260/760", "percentage": "34.21%", "elapsed_time": "23m 4s", "remaining_time": "44m 22s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.187827}
|
| 57 |
+
{"loss": 0.68783875, "grad_norm": 13.08060299, "learning_rate": 4.1e-07, "token_acc": 0.7913351, "epoch": 3.48684211, "global_step/max_steps": "265/760", "percentage": "34.87%", "elapsed_time": "23m 23s", "remaining_time": "43m 41s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.188795}
|
| 58 |
+
{"loss": 0.64720039, "grad_norm": 12.92149821, "learning_rate": 4.1e-07, "token_acc": 0.81897386, "epoch": 3.55263158, "global_step/max_steps": "270/760", "percentage": "35.53%", "elapsed_time": "23m 42s", "remaining_time": "43m 2s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.189755}
|
| 59 |
+
{"loss": 0.58908606, "grad_norm": 12.37553316, "learning_rate": 4e-07, "token_acc": 0.81750466, "epoch": 3.61842105, "global_step/max_steps": "275/760", "percentage": "36.18%", "elapsed_time": "24m 1s", "remaining_time": "42m 22s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.190778}
|
| 60 |
+
{"loss": 0.66937218, "grad_norm": 13.12920206, "learning_rate": 4e-07, "token_acc": 0.81687612, "epoch": 3.68421053, "global_step/max_steps": "280/760", "percentage": "36.84%", "elapsed_time": "24m 20s", "remaining_time": "41m 43s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.191723}
|
| 61 |
+
{"loss": 0.68171711, "grad_norm": 14.40747301, "learning_rate": 3.9e-07, "token_acc": 0.81320591, "epoch": 3.75, "global_step/max_steps": "285/760", "percentage": "37.50%", "elapsed_time": "24m 40s", "remaining_time": "41m 6s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.192559}
|
| 62 |
+
{"loss": 0.61392231, "grad_norm": 13.66264232, "learning_rate": 3.9e-07, "token_acc": 0.83055556, "epoch": 3.81578947, "global_step/max_steps": "290/760", "percentage": "38.16%", "elapsed_time": "24m 58s", "remaining_time": "40m 29s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.193474}
|
| 63 |
+
{"loss": 0.5279911, "grad_norm": 11.47237183, "learning_rate": 3.8e-07, "token_acc": 0.8325667, "epoch": 3.88157895, "global_step/max_steps": "295/760", "percentage": "38.82%", "elapsed_time": "25m 18s", "remaining_time": "39m 53s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.194304}
|
| 64 |
+
{"loss": 0.58143449, "grad_norm": 13.61124969, "learning_rate": 3.8e-07, "token_acc": 0.82510823, "epoch": 3.94736842, "global_step/max_steps": "300/760", "percentage": "39.47%", "elapsed_time": "25m 38s", "remaining_time": "39m 18s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.195026}
|
| 65 |
+
{"eval_loss": 0.78271794, "eval_runtime": 11.4603, "eval_samples_per_second": 11.78, "eval_steps_per_second": 1.483, "eval_token_acc": 0.77582535, "epoch": 4.0, "global_step/max_steps": "304/760", "percentage": "40.00%", "elapsed_time": "26m 4s", "remaining_time": "39m 7s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.194284}
|
| 66 |
+
{"loss": 0.49745297, "grad_norm": 12.23548274, "learning_rate": 3.7e-07, "token_acc": 0.85315315, "epoch": 4.01315789, "global_step/max_steps": "305/760", "percentage": "40.13%", "elapsed_time": "27m 57s", "remaining_time": "41m 42s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.181799}
|
| 67 |
+
{"loss": 0.52832932, "grad_norm": 11.58635468, "learning_rate": 3.7e-07, "token_acc": 0.83865248, "epoch": 4.07894737, "global_step/max_steps": "310/760", "percentage": "40.79%", "elapsed_time": "28m 19s", "remaining_time": "41m 6s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.182421}
|
| 68 |
+
{"loss": 0.51454048, "grad_norm": 12.06320791, "learning_rate": 3.6e-07, "token_acc": 0.83317168, "epoch": 4.14473684, "global_step/max_steps": "315/760", "percentage": "41.45%", "elapsed_time": "28m 39s", "remaining_time": "40m 28s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.183241}
|
| 69 |
+
{"loss": 0.52731872, "grad_norm": 12.35438662, "learning_rate": 3.6e-07, "token_acc": 0.83301158, "epoch": 4.21052632, "global_step/max_steps": "320/760", "percentage": "42.11%", "elapsed_time": "28m 59s", "remaining_time": "39m 51s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.184009}
|
| 70 |
+
{"loss": 0.49958558, "grad_norm": 11.79500267, "learning_rate": 3.5e-07, "token_acc": 0.85548012, "epoch": 4.27631579, "global_step/max_steps": "325/760", "percentage": "42.76%", "elapsed_time": "29m 18s", "remaining_time": "39m 13s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.184861}
|
| 71 |
+
{"loss": 0.53519773, "grad_norm": 12.79872778, "learning_rate": 3.5e-07, "token_acc": 0.85056543, "epoch": 4.34210526, "global_step/max_steps": "330/760", "percentage": "43.42%", "elapsed_time": "29m 38s", "remaining_time": "38m 37s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.185562}
|
| 72 |
+
{"loss": 0.51555848, "grad_norm": 12.73069417, "learning_rate": 3.4e-07, "token_acc": 0.85452962, "epoch": 4.40789474, "global_step/max_steps": "335/760", "percentage": "44.08%", "elapsed_time": "29m 56s", "remaining_time": "37m 59s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.186424}
|
| 73 |
+
{"loss": 0.53922853, "grad_norm": 14.48429791, "learning_rate": 3.4e-07, "token_acc": 0.83053839, "epoch": 4.47368421, "global_step/max_steps": "340/760", "percentage": "44.74%", "elapsed_time": "30m 16s", "remaining_time": "37m 24s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.187155}
|
| 74 |
+
{"loss": 0.47321472, "grad_norm": 11.49103641, "learning_rate": 3.3e-07, "token_acc": 0.86215539, "epoch": 4.53947368, "global_step/max_steps": "345/760", "percentage": "45.39%", "elapsed_time": "30m 36s", "remaining_time": "36m 49s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.187828}
|
| 75 |
+
{"loss": 0.56955152, "grad_norm": 11.33748545, "learning_rate": 3.3e-07, "token_acc": 0.83121597, "epoch": 4.60526316, "global_step/max_steps": "350/760", "percentage": "46.05%", "elapsed_time": "30m 55s", "remaining_time": "36m 13s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.188626}
|
| 76 |
+
{"loss": 0.46773705, "grad_norm": 12.56964612, "learning_rate": 3.2e-07, "token_acc": 0.86182903, "epoch": 4.67105263, "global_step/max_steps": "355/760", "percentage": "46.71%", "elapsed_time": "31m 15s", "remaining_time": "35m 39s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.189326}
|
| 77 |
+
{"loss": 0.55848804, "grad_norm": 15.81450431, "learning_rate": 3.2e-07, "token_acc": 0.838, "epoch": 4.73684211, "global_step/max_steps": "360/760", "percentage": "47.37%", "elapsed_time": "31m 34s", "remaining_time": "35m 4s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.190067}
|
| 78 |
+
{"loss": 0.50942087, "grad_norm": 12.02623901, "learning_rate": 3.1e-07, "token_acc": 0.8604878, "epoch": 4.80263158, "global_step/max_steps": "365/760", "percentage": "48.03%", "elapsed_time": "31m 52s", "remaining_time": "34m 29s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.190829}
|
| 79 |
+
{"loss": 0.49252219, "grad_norm": 13.29445423, "learning_rate": 3e-07, "token_acc": 0.86591696, "epoch": 4.86842105, "global_step/max_steps": "370/760", "percentage": "48.68%", "elapsed_time": "32m 12s", "remaining_time": "33m 56s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.191494}
|
| 80 |
+
{"loss": 0.54908113, "grad_norm": 12.42201337, "learning_rate": 3e-07, "token_acc": 0.84361037, "epoch": 4.93421053, "global_step/max_steps": "375/760", "percentage": "49.34%", "elapsed_time": "32m 31s", "remaining_time": "33m 23s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.192179}
|
| 81 |
+
{"loss": 0.53364162, "grad_norm": 15.73394585, "learning_rate": 2.9e-07, "token_acc": 0.84782609, "epoch": 5.0, "global_step/max_steps": "380/760", "percentage": "50.00%", "elapsed_time": "32m 50s", "remaining_time": "32m 50s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.192865}
|
| 82 |
+
{"eval_loss": 0.80552876, "eval_runtime": 11.9734, "eval_samples_per_second": 11.275, "eval_steps_per_second": 1.42, "eval_token_acc": 0.77316294, "epoch": 5.0, "global_step/max_steps": "380/760", "percentage": "50.00%", "elapsed_time": "33m 2s", "remaining_time": "33m 2s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.191698}
|
| 83 |
+
{"loss": 0.40581384, "grad_norm": 11.57931837, "learning_rate": 2.9e-07, "token_acc": 0.88921002, "epoch": 5.06578947, "global_step/max_steps": "385/760", "percentage": "50.66%", "elapsed_time": "35m 13s", "remaining_time": "34m 18s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.182185}
|
| 84 |
+
{"loss": 0.4946116, "grad_norm": 13.46633141, "learning_rate": 2.8e-07, "token_acc": 0.85820204, "epoch": 5.13157895, "global_step/max_steps": "390/760", "percentage": "51.32%", "elapsed_time": "35m 32s", "remaining_time": "33m 43s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.182884}
|
| 85 |
+
{"loss": 0.44744673, "grad_norm": 12.70964931, "learning_rate": 2.8e-07, "token_acc": 0.85558853, "epoch": 5.19736842, "global_step/max_steps": "395/760", "percentage": "51.97%", "elapsed_time": "35m 52s", "remaining_time": "33m 9s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.183508}
|
| 86 |
+
{"loss": 0.39708123, "grad_norm": 10.08172646, "learning_rate": 2.7e-07, "token_acc": 0.87523105, "epoch": 5.26315789, "global_step/max_steps": "400/760", "percentage": "52.63%", "elapsed_time": "36m 13s", "remaining_time": "32m 35s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.184054}
|
| 87 |
+
{"loss": 0.44630985, "grad_norm": 13.12329007, "learning_rate": 2.6e-07, "token_acc": 0.86736475, "epoch": 5.32894737, "global_step/max_steps": "405/760", "percentage": "53.29%", "elapsed_time": "36m 32s", "remaining_time": "32m 1s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.184738}
|
| 88 |
+
{"loss": 0.44240837, "grad_norm": 12.55092676, "learning_rate": 2.6e-07, "token_acc": 0.8709369, "epoch": 5.39473684, "global_step/max_steps": "410/760", "percentage": "53.95%", "elapsed_time": "36m 52s", "remaining_time": "31m 28s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.185309}
|
| 89 |
+
{"loss": 0.41203389, "grad_norm": 12.15700096, "learning_rate": 2.5e-07, "token_acc": 0.88372093, "epoch": 5.46052632, "global_step/max_steps": "415/760", "percentage": "54.61%", "elapsed_time": "37m 11s", "remaining_time": "30m 55s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.185936}
|
| 90 |
+
{"loss": 0.39818454, "grad_norm": 11.79770449, "learning_rate": 2.5e-07, "token_acc": 0.87344029, "epoch": 5.52631579, "global_step/max_steps": "420/760", "percentage": "55.26%", "elapsed_time": "37m 30s", "remaining_time": "30m 21s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.18662}
|
| 91 |
+
{"loss": 0.45466576, "grad_norm": 12.74542293, "learning_rate": 2.4e-07, "token_acc": 0.87047101, "epoch": 5.59210526, "global_step/max_steps": "425/760", "percentage": "55.92%", "elapsed_time": "37m 50s", "remaining_time": "29m 49s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.187161}
|
| 92 |
+
{"loss": 0.45757322, "grad_norm": 13.32476096, "learning_rate": 2.4e-07, "token_acc": 0.87181904, "epoch": 5.65789474, "global_step/max_steps": "430/760", "percentage": "56.58%", "elapsed_time": "38m 9s", "remaining_time": "29m 17s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.187815}
|
| 93 |
+
{"loss": 0.45713186, "grad_norm": 12.04309303, "learning_rate": 2.3e-07, "token_acc": 0.8784965, "epoch": 5.72368421, "global_step/max_steps": "435/760", "percentage": "57.24%", "elapsed_time": "38m 29s", "remaining_time": "28m 45s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.188389}
|
| 94 |
+
{"loss": 0.43521323, "grad_norm": 15.65857642, "learning_rate": 2.2e-07, "token_acc": 0.88613407, "epoch": 5.78947368, "global_step/max_steps": "440/760", "percentage": "57.89%", "elapsed_time": "38m 47s", "remaining_time": "28m 12s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.189029}
|
| 95 |
+
{"loss": 0.49906354, "grad_norm": 11.14674096, "learning_rate": 2.2e-07, "token_acc": 0.86254296, "epoch": 5.85526316, "global_step/max_steps": "445/760", "percentage": "58.55%", "elapsed_time": "39m 6s", "remaining_time": "27m 41s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.189638}
|
| 96 |
+
{"loss": 0.43380098, "grad_norm": 10.82007543, "learning_rate": 2.1e-07, "token_acc": 0.86727123, "epoch": 5.92105263, "global_step/max_steps": "450/760", "percentage": "59.21%", "elapsed_time": "39m 25s", "remaining_time": "27m 9s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.190252}
|
| 97 |
+
{"loss": 0.37601833, "grad_norm": 10.22116608, "learning_rate": 2.1e-07, "token_acc": 0.89473684, "epoch": 5.98684211, "global_step/max_steps": "455/760", "percentage": "59.87%", "elapsed_time": "39m 43s", "remaining_time": "26m 37s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.19087}
|
| 98 |
+
{"eval_loss": 0.8343057, "eval_runtime": 11.7159, "eval_samples_per_second": 11.523, "eval_steps_per_second": 1.451, "eval_token_acc": 0.77263046, "epoch": 6.0, "global_step/max_steps": "456/760", "percentage": "60.00%", "elapsed_time": "39m 59s", "remaining_time": "26m 39s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.19007}
|
| 99 |
+
{"loss": 0.4425354, "grad_norm": 13.36737778, "learning_rate": 2e-07, "token_acc": 0.88156638, "epoch": 6.05263158, "global_step/max_steps": "460/760", "percentage": "60.53%", "elapsed_time": "42m 11s", "remaining_time": "27m 30s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.181719}
|
| 100 |
+
{"loss": 0.38719044, "grad_norm": 13.88518015, "learning_rate": 2e-07, "token_acc": 0.88651163, "epoch": 6.11842105, "global_step/max_steps": "465/760", "percentage": "61.18%", "elapsed_time": "42m 31s", "remaining_time": "26m 58s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.18228}
|
| 101 |
+
{"loss": 0.39121766, "grad_norm": 12.74952726, "learning_rate": 1.9e-07, "token_acc": 0.87396352, "epoch": 6.18421053, "global_step/max_steps": "470/760", "percentage": "61.84%", "elapsed_time": "42m 52s", "remaining_time": "26m 27s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.18268}
|
| 102 |
+
{"loss": 0.3415463, "grad_norm": 11.11411976, "learning_rate": 1.9e-07, "token_acc": 0.89875836, "epoch": 6.25, "global_step/max_steps": "475/760", "percentage": "62.50%", "elapsed_time": "43m 12s", "remaining_time": "25m 55s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.183206}
|
| 103 |
+
{"loss": 0.38265395, "grad_norm": 9.05417936, "learning_rate": 1.8e-07, "token_acc": 0.8994709, "epoch": 6.31578947, "global_step/max_steps": "480/760", "percentage": "63.16%", "elapsed_time": "43m 31s", "remaining_time": "25m 23s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.183785}
|
| 104 |
+
{"loss": 0.34448581, "grad_norm": 12.76382944, "learning_rate": 1.7e-07, "token_acc": 0.89911504, "epoch": 6.38157895, "global_step/max_steps": "485/760", "percentage": "63.82%", "elapsed_time": "43m 51s", "remaining_time": "24m 51s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.18434}
|
| 105 |
+
{"loss": 0.32860575, "grad_norm": 13.03501464, "learning_rate": 1.7e-07, "token_acc": 0.90650779, "epoch": 6.44736842, "global_step/max_steps": "490/760", "percentage": "64.47%", "elapsed_time": "44m 9s", "remaining_time": "24m 19s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.184939}
|
| 106 |
+
{"loss": 0.32854912, "grad_norm": 10.83126851, "learning_rate": 1.6e-07, "token_acc": 0.90980052, "epoch": 6.51315789, "global_step/max_steps": "495/760", "percentage": "65.13%", "elapsed_time": "44m 29s", "remaining_time": "23m 48s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.18546}
|
| 107 |
+
{"loss": 0.33108892, "grad_norm": 9.93697018, "learning_rate": 1.6e-07, "token_acc": 0.90366972, "epoch": 6.57894737, "global_step/max_steps": "500/760", "percentage": "65.79%", "elapsed_time": "44m 48s", "remaining_time": "23m 17s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.185998}
|
| 108 |
+
{"loss": 0.41943026, "grad_norm": 13.7162715, "learning_rate": 1.5e-07, "token_acc": 0.87477954, "epoch": 6.64473684, "global_step/max_steps": "505/760", "percentage": "66.45%", "elapsed_time": "45m 6s", "remaining_time": "22m 46s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.18656}
|
| 109 |
+
{"loss": 0.42898965, "grad_norm": 13.39988271, "learning_rate": 1.5e-07, "token_acc": 0.88362919, "epoch": 6.71052632, "global_step/max_steps": "510/760", "percentage": "67.11%", "elapsed_time": "45m 25s", "remaining_time": "22m 16s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.187118}
|
| 110 |
+
{"loss": 0.38254173, "grad_norm": 12.18274469, "learning_rate": 1.4e-07, "token_acc": 0.88393686, "epoch": 6.77631579, "global_step/max_steps": "515/760", "percentage": "67.76%", "elapsed_time": "45m 43s", "remaining_time": "21m 45s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.187696}
|
| 111 |
+
{"loss": 0.34045658, "grad_norm": 9.70718661, "learning_rate": 1.4e-07, "token_acc": 0.89765101, "epoch": 6.84210526, "global_step/max_steps": "520/760", "percentage": "68.42%", "elapsed_time": "46m 3s", "remaining_time": "21m 15s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.188197}
|
| 112 |
+
{"loss": 0.35535841, "grad_norm": 11.16728077, "learning_rate": 1.3e-07, "token_acc": 0.89400922, "epoch": 6.90789474, "global_step/max_steps": "525/760", "percentage": "69.08%", "elapsed_time": "46m 21s", "remaining_time": "20m 45s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.188723}
|
| 113 |
+
{"loss": 0.39788885, "grad_norm": 12.33942622, "learning_rate": 1.3e-07, "token_acc": 0.89342629, "epoch": 6.97368421, "global_step/max_steps": "530/760", "percentage": "69.74%", "elapsed_time": "46m 40s", "remaining_time": "20m 15s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.189226}
|
| 114 |
+
{"eval_loss": 0.87638384, "eval_runtime": 11.7441, "eval_samples_per_second": 11.495, "eval_steps_per_second": 1.448, "eval_token_acc": 0.76943557, "epoch": 7.0, "global_step/max_steps": "532/760", "percentage": "70.00%", "elapsed_time": "47m 0s", "remaining_time": "20m 8s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.188651}
|
| 115 |
+
{"loss": 0.35242004, "grad_norm": 7.7182241, "learning_rate": 1.2e-07, "token_acc": 0.89974511, "epoch": 7.03947368, "global_step/max_steps": "535/760", "percentage": "70.39%", "elapsed_time": "49m 1s", "remaining_time": "20m 36s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.181909}
|
| 116 |
+
{"loss": 0.39486873, "grad_norm": 14.45331168, "learning_rate": 1.2e-07, "token_acc": 0.91481481, "epoch": 7.10526316, "global_step/max_steps": "540/760", "percentage": "71.05%", "elapsed_time": "49m 21s", "remaining_time": "20m 6s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.182363}
|
| 117 |
+
{"loss": 0.34441402, "grad_norm": 11.37036945, "learning_rate": 1.1e-07, "token_acc": 0.89748549, "epoch": 7.17105263, "global_step/max_steps": "545/760", "percentage": "71.71%", "elapsed_time": "49m 39s", "remaining_time": "19m 35s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.182904}
|
| 118 |
+
{"loss": 0.32244649, "grad_norm": 9.90080573, "learning_rate": 1.1e-07, "token_acc": 0.90873016, "epoch": 7.23684211, "global_step/max_steps": "550/760", "percentage": "72.37%", "elapsed_time": "49m 59s", "remaining_time": "19m 5s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.183391}
|
| 119 |
+
{"loss": 0.33222942, "grad_norm": 14.14349121, "learning_rate": 1e-07, "token_acc": 0.90391791, "epoch": 7.30263158, "global_step/max_steps": "555/760", "percentage": "73.03%", "elapsed_time": "50m 18s", "remaining_time": "18m 34s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.18388}
|
| 120 |
+
{"loss": 0.30484335, "grad_norm": 11.05255347, "learning_rate": 1e-07, "token_acc": 0.91179291, "epoch": 7.36842105, "global_step/max_steps": "560/760", "percentage": "73.68%", "elapsed_time": "50m 37s", "remaining_time": "18m 4s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.184381}
{"loss": 0.35793195, "grad_norm": 11.36206043, "learning_rate": 9e-08, "token_acc": 0.90137221, "epoch": 7.43421053, "global_step/max_steps": "565/760", "percentage": "74.34%", "elapsed_time": "50m 56s", "remaining_time": "17m 34s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.184848}
{"loss": 0.34636035, "grad_norm": 11.5419839, "learning_rate": 9e-08, "token_acc": 0.89878893, "epoch": 7.5, "global_step/max_steps": "570/760", "percentage": "75.00%", "elapsed_time": "51m 15s", "remaining_time": "17m 5s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.185334}
{"loss": 0.26542242, "grad_norm": 13.11391942, "learning_rate": 8e-08, "token_acc": 0.91387126, "epoch": 7.56578947, "global_step/max_steps": "575/760", "percentage": "75.66%", "elapsed_time": "51m 34s", "remaining_time": "16m 35s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.185809}
{"loss": 0.3173897, "grad_norm": 11.94004068, "learning_rate": 8e-08, "token_acc": 0.9028777, "epoch": 7.63157895, "global_step/max_steps": "580/760", "percentage": "76.32%", "elapsed_time": "51m 53s", "remaining_time": "16m 6s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.186273}
{"loss": 0.31118855, "grad_norm": 14.66870429, "learning_rate": 8e-08, "token_acc": 0.91055276, "epoch": 7.69736842, "global_step/max_steps": "585/760", "percentage": "76.97%", "elapsed_time": "52m 13s", "remaining_time": "15m 37s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.186708}
{"loss": 0.32157631, "grad_norm": 12.06775911, "learning_rate": 7e-08, "token_acc": 0.90810811, "epoch": 7.76315789, "global_step/max_steps": "590/760", "percentage": "77.63%", "elapsed_time": "52m 32s", "remaining_time": "15m 8s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.18717}
{"loss": 0.30963364, "grad_norm": 11.37855567, "learning_rate": 7e-08, "token_acc": 0.90719861, "epoch": 7.82894737, "global_step/max_steps": "595/760", "percentage": "78.29%", "elapsed_time": "52m 50s", "remaining_time": "14m 39s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.187665}
{"loss": 0.35222101, "grad_norm": 10.968135, "learning_rate": 6e-08, "token_acc": 0.9011178, "epoch": 7.89473684, "global_step/max_steps": "600/760", "percentage": "78.95%", "elapsed_time": "53m 9s", "remaining_time": "14m 10s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.188114}
{"loss": 0.38019013, "grad_norm": 13.62190038, "learning_rate": 6e-08, "token_acc": 0.87465438, "epoch": 7.96052632, "global_step/max_steps": "605/760", "percentage": "79.61%", "elapsed_time": "53m 28s", "remaining_time": "13m 42s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.188561}
{"eval_loss": 0.90744567, "eval_runtime": 11.7559, "eval_samples_per_second": 11.484, "eval_steps_per_second": 1.446, "eval_token_acc": 0.76783813, "epoch": 8.0, "global_step/max_steps": "608/760", "percentage": "80.00%", "elapsed_time": "53m 51s", "remaining_time": "13m 27s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.188122}
{"loss": 0.328544, "grad_norm": 11.09485671, "learning_rate": 6e-08, "token_acc": 0.90344249, "epoch": 8.02631579, "global_step/max_steps": "610/760", "percentage": "80.26%", "elapsed_time": "55m 51s", "remaining_time": "13m 44s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.181997}
{"loss": 0.36517785, "grad_norm": 13.5692201, "learning_rate": 5e-08, "token_acc": 0.90467626, "epoch": 8.09210526, "global_step/max_steps": "615/760", "percentage": "80.92%", "elapsed_time": "56m 12s", "remaining_time": "13m 15s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.182366}
{"loss": 0.33747931, "grad_norm": 12.34830502, "learning_rate": 5e-08, "token_acc": 0.89779736, "epoch": 8.15789474, "global_step/max_steps": "620/760", "percentage": "81.58%", "elapsed_time": "56m 32s", "remaining_time": "12m 46s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.182734}
{"loss": 0.31133468, "grad_norm": 13.37335466, "learning_rate": 5e-08, "token_acc": 0.90362511, "epoch": 8.22368421, "global_step/max_steps": "625/760", "percentage": "82.24%", "elapsed_time": "56m 52s", "remaining_time": "12m 17s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.183125}
{"loss": 0.32176852, "grad_norm": 9.70805811, "learning_rate": 4e-08, "token_acc": 0.9171123, "epoch": 8.28947368, "global_step/max_steps": "630/760", "percentage": "82.89%", "elapsed_time": "57m 12s", "remaining_time": "11m 48s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.18353}
{"loss": 0.32019682, "grad_norm": 11.08719854, "learning_rate": 4e-08, "token_acc": 0.91584602, "epoch": 8.35526316, "global_step/max_steps": "635/760", "percentage": "83.55%", "elapsed_time": "57m 31s", "remaining_time": "11m 19s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.183973}
{"loss": 0.33460722, "grad_norm": 11.68847763, "learning_rate": 4e-08, "token_acc": 0.9139265, "epoch": 8.42105263, "global_step/max_steps": "640/760", "percentage": "84.21%", "elapsed_time": "57m 50s", "remaining_time": "10m 50s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.184391}
{"loss": 0.25271811, "grad_norm": 10.1263874, "learning_rate": 3e-08, "token_acc": 0.92458101, "epoch": 8.48684211, "global_step/max_steps": "645/760", "percentage": "84.87%", "elapsed_time": "58m 10s", "remaining_time": "10m 22s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.184813}
{"loss": 0.28289604, "grad_norm": 10.57567859, "learning_rate": 3e-08, "token_acc": 0.91559001, "epoch": 8.55263158, "global_step/max_steps": "650/760", "percentage": "85.53%", "elapsed_time": "58m 28s", "remaining_time": "9m 53s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.185247}
{"loss": 0.30494127, "grad_norm": 11.25375302, "learning_rate": 3e-08, "token_acc": 0.90987536, "epoch": 8.61842105, "global_step/max_steps": "655/760", "percentage": "86.18%", "elapsed_time": "58m 48s", "remaining_time": "9m 25s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.185649}
{"loss": 0.26085703, "grad_norm": 9.62444181, "learning_rate": 3e-08, "token_acc": 0.92429577, "epoch": 8.68421053, "global_step/max_steps": "660/760", "percentage": "86.84%", "elapsed_time": "59m 7s", "remaining_time": "8m 57s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.186035}
{"loss": 0.31142015, "grad_norm": 14.0443236, "learning_rate": 2e-08, "token_acc": 0.90163934, "epoch": 8.75, "global_step/max_steps": "665/760", "percentage": "87.50%", "elapsed_time": "59m 26s", "remaining_time": "8m 29s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.186436}
{"loss": 0.33683982, "grad_norm": 14.93347267, "learning_rate": 2e-08, "token_acc": 0.90386521, "epoch": 8.81578947, "global_step/max_steps": "670/760", "percentage": "88.16%", "elapsed_time": "59m 45s", "remaining_time": "8m 1s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.18686}
{"loss": 0.30484743, "grad_norm": 12.3242763, "learning_rate": 2e-08, "token_acc": 0.91292639, "epoch": 8.88157895, "global_step/max_steps": "675/760", "percentage": "88.82%", "elapsed_time": "1h 0m 5s", "remaining_time": "7m 33s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.187226}
{"loss": 0.34318013, "grad_norm": 14.10461969, "learning_rate": 2e-08, "token_acc": 0.89927405, "epoch": 8.94736842, "global_step/max_steps": "680/760", "percentage": "89.47%", "elapsed_time": "1h 0m 24s", "remaining_time": "7m 6s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.187631}
{"eval_loss": 0.92236823, "eval_runtime": 11.8005, "eval_samples_per_second": 11.44, "eval_steps_per_second": 1.441, "eval_token_acc": 0.76411076, "epoch": 9.0, "global_step/max_steps": "684/760", "percentage": "90.00%", "elapsed_time": "1h 0m 50s", "remaining_time": "6m 45s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.187389}
{"loss": 0.30799294, "grad_norm": 13.72248565, "learning_rate": 1e-08, "token_acc": 0.9157197, "epoch": 9.01315789, "global_step/max_steps": "685/760", "percentage": "90.13%", "elapsed_time": "1h 2m 48s", "remaining_time": "6m 52s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.181788}
{"loss": 0.32543039, "grad_norm": 11.08344386, "learning_rate": 1e-08, "token_acc": 0.90828924, "epoch": 9.07894737, "global_step/max_steps": "690/760", "percentage": "90.79%", "elapsed_time": "1h 3m 7s", "remaining_time": "6m 24s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.182179}
{"loss": 0.29225836, "grad_norm": 11.90729093, "learning_rate": 1e-08, "token_acc": 0.92395105, "epoch": 9.14473684, "global_step/max_steps": "695/760", "percentage": "91.45%", "elapsed_time": "1h 3m 27s", "remaining_time": "5m 56s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.182555}
{"loss": 0.30806551, "grad_norm": 10.73859046, "learning_rate": 1e-08, "token_acc": 0.91734052, "epoch": 9.21052632, "global_step/max_steps": "700/760", "percentage": "92.11%", "elapsed_time": "1h 3m 47s", "remaining_time": "5m 28s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.182896}
{"loss": 0.32995567, "grad_norm": 11.6545818, "learning_rate": 1e-08, "token_acc": 0.91465677, "epoch": 9.27631579, "global_step/max_steps": "705/760", "percentage": "92.76%", "elapsed_time": "1h 4m 6s", "remaining_time": "5m 0s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.183297}
{"loss": 0.35297151, "grad_norm": 10.71563719, "learning_rate": 1e-08, "token_acc": 0.88929889, "epoch": 9.34210526, "global_step/max_steps": "710/760", "percentage": "93.42%", "elapsed_time": "1h 4m 24s", "remaining_time": "4m 32s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.18371}
{"loss": 0.29446287, "grad_norm": 14.8354412, "learning_rate": 1e-08, "token_acc": 0.90686736, "epoch": 9.40789474, "global_step/max_steps": "715/760", "percentage": "94.08%", "elapsed_time": "1h 4m 43s", "remaining_time": "4m 4s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.184104}
{"loss": 0.27518668, "grad_norm": 13.32190376, "learning_rate": 0.0, "token_acc": 0.92389381, "epoch": 9.47368421, "global_step/max_steps": "720/760", "percentage": "94.74%", "elapsed_time": "1h 5m 3s", "remaining_time": "3m 36s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.184429}
{"loss": 0.28573508, "grad_norm": 10.25606923, "learning_rate": 0.0, "token_acc": 0.9112478, "epoch": 9.53947368, "global_step/max_steps": "725/760", "percentage": "95.39%", "elapsed_time": "1h 5m 22s", "remaining_time": "3m 9s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.184828}
{"loss": 0.31120999, "grad_norm": 12.03400386, "learning_rate": 0.0, "token_acc": 0.9164396, "epoch": 9.60526316, "global_step/max_steps": "730/760", "percentage": "96.05%", "elapsed_time": "1h 5m 41s", "remaining_time": "2m 41s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.185186}
{"loss": 0.27546597, "grad_norm": 12.60717672, "learning_rate": 0.0, "token_acc": 0.93406593, "epoch": 9.67105263, "global_step/max_steps": "735/760", "percentage": "96.71%", "elapsed_time": "1h 6m 1s", "remaining_time": "2m 14s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.185559}
{"loss": 0.29992547, "grad_norm": 14.49252127, "learning_rate": 0.0, "token_acc": 0.91187739, "epoch": 9.73684211, "global_step/max_steps": "740/760", "percentage": "97.37%", "elapsed_time": "1h 6m 20s", "remaining_time": "1m 47s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.185929}
{"loss": 0.28825083, "grad_norm": 10.32137161, "learning_rate": 0.0, "token_acc": 0.91651206, "epoch": 9.80263158, "global_step/max_steps": "745/760", "percentage": "98.03%", "elapsed_time": "1h 6m 38s", "remaining_time": "1m 20s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.186314}
{"loss": 0.29370143, "grad_norm": 11.55867552, "learning_rate": 0.0, "token_acc": 0.91425993, "epoch": 9.86842105, "global_step/max_steps": "750/760", "percentage": "98.68%", "elapsed_time": "1h 6m 57s", "remaining_time": "53s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.186691}
{"loss": 0.27887554, "grad_norm": 10.06715286, "learning_rate": 0.0, "token_acc": 0.92258652, "epoch": 9.93421053, "global_step/max_steps": "755/760", "percentage": "99.34%", "elapsed_time": "1h 7m 16s", "remaining_time": "26s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.187024}
{"loss": 0.33659291, "grad_norm": 16.67109379, "learning_rate": 0.0, "token_acc": 0.90573372, "epoch": 10.0, "global_step/max_steps": "760/760", "percentage": "100.00%", "elapsed_time": "1h 7m 35s", "remaining_time": "0s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.187412}
{"eval_loss": 0.92296737, "eval_runtime": 11.7093, "eval_samples_per_second": 11.529, "eval_steps_per_second": 1.452, "eval_token_acc": 0.76464324, "epoch": 10.0, "global_step/max_steps": "760/760", "percentage": "100.00%", "elapsed_time": "1h 7m 46s", "remaining_time": "0s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.186872}
{"eval_loss": 0.92296737, "eval_runtime": 14.567, "eval_samples_per_second": 9.268, "eval_steps_per_second": 1.167, "eval_token_acc": 0.76464324, "epoch": 10.0, "global_step/max_steps": "760/760", "percentage": "100.00%", "elapsed_time": "1h 9m 53s", "remaining_time": "0s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.181228}
{"train_runtime": 4384.4417, "train_samples_per_second": 2.776, "train_steps_per_second": 0.173, "total_flos": 69848077049856.0, "train_loss": 0.52662418, "epoch": 10.0, "global_step/max_steps": "760/760", "percentage": "100.00%", "elapsed_time": "1h 12m 59s", "remaining_time": "0s", "memory(GiB)": 54.13, "train_speed(iter/s)": 0.173556}
{"model_parameter_info": "Qwen2_5_VLForConditionalGeneration: 8292.1667M Params (7615.6165M Trainable [91.8411%]), 0.0019M Buffers.", "last_model_checkpoint": "/mnt/data/users/liamding/data/MMMT/lora/qwen2.5vl-7b-full_sft_ood/v1-20251004-154120/checkpoint-760", "best_model_checkpoint": "/mnt/data/users/liamding/data/MMMT/lora/qwen2.5vl-7b-full_sft_ood/v1-20251004-154120/checkpoint-304", "best_metric": 0.78271794, "global_step": 760, "log_history": [{"loss": 1.0294156074523926, "grad_norm": 17.31591809508792, "learning_rate": 6.578947368421052e-09, "token_acc": 0.696969696969697, "epoch": 0.013157894736842105, "step": 1}, {"loss": 0.9056330323219299, "grad_norm": 16.548277625750544, "learning_rate": 3.289473684210526e-08, "token_acc": 0.7521472392638037, "epoch": 0.06578947368421052, "step": 5}, {"loss": 1.0227190017700196, "grad_norm": 15.423444339129516, "learning_rate": 6.578947368421052e-08, "token_acc": 0.7282809611829945, "epoch": 0.13157894736842105, "step": 10}, {"loss": 0.9264978408813477, "grad_norm": 16.177173530018614, "learning_rate": 9.868421052631579e-08, "token_acc": 0.7623674911660777, "epoch": 0.19736842105263158, "step": 15}, {"loss": 0.9411476135253907, "grad_norm": 16.91098064197238, "learning_rate": 1.3157894736842104e-07, "token_acc": 0.7415832575068244, "epoch": 0.2631578947368421, "step": 20}, {"loss": 0.8977519989013671, "grad_norm": 17.10921014210045, "learning_rate": 1.6447368421052632e-07, "token_acc": 0.7601214574898786, "epoch": 0.32894736842105265, "step": 25}, {"loss": 0.913768196105957, "grad_norm": 17.1071080088263, "learning_rate": 1.9736842105263157e-07, "token_acc": 0.7412712623097583, "epoch": 0.39473684210526316, "step": 30}, {"loss": 0.8454873085021972, "grad_norm": 19.227963936500046, "learning_rate": 2.3026315789473683e-07, "token_acc": 0.7644521138912856, "epoch": 0.4605263157894737, "step": 35}, {"loss": 0.7893925666809082, "grad_norm": 16.200443052460418, "learning_rate": 2.631578947368421e-07, "token_acc": 0.7670863309352518, "epoch": 0.5263157894736842, "step": 40}, {"loss": 0.8727176666259766, "grad_norm": 12.81356621670082, "learning_rate": 2.960526315789473e-07, "token_acc": 0.7574171029668412, "epoch": 0.5921052631578947, "step": 45}, {"loss": 0.8613866806030274, "grad_norm": 23.738559726620743, "learning_rate": 3.2894736842105264e-07, "token_acc": 0.7643884892086331, "epoch": 0.6578947368421053, "step": 50}, {"loss": 0.9199527740478516, "grad_norm": 18.20488127129436, "learning_rate": 3.618421052631579e-07, "token_acc": 0.7350044762757386, "epoch": 0.7236842105263158, "step": 55}, {"loss": 0.8155546188354492, "grad_norm": 16.53984575296178, "learning_rate": 3.9473684210526315e-07, "token_acc": 0.7675996607294318, "epoch": 0.7894736842105263, "step": 60}, {"loss": 0.8712363243103027, "grad_norm": 15.361305270466637, "learning_rate": 4.2763157894736837e-07, "token_acc": 0.75, "epoch": 0.8552631578947368, "step": 65}, {"loss": 0.7804427146911621, "grad_norm": 14.563690201290349, "learning_rate": 4.6052631578947365e-07, "token_acc": 0.7817679558011049, "epoch": 0.9210526315789473, "step": 70}, {"loss": 0.8566499710083008, "grad_norm": 13.727303038868559, "learning_rate": 4.934210526315789e-07, "token_acc": 0.7480916030534351, "epoch": 0.9868421052631579, "step": 75}, {"eval_loss": 0.8291671872138977, "eval_runtime": 10.7249, "eval_samples_per_second": 12.587, "eval_steps_per_second": 1.585, "eval_token_acc": 0.759318423855165, "epoch": 1.0, "step": 76}, {"loss": 0.8415672302246093, "grad_norm": 13.787467147156026, "learning_rate": 
4.999578104083306e-07, "token_acc": 0.7571157495256167, "epoch": 1.0526315789473684, "step": 80}, {"loss": 0.9233635902404785, "grad_norm": 13.705484769366395, "learning_rate": 4.997864395968252e-07, "token_acc": 0.7350579839429081, "epoch": 1.118421052631579, "step": 85}, {"loss": 0.8335156440734863, "grad_norm": 13.825524241955167, "learning_rate": 4.994833410208486e-07, "token_acc": 0.7606837606837606, "epoch": 1.1842105263157894, "step": 90}, {"loss": 0.7449466228485108, "grad_norm": 13.812951965959309, "learning_rate": 4.990486745229364e-07, "token_acc": 0.7797202797202797, "epoch": 1.25, "step": 95}, {"loss": 0.7893705844879151, "grad_norm": 12.979769860353171, "learning_rate": 4.984826693294873e-07, "token_acc": 0.7683168316831683, "epoch": 1.3157894736842106, "step": 100}, {"loss": 0.7847232818603516, "grad_norm": 11.910642093630177, "learning_rate": 4.977856239298789e-07, "token_acc": 0.7681159420289855, "epoch": 1.381578947368421, "step": 105}, {"loss": 0.7388627052307128, "grad_norm": 15.20236434989543, "learning_rate": 4.969579059190548e-07, "token_acc": 0.7783783783783784, "epoch": 1.4473684210526316, "step": 110}, {"loss": 0.7851850509643554, "grad_norm": 13.920033113899239, "learning_rate": 4.9599995180367e-07, "token_acc": 0.7647547797173733, "epoch": 1.513157894736842, "step": 115}, {"loss": 0.7619089126586914, "grad_norm": 12.383418809687024, "learning_rate": 4.949122667718934e-07, "token_acc": 0.7688848920863309, "epoch": 1.5789473684210527, "step": 120}, {"loss": 0.7471943378448487, "grad_norm": 15.64056532443421, "learning_rate": 4.936954244269917e-07, "token_acc": 0.7835820895522388, "epoch": 1.6447368421052633, "step": 125}, {"loss": 0.7908691883087158, "grad_norm": 13.492059761704322, "learning_rate": 4.923500664848326e-07, "token_acc": 0.7828096118299446, "epoch": 1.7105263157894737, "step": 130}, {"loss": 0.7812448501586914, "grad_norm": 14.529532033698874, "learning_rate": 4.908769024354683e-07, "token_acc": 0.7780979827089337, "epoch": 1.776315789473684, "step": 135}, {"loss": 0.7617037773132325, "grad_norm": 13.777161961650751, "learning_rate": 4.892767091689785e-07, "token_acc": 0.7804444444444445, "epoch": 1.8421052631578947, "step": 140}, {"loss": 0.8042187690734863, "grad_norm": 11.946802493317874, "learning_rate": 4.875503305657677e-07, "token_acc": 0.772093023255814, "epoch": 1.9078947368421053, "step": 145}, {"loss": 0.7846203804016113, "grad_norm": 14.275549665617376, "learning_rate": 4.856986770515357e-07, "token_acc": 0.7886029411764706, "epoch": 1.973684210526316, "step": 150}, {"eval_loss": 0.7974297404289246, "eval_runtime": 11.3855, "eval_samples_per_second": 11.857, "eval_steps_per_second": 1.493, "eval_token_acc": 0.7699680511182109, "epoch": 2.0, "step": 152}, {"loss": 0.704461145401001, "grad_norm": 12.208281829044838, "learning_rate": 4.837227251171537e-07, "token_acc": 0.7754927163667523, "epoch": 2.039473684210526, "step": 155}, {"loss": 0.74434494972229, "grad_norm": 12.86816500951879, "learning_rate": 4.816235168037004e-07, "token_acc": 0.7828886844526219, "epoch": 2.1052631578947367, "step": 160}, {"loss": 0.6901160717010498, "grad_norm": 13.155535172747632, "learning_rate": 4.794021591529302e-07, "token_acc": 0.7897574123989218, "epoch": 2.1710526315789473, "step": 165}, {"loss": 0.7156202793121338, "grad_norm": 11.694403910193117, "learning_rate": 4.770598236234616e-07, "token_acc": 0.8040152963671128, "epoch": 2.236842105263158, "step": 170}, {"loss": 0.7139366149902344, "grad_norm": 15.777973481351275, "learning_rate": 
4.745977454729947e-07, "token_acc": 0.7982222222222223, "epoch": 2.3026315789473686, "step": 175}, {"loss": 0.6547629356384277, "grad_norm": 11.314273525019125, "learning_rate": 4.720172231068844e-07, "token_acc": 0.7982283464566929, "epoch": 2.3684210526315788, "step": 180}, {"loss": 0.6626088142395019, "grad_norm": 12.048816862642347, "learning_rate": 4.693196173934107e-07, "token_acc": 0.82744702320888, "epoch": 2.4342105263157894, "step": 185}, {"loss": 0.775759220123291, "grad_norm": 13.592183711828891, "learning_rate": 4.6650635094610966e-07, "token_acc": 0.7822374039282665, "epoch": 2.5, "step": 190}, {"loss": 0.6928662300109864, "grad_norm": 13.289728232283066, "learning_rate": 4.635789073735412e-07, "token_acc": 0.7897526501766784, "epoch": 2.5657894736842106, "step": 195}, {"loss": 0.6376441955566406, "grad_norm": 16.08999575789933, "learning_rate": 4.605388304968914e-07, "token_acc": 0.8148487626031164, "epoch": 2.6315789473684212, "step": 200}, {"loss": 0.6813505172729493, "grad_norm": 14.327531535921459, "learning_rate": 4.5738772353582033e-07, "token_acc": 0.7959542656112577, "epoch": 2.6973684210526314, "step": 205}, {"loss": 0.6876439094543457, "grad_norm": 14.977189931535413, "learning_rate": 4.541272482629857e-07, "token_acc": 0.8045325779036827, "epoch": 2.763157894736842, "step": 210}, {"loss": 0.7278162002563476, "grad_norm": 13.092843394229808, "learning_rate": 4.507591241276879e-07, "token_acc": 0.781491002570694, "epoch": 2.8289473684210527, "step": 215}, {"loss": 0.7397733688354492, "grad_norm": 14.423896457474601, "learning_rate": 4.472851273490984e-07, "token_acc": 0.7873811581676751, "epoch": 2.8947368421052633, "step": 220}, {"loss": 0.7403717517852784, "grad_norm": 14.00628897732342, "learning_rate": 4.437070899795503e-07, "token_acc": 0.7749546279491834, "epoch": 2.9605263157894735, "step": 225}, {"eval_loss": 0.7827797532081604, "eval_runtime": 11.2217, "eval_samples_per_second": 12.03, "eval_steps_per_second": 1.515, "eval_token_acc": 0.7715654952076677, "epoch": 3.0, "step": 228}, {"loss": 0.6476545810699463, "grad_norm": 12.117965531246165, "learning_rate": 4.4002689893838405e-07, "token_acc": 0.8024691358024691, "epoch": 3.026315789473684, "step": 230}, {"loss": 0.5883333206176757, "grad_norm": 10.634581616763157, "learning_rate": 4.3624649501685923e-07, "token_acc": 0.8282918149466192, "epoch": 3.0921052631578947, "step": 235}, {"loss": 0.626362943649292, "grad_norm": 13.972414955051068, "learning_rate": 4.323678718546552e-07, "token_acc": 0.8032345013477089, "epoch": 3.1578947368421053, "step": 240}, {"loss": 0.5920934200286865, "grad_norm": 11.77574647255366, "learning_rate": 4.2839307488850264e-07, "token_acc": 0.8289930555555556, "epoch": 3.223684210526316, "step": 245}, {"loss": 0.6178097724914551, "grad_norm": 12.858641327934963, "learning_rate": 4.243242002734988e-07, "token_acc": 0.8099389712292938, "epoch": 3.2894736842105265, "step": 250}, {"loss": 0.6222902774810791, "grad_norm": 11.992039120911935, "learning_rate": 4.201633937776759e-07, "token_acc": 0.8178368121442126, "epoch": 3.3552631578947367, "step": 255}, {"loss": 0.6095127582550048, "grad_norm": 14.323771950489155, "learning_rate": 4.159128496504053e-07, "token_acc": 0.8162267839687195, "epoch": 3.4210526315789473, "step": 260}, {"loss": 0.6878387451171875, "grad_norm": 13.080602987140287, "learning_rate": 4.115748094652352e-07, "token_acc": 0.7913351016799293, "epoch": 3.486842105263158, "step": 265}, {"loss": 0.6472003936767579, "grad_norm": 12.921498214011155, "learning_rate": 
4.071515609377705e-07, "token_acc": 0.818973862536302, "epoch": 3.5526315789473686, "step": 270}, {"loss": 0.5890860557556152, "grad_norm": 12.375533162441062, "learning_rate": 4.026454367192199e-07, "token_acc": 0.8175046554934823, "epoch": 3.6184210526315788, "step": 275}, {"loss": 0.6693721771240234, "grad_norm": 13.129202055657805, "learning_rate": 3.9805881316624503e-07, "token_acc": 0.8168761220825853, "epoch": 3.6842105263157894, "step": 280}, {"loss": 0.6817171096801757, "grad_norm": 14.407473010348072, "learning_rate": 3.933941090877615e-07, "token_acc": 0.8132059079061685, "epoch": 3.75, "step": 285}, {"loss": 0.6139223098754882, "grad_norm": 13.662642322032315, "learning_rate": 3.8865378446935217e-07, "token_acc": 0.8305555555555556, "epoch": 3.8157894736842106, "step": 290}, {"loss": 0.5279911041259766, "grad_norm": 11.472371827393802, "learning_rate": 3.8384033917596515e-07, "token_acc": 0.8325666973321068, "epoch": 3.8815789473684212, "step": 295}, {"loss": 0.5814344882965088, "grad_norm": 13.611249691490164, "learning_rate": 3.78956311633581e-07, "token_acc": 0.8251082251082251, "epoch": 3.9473684210526314, "step": 300}, {"eval_loss": 0.7827179431915283, "eval_runtime": 11.4603, "eval_samples_per_second": 11.78, "eval_steps_per_second": 1.483, "eval_token_acc": 0.775825346112886, "epoch": 4.0, "step": 304}, {"loss": 0.497452974319458, "grad_norm": 12.235482741873291, "learning_rate": 3.740042774905449e-07, "token_acc": 0.8531531531531531, "epoch": 4.0131578947368425, "step": 305}, {"loss": 0.52832932472229, "grad_norm": 11.58635467783027, "learning_rate": 3.689868482592684e-07, "token_acc": 0.8386524822695035, "epoch": 4.078947368421052, "step": 310}, {"loss": 0.5145404815673829, "grad_norm": 12.063207912616575, "learning_rate": 3.6390666993901826e-07, "token_acc": 0.8331716779825412, "epoch": 4.144736842105263, "step": 315}, {"loss": 0.5273187160491943, "grad_norm": 12.354386618427899, "learning_rate": 3.587664216205183e-07, "token_acc": 0.833011583011583, "epoch": 4.2105263157894735, "step": 320}, {"loss": 0.4995855808258057, "grad_norm": 11.795002670717809, "learning_rate": 3.535688140730997e-07, "token_acc": 0.8554801163918526, "epoch": 4.276315789473684, "step": 325}, {"loss": 0.5351977348327637, "grad_norm": 12.798727775273417, "learning_rate": 3.4831658831514575e-07, "token_acc": 0.8505654281098546, "epoch": 4.342105263157895, "step": 330}, {"loss": 0.5155584812164307, "grad_norm": 12.730694168346282, "learning_rate": 3.4301251416858403e-07, "token_acc": 0.8545296167247387, "epoch": 4.407894736842105, "step": 335}, {"loss": 0.5392285346984863, "grad_norm": 14.484297908634066, "learning_rate": 3.376593887981886e-07, "token_acc": 0.8305383936451898, "epoch": 4.473684210526316, "step": 340}, {"loss": 0.4732147216796875, "grad_norm": 11.491036408986366, "learning_rate": 3.322600352364633e-07, "token_acc": 0.8621553884711779, "epoch": 4.5394736842105265, "step": 345}, {"loss": 0.5695515155792237, "grad_norm": 11.33748544855005, "learning_rate": 3.268173008948826e-07, "token_acc": 0.8312159709618875, "epoch": 4.605263157894737, "step": 350}, {"loss": 0.4677370548248291, "grad_norm": 12.56964612455139, "learning_rate": 3.213340560622763e-07, "token_acc": 0.8618290258449304, "epoch": 4.671052631578947, "step": 355}, {"loss": 0.5584880352020264, "grad_norm": 15.814504311164637, "learning_rate": 3.1581319239114976e-07, "token_acc": 0.838, "epoch": 4.7368421052631575, "step": 360}, {"loss": 0.5094208717346191, "grad_norm": 12.026239005665621, "learning_rate": 
3.1025762137273735e-07, "token_acc": 0.8604878048780488, "epoch": 4.802631578947368, "step": 365}, {"loss": 0.4925221920013428, "grad_norm": 13.29445423103592, "learning_rate": 3.0467027280159394e-07, "token_acc": 0.865916955017301, "epoch": 4.868421052631579, "step": 370}, {"loss": 0.5490811347961426, "grad_norm": 12.422013374565926, "learning_rate": 2.99054093230534e-07, "token_acc": 0.8436103663985701, "epoch": 4.934210526315789, "step": 375}, {"loss": 0.5336416244506836, "grad_norm": 15.73394585068487, "learning_rate": 2.934120444167326e-07, "token_acc": 0.8478260869565217, "epoch": 5.0, "step": 380}, {"eval_loss": 0.8055287599563599, "eval_runtime": 11.9734, "eval_samples_per_second": 11.275, "eval_steps_per_second": 1.42, "eval_token_acc": 0.7731629392971247, "epoch": 5.0, "step": 380}, {"loss": 0.4058138370513916, "grad_norm": 11.579318373185998, "learning_rate": 2.877471017598092e-07, "token_acc": 0.8892100192678227, "epoch": 5.065789473684211, "step": 385}, {"loss": 0.4946115970611572, "grad_norm": 13.466331406979641, "learning_rate": 2.820622527327158e-07, "token_acc": 0.8582020389249305, "epoch": 5.131578947368421, "step": 390}, {"loss": 0.44744672775268557, "grad_norm": 12.70964930976301, "learning_rate": 2.7636049530625844e-07, "token_acc": 0.8555885262116716, "epoch": 5.197368421052632, "step": 395}, {"loss": 0.39708123207092283, "grad_norm": 10.081726462821392, "learning_rate": 2.706448363680831e-07, "token_acc": 0.8752310536044362, "epoch": 5.2631578947368425, "step": 400}, {"loss": 0.44630985260009765, "grad_norm": 13.123290073548652, "learning_rate": 2.649182901369585e-07, "token_acc": 0.8673647469458988, "epoch": 5.328947368421053, "step": 405}, {"loss": 0.4424083709716797, "grad_norm": 12.55092675524107, "learning_rate": 2.591838765731931e-07, "token_acc": 0.8709369024856597, "epoch": 5.394736842105263, "step": 410}, {"loss": 0.4120338916778564, "grad_norm": 12.157000958625412, "learning_rate": 2.5344461978602413e-07, "token_acc": 0.8837209302325582, "epoch": 5.4605263157894735, "step": 415}, {"loss": 0.39818453788757324, "grad_norm": 11.797704490185449, "learning_rate": 2.477035464388184e-07, "token_acc": 0.8734402852049911, "epoch": 5.526315789473684, "step": 420}, {"loss": 0.4546657562255859, "grad_norm": 12.745422925981462, "learning_rate": 2.419636841529271e-07, "token_acc": 0.8704710144927537, "epoch": 5.592105263157895, "step": 425}, {"loss": 0.45757322311401366, "grad_norm": 13.324760956982862, "learning_rate": 2.3622805991103358e-07, "token_acc": 0.8718190386427899, "epoch": 5.657894736842105, "step": 430}, {"loss": 0.45713186264038086, "grad_norm": 12.043093034906175, "learning_rate": 2.3049969846084053e-07, "token_acc": 0.8784965034965035, "epoch": 5.723684210526316, "step": 435}, {"loss": 0.4352132320404053, "grad_norm": 15.658576422078957, "learning_rate": 2.2478162071993296e-07, "token_acc": 0.8861340679522498, "epoch": 5.7894736842105265, "step": 440}, {"loss": 0.49906353950500487, "grad_norm": 11.146740957754833, "learning_rate": 2.1907684218266306e-07, "token_acc": 0.8625429553264605, "epoch": 5.855263157894737, "step": 445}, {"loss": 0.43380098342895507, "grad_norm": 10.820075426555121, "learning_rate": 2.133883713298946e-07, "token_acc": 0.8672712283594394, "epoch": 5.921052631578947, "step": 450}, {"loss": 0.3760183334350586, "grad_norm": 10.22116607799743, "learning_rate": 2.0771920804244537e-07, "token_acc": 0.8947368421052632, "epoch": 5.9868421052631575, "step": 455}, {"eval_loss": 0.8343057036399841, "eval_runtime": 11.7159, 
"eval_samples_per_second": 11.523, "eval_steps_per_second": 1.451, "eval_token_acc": 0.7726304579339723, "epoch": 6.0, "step": 456}, {"loss": 0.442535400390625, "grad_norm": 13.367377775022812, "learning_rate": 2.0207234201906545e-07, "token_acc": 0.8815663801337154, "epoch": 6.052631578947368, "step": 460}, {"loss": 0.3871904373168945, "grad_norm": 13.885180152614968, "learning_rate": 1.9645075119978528e-07, "token_acc": 0.8865116279069768, "epoch": 6.118421052631579, "step": 465}, {"loss": 0.3912176609039307, "grad_norm": 12.749527263262742, "learning_rate": 1.9085740019546436e-07, "token_acc": 0.8739635157545605, "epoch": 6.184210526315789, "step": 470}, {"loss": 0.34154629707336426, "grad_norm": 11.114119762408144, "learning_rate": 1.8529523872436977e-07, "token_acc": 0.8987583572110793, "epoch": 6.25, "step": 475}, {"loss": 0.38265395164489746, "grad_norm": 9.05417935997834, "learning_rate": 1.7976720005660767e-07, "token_acc": 0.8994708994708994, "epoch": 6.315789473684211, "step": 480}, {"loss": 0.34448580741882323, "grad_norm": 12.763829441208104, "learning_rate": 1.742761994672297e-07, "token_acc": 0.8991150442477877, "epoch": 6.381578947368421, "step": 485}, {"loss": 0.32860574722290037, "grad_norm": 13.035014643295293, "learning_rate": 1.6882513269882913e-07, "token_acc": 0.9065077910174152, "epoch": 6.447368421052632, "step": 490}, {"loss": 0.32854912281036375, "grad_norm": 10.831268513096093, "learning_rate": 1.6341687443443738e-07, "token_acc": 0.9098005203816132, "epoch": 6.5131578947368425, "step": 495}, {"loss": 0.331088924407959, "grad_norm": 9.93697018265633, "learning_rate": 1.5805427678152674e-07, "token_acc": 0.9036697247706422, "epoch": 6.578947368421053, "step": 500}, {"loss": 0.4194302558898926, "grad_norm": 13.716271495126469, "learning_rate": 1.527401677679185e-07, "token_acc": 0.8747795414462081, "epoch": 6.644736842105263, "step": 505}, {"loss": 0.4289896488189697, "grad_norm": 13.399882710387233, "learning_rate": 1.4747734985039e-07, "token_acc": 0.883629191321499, "epoch": 6.7105263157894735, "step": 510}, {"loss": 0.38254172801971437, "grad_norm": 12.182744689817845, "learning_rate": 1.422685984367664e-07, "token_acc": 0.8839368616527391, "epoch": 6.776315789473684, "step": 515}, {"loss": 0.34045658111572263, "grad_norm": 9.707186610053322, "learning_rate": 1.371166604222777e-07, "token_acc": 0.8976510067114094, "epoch": 6.842105263157895, "step": 520}, {"loss": 0.3553584098815918, "grad_norm": 11.167280766500607, "learning_rate": 1.3202425274095116e-07, "token_acc": 0.8940092165898618, "epoch": 6.907894736842105, "step": 525}, {"loss": 0.3978888511657715, "grad_norm": 12.339426222895359, "learning_rate": 1.2699406093280545e-07, "token_acc": 0.8934262948207171, "epoch": 6.973684210526316, "step": 530}, {"eval_loss": 0.8763838410377502, "eval_runtime": 11.7441, "eval_samples_per_second": 11.495, "eval_steps_per_second": 1.448, "eval_token_acc": 0.7694355697550586, "epoch": 7.0, "step": 532}, {"loss": 0.3524200439453125, "grad_norm": 7.718224097938739, "learning_rate": 1.220287377275998e-07, "token_acc": 0.8997451146983857, "epoch": 7.0394736842105265, "step": 535}, {"loss": 0.39486873149871826, "grad_norm": 14.453311680486095, "learning_rate": 1.1713090164588606e-07, "token_acc": 0.9148148148148149, "epoch": 7.105263157894737, "step": 540}, {"loss": 0.3444140195846558, "grad_norm": 11.37036944935638, "learning_rate": 1.1230313561810215e-07, "token_acc": 0.8974854932301741, "epoch": 7.171052631578948, "step": 545}, {"loss": 0.32244648933410647, "grad_norm": 
9.900805732362533, "learning_rate": 1.0754798562243344e-07, "token_acc": 0.9087301587301587, "epoch": 7.2368421052631575, "step": 550}, {"loss": 0.3322294235229492, "grad_norm": 14.143491211947243, "learning_rate": 1.0286795934216203e-07, "token_acc": 0.9039179104477612, "epoch": 7.302631578947368, "step": 555}, {"loss": 0.3048433542251587, "grad_norm": 11.052553470405742, "learning_rate": 9.826552484321085e-08, "token_acc": 0.9117929050814957, "epoch": 7.368421052631579, "step": 560}, {"loss": 0.35793194770812986, "grad_norm": 11.362060430165748, "learning_rate": 9.374310927258038e-08, "token_acc": 0.9013722126929674, "epoch": 7.434210526315789, "step": 565}, {"loss": 0.3463603496551514, "grad_norm": 11.541983896518747, "learning_rate": 8.930309757836516e-08, "token_acc": 0.8987889273356401, "epoch": 7.5, "step": 570}, {"loss": 0.2654224157333374, "grad_norm": 13.113919421742889, "learning_rate": 8.49478312520235e-08, "token_acc": 0.9138712601994561, "epoch": 7.565789473684211, "step": 575}, {"loss": 0.317389702796936, "grad_norm": 11.94004067837314, "learning_rate": 8.067960709356478e-08, "token_acc": 0.9028776978417267, "epoch": 7.631578947368421, "step": 580}, {"loss": 0.3111885547637939, "grad_norm": 14.668704290159432, "learning_rate": 7.650067600030583e-08, "token_acc": 0.9105527638190954, "epoch": 7.697368421052632, "step": 585}, {"loss": 0.3215763092041016, "grad_norm": 12.067759105613176, "learning_rate": 7.241324177983399e-08, "token_acc": 0.9081081081081082, "epoch": 7.7631578947368425, "step": 590}, {"loss": 0.3096336364746094, "grad_norm": 11.378555670601354, "learning_rate": 6.841945998780374e-08, "token_acc": 0.9071986123156982, "epoch": 7.828947368421053, "step": 595}, {"loss": 0.3522210121154785, "grad_norm": 10.968135004904445, "learning_rate": 6.452143679117964e-08, "token_acc": 0.9011177987962167, "epoch": 7.894736842105263, "step": 600}, {"loss": 0.3801901340484619, "grad_norm": 13.621900375202348, "learning_rate": 6.072122785752448e-08, "token_acc": 0.8746543778801843, "epoch": 7.9605263157894735, "step": 605}, {"eval_loss": 0.9074456691741943, "eval_runtime": 11.7559, "eval_samples_per_second": 11.484, "eval_steps_per_second": 1.446, "eval_token_acc": 0.7678381256656017, "epoch": 8.0, "step": 608}, {"loss": 0.32854399681091306, "grad_norm": 11.094856706759074, "learning_rate": 5.702083727091977e-08, "token_acc": 0.9034424853064652, "epoch": 8.026315789473685, "step": 610}, {"loss": 0.365177845954895, "grad_norm": 13.56922009827688, "learning_rate": 5.34222164750886e-08, "token_acc": 0.9046762589928058, "epoch": 8.092105263157896, "step": 615}, {"loss": 0.337479305267334, "grad_norm": 12.348305022353873, "learning_rate": 4.992726324427901e-08, "token_acc": 0.8977973568281938, "epoch": 8.157894736842104, "step": 620}, {"loss": 0.3113346815109253, "grad_norm": 13.373354658665798, "learning_rate": 4.653782068245127e-08, "token_acc": 0.9036251105216623, "epoch": 8.223684210526315, "step": 625}, {"loss": 0.32176852226257324, "grad_norm": 9.70805810993063, "learning_rate": 4.325567625129545e-08, "token_acc": 0.9171122994652406, "epoch": 8.289473684210526, "step": 630}, {"loss": 0.32019681930541993, "grad_norm": 11.087198542630968, "learning_rate": 4.008256082759284e-08, "token_acc": 0.9158460161145927, "epoch": 8.355263157894736, "step": 635}, {"loss": 0.3346072196960449, "grad_norm": 11.68847762915729, "learning_rate": 3.702014779041826e-08, "token_acc": 0.913926499032882, "epoch": 8.421052631578947, "step": 640}, {"loss": 0.2527181148529053, "grad_norm": 
10.12638740099469, "learning_rate": 3.407005213866393e-08, "token_acc": 0.9245810055865922, "epoch": 8.486842105263158, "step": 645}, {"loss": 0.2828960418701172, "grad_norm": 10.575678593551583, "learning_rate": 3.123382963935156e-08, "token_acc": 0.9155900086132644, "epoch": 8.552631578947368, "step": 650}, {"loss": 0.3049412727355957, "grad_norm": 11.253753023963641, "learning_rate": 2.85129760071805e-08, "token_acc": 0.909875359539789, "epoch": 8.618421052631579, "step": 655}, {"loss": 0.2608570337295532, "grad_norm": 9.624441812742798, "learning_rate": 2.5908926115744994e-08, "token_acc": 0.9242957746478874, "epoch": 8.68421052631579, "step": 660}, {"loss": 0.3114201545715332, "grad_norm": 14.044323598527022, "learning_rate": 2.3423053240837514e-08, "token_acc": 0.9016393442622951, "epoch": 8.75, "step": 665}, {"loss": 0.3368398189544678, "grad_norm": 14.93347266826451, "learning_rate": 2.1056668336235623e-08, "token_acc": 0.9038652130822596, "epoch": 8.81578947368421, "step": 670}, {"loss": 0.30484743118286134, "grad_norm": 12.32427630355762, "learning_rate": 1.8811019342355433e-08, "token_acc": 0.9129263913824057, "epoch": 8.881578947368421, "step": 675}, {"loss": 0.34318013191223146, "grad_norm": 14.104619692060185, "learning_rate": 1.6687290528135722e-08, "token_acc": 0.8992740471869328, "epoch": 8.947368421052632, "step": 680}, {"eval_loss": 0.9223682284355164, "eval_runtime": 11.8005, "eval_samples_per_second": 11.44, "eval_steps_per_second": 1.441, "eval_token_acc": 0.7641107561235356, "epoch": 9.0, "step": 684}, {"loss": 0.30799293518066406, "grad_norm": 13.722485651177253, "learning_rate": 1.4686601866500115e-08, "token_acc": 0.915719696969697, "epoch": 9.013157894736842, "step": 685}, {"loss": 0.32543039321899414, "grad_norm": 11.083443862370641, "learning_rate": 1.2810008443726456e-08, "token_acc": 0.908289241622575, "epoch": 9.078947368421053, "step": 690}, {"loss": 0.29225835800170896, "grad_norm": 11.90729092801513, "learning_rate": 1.105849990303473e-08, "token_acc": 0.923951048951049, "epoch": 9.144736842105264, "step": 695}, {"loss": 0.30806550979614256, "grad_norm": 10.738590457271043, "learning_rate": 9.432999922687396e-09, "token_acc": 0.917340521114106, "epoch": 9.210526315789474, "step": 700}, {"loss": 0.3299556732177734, "grad_norm": 11.654581799055522, "learning_rate": 7.934365728877068e-09, "token_acc": 0.9146567717996289, "epoch": 9.276315789473685, "step": 705}, {"loss": 0.3529715061187744, "grad_norm": 10.715637193504373, "learning_rate": 6.563387643658075e-09, "token_acc": 0.8892988929889298, "epoch": 9.342105263157896, "step": 710}, {"loss": 0.2944628715515137, "grad_norm": 14.835441198761986, "learning_rate": 5.3207886681613804e-09, "token_acc": 0.9068673565380997, "epoch": 9.407894736842104, "step": 715}, {"loss": 0.27518668174743655, "grad_norm": 13.321903757763652, "learning_rate": 4.207224101311246e-09, "token_acc": 0.9238938053097345, "epoch": 9.473684210526315, "step": 720}, {"loss": 0.2857350826263428, "grad_norm": 10.256069230948793, "learning_rate": 3.223281194245975e-09, "token_acc": 0.9112478031634447, "epoch": 9.539473684210526, "step": 725}, {"loss": 0.3112099885940552, "grad_norm": 12.03400386189763, "learning_rate": 2.3694788406241894e-09, "token_acc": 0.9164396003633061, "epoch": 9.605263157894736, "step": 730}, {"loss": 0.2754659652709961, "grad_norm": 12.607176716250494, "learning_rate": 1.6462673029802587e-09, "token_acc": 0.9340659340659341, "epoch": 9.671052631578947, "step": 735}, {"loss": 0.2999254703521729, "grad_norm": 
14.492521266612545, "learning_rate": 1.0540279752731252e-09, "token_acc": 0.9118773946360154, "epoch": 9.736842105263158, "step": 740}, {"loss": 0.28825082778930666, "grad_norm": 10.321371605871532, "learning_rate": 5.930731817538893e-10, "token_acc": 0.9165120593692022, "epoch": 9.802631578947368, "step": 745}, {"loss": 0.29370143413543703, "grad_norm": 11.558675521812575, "learning_rate": 2.636460122578399e-10, "token_acc": 0.9142599277978339, "epoch": 9.868421052631579, "step": 750}, {"loss": 0.27887554168701173, "grad_norm": 10.067152860690346, "learning_rate": 6.592019400841753e-11, "token_acc": 0.9225865209471766, "epoch": 9.93421052631579, "step": 755}, {"loss": 0.3365929126739502, "grad_norm": 16.67109379317691, "learning_rate": 0.0, "token_acc": 0.9057337220602527, "epoch": 10.0, "step": 760}, {"eval_loss": 0.9229673743247986, "eval_runtime": 11.7093, "eval_samples_per_second": 11.529, "eval_steps_per_second": 1.452, "eval_token_acc": 0.7646432374866879, "epoch": 10.0, "step": 760}, {"eval_loss": 0.9229673743247986, "eval_runtime": 14.567, "eval_samples_per_second": 9.268, "eval_steps_per_second": 1.167, "eval_token_acc": 0.7646432374866879, "epoch": 10.0, "step": 760}, {"train_runtime": 4384.4417, "train_samples_per_second": 2.776, "train_steps_per_second": 0.173, "total_flos": 69848077049856.0, "train_loss": 0.5266241832783348, "epoch": 10.0, "step": 760}], "memory": 54.130859375}
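The consolidated record above resembles a Hugging Face trainer_state.json layout: full-precision copies of every entry under "log_history" keyed by "step", plus the last/best checkpoint paths, the best metric, and the global step. A minimal sketch of reading it back, assuming it was saved to a file named "trainer_state.json" (the name is an assumption, not confirmed by this upload):

import json

with open("trainer_state.json") as f:  # assumed file name
    state = json.load(f)

# Best checkpoint according to the configured tracking metric.
print("best checkpoint:", state["best_model_checkpoint"])
print("best metric:", state["best_metric"])

# The final "train_runtime" entry summarizes the whole run.
summary = next(e for e in reversed(state["log_history"]) if "train_runtime" in e)
print(f"{summary['train_samples_per_second']:.3f} samples/s "
      f"over {summary['train_runtime'] / 60:.1f} min")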