Add fine-tuned model files
- README.md +62 -0
- adapter_config.json +26 -0
- adapter_model.safetensors +3 -0
- added_tokens.json +4 -0
- all_results.json +8 -0
- bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt +3 -0
- bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt +3 -0
- chat_template.json +3 -0
- latest +1 -0
- llamaboard_config.yaml +68 -0
- mp_rank_00_model_states.pt +3 -0
- preprocessor_config.json +51 -0
- processor_config.json +7 -0
- rng_state_0.pth +3 -0
- rng_state_1.pth +3 -0
- running_log.txt +834 -0
- scheduler.pt +3 -0
- special_tokens_map.json +30 -0
- tokenizer.json +0 -0
- tokenizer.model +3 -0
- tokenizer_config.json +68 -0
- train_results.json +8 -0
- trainer_log.jsonl +5 -0
- trainer_state.json +61 -0
- training_args.bin +3 -0
- training_args.yaml +34 -0
- training_loss.png +0 -0
- zero_to_fp32.py +604 -0
README.md
ADDED
@@ -0,0 +1,62 @@
+---
+library_name: peft
+license: other
+base_model: llava-hf/LLaVA-NeXT-Video-7B-hf
+tags:
+- llama-factory
+- lora
+- generated_from_trainer
+model-index:
+- name: train_2024-12-01-18-22-24
+  results: []
+---
+
+<!-- This model card has been generated automatically according to the information the Trainer had access to. You
+should probably proofread and complete it, then remove this comment. -->
+
+# train_2024-12-01-18-22-24
+
+This model is a fine-tuned version of [llava-hf/LLaVA-NeXT-Video-7B-hf](https://huggingface.co/llava-hf/LLaVA-NeXT-Video-7B-hf) on the merger, LLM_dataset(4o) and LLM_dataset(4mini) datasets.
+
+## Model description
+
+More information needed
+
+## Intended uses & limitations
+
+More information needed
+
+## Training and evaluation data
+
+More information needed
+
+## Training procedure
+
+### Training hyperparameters
+
+The following hyperparameters were used during training:
+- learning_rate: 5e-05
+- train_batch_size: 2
+- eval_batch_size: 8
+- seed: 42
+- distributed_type: multi-GPU
+- num_devices: 2
+- gradient_accumulation_steps: 8
+- total_train_batch_size: 32
+- total_eval_batch_size: 16
+- optimizer: adamw_torch with betas=(0.9, 0.999), epsilon=1e-08 and no additional optimizer arguments
+- lr_scheduler_type: cosine
+- lr_scheduler_warmup_steps: 100
+- num_epochs: 1.0
+
+### Training results
+
+
+
+### Framework versions
+
+- PEFT 0.12.0
+- Transformers 4.46.1
+- Pytorch 2.3.1+cu121
+- Datasets 3.1.0
+- Tokenizers 0.20.3
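For reference, a minimal sketch of attaching this adapter to the base model with PEFT; `"path/to/this/repo"` is a placeholder for wherever the adapter files from this commit live.

```python
# Minimal sketch: load the base model in bf16 and attach this LoRA adapter.
import torch
from peft import PeftModel
from transformers import LlavaNextVideoForConditionalGeneration

base = LlavaNextVideoForConditionalGeneration.from_pretrained(
    "llava-hf/LLaVA-NeXT-Video-7B-hf", torch_dtype=torch.bfloat16
)
model = PeftModel.from_pretrained(base, "path/to/this/repo")
model = model.merge_and_unload()  # optional: fold the LoRA deltas into the base weights
```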
adapter_config.json
ADDED
@@ -0,0 +1,26 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "llava-hf/LLaVA-NeXT-Video-7B-hf",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 16,
+  "lora_dropout": 0,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 8,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": "^(?!.*vision_tower).*(?:down_proj|v_proj|k_proj|q_proj|up_proj|o_proj|gate_proj).*",
+  "task_type": "CAUSAL_LM",
+  "use_dora": false,
+  "use_rslora": false
+}
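The `target_modules` value above is a regex rather than a module list: it matches any of the seven projection names, while the leading negative lookahead keeps anything under the CLIP vision tower out of the adapter. A small sketch of how it filters module names (the names below are illustrative, not dumped from the model):

```python
import re

# target_modules regex from adapter_config.json: accept every attention/MLP
# projection, but reject any module path containing "vision_tower".
pattern = re.compile(
    r"^(?!.*vision_tower).*(?:down_proj|v_proj|k_proj|q_proj|up_proj|o_proj|gate_proj).*"
)

for name in [
    "language_model.model.layers.0.self_attn.q_proj",               # matched -> gets LoRA
    "language_model.model.layers.0.mlp.down_proj",                  # matched -> gets LoRA
    "vision_tower.vision_model.encoder.layers.0.self_attn.q_proj",  # excluded
]:
    print(name, bool(pattern.match(name)))
```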
adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:16c3e62651b9f8eb60399f5f1140c8b200297419a976c8830d669d09d29a8813
+size 40043208
added_tokens.json
ADDED
@@ -0,0 +1,4 @@
+{
+  "<image>": 32001,
+  "<video>": 32000
+}
all_results.json
ADDED
@@ -0,0 +1,8 @@
+{
+  "epoch": 0.9832402234636871,
+  "total_flos": 8.043290589292134e+16,
+  "train_loss": 0.8802601207386364,
+  "train_runtime": 618.3986,
+  "train_samples_per_second": 1.155,
+  "train_steps_per_second": 0.036
+}
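The throughput fields follow directly from the run size recorded in running_log.txt below (714 examples, 22 optimizer steps):

```python
# Sanity check of all_results.json against the run recorded in running_log.txt.
num_examples, num_steps, runtime_s = 714, 22, 618.3986
print(round(num_examples / runtime_s, 3))  # 1.155 -> train_samples_per_second
print(round(num_steps / runtime_s, 3))     # 0.036 -> train_steps_per_second
```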
bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d8326ca28c7c35adb2f9ecc7ecbb84faee0d1d7449e25b8c4ae71aa53ed7da23
+size 119962160
bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cd062851371296c614e0bdae1d95959deaaa82e72269ea37d527d89c79951903
+size 119962288
chat_template.json
ADDED
@@ -0,0 +1,3 @@
+{
+  "chat_template": "{% for message in messages %}{% if message['role'] != 'system' %}{{ message['role'].upper() + ': '}}{% endif %}{# Render all images first #}{% for content in message['content'] | selectattr('type', 'equalto', 'image') %}{{ '<image>\n' }}{% endfor %}{# Render all images first #}{% for content in message['content'] | selectattr('type', 'equalto', 'video') %}{{ '<video>\n' }}{% endfor %}{# Render all text next #}{% if message['role'] != 'assistant' %}{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}{{ content['text'] + ' '}}{% endfor %}{% else %}{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}{% generation %}{{ content['text'] + ' '}}{% endgeneration %}{% endfor %}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'ASSISTANT:' }}{% endif %}"
+}
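The template upper-cases the role, renders image tokens first, then video tokens, then the text, and closes with `ASSISTANT:` when a generation prompt is requested. A sketch of rendering it through the processor (processor-level `apply_chat_template` is available in recent transformers; the message text is a placeholder):

```python
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("llava-hf/LLaVA-NeXT-Video-7B-hf")
messages = [
    {
        "role": "user",
        "content": [
            {"type": "video"},
            {"type": "text", "text": "Describe what happens in this clip."},
        ],
    }
]
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
# roughly: "USER: <video>\nDescribe what happens in this clip. ASSISTANT:"
print(prompt)
```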
latest
ADDED
@@ -0,0 +1 @@
+global_step22
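`latest` points DeepSpeed at the `global_step22` checkpoint; together with the `bf16_zero_pp_rank_*_optim_states.pt` shards and `mp_rank_00_model_states.pt` it forms a ZeRO stage-2 checkpoint. The bundled `zero_to_fp32.py` script consolidates these shards into a single fp32 state dict; its documented usage is along the lines of `python zero_to_fp32.py . pytorch_model.bin`, run from the checkpoint directory.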
llamaboard_config.yaml
ADDED
@@ -0,0 +1,68 @@
+top.booster: flashattn2
+top.checkpoint_path: []
+top.finetuning_type: lora
+top.model_name: LLaVA-NeXT-Video-7B-Chat
+top.quantization_bit: '4'
+top.quantization_method: bitsandbytes
+top.rope_scaling: none
+top.template: llava_next_video
+train.additional_target: ''
+train.badam_mode: layer
+train.badam_switch_interval: 50
+train.badam_switch_mode: ascending
+train.badam_update_ratio: 0.05
+train.batch_size: 2
+train.compute_type: bf16
+train.create_new_adapter: false
+train.cutoff_len: 4096
+train.dataset:
+- merger
+- LLM_dataset(4o)
+- LLM_dataset(4mini)
+train.dataset_dir: /media/dl/7DC4-B1CE/500_video
+train.ds_offload: false
+train.ds_stage: '2'
+train.extra_args: '{"optim": "adamw_torch"}'
+train.freeze_extra_modules: ''
+train.freeze_trainable_layers: 2
+train.freeze_trainable_modules: all
+train.galore_rank: 16
+train.galore_scale: 0.25
+train.galore_target: all
+train.galore_update_interval: 200
+train.gradient_accumulation_steps: 8
+train.learning_rate: 5e-5
+train.logging_steps: 5
+train.lora_alpha: 16
+train.lora_dropout: 0
+train.lora_rank: 8
+train.lora_target: ''
+train.loraplus_lr_ratio: 0
+train.lr_scheduler_type: cosine
+train.mask_history: false
+train.max_grad_norm: '1.0'
+train.max_samples: '100000'
+train.neat_packing: false
+train.neftune_alpha: 0
+train.num_train_epochs: '1'
+train.packing: false
+train.ppo_score_norm: false
+train.ppo_whiten_rewards: false
+train.pref_beta: 0.1
+train.pref_ftx: 0
+train.pref_loss: sigmoid
+train.report_to: false
+train.resize_vocab: false
+train.reward_model: null
+train.save_steps: 100
+train.shift_attn: false
+train.train_on_prompt: false
+train.training_stage: Supervised Fine-Tuning
+train.use_badam: false
+train.use_dora: false
+train.use_galore: false
+train.use_llama_pro: false
+train.use_pissa: false
+train.use_rslora: false
+train.val_size: 0
+train.warmup_steps: 100
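This config records a QLoRA-style setup: 4-bit bitsandbytes quantization with bf16 compute and FlashAttention-2 on two GPUs under DeepSpeed ZeRO stage 2. A sketch of loading the base model the same way outside LLaMA-Factory (the exact kwargs are inferred from the YAML above, not copied from LLaMA-Factory's internals):

```python
import torch
from transformers import BitsAndBytesConfig, LlavaNextVideoForConditionalGeneration

# 4-bit bitsandbytes quantization with bf16 compute, as in the run above.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)
model = LlavaNextVideoForConditionalGeneration.from_pretrained(
    "llava-hf/LLaVA-NeXT-Video-7B-hf",
    quantization_config=bnb_config,
    torch_dtype=torch.bfloat16,
    attn_implementation="flash_attention_2",
)
```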
mp_rank_00_model_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b2b6318bc36a97cfe9e658ba309603c187ee05cdb5ca8ca7faa8c488c8b3366d
+size 149489983
preprocessor_config.json
ADDED
@@ -0,0 +1,51 @@
+{
+  "crop_size": {
+    "height": 336,
+    "width": 336
+  },
+  "do_center_crop": true,
+  "do_convert_rgb": true,
+  "do_normalize": true,
+  "do_pad": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "image_grid_pinpoints": [
+    [
+      336,
+      672
+    ],
+    [
+      672,
+      336
+    ],
+    [
+      672,
+      672
+    ],
+    [
+      1008,
+      336
+    ],
+    [
+      336,
+      1008
+    ]
+  ],
+  "image_mean": [
+    0.48145466,
+    0.4578275,
+    0.40821073
+  ],
+  "image_processor_type": "LlavaNextImageProcessor",
+  "image_std": [
+    0.26862954,
+    0.26130258,
+    0.27577711
+  ],
+  "processor_class": "LlavaNextVideoProcessor",
+  "resample": 3,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "shortest_edge": 336
+  }
+}
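Two values worth decoding: `rescale_factor` is exactly 1/255 (uint8 pixel values scaled into [0, 1]), and `resample: 3` is PIL's bicubic filter.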
processor_config.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "image_token": "<image>",
+  "patch_size": 14,
+  "processor_class": "LlavaNextVideoProcessor",
+  "video_token": "<video>",
+  "vision_feature_select_strategy": "default"
+}
rng_state_0.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c8d6a959372d5e0c2ea025dd26c9d0ad2046fce19352056cae8074dcbd0a6fd4
+size 14512
rng_state_1.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0f68a37892a1b445d21bb35cc10bf7a058a6f9ec8c363f5ed156ff4f49d90fb6
+size 14512
running_log.txt
ADDED
@@ -0,0 +1,834 @@
+[INFO|2024-12-01 18:24:23] parser.py:355 >> Process rank: 1, device: cuda:1, n_gpu: 1, distributed training: True, compute dtype: torch.bfloat16
+
+[WARNING|2024-12-01 18:24:23] logging.py:162 >> We recommend enable `upcast_layernorm` in quantized training.
+
+[WARNING|2024-12-01 18:24:23] logging.py:162 >> `ddp_find_unused_parameters` needs to be set as False for LoRA in DDP training.
+
+[INFO|2024-12-01 18:24:23] parser.py:355 >> Process rank: 0, device: cuda:0, n_gpu: 1, distributed training: True, compute dtype: torch.bfloat16
+
+[INFO|2024-12-01 18:24:23] configuration_utils.py:679 >> loading configuration file config.json from cache at /home/dl/.cache/huggingface/hub/models--llava-hf--LLaVA-NeXT-Video-7B-hf/snapshots/b3b624d0915bb487ef1abb15255aaa2cd5581205/config.json
+
+[INFO|2024-12-01 18:24:23] configuration_utils.py:746 >> Model config LlavaNextVideoConfig {
+  "_name_or_path": "llava-hf/LLaVA-NeXT-Video-7B-hf",
+  "architectures": [
+    "LlavaNextVideoForConditionalGeneration"
+  ],
+  "ignore_index": -100,
+  "image_grid_pinpoints": [
+    [
+      336,
+      672
+    ],
+    [
+      672,
+      336
+    ],
+    [
+      672,
+      672
+    ],
+    [
+      1008,
+      336
+    ],
+    [
+      336,
+      1008
+    ]
+  ],
+  "image_seq_length": 576,
+  "image_token_index": 32001,
+  "model_type": "llava_next_video",
+  "projector_hidden_act": "gelu",
+  "spatial_pool_mode": "average",
+  "spatial_pool_out_channels": 1024,
+  "spatial_pool_stride": 2,
+  "text_config": {
+    "_attn_implementation_autoset": false,
+    "_name_or_path": "lmsys/vicuna-7b-v1.5",
+    "add_cross_attention": false,
+    "architectures": [
+      "LlamaForCausalLM"
+    ],
+    "attention_bias": false,
+    "attention_dropout": 0.0,
+    "bad_words_ids": null,
+    "begin_suppress_tokens": null,
+    "bos_token_id": 1,
+    "chunk_size_feed_forward": 0,
+    "cross_attention_hidden_size": null,
+    "decoder_start_token_id": null,
+    "diversity_penalty": 0.0,
+    "do_sample": false,
+    "early_stopping": false,
+    "encoder_no_repeat_ngram_size": 0,
+    "eos_token_id": 2,
+    "exponential_decay_length_penalty": null,
+    "finetuning_task": null,
+    "forced_bos_token_id": null,
+    "forced_eos_token_id": null,
+    "head_dim": 128,
+    "hidden_act": "silu",
+    "hidden_size": 4096,
+    "id2label": {
+      "0": "LABEL_0",
+      "1": "LABEL_1"
+    },
+    "initializer_range": 0.02,
+    "intermediate_size": 11008,
+    "is_decoder": false,
+    "is_encoder_decoder": false,
+    "label2id": {
+      "LABEL_0": 0,
+      "LABEL_1": 1
+    },
+    "length_penalty": 1.0,
+    "max_length": 20,
+    "max_position_embeddings": 4096,
+    "min_length": 0,
+    "mlp_bias": false,
+    "model_type": "llama",
+    "no_repeat_ngram_size": 0,
+    "num_attention_heads": 32,
+    "num_beam_groups": 1,
+    "num_beams": 1,
+    "num_hidden_layers": 32,
+    "num_key_value_heads": 32,
+    "num_return_sequences": 1,
+    "output_attentions": false,
+    "output_hidden_states": false,
+    "output_scores": false,
+    "pad_token_id": 0,
+    "prefix": null,
+    "pretraining_tp": 1,
+    "problem_type": null,
+    "pruned_heads": {},
+    "remove_invalid_values": false,
+    "repetition_penalty": 1.0,
+    "return_dict": true,
+    "return_dict_in_generate": false,
+    "rms_norm_eps": 1e-05,
+    "rope_scaling": {
+      "factor": 2.5,
+      "rope_type": "linear",
+      "type": "linear"
+    },
+    "rope_theta": 10000.0,
+    "sep_token_id": null,
+    "suppress_tokens": null,
+    "task_specific_params": null,
+    "temperature": 1.0,
+    "tf_legacy_loss": false,
+    "tie_encoder_decoder": false,
+    "tie_word_embeddings": false,
+    "tokenizer_class": null,
+    "top_k": 50,
+    "top_p": 1.0,
+    "torch_dtype": "float16",
+    "torchscript": false,
+    "type": "linear",
+    "typical_p": 1.0,
+    "use_bfloat16": false,
+    "use_cache": true,
+    "vocab_size": 32064
+  },
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.46.1",
+  "use_image_newline_parameter": true,
+  "video_seq_length": 288,
+  "video_token_index": 32000,
+  "vision_config": {
+    "_attn_implementation_autoset": false,
+    "_name_or_path": "",
+    "add_cross_attention": false,
+    "architectures": null,
+    "attention_dropout": 0.0,
+    "bad_words_ids": null,
+    "begin_suppress_tokens": null,
+    "bos_token_id": null,
+    "chunk_size_feed_forward": 0,
+    "cross_attention_hidden_size": null,
+    "decoder_start_token_id": null,
+    "diversity_penalty": 0.0,
+    "do_sample": false,
+    "early_stopping": false,
+    "encoder_no_repeat_ngram_size": 0,
+    "eos_token_id": null,
+    "exponential_decay_length_penalty": null,
+    "finetuning_task": null,
+    "forced_bos_token_id": null,
+    "forced_eos_token_id": null,
+    "hidden_act": "quick_gelu",
+    "hidden_size": 1024,
+    "id2label": {
+      "0": "LABEL_0",
+      "1": "LABEL_1"
+    },
+    "image_size": 336,
+    "initializer_factor": 1.0,
+    "initializer_range": 0.02,
+    "intermediate_size": 4096,
+    "is_decoder": false,
+    "is_encoder_decoder": false,
+    "label2id": {
+      "LABEL_0": 0,
+      "LABEL_1": 1
+    },
+    "layer_norm_eps": 1e-05,
+    "length_penalty": 1.0,
+    "max_length": 20,
+    "min_length": 0,
+    "model_type": "clip_vision_model",
+    "no_repeat_ngram_size": 0,
+    "num_attention_heads": 16,
+    "num_beam_groups": 1,
+    "num_beams": 1,
+    "num_channels": 3,
+    "num_hidden_layers": 24,
+    "num_return_sequences": 1,
+    "output_attentions": false,
+    "output_hidden_states": false,
+    "output_scores": false,
+    "pad_token_id": null,
+    "patch_size": 14,
+    "prefix": null,
+    "problem_type": null,
+    "projection_dim": 768,
+    "pruned_heads": {},
+    "remove_invalid_values": false,
+    "repetition_penalty": 1.0,
+    "return_dict": true,
+    "return_dict_in_generate": false,
+    "sep_token_id": null,
+    "suppress_tokens": null,
+    "task_specific_params": null,
+    "temperature": 1.0,
+    "tf_legacy_loss": false,
+    "tie_encoder_decoder": false,
+    "tie_word_embeddings": true,
+    "tokenizer_class": null,
+    "top_k": 50,
+    "top_p": 1.0,
+    "torch_dtype": null,
+    "torchscript": false,
+    "typical_p": 1.0,
+    "use_bfloat16": false,
+    "vocab_size": 32000
+  },
+  "vision_feature_layer": -2,
+  "vision_feature_select_strategy": "default"
+}
+
+
+[INFO|2024-12-01 18:24:23] tokenization_utils_base.py:2211 >> loading file tokenizer.model from cache at /home/dl/.cache/huggingface/hub/models--llava-hf--LLaVA-NeXT-Video-7B-hf/snapshots/b3b624d0915bb487ef1abb15255aaa2cd5581205/tokenizer.model
+
+[INFO|2024-12-01 18:24:23] tokenization_utils_base.py:2211 >> loading file tokenizer.json from cache at /home/dl/.cache/huggingface/hub/models--llava-hf--LLaVA-NeXT-Video-7B-hf/snapshots/b3b624d0915bb487ef1abb15255aaa2cd5581205/tokenizer.json
+
+[INFO|2024-12-01 18:24:23] tokenization_utils_base.py:2211 >> loading file added_tokens.json from cache at /home/dl/.cache/huggingface/hub/models--llava-hf--LLaVA-NeXT-Video-7B-hf/snapshots/b3b624d0915bb487ef1abb15255aaa2cd5581205/added_tokens.json
+
+[INFO|2024-12-01 18:24:23] tokenization_utils_base.py:2211 >> loading file special_tokens_map.json from cache at /home/dl/.cache/huggingface/hub/models--llava-hf--LLaVA-NeXT-Video-7B-hf/snapshots/b3b624d0915bb487ef1abb15255aaa2cd5581205/special_tokens_map.json
+
+[INFO|2024-12-01 18:24:23] tokenization_utils_base.py:2211 >> loading file tokenizer_config.json from cache at /home/dl/.cache/huggingface/hub/models--llava-hf--LLaVA-NeXT-Video-7B-hf/snapshots/b3b624d0915bb487ef1abb15255aaa2cd5581205/tokenizer_config.json
+
+[INFO|2024-12-01 18:24:23] tokenization_utils_base.py:2475 >> Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+
+[INFO|2024-12-01 18:24:24] processing_utils.py:695 >> loading configuration file processor_config.json from cache at /home/dl/.cache/huggingface/hub/models--llava-hf--LLaVA-NeXT-Video-7B-hf/snapshots/b3b624d0915bb487ef1abb15255aaa2cd5581205/processor_config.json
+
+[INFO|2024-12-01 18:24:24] image_processing_base.py:375 >> loading configuration file preprocessor_config.json from cache at /home/dl/.cache/huggingface/hub/models--llava-hf--LLaVA-NeXT-Video-7B-hf/snapshots/b3b624d0915bb487ef1abb15255aaa2cd5581205/preprocessor_config.json
+
+[INFO|2024-12-01 18:24:24] image_processing_base.py:429 >> Image processor LlavaNextVideoImageProcessor {
+  "crop_size": {
+    "height": 336,
+    "width": 336
+  },
+  "do_center_crop": true,
+  "do_convert_rgb": true,
+  "do_normalize": true,
+  "do_pad": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "image_grid_pinpoints": [
+    [
+      336,
+      672
+    ],
+    [
+      672,
+      336
+    ],
+    [
+      672,
+      672
+    ],
+    [
+      1008,
+      336
+    ],
+    [
+      336,
+      1008
+    ]
+  ],
+  "image_mean": [
+    0.48145466,
+    0.4578275,
+    0.40821073
+  ],
+  "image_processor_type": "LlavaNextVideoImageProcessor",
+  "image_std": [
+    0.26862954,
+    0.26130258,
+    0.27577711
+  ],
+  "processor_class": "LlavaNextVideoProcessor",
+  "resample": 3,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "shortest_edge": 336
+  }
+}
+
+
+[INFO|2024-12-01 18:24:24] image_processing_base.py:375 >> loading configuration file preprocessor_config.json from cache at /home/dl/.cache/huggingface/hub/models--llava-hf--LLaVA-NeXT-Video-7B-hf/snapshots/b3b624d0915bb487ef1abb15255aaa2cd5581205/preprocessor_config.json
+
+[INFO|2024-12-01 18:24:24] image_processing_base.py:429 >> Image processor LlavaNextImageProcessor {
+  "crop_size": {
+    "height": 336,
+    "width": 336
+  },
+  "do_center_crop": true,
+  "do_convert_rgb": true,
+  "do_normalize": true,
+  "do_pad": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "image_grid_pinpoints": [
+    [
+      336,
+      672
+    ],
+    [
+      672,
+      336
+    ],
+    [
+      672,
+      672
+    ],
+    [
+      1008,
+      336
+    ],
+    [
+      336,
+      1008
+    ]
+  ],
+  "image_mean": [
+    0.48145466,
+    0.4578275,
+    0.40821073
+  ],
+  "image_processor_type": "LlavaNextImageProcessor",
+  "image_std": [
+    0.26862954,
+    0.26130258,
+    0.27577711
+  ],
+  "processor_class": "LlavaNextVideoProcessor",
+  "resample": 3,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "shortest_edge": 336
+  }
+}
+
+
+[INFO|2024-12-01 18:24:25] tokenization_utils_base.py:2211 >> loading file tokenizer.model from cache at /home/dl/.cache/huggingface/hub/models--llava-hf--LLaVA-NeXT-Video-7B-hf/snapshots/b3b624d0915bb487ef1abb15255aaa2cd5581205/tokenizer.model
+
+[INFO|2024-12-01 18:24:25] tokenization_utils_base.py:2211 >> loading file tokenizer.json from cache at /home/dl/.cache/huggingface/hub/models--llava-hf--LLaVA-NeXT-Video-7B-hf/snapshots/b3b624d0915bb487ef1abb15255aaa2cd5581205/tokenizer.json
+
+[INFO|2024-12-01 18:24:25] tokenization_utils_base.py:2211 >> loading file added_tokens.json from cache at /home/dl/.cache/huggingface/hub/models--llava-hf--LLaVA-NeXT-Video-7B-hf/snapshots/b3b624d0915bb487ef1abb15255aaa2cd5581205/added_tokens.json
+
+[INFO|2024-12-01 18:24:25] tokenization_utils_base.py:2211 >> loading file special_tokens_map.json from cache at /home/dl/.cache/huggingface/hub/models--llava-hf--LLaVA-NeXT-Video-7B-hf/snapshots/b3b624d0915bb487ef1abb15255aaa2cd5581205/special_tokens_map.json
+
+[INFO|2024-12-01 18:24:25] tokenization_utils_base.py:2211 >> loading file tokenizer_config.json from cache at /home/dl/.cache/huggingface/hub/models--llava-hf--LLaVA-NeXT-Video-7B-hf/snapshots/b3b624d0915bb487ef1abb15255aaa2cd5581205/tokenizer_config.json
+
+[INFO|2024-12-01 18:24:25] tokenization_utils_base.py:2475 >> Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+
+[INFO|2024-12-01 18:24:25] processing_utils.py:695 >> loading configuration file processor_config.json from cache at /home/dl/.cache/huggingface/hub/models--llava-hf--LLaVA-NeXT-Video-7B-hf/snapshots/b3b624d0915bb487ef1abb15255aaa2cd5581205/processor_config.json
+
+[WARNING|2024-12-01 18:24:25] processing_utils.py:1005 >> Some kwargs in processor config are unused and will not have any effect: num_additional_image_tokens.
+
+[INFO|2024-12-01 18:24:25] processing_utils.py:755 >> Processor LlavaNextVideoProcessor:
+- video_processor: LlavaNextVideoImageProcessor {
+  "crop_size": {
+    "height": 336,
+    "width": 336
+  },
+  "do_center_crop": true,
+  "do_convert_rgb": true,
+  "do_normalize": true,
+  "do_pad": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "image_grid_pinpoints": [
+    [
+      336,
+      672
+    ],
+    [
+      672,
+      336
+    ],
+    [
+      672,
+      672
+    ],
+    [
+      1008,
+      336
+    ],
+    [
+      336,
+      1008
+    ]
+  ],
+  "image_mean": [
+    0.48145466,
+    0.4578275,
+    0.40821073
+  ],
+  "image_processor_type": "LlavaNextVideoImageProcessor",
+  "image_std": [
+    0.26862954,
+    0.26130258,
+    0.27577711
+  ],
+  "processor_class": "LlavaNextVideoProcessor",
+  "resample": 3,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "shortest_edge": 336
+  }
+}
+
+- image_processor: LlavaNextImageProcessor {
+  "crop_size": {
+    "height": 336,
+    "width": 336
+  },
+  "do_center_crop": true,
+  "do_convert_rgb": true,
+  "do_normalize": true,
+  "do_pad": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "image_grid_pinpoints": [
+    [
+      336,
+      672
+    ],
+    [
+      672,
+      336
+    ],
+    [
+      672,
+      672
+    ],
+    [
+      1008,
+      336
+    ],
+    [
+      336,
+      1008
+    ]
+  ],
+  "image_mean": [
+    0.48145466,
+    0.4578275,
+    0.40821073
+  ],
+  "image_processor_type": "LlavaNextImageProcessor",
+  "image_std": [
+    0.26862954,
+    0.26130258,
+    0.27577711
+  ],
+  "processor_class": "LlavaNextVideoProcessor",
+  "resample": 3,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "shortest_edge": 336
+  }
+}
+
+- tokenizer: LlamaTokenizerFast(name_or_path='llava-hf/LLaVA-NeXT-Video-7B-hf', vocab_size=32000, model_max_length=4096, is_fast=True, padding_side='left', truncation_side='right', special_tokens={'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<unk>'}, clean_up_tokenization_spaces=False), added_tokens_decoder={
+  0: AddedToken("<unk>", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),
+  1: AddedToken("<s>", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),
+  2: AddedToken("</s>", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),
+  32000: AddedToken("<video>", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),
+  32001: AddedToken("<image>", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),
+}
+
+{
+  "image_token": "<image>",
+  "patch_size": 14,
+  "processor_class": "LlavaNextVideoProcessor",
+  "video_token": "<video>",
+  "vision_feature_select_strategy": "default"
+}
+
+
+[INFO|2024-12-01 18:24:25] logging.py:157 >> Loading dataset merger500.json...
+
+[WARNING|2024-12-01 18:24:25] processing_utils.py:1005 >> Some kwargs in processor config are unused and will not have any effect: num_additional_image_tokens.
+
+[INFO|2024-12-01 18:24:27] logging.py:157 >> Loading dataset LLM_dataset(4o).json...
+
+[INFO|2024-12-01 18:24:27] logging.py:157 >> Loading dataset LLM_dataset(4mini).json...
+
+[INFO|2024-12-01 18:25:48] configuration_utils.py:679 >> loading configuration file config.json from cache at /home/dl/.cache/huggingface/hub/models--llava-hf--LLaVA-NeXT-Video-7B-hf/snapshots/b3b624d0915bb487ef1abb15255aaa2cd5581205/config.json
+
+[INFO|2024-12-01 18:25:48] configuration_utils.py:746 >> Model config LlavaNextVideoConfig {
+  "_name_or_path": "llava-hf/LLaVA-NeXT-Video-7B-hf",
+  "architectures": [
+    "LlavaNextVideoForConditionalGeneration"
+  ],
+  "ignore_index": -100,
+  "image_grid_pinpoints": [
+    [
+      336,
+      672
+    ],
+    [
+      672,
+      336
+    ],
+    [
+      672,
+      672
+    ],
+    [
+      1008,
+      336
+    ],
+    [
+      336,
+      1008
+    ]
+  ],
+  "image_seq_length": 576,
+  "image_token_index": 32001,
+  "model_type": "llava_next_video",
+  "projector_hidden_act": "gelu",
+  "spatial_pool_mode": "average",
+  "spatial_pool_out_channels": 1024,
+  "spatial_pool_stride": 2,
+  "text_config": {
+    "_attn_implementation_autoset": false,
+    "_name_or_path": "lmsys/vicuna-7b-v1.5",
+    "add_cross_attention": false,
+    "architectures": [
+      "LlamaForCausalLM"
+    ],
+    "attention_bias": false,
+    "attention_dropout": 0.0,
+    "bad_words_ids": null,
+    "begin_suppress_tokens": null,
+    "bos_token_id": 1,
+    "chunk_size_feed_forward": 0,
+    "cross_attention_hidden_size": null,
+    "decoder_start_token_id": null,
+    "diversity_penalty": 0.0,
+    "do_sample": false,
+    "early_stopping": false,
+    "encoder_no_repeat_ngram_size": 0,
+    "eos_token_id": 2,
+    "exponential_decay_length_penalty": null,
+    "finetuning_task": null,
+    "forced_bos_token_id": null,
+    "forced_eos_token_id": null,
+    "head_dim": 128,
+    "hidden_act": "silu",
+    "hidden_size": 4096,
+    "id2label": {
+      "0": "LABEL_0",
+      "1": "LABEL_1"
+    },
+    "initializer_range": 0.02,
+    "intermediate_size": 11008,
+    "is_decoder": false,
+    "is_encoder_decoder": false,
+    "label2id": {
+      "LABEL_0": 0,
+      "LABEL_1": 1
+    },
+    "length_penalty": 1.0,
+    "max_length": 20,
+    "max_position_embeddings": 4096,
+    "min_length": 0,
+    "mlp_bias": false,
+    "model_type": "llama",
+    "no_repeat_ngram_size": 0,
+    "num_attention_heads": 32,
+    "num_beam_groups": 1,
+    "num_beams": 1,
+    "num_hidden_layers": 32,
+    "num_key_value_heads": 32,
+    "num_return_sequences": 1,
+    "output_attentions": false,
+    "output_hidden_states": false,
+    "output_scores": false,
+    "pad_token_id": 0,
+    "prefix": null,
+    "pretraining_tp": 1,
+    "problem_type": null,
+    "pruned_heads": {},
+    "remove_invalid_values": false,
+    "repetition_penalty": 1.0,
+    "return_dict": true,
+    "return_dict_in_generate": false,
+    "rms_norm_eps": 1e-05,
+    "rope_scaling": {
+      "factor": 2.5,
+      "rope_type": "linear",
+      "type": "linear"
+    },
+    "rope_theta": 10000.0,
+    "sep_token_id": null,
+    "suppress_tokens": null,
+    "task_specific_params": null,
+    "temperature": 1.0,
+    "tf_legacy_loss": false,
+    "tie_encoder_decoder": false,
+    "tie_word_embeddings": false,
+    "tokenizer_class": null,
+    "top_k": 50,
+    "top_p": 1.0,
+    "torch_dtype": "float16",
+    "torchscript": false,
+    "type": "linear",
+    "typical_p": 1.0,
+    "use_bfloat16": false,
+    "use_cache": true,
+    "vocab_size": 32064
+  },
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.46.1",
+  "use_image_newline_parameter": true,
+  "video_seq_length": 288,
+  "video_token_index": 32000,
+  "vision_config": {
+    "_attn_implementation_autoset": false,
+    "_name_or_path": "",
+    "add_cross_attention": false,
+    "architectures": null,
+    "attention_dropout": 0.0,
+    "bad_words_ids": null,
+    "begin_suppress_tokens": null,
+    "bos_token_id": null,
+    "chunk_size_feed_forward": 0,
+    "cross_attention_hidden_size": null,
+    "decoder_start_token_id": null,
+    "diversity_penalty": 0.0,
+    "do_sample": false,
+    "early_stopping": false,
+    "encoder_no_repeat_ngram_size": 0,
+    "eos_token_id": null,
+    "exponential_decay_length_penalty": null,
+    "finetuning_task": null,
+    "forced_bos_token_id": null,
+    "forced_eos_token_id": null,
+    "hidden_act": "quick_gelu",
+    "hidden_size": 1024,
+    "id2label": {
+      "0": "LABEL_0",
+      "1": "LABEL_1"
+    },
+    "image_size": 336,
+    "initializer_factor": 1.0,
+    "initializer_range": 0.02,
+    "intermediate_size": 4096,
+    "is_decoder": false,
+    "is_encoder_decoder": false,
+    "label2id": {
+      "LABEL_0": 0,
+      "LABEL_1": 1
+    },
+    "layer_norm_eps": 1e-05,
+    "length_penalty": 1.0,
+    "max_length": 20,
+    "min_length": 0,
+    "model_type": "clip_vision_model",
+    "no_repeat_ngram_size": 0,
+    "num_attention_heads": 16,
+    "num_beam_groups": 1,
+    "num_beams": 1,
+    "num_channels": 3,
+    "num_hidden_layers": 24,
+    "num_return_sequences": 1,
+    "output_attentions": false,
+    "output_hidden_states": false,
+    "output_scores": false,
+    "pad_token_id": null,
+    "patch_size": 14,
+    "prefix": null,
+    "problem_type": null,
+    "projection_dim": 768,
+    "pruned_heads": {},
+    "remove_invalid_values": false,
+    "repetition_penalty": 1.0,
+    "return_dict": true,
+    "return_dict_in_generate": false,
+    "sep_token_id": null,
+    "suppress_tokens": null,
+    "task_specific_params": null,
+    "temperature": 1.0,
+    "tf_legacy_loss": false,
+    "tie_encoder_decoder": false,
+    "tie_word_embeddings": true,
+    "tokenizer_class": null,
+    "top_k": 50,
+    "top_p": 1.0,
+    "torch_dtype": null,
+    "torchscript": false,
+    "typical_p": 1.0,
+    "use_bfloat16": false,
+    "vocab_size": 32000
+  },
+  "vision_feature_layer": -2,
+  "vision_feature_select_strategy": "default"
+}
+
+
+[INFO|2024-12-01 18:25:48] logging.py:157 >> Quantizing model to 4 bit with bitsandbytes.
+
+[INFO|2024-12-01 18:25:48] modeling_utils.py:3937 >> loading weights file model.safetensors from cache at /home/dl/.cache/huggingface/hub/models--llava-hf--LLaVA-NeXT-Video-7B-hf/snapshots/b3b624d0915bb487ef1abb15255aaa2cd5581205/model.safetensors.index.json
+
+[INFO|2024-12-01 18:25:48] modeling_utils.py:1670 >> Instantiating LlavaNextVideoForConditionalGeneration model under default dtype torch.bfloat16.
+
+[INFO|2024-12-01 18:25:48] configuration_utils.py:1096 >> Generate config GenerationConfig {}
+
+
+[INFO|2024-12-01 18:25:48] modeling_utils.py:1670 >> Instantiating CLIPVisionModel model under default dtype torch.bfloat16.
+
+[INFO|2024-12-01 18:25:48] modeling_utils.py:1670 >> Instantiating LlamaForCausalLM model under default dtype torch.bfloat16.
+
+[INFO|2024-12-01 18:25:48] configuration_utils.py:1096 >> Generate config GenerationConfig {
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "pad_token_id": 0
+}
+
+
+[INFO|2024-12-01 18:25:52] modeling_utils.py:4800 >> All model checkpoint weights were used when initializing LlavaNextVideoForConditionalGeneration.
+
+
+[INFO|2024-12-01 18:25:52] modeling_utils.py:4808 >> All the weights of LlavaNextVideoForConditionalGeneration were initialized from the model checkpoint at llava-hf/LLaVA-NeXT-Video-7B-hf.
+If your task is similar to the task the model of the checkpoint was trained on, you can already use LlavaNextVideoForConditionalGeneration for predictions without further training.
+
+[INFO|2024-12-01 18:25:52] configuration_utils.py:1051 >> loading configuration file generation_config.json from cache at /home/dl/.cache/huggingface/hub/models--llava-hf--LLaVA-NeXT-Video-7B-hf/snapshots/b3b624d0915bb487ef1abb15255aaa2cd5581205/generation_config.json
+
+[INFO|2024-12-01 18:25:52] configuration_utils.py:1096 >> Generate config GenerationConfig {
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "pad_token_id": 0
+}
+
+
+[INFO|2024-12-01 18:25:53] logging.py:157 >> Gradient checkpointing enabled.
+
+[INFO|2024-12-01 18:25:53] logging.py:157 >> Casting multimodal projector outputs in torch.bfloat16.
+
+[INFO|2024-12-01 18:25:53] logging.py:157 >> Using FlashAttention-2 for faster training and inference.
+
+[INFO|2024-12-01 18:25:53] logging.py:157 >> Upcasting trainable params to float32.
+
+[INFO|2024-12-01 18:25:53] logging.py:157 >> Fine-tuning method: LoRA
+
+[INFO|2024-12-01 18:25:53] logging.py:157 >> Found linear modules: down_proj,v_proj,k_proj,q_proj,up_proj,o_proj,gate_proj
+
+[INFO|2024-12-01 18:25:53] logging.py:157 >> trainable params: 19,988,480 || all params: 7,083,419,648 || trainable%: 0.2822
+
+[INFO|2024-12-01 18:25:53] trainer.py:698 >> Using auto half precision backend
+
+[INFO|2024-12-01 18:25:55] trainer.py:2313 >> ***** Running training *****
+
+[INFO|2024-12-01 18:25:55] trainer.py:2314 >> Num examples = 714
+
+[INFO|2024-12-01 18:25:55] trainer.py:2315 >> Num Epochs = 1
+
+[INFO|2024-12-01 18:25:55] trainer.py:2316 >> Instantaneous batch size per device = 2
+
+[INFO|2024-12-01 18:25:55] trainer.py:2319 >> Total train batch size (w. parallel, distributed & accumulation) = 32
+
+[INFO|2024-12-01 18:25:55] trainer.py:2320 >> Gradient Accumulation steps = 8
+
+[INFO|2024-12-01 18:25:55] trainer.py:2321 >> Total optimization steps = 22
+
+[INFO|2024-12-01 18:25:55] trainer.py:2322 >> Number of trainable parameters = 19,988,480
+
+[WARNING|2024-12-01 18:25:59] logging.py:168 >> `use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.
+
+[INFO|2024-12-01 18:28:23] logging.py:157 >> {'loss': 0.8615, 'learning_rate': 2.5000e-06, 'epoch': 0.22}
+
+[INFO|2024-12-01 18:30:41] logging.py:157 >> {'loss': 0.8940, 'learning_rate': 5.0000e-06, 'epoch': 0.45}
+
+[INFO|2024-12-01 18:33:04] logging.py:157 >> {'loss': 0.8808, 'learning_rate': 7.5000e-06, 'epoch': 0.67}
+
+[INFO|2024-12-01 18:35:22] logging.py:157 >> {'loss': 0.8746, 'learning_rate': 1.0000e-05, 'epoch': 0.89}
+
+[INFO|2024-12-01 18:36:12] trainer.py:3801 >> Saving model checkpoint to saves/LLaVA-NeXT-Video-7B-Chat/lora/train_2024-12-01-18-22-24/checkpoint-22
+
+[INFO|2024-12-01 18:36:13] tokenization_utils_base.py:2646 >> tokenizer config file saved in saves/LLaVA-NeXT-Video-7B-Chat/lora/train_2024-12-01-18-22-24/checkpoint-22/tokenizer_config.json
+
+[INFO|2024-12-01 18:36:13] tokenization_utils_base.py:2655 >> Special tokens file saved in saves/LLaVA-NeXT-Video-7B-Chat/lora/train_2024-12-01-18-22-24/checkpoint-22/special_tokens_map.json
+
+[INFO|2024-12-01 18:36:13] image_processing_base.py:258 >> Image processor saved in saves/LLaVA-NeXT-Video-7B-Chat/lora/train_2024-12-01-18-22-24/checkpoint-22/preprocessor_config.json
+
+[INFO|2024-12-01 18:36:13] image_processing_base.py:258 >> Image processor saved in saves/LLaVA-NeXT-Video-7B-Chat/lora/train_2024-12-01-18-22-24/checkpoint-22/preprocessor_config.json
+
+[INFO|2024-12-01 18:36:13] tokenization_utils_base.py:2646 >> tokenizer config file saved in saves/LLaVA-NeXT-Video-7B-Chat/lora/train_2024-12-01-18-22-24/checkpoint-22/tokenizer_config.json
+
+[INFO|2024-12-01 18:36:13] tokenization_utils_base.py:2655 >> Special tokens file saved in saves/LLaVA-NeXT-Video-7B-Chat/lora/train_2024-12-01-18-22-24/checkpoint-22/special_tokens_map.json
+
+[INFO|2024-12-01 18:36:13] processing_utils.py:541 >> chat template saved in saves/LLaVA-NeXT-Video-7B-Chat/lora/train_2024-12-01-18-22-24/checkpoint-22/chat_template.json
+
+[INFO|2024-12-01 18:36:13] processing_utils.py:547 >> processor saved in saves/LLaVA-NeXT-Video-7B-Chat/lora/train_2024-12-01-18-22-24/checkpoint-22/processor_config.json
+
+[INFO|2024-12-01 18:36:13] trainer.py:2584 >>
+
+Training completed. Do not forget to share your model on huggingface.co/models =)
+
+
+
+[INFO|2024-12-01 18:36:13] image_processing_base.py:258 >> Image processor saved in saves/LLaVA-NeXT-Video-7B-Chat/lora/train_2024-12-01-18-22-24/preprocessor_config.json
+
+[INFO|2024-12-01 18:36:13] image_processing_base.py:258 >> Image processor saved in saves/LLaVA-NeXT-Video-7B-Chat/lora/train_2024-12-01-18-22-24/preprocessor_config.json
+
+[INFO|2024-12-01 18:36:13] tokenization_utils_base.py:2646 >> tokenizer config file saved in saves/LLaVA-NeXT-Video-7B-Chat/lora/train_2024-12-01-18-22-24/tokenizer_config.json
+
+[INFO|2024-12-01 18:36:13] tokenization_utils_base.py:2655 >> Special tokens file saved in saves/LLaVA-NeXT-Video-7B-Chat/lora/train_2024-12-01-18-22-24/special_tokens_map.json
+
+[INFO|2024-12-01 18:36:14] processing_utils.py:541 >> chat template saved in saves/LLaVA-NeXT-Video-7B-Chat/lora/train_2024-12-01-18-22-24/chat_template.json
+
+[INFO|2024-12-01 18:36:14] processing_utils.py:547 >> processor saved in saves/LLaVA-NeXT-Video-7B-Chat/lora/train_2024-12-01-18-22-24/processor_config.json
+
+[INFO|2024-12-01 18:36:14] trainer.py:3801 >> Saving model checkpoint to saves/LLaVA-NeXT-Video-7B-Chat/lora/train_2024-12-01-18-22-24
+
+[INFO|2024-12-01 18:36:15] tokenization_utils_base.py:2646 >> tokenizer config file saved in saves/LLaVA-NeXT-Video-7B-Chat/lora/train_2024-12-01-18-22-24/tokenizer_config.json
+
+[INFO|2024-12-01 18:36:15] tokenization_utils_base.py:2655 >> Special tokens file saved in saves/LLaVA-NeXT-Video-7B-Chat/lora/train_2024-12-01-18-22-24/special_tokens_map.json
+
+[WARNING|2024-12-01 18:36:15] logging.py:162 >> No metric eval_loss to plot.
+
+[WARNING|2024-12-01 18:36:15] logging.py:162 >> No metric eval_accuracy to plot.
+
+[INFO|2024-12-01 18:36:15] modelcard.py:449 >> Dropping the following result as it does not have all the necessary fields:
+{'task': {'name': 'Causal Language Modeling', 'type': 'text-generation'}}
+
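The parameter count in the log can be reproduced from the config dump it contains: LoRA rank 8 on the four attention projections (4096×4096) and the three MLP projections (4096↔11008) across 32 decoder layers.

```python
# Cross-check "trainable params: 19,988,480" from the log above.
r, hidden, ffn, layers = 8, 4096, 11008, 32
attn = 4 * r * (hidden + hidden)  # q_proj, k_proj, v_proj, o_proj
mlp = 3 * r * (hidden + ffn)      # gate_proj, up_proj, down_proj
print((attn + mlp) * layers)      # 19988480
```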
scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ab66602d7e6c10eee8866ffc8c6b4541b799bb1c62eb8be9713bd239d7fe2942
+size 1064
special_tokens_map.json
ADDED
@@ -0,0 +1,30 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.
tokenizer.model
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
tokenizer_config.json
ADDED
@@ -0,0 +1,68 @@
{
  "add_bos_token": true,
  "add_eos_token": false,
  "add_prefix_space": true,
  "added_tokens_decoder": {
    "0": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "32000": {
      "content": "<video>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "32001": {
      "content": "<image>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<s>",
  "chat_template": "{% set system_message = 'A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user\\'s questions.' %}{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% endif %}{% if system_message is defined %}{{ system_message }}{% endif %}{% for message in loop_messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ 'USER: ' + content + ' ASSISTANT:' }}{% elif message['role'] == 'assistant' %}{{ content + '</s>' }}{% endif %}{% endfor %}",
  "clean_up_tokenization_spaces": false,
  "eos_token": "</s>",
  "extra_special_tokens": {
    "image_token": "<image>",
    "video_token": "<video>"
  },
  "image_token": "<image>",
  "legacy": false,
  "model_max_length": 4096,
  "pad_token": "<unk>",
  "padding_side": "right",
  "processor_class": "LlavaNextVideoProcessor",
  "sp_model_kwargs": {},
  "spaces_between_special_tokens": false,
  "split_special_tokens": false,
  "tokenizer_class": "LlamaTokenizer",
  "unk_token": "<unk>",
  "use_default_system_prompt": false,
  "video_token": "<video>"
}
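The chat_template above is a Jinja string that renders conversations into the USER:/ASSISTANT: format this checkpoint was trained on. A minimal sketch of rendering it with transformers (assumptions: the files from this commit are downloaded to a local folder, "./checkpoint" here is a placeholder path, and the message text is illustrative):

from transformers import AutoTokenizer

# Loads tokenizer.model, tokenizer_config.json and special_tokens_map.json from this commit.
tokenizer = AutoTokenizer.from_pretrained("./checkpoint")

messages = [{"role": "user", "content": "<video>\nDescribe what happens in the clip."}]

# tokenize=False returns the rendered prompt string instead of token ids.
prompt = tokenizer.apply_chat_template(messages, tokenize=False)
print(prompt)
# Prints the default system message followed by "USER: ... ASSISTANT:" per the template above.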
train_results.json
ADDED
@@ -0,0 +1,8 @@
{
  "epoch": 0.9832402234636871,
  "total_flos": 8.043290589292134e+16,
  "train_loss": 0.8802601207386364,
  "train_runtime": 618.3986,
  "train_samples_per_second": 1.155,
  "train_steps_per_second": 0.036
}
trainer_log.jsonl
ADDED
@@ -0,0 +1,5 @@
{"current_steps": 5, "total_steps": 22, "loss": 0.8615, "lr": 2.5e-06, "epoch": 0.22346368715083798, "percentage": 22.73, "elapsed_time": "0:02:28", "remaining_time": "0:08:24"}
{"current_steps": 10, "total_steps": 22, "loss": 0.894, "lr": 5e-06, "epoch": 0.44692737430167595, "percentage": 45.45, "elapsed_time": "0:04:46", "remaining_time": "0:05:43"}
{"current_steps": 15, "total_steps": 22, "loss": 0.8808, "lr": 7.5e-06, "epoch": 0.6703910614525139, "percentage": 68.18, "elapsed_time": "0:07:08", "remaining_time": "0:03:20"}
{"current_steps": 20, "total_steps": 22, "loss": 0.8746, "lr": 1e-05, "epoch": 0.8938547486033519, "percentage": 90.91, "elapsed_time": "0:09:27", "remaining_time": "0:00:56"}
{"current_steps": 22, "total_steps": 22, "epoch": 0.9832402234636871, "percentage": 100.0, "elapsed_time": "0:10:18", "remaining_time": "0:00:00"}
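Each record logs the running loss and scheduler learning rate every 5 optimizer steps (logging_steps in training_args.yaml); the final record closes the run and carries no loss field. A minimal sketch (assuming matplotlib is available and the file sits in the working directory) of re-deriving a loss curve like the training_loss.png added below:

import json

import matplotlib.pyplot as plt

steps, losses = [], []
with open("trainer_log.jsonl") as f:
    for line in f:
        record = json.loads(line)
        if "loss" in record:  # the closing record at step 22 has no loss
            steps.append(record["current_steps"])
            losses.append(record["loss"])

plt.plot(steps, losses, marker="o")
plt.xlabel("step")
plt.ylabel("training loss")
plt.savefig("training_loss_replot.png")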
trainer_state.json
ADDED
@@ -0,0 +1,61 @@
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9832402234636871,
  "eval_steps": 500,
  "global_step": 22,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.22346368715083798,
      "grad_norm": 0.4495355188846588,
      "learning_rate": 2.5e-06,
      "loss": 0.8615,
      "step": 5
    },
    {
      "epoch": 0.44692737430167595,
      "grad_norm": 0.49592095613479614,
      "learning_rate": 5e-06,
      "loss": 0.894,
      "step": 10
    },
    {
      "epoch": 0.6703910614525139,
      "grad_norm": 0.47344380617141724,
      "learning_rate": 7.5e-06,
      "loss": 0.8808,
      "step": 15
    },
    {
      "epoch": 0.8938547486033519,
      "grad_norm": 0.5614600777626038,
      "learning_rate": 1e-05,
      "loss": 0.8746,
      "step": 20
    }
  ],
  "logging_steps": 5,
  "max_steps": 22,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8.043290589292134e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c4458e5e72517233363af05f835b35082429196f135ba58f1802787df1f5a0da
size 7032
training_args.yaml
ADDED
@@ -0,0 +1,34 @@
bf16: true
cutoff_len: 4096
dataset: merger,LLM_dataset(4o),LLM_dataset(4mini)
dataset_dir: /media/dl/7DC4-B1CE/500_video
ddp_timeout: 180000000
deepspeed: cache/ds_z2_config.json
do_train: true
finetuning_type: lora
flash_attn: fa2
gradient_accumulation_steps: 8
learning_rate: 5.0e-05
logging_steps: 5
lora_alpha: 16
lora_dropout: 0
lora_rank: 8
lora_target: all
lr_scheduler_type: cosine
max_grad_norm: 1.0
max_samples: 100000
model_name_or_path: llava-hf/LLaVA-NeXT-Video-7B-hf
num_train_epochs: 1.0
optim: adamw_torch
output_dir: saves/LLaVA-NeXT-Video-7B-Chat/lora/train_2024-12-01-18-22-24
packing: false
per_device_train_batch_size: 2
plot_loss: true
preprocessing_num_workers: 16
quantization_bit: 4
quantization_method: bitsandbytes
report_to: none
save_steps: 100
stage: sft
template: llava_next_video
warmup_steps: 100
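A quick consistency check of these hyperparameters against trainer_state.json and train_results.json above (assumption: a data-parallel world size of 2, which the YAML itself does not record):

per_device_batch = 2   # per_device_train_batch_size
grad_accum = 8         # gradient_accumulation_steps
world_size = 2         # assumed GPU count, not stored in the YAML

effective_batch = per_device_batch * grad_accum * world_size  # 32 samples per optimizer step

# trainer_state.json: 22 optimizer steps covered epoch 0.9832...
implied_dataset_size = 22 * effective_batch / 0.9832402234636871  # ~716 samples

# train_results.json gives an independent estimate:
throughput_estimate = 1.155 * 618.3986  # samples/s * runtime s ~= 714 samples

print(effective_batch, round(implied_dataset_size), round(throughput_estimate))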
training_loss.png
ADDED
zero_to_fp32.py
ADDED
@@ -0,0 +1,604 @@
#!/usr/bin/env python

# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets
# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
# application.
#
# example: python zero_to_fp32.py . pytorch_model.bin

import argparse
import torch
import glob
import math
import os
import re
from collections import OrderedDict
from dataclasses import dataclass

# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
# DeepSpeed data structures it has to be available in the current python environment.
from deepspeed.utils import logger
from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
                                            FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
                                            FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)


@dataclass
class zero_model_state:
    buffers: dict()
    param_shapes: dict()
    shared_params: list
    ds_version: int
    frozen_param_shapes: dict()
    frozen_param_fragments: dict()


debug = 0

# load to cpu
device = torch.device('cpu')


def atoi(text):
    return int(text) if text.isdigit() else text


def natural_keys(text):
    '''
    alist.sort(key=natural_keys) sorts in human order
    http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    '''
    return [atoi(c) for c in re.split(r'(\d+)', text)]


def get_model_state_file(checkpoint_dir, zero_stage):
    if not os.path.isdir(checkpoint_dir):
        raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")

    # there should be only one file
    if zero_stage <= 2:
        file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
    elif zero_stage == 3:
        file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")

    if not os.path.exists(file):
        raise FileNotFoundError(f"can't find model states file at '{file}'")

    return file


def get_checkpoint_files(checkpoint_dir, glob_pattern):
    # XXX: need to test that this simple glob rule works for multi-node setup too
    ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)

    if len(ckpt_files) == 0:
        raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")

    return ckpt_files


def get_optim_files(checkpoint_dir):
    return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")


def get_model_state_files(checkpoint_dir):
    return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")


def parse_model_states(files):
    zero_model_states = []
    for file in files:
        state_dict = torch.load(file, map_location=device)

        if BUFFER_NAMES not in state_dict:
            raise ValueError(f"{file} is not a model state checkpoint")
        buffer_names = state_dict[BUFFER_NAMES]
        if debug:
            print("Found buffers:", buffer_names)

        # recover just the buffers while restoring them to fp32 if they were saved in fp16
        buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
        param_shapes = state_dict[PARAM_SHAPES]

        # collect parameters that are included in param_shapes
        param_names = []
        for s in param_shapes:
            for name in s.keys():
                param_names.append(name)

        # update with frozen parameters
        frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
        if frozen_param_shapes is not None:
            if debug:
                print(f"Found frozen_param_shapes: {frozen_param_shapes}")
            param_names += list(frozen_param_shapes.keys())

        # handle shared params
        shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]

        ds_version = state_dict.get(DS_VERSION, None)

        frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)

        z_model_state = zero_model_state(buffers=buffers,
                                         param_shapes=param_shapes,
                                         shared_params=shared_params,
                                         ds_version=ds_version,
                                         frozen_param_shapes=frozen_param_shapes,
                                         frozen_param_fragments=frozen_param_fragments)
        zero_model_states.append(z_model_state)

    return zero_model_states


def parse_optim_states(files, ds_checkpoint_dir):

    total_files = len(files)
    state_dicts = []
    for f in files:
        state_dict = torch.load(f, map_location=device)
        # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
        # and also handle the case where it was already removed by another helper script
        state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
        state_dicts.append(state_dict)

    if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
        raise ValueError(f"{files[0]} is not a zero checkpoint")
    zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
    world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]

    # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
    # parameters can be different from data parallelism for non-expert parameters. So we can just
    # use the max of the partition_count to get the dp world_size.

    if type(world_size) is list:
        world_size = max(world_size)

    if world_size != total_files:
        raise ValueError(
            f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
            "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
        )

    # the groups are named differently in each stage
    if zero_stage <= 2:
        fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
    elif zero_stage == 3:
        fp32_groups_key = FP32_FLAT_GROUPS
    else:
        raise ValueError(f"unknown zero stage {zero_stage}")

    if zero_stage <= 2:
        fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
    elif zero_stage == 3:
        # if there is more than one param group, there will be multiple flattened tensors - one
        # flattened tensor per group - for simplicity merge them into a single tensor
        #
        # XXX: could make the script more memory efficient for when there are multiple groups - it
        # will require matching the sub-lists of param_shapes for each param group flattened tensor

        fp32_flat_groups = [
            torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
        ]

    return zero_stage, world_size, fp32_flat_groups


def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
    """
    Returns fp32 state_dict reconstructed from ds checkpoint

    Args:
        - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)

    """
    print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")

    optim_files = get_optim_files(ds_checkpoint_dir)
    zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
    print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")

    model_files = get_model_state_files(ds_checkpoint_dir)

    zero_model_states = parse_model_states(model_files)
    print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')

    if zero_stage <= 2:
        return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)
    elif zero_stage == 3:
        return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)


def _zero2_merge_frozen_params(state_dict, zero_model_states):
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    frozen_param_fragments = zero_model_states[0].frozen_param_fragments

    if debug:
        num_elem = sum(s.numel() for s in frozen_param_shapes.values())
        print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        state_dict[name] = frozen_param_fragments[name]

        if debug:
            print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")


def _has_callable(obj, fn):
    attr = getattr(obj, fn, None)
    return callable(attr)


def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    param_shapes = zero_model_states[0].param_shapes

    # Reconstruction protocol:
    #
    # XXX: document this

    if debug:
        for i in range(world_size):
            for j in range(len(fp32_flat_groups[0])):
                print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")

    # XXX: memory usage doubles here (zero2)
    num_param_groups = len(fp32_flat_groups[0])
    merged_single_partition_of_fp32_groups = []
    for i in range(num_param_groups):
        merged_partitions = [sd[i] for sd in fp32_flat_groups]
        full_single_fp32_vector = torch.cat(merged_partitions, 0)
        merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
    avail_numel = sum(
        [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])

    if debug:
        wanted_params = sum([len(shapes) for shapes in param_shapes])
        wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
        # not asserting if there is a mismatch due to possible padding
        print(f"Have {avail_numel} numels to process.")
        print(f"Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    total_numel = 0
    total_params = 0
    for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
        offset = 0
        avail_numel = full_single_fp32_vector.numel()
        for name, shape in shapes.items():

            unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
            total_numel += unpartitioned_numel
            total_params += 1

            if debug:
                print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
            state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
            offset += unpartitioned_numel

        # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
        # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
        # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
        # live optimizer object, so we are checking that the numbers are within the right range
        align_to = 2 * world_size

        def zero2_align(x):
            return align_to * math.ceil(x / align_to)

        if debug:
            print(f"original offset={offset}, avail_numel={avail_numel}")

        offset = zero2_align(offset)
        avail_numel = zero2_align(avail_numel)

        if debug:
            print(f"aligned offset={offset}, avail_numel={avail_numel}")

        # Sanity check
        if offset != avail_numel:
            raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")


def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero2_merge_frozen_params(state_dict, zero_model_states)

    _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters
    for pair in zero_model_states[0].shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict


def zero3_partitioned_param_info(unpartitioned_numel, world_size):
    remainder = unpartitioned_numel % world_size
    padding_numel = (world_size - remainder) if remainder else 0
    partitioned_numel = math.ceil(unpartitioned_numel / world_size)
    return partitioned_numel, padding_numel


def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    if debug:
        for i in range(world_size):
            num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
            print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in zero_model_states[0].frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
        state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")


def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    param_shapes = zero_model_states[0].param_shapes
    avail_numel = fp32_flat_groups[0].numel() * world_size
    # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
    # param, re-consolidating each param, while dealing with padding if any

    # merge list of dicts, preserving order
    param_shapes = {k: v for d in param_shapes for k, v in d.items()}

    if debug:
        for i in range(world_size):
            print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")

    wanted_params = len(param_shapes)
    wanted_numel = sum(shape.numel() for shape in param_shapes.values())
    # not asserting if there is a mismatch due to possible padding
    avail_numel = fp32_flat_groups[0].numel() * world_size
    print(f"Trainable params: Have {avail_numel} numels to process.")
    print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    offset = 0
    total_numel = 0
    total_params = 0
    for name, shape in param_shapes.items():

        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel
        total_params += 1

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

        # XXX: memory usage doubles here
        state_dict[name] = torch.cat(
            tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
            0).narrow(0, 0, unpartitioned_numel).view(shape)
        offset += partitioned_numel

    offset *= world_size

    # Sanity check
    if offset != avail_numel:
        raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")


def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)

    _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters
    for pair in zero_model_states[0].shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict


def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None, exclude_frozen_parameters=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
    ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
    via a model hub.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters

    Returns:
        - pytorch ``state_dict``

    Note: this approach may not work if your application doesn't have sufficient free CPU memory and
    you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
    the checkpoint.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
        # do the training and checkpoint saving
        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
        model = model.cpu() # move to cpu
        model.load_state_dict(state_dict)
        # submit to model hub or save the model to share with others

    In this example the ``model`` will no longer be usable in the deepspeed context of the same
    application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.

    """
    if tag is None:
        latest_path = os.path.join(checkpoint_dir, 'latest')
        if os.path.isfile(latest_path):
            with open(latest_path, 'r') as fd:
                tag = fd.read().strip()
        else:
            raise ValueError(f"Unable to find 'latest' file at {latest_path}")

    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)

    if not os.path.isdir(ds_checkpoint_dir):
        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")

    return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)


def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None, exclude_frozen_parameters=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
    loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters
    """

    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag, exclude_frozen_parameters)
    print(f"Saving fp32 state dict to {output_file}")
    torch.save(state_dict, output_file)


def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
    """
    1. Put the provided model to cpu
    2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
    3. Load it into the provided model

    Args:
        - ``model``: the model object to update
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``

    Returns:
        - ``model``: modified model

    Make sure you have plenty of CPU memory available before you call this function. If you don't
    have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
    conveniently placed for you in the checkpoint folder.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
        model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
        # submit to model hub or save the model to share with others

    Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
    of the same application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    """
    logger.info(f"Extracting fp32 weights")
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)

    logger.info(f"Overwriting model with fp32 weights")
    model = model.cpu()
    model.load_state_dict(state_dict, strict=False)

    return model


if __name__ == "__main__":

    parser = argparse.ArgumentParser()
    parser.add_argument("checkpoint_dir",
                        type=str,
                        help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
    parser.add_argument(
        "output_file",
        type=str,
        help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
    parser.add_argument("-t",
                        "--tag",
                        type=str,
                        default=None,
                        help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
    parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
    parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
    args = parser.parse_args()

    debug = args.debug

    convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
                                               args.output_file,
                                               tag=args.tag,
                                               exclude_frozen_parameters=args.exclude_frozen_parameters)
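The script's header comment already gives the CLI form (python zero_to_fp32.py . pytorch_model.bin). The same conversion can be driven from Python; a minimal sketch (assumptions: run from a directory containing this script, a 'latest' file naming the checkpoint tag, and the tag folder holding the *_optim_states.pt and *_model_states.pt shards the functions above look for):

from zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict

# Reads the tag from 'latest', reconstructs the fp32 weights from the ZeRO
# optimizer shards, and writes a plain torch.save()'d state_dict file.
convert_zero_checkpoint_to_fp32_state_dict(".", "pytorch_model.bin")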