Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- grpo/qwen2.5vl-7b-thinking_v2_full_comet_grpo/v13-20250907-200700/checkpoint-750/model-00001-of-00004.safetensors +3 -0
- llava-ov-lora/added_tokens.json +7 -0
- llava-ov-lora/args.json +362 -0
- llava-ov-lora/chat_template.json +3 -0
- llava-ov-lora/config.json +211 -0
- llava-ov-lora/generation_config.json +6 -0
- llava-ov-lora/merges.txt +0 -0
- llava-ov-lora/model.safetensors.index.json +772 -0
- ood/internvl3-8b-instruct-lora-ood-210/model.safetensors.index.json +693 -0
- ood/internvl3-8b-instruct-lora-ood-210/modeling_intern_vit.py +431 -0
- ood/internvl3-8b-instruct-lora-ood-210/modeling_internvl_chat.py +359 -0
- ood/internvl3-8b-instruct-lora-ood-210/preprocessor_config.json +19 -0
- ood/internvl3-8b-instruct-lora-ood-210/special_tokens_map.json +31 -0
- ood/internvl3-8b-instruct-lora-ood-210/tokenizer_config.json +280 -0
- ood/internvl3-8b-instruct-lora-ood-210/vocab.json +0 -0
- qwen2.5vl-7b-lora_epoch10_2e-5/added_tokens.json +24 -0
- qwen2.5vl-7b-lora_epoch10_2e-5/args.json +362 -0
- qwen2.5vl-7b-lora_epoch10_2e-5/chat_template.json +3 -0
- qwen2.5vl-7b-lora_epoch10_2e-5/config.json +66 -0
- qwen2.5vl-7b-lora_epoch10_2e-5/generation_config.json +12 -0
- qwen2.5vl-7b-lora_epoch10_2e-5/merges.txt +0 -0
- qwen2.5vl-7b-lora_epoch10_2e-5/model.safetensors.index.json +736 -0
- qwen2.5vl-7b-lora_epoch10_2e-5/preprocessor_config.json +19 -0
- qwen2.5vl-7b-lora_epoch10_2e-5/special_tokens_map.json +31 -0
- qwen2.5vl-7b-lora_epoch10_2e-5/tokenizer_config.json +209 -0
- qwen2.5vl-7b-lora_epoch10_2e-5/vocab.json +0 -0
- selective_loss/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/args.json +375 -0
- selective_loss/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/checkpoint-280/added_tokens.json +24 -0
- selective_loss/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/checkpoint-280/args.json +375 -0
- selective_loss/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/checkpoint-280/chat_template.json +3 -0
- selective_loss/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/checkpoint-280/config.json +66 -0
- selective_loss/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/checkpoint-280/generation_config.json +12 -0
- selective_loss/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/checkpoint-280/latest +1 -0
- selective_loss/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/checkpoint-280/merges.txt +0 -0
- selective_loss/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/checkpoint-280/model.safetensors.index.json +736 -0
- selective_loss/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/checkpoint-280/preprocessor_config.json +19 -0
- selective_loss/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/checkpoint-280/special_tokens_map.json +31 -0
- selective_loss/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/checkpoint-280/tokenizer_config.json +209 -0
- selective_loss/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/checkpoint-280/trainer_state.json +658 -0
- selective_loss/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/checkpoint-280/vocab.json +0 -0
- selective_loss/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/checkpoint-280/zero_to_fp32.py +760 -0
- selective_loss/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/logging.jsonl +65 -0
- selective_loss/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/val_dataset.jsonl +0 -0
- selective_loss/qwen2.5vl-7b-thinking_full_v2-spwr_lr1e-6_wd1e-3/v0-20250818-120403/args.json +375 -0
- selective_loss/qwen2.5vl-7b-thinking_full_v2-spwr_lr1e-6_wd1e-3/v0-20250818-120403/checkpoint-280/added_tokens.json +24 -0
- selective_loss/qwen2.5vl-7b-thinking_full_v2-spwr_lr1e-6_wd1e-3/v0-20250818-120403/checkpoint-280/args.json +375 -0
- selective_loss/qwen2.5vl-7b-thinking_full_v2-spwr_lr1e-6_wd1e-3/v0-20250818-120403/checkpoint-280/chat_template.json +3 -0
- selective_loss/qwen2.5vl-7b-thinking_full_v2-spwr_lr1e-6_wd1e-3/v0-20250818-120403/checkpoint-280/config.json +66 -0
- selective_loss/qwen2.5vl-7b-thinking_full_v2-spwr_lr1e-6_wd1e-3/v0-20250818-120403/checkpoint-280/generation_config.json +12 -0
- selective_loss/qwen2.5vl-7b-thinking_full_v2-spwr_lr1e-6_wd1e-3/v0-20250818-120403/checkpoint-280/latest +1 -0
grpo/qwen2.5vl-7b-thinking_v2_full_comet_grpo/v13-20250907-200700/checkpoint-750/model-00001-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3ff2219e091e43c64073dfb754c48178b74bdcedc7f1551d832421c53d9ba857
+size 4968243304
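The three lines above are a Git LFS pointer, not the weights themselves: the ~4.97 GB shard lives in LFS storage and is addressed by its SHA-256. A minimal sketch (stdlib only; the relative path below is a placeholder for wherever the downloaded blob lives) for checking that a downloaded shard matches the pointer's oid and size:

# Hedged sketch: verify a downloaded file against a Git LFS pointer.
import hashlib

def verify_lfs_blob(blob_path: str, expected_oid: str, expected_size: int) -> bool:
    h = hashlib.sha256()
    size = 0
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
            h.update(chunk)
            size += len(chunk)
    return h.hexdigest() == expected_oid and size == expected_size

ok = verify_lfs_blob(
    "checkpoint-750/model-00001-of-00004.safetensors",  # placeholder local path
    "3ff2219e091e43c64073dfb754c48178b74bdcedc7f1551d832421c53d9ba857",
    4968243304,
)
print("pointer matches blob:", ok)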
llava-ov-lora/added_tokens.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "<image>": 151646,
+  "<video>": 151647,
+  "<|endoftext|>": 151643,
+  "<|im_end|>": 151645,
+  "<|im_start|>": 151644
+}
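These entries extend the Qwen2 tokenizer with `<image>` and `<video>` placeholders; their ids line up with `image_token_index` and `video_token_index` in the config.json below. A minimal sketch (assuming `transformers` is installed and the `llava-ov-lora` directory is available locally) for confirming the mapping:

# Hedged sketch: check that the added tokens resolve to the ids listed above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("llava-ov-lora")  # local path, per this repo
for token in ["<image>", "<video>", "<|im_start|>", "<|im_end|>", "<|endoftext|>"]:
    print(token, "->", tok.convert_tokens_to_ids(token))
# Expected ids: 151646, 151647, 151644, 151645, 151643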
llava-ov-lora/args.json
ADDED
@@ -0,0 +1,362 @@
+{
+  "model": "/mnt/data/users/liamding/data/models/llava-onevision-qwen2-7b-ov-hf",
+  "model_type": "llava_onevision_hf",
+  "model_revision": null,
+  "task_type": "causal_lm",
+  "torch_dtype": "float16",
+  "attn_impl": null,
+  "num_labels": null,
+  "problem_type": null,
+  "rope_scaling": null,
+  "device_map": null,
+  "max_memory": {},
+  "local_repo_path": null,
+  "template": "llava_onevision_hf",
+  "system": null,
+  "max_length": 32768,
+  "truncation_strategy": "delete",
+  "max_pixels": null,
+  "agent_template": null,
+  "norm_bbox": null,
+  "response_prefix": null,
+  "padding_side": "right",
+  "loss_scale": "default",
+  "sequence_parallel_size": 1,
+  "use_chat_template": true,
+  "template_backend": "swift",
+  "dataset": [
+    "/mnt/data/users/liamding/data/3AM_Plus/final/training/ambi_normal_772.json",
+    "/mnt/data/users/liamding/data/3AM_Plus/final/training/mma_train_126.json",
+    "/mnt/data/users/liamding/data/3AM_Plus/final/training/sp_train_102.json"
+  ],
+  "val_dataset": [],
+  "split_dataset_ratio": 0.01,
+  "data_seed": 42,
+  "dataset_num_proc": 1,
+  "dataset_shuffle": true,
+  "val_dataset_shuffle": false,
+  "streaming": false,
+  "interleave_prob": null,
+  "stopping_strategy": "first_exhausted",
+  "shuffle_buffer_size": 1000,
+  "enable_cache": false,
+  "download_mode": "reuse_dataset_if_exists",
+  "columns": {},
+  "strict": false,
+  "remove_unused_columns": true,
+  "model_name": [
+    null,
+    null
+  ],
+  "model_author": [
+    null,
+    null
+  ],
+  "custom_dataset_info": [],
+  "quant_method": null,
+  "quant_bits": null,
+  "hqq_axis": null,
+  "bnb_4bit_compute_dtype": "float32",
+  "bnb_4bit_quant_type": "nf4",
+  "bnb_4bit_use_double_quant": true,
+  "bnb_4bit_quant_storage": null,
+  "max_new_tokens": 64,
+  "temperature": 0.0,
+  "top_k": null,
+  "top_p": null,
+  "repetition_penalty": null,
+  "num_beams": 1,
+  "stream": false,
+  "stop_words": [],
+  "logprobs": false,
+  "top_logprobs": null,
+  "ckpt_dir": null,
+  "load_dataset_config": null,
+  "lora_modules": [],
+  "tuner_backend": "peft",
+  "train_type": "lora",
+  "adapters": [],
+  "external_plugins": [],
+  "seed": 42,
+  "model_kwargs": {},
+  "load_args": false,
+  "load_data_args": false,
+  "use_hf": false,
+  "hub_token": null,
+  "custom_register_path": [],
+  "ignore_args_error": false,
+  "use_swift_lora": false,
+  "output_dir": "/mnt/data/users/liamding/data/MMMT/lora/llava_ov-lora/v1-20250528-230243",
+  "overwrite_output_dir": false,
+  "do_train": false,
+  "do_eval": false,
+  "do_predict": false,
+  "eval_strategy": "epoch",
+  "prediction_loss_only": false,
+  "per_device_train_batch_size": 4,
+  "per_device_eval_batch_size": 4,
+  "per_gpu_train_batch_size": null,
+  "per_gpu_eval_batch_size": null,
+  "gradient_accumulation_steps": 2,
+  "eval_accumulation_steps": null,
+  "eval_delay": 0,
+  "torch_empty_cache_steps": null,
+  "learning_rate": 2e-05,
+  "weight_decay": 0.1,
+  "adam_beta1": 0.9,
+  "adam_beta2": 0.95,
+  "adam_epsilon": 1e-08,
+  "max_grad_norm": 1.0,
+  "num_train_epochs": 10.0,
+  "max_steps": -1,
+  "lr_scheduler_type": "cosine",
+  "lr_scheduler_kwargs": null,
+  "warmup_ratio": 0.05,
+  "warmup_steps": 0,
+  "log_level": "passive",
+  "log_level_replica": "warning",
+  "log_on_each_node": true,
+  "logging_dir": "/mnt/data/users/liamding/data/MMMT/lora/llava_ov-lora/v1-20250528-230243/runs",
+  "logging_strategy": "steps",
+  "logging_first_step": true,
+  "logging_steps": 1,
+  "logging_nan_inf_filter": true,
+  "save_strategy": "steps",
+  "save_steps": 500,
+  "save_total_limit": 5,
+  "save_safetensors": true,
+  "save_on_each_node": false,
+  "save_only_model": false,
+  "restore_callback_states_from_checkpoint": false,
+  "no_cuda": false,
+  "use_cpu": false,
+  "use_mps_device": false,
+  "jit_mode_eval": false,
+  "use_ipex": false,
+  "bf16": false,
+  "fp16": true,
+  "fp16_opt_level": "O1",
+  "half_precision_backend": "auto",
+  "bf16_full_eval": false,
+  "fp16_full_eval": false,
+  "tf32": null,
+  "local_rank": 0,
+  "ddp_backend": null,
+  "tpu_num_cores": null,
+  "tpu_metrics_debug": false,
+  "debug": null,
+  "dataloader_drop_last": false,
+  "eval_steps": null,
+  "dataloader_num_workers": 4,
+  "dataloader_prefetch_factor": null,
+  "past_index": -1,
+  "run_name": null,
+  "disable_tqdm": null,
+  "label_names": null,
+  "load_best_model_at_end": false,
+  "metric_for_best_model": "loss",
+  "greater_is_better": false,
+  "ignore_data_skip": false,
+  "fsdp": "",
+  "fsdp_min_num_params": 0,
+  "fsdp_config": null,
+  "tp_size": 0,
+  "fsdp_transformer_layer_cls_to_wrap": null,
+  "accelerator_config": {
+    "dispatch_batches": false
+  },
+  "deepspeed": {
+    "fp16": {
+      "enabled": "auto",
+      "loss_scale": 0,
+      "loss_scale_window": 1000,
+      "initial_scale_power": 16,
+      "hysteresis": 2,
+      "min_loss_scale": 1
+    },
+    "bf16": {
+      "enabled": "auto"
+    },
+    "zero_optimization": {
+      "stage": 2,
+      "offload_optimizer": {
+        "device": "none",
+        "pin_memory": true
+      },
+      "allgather_partitions": true,
+      "allgather_bucket_size": 200000000.0,
+      "overlap_comm": false,
+      "reduce_scatter": true,
+      "reduce_bucket_size": 200000000.0,
+      "contiguous_gradients": true
+    },
+    "gradient_accumulation_steps": "auto",
+    "gradient_clipping": "auto",
+    "steps_per_print": 2000,
+    "train_batch_size": "auto",
+    "train_micro_batch_size_per_gpu": "auto",
+    "wall_clock_breakdown": false
+  },
+  "label_smoothing_factor": 0.0,
+  "optim": "adamw_torch",
+  "optim_args": null,
+  "adafactor": false,
+  "group_by_length": false,
+  "length_column_name": "length",
+  "report_to": [
+    "wandb"
+  ],
+  "ddp_find_unused_parameters": null,
+  "ddp_bucket_cap_mb": null,
+  "ddp_broadcast_buffers": null,
+  "dataloader_pin_memory": true,
+  "dataloader_persistent_workers": false,
+  "skip_memory_metrics": true,
+  "use_legacy_prediction_loop": false,
+  "push_to_hub": false,
+  "resume_from_checkpoint": null,
+  "hub_model_id": null,
+  "hub_strategy": "every_save",
+  "hub_private_repo": null,
+  "hub_always_push": false,
+  "gradient_checkpointing": true,
+  "gradient_checkpointing_kwargs": null,
+  "include_inputs_for_metrics": false,
+  "include_for_metrics": [],
+  "eval_do_concat_batches": true,
+  "fp16_backend": "auto",
+  "push_to_hub_model_id": null,
+  "push_to_hub_organization": null,
+  "push_to_hub_token": null,
+  "mp_parameters": "",
+  "auto_find_batch_size": false,
+  "full_determinism": false,
+  "torchdynamo": null,
+  "ray_scope": "last",
+  "ddp_timeout": 1800,
+  "torch_compile": false,
+  "torch_compile_backend": null,
+  "torch_compile_mode": null,
+  "include_tokens_per_second": false,
+  "include_num_input_tokens_seen": false,
+  "neftune_noise_alpha": null,
+  "optim_target_modules": null,
+  "batch_eval_metrics": false,
+  "eval_on_start": false,
+  "use_liger_kernel": false,
+  "eval_use_gather_object": false,
+  "average_tokens_across_devices": false,
+  "sortish_sampler": false,
+  "predict_with_generate": false,
+  "generation_max_length": null,
+  "generation_num_beams": null,
+  "generation_config": null,
+  "check_model": true,
+  "acc_strategy": "token",
+  "train_dataloader_shuffle": true,
+  "metric_warmup_step": 0,
+  "fsdp_num": 1,
+  "acc_steps": 1,
+  "eval_use_evalscope": false,
+  "eval_datasets": [],
+  "eval_limit": null,
+  "eval_datasets_args": null,
+  "eval_generation_config": null,
+  "freeze_parameters": [
+    "vision_tower",
+    "multi_modal_projector"
+  ],
+  "freeze_parameters_ratio": 0.0,
+  "trainable_parameters": [],
+  "freeze_llm": false,
+  "freeze_vit": true,
+  "freeze_aligner": true,
+  "target_modules": [
+    "all-linear"
+  ],
+  "target_regex": null,
+  "modules_to_save": [],
+  "lora_rank": 8,
+  "lora_alpha": 16,
+  "lora_dropout": 0.1,
+  "lora_bias": "none",
+  "lora_dtype": null,
+  "lorap_lr_ratio": null,
+  "use_rslora": false,
+  "use_dora": false,
+  "lora_ga_batch_size": 2,
+  "lora_ga_iters": 2,
+  "lora_ga_max_length": 1024,
+  "lora_ga_direction": "ArB2r",
+  "lora_ga_scale": "stable",
+  "lora_ga_stable_gamma": 16,
+  "init_weights": true,
+  "fourier_n_frequency": 2000,
+  "fourier_scaling": 300.0,
+  "boft_block_size": 4,
+  "boft_block_num": 0,
+  "boft_n_butterfly_factor": 1,
+  "boft_dropout": 0.0,
+  "vera_rank": 256,
+  "vera_projection_prng_key": 0,
+  "vera_dropout": 0.0,
+  "vera_d_initial": 0.1,
+  "adapter_act": "gelu",
+  "adapter_length": 128,
+  "use_galore": false,
+  "galore_target_modules": null,
+  "galore_rank": 128,
+  "galore_update_proj_gap": 50,
+  "galore_scale": 1.0,
+  "galore_proj_type": "std",
+  "galore_optim_per_parameter": false,
+  "galore_with_embedding": false,
+  "galore_quantization": false,
+  "galore_proj_quant": false,
+  "galore_proj_bits": 4,
+  "galore_proj_group_size": 256,
+  "galore_cos_threshold": 0.4,
+  "galore_gamma_proj": 2,
+  "galore_queue_size": 5,
+  "adalora_target_r": 8,
+  "adalora_init_r": 12,
+  "adalora_tinit": 0,
+  "adalora_tfinal": 0,
+  "adalora_deltaT": 1,
+  "adalora_beta1": 0.85,
+  "adalora_beta2": 0.85,
+  "adalora_orth_reg_weight": 0.5,
+  "llamapro_num_new_blocks": 4,
+  "llamapro_num_groups": null,
+  "lisa_activated_layers": 0,
+  "lisa_step_interval": 20,
+  "reft_layer_key": null,
+  "reft_layers": null,
+  "reft_rank": 4,
+  "reft_intervention_type": "LoreftIntervention",
+  "reft_args": null,
+  "swanlab_token": null,
+  "swanlab_project": null,
+  "swanlab_workspace": null,
+  "swanlab_exp_name": null,
+  "swanlab_mode": "cloud",
+  "add_version": true,
+  "resume_only_model": false,
+  "create_checkpoint_symlink": false,
+  "packing": false,
+  "lazy_tokenize": true,
+  "loss_type": null,
+  "optimizer": null,
+  "metric": null,
+  "zero_hpz_partition_size": null,
+  "rank": 0,
+  "global_world_size": 4,
+  "local_world_size": 4,
+  "model_suffix": "llava-onevision-qwen2-7b-ov-hf",
+  "model_info": "ModelInfo(model_type='llava_onevision_hf', model_dir='/mnt/data/users/liamding/data/models/llava-onevision-qwen2-7b-ov-hf', torch_dtype=torch.float16, max_model_len=32768, quant_method=None, quant_bits=None, rope_scaling=None, config=None, task_type='causal_lm', num_labels=None)",
+  "model_meta": "ModelMeta(model_type='llava_onevision_hf', model_groups=[ModelGroup(models=[Model(ms_model_id='llava-hf/llava-onevision-qwen2-0.5b-ov-hf', hf_model_id='llava-hf/llava-onevision-qwen2-0.5b-ov-hf', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='llava-hf/llava-onevision-qwen2-7b-ov-hf', hf_model_id='llava-hf/llava-onevision-qwen2-7b-ov-hf', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='llava-hf/llava-onevision-qwen2-72b-ov-hf', hf_model_id='llava-hf/llava-onevision-qwen2-72b-ov-hf', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[])], template='llava_onevision_hf', get_function=<function get_model_tokenizer_llava_onevision at 0x7f09b3b4e3b0>, model_arch='llava_hf', architectures=['LlavaOnevisionForConditionalGeneration'], additional_saved_files=[], torch_dtype=None, is_multimodal=True, is_reward=False, task_type=None, ignore_patterns=None, requires=['transformers>=4.45'], tags=[])",
+  "model_dir": "/mnt/data/users/liamding/data/models/llava-onevision-qwen2-7b-ov-hf",
+  "hub": "<class 'swift.hub.hub.MSHub'>",
+  "evaluation_strategy": "epoch",
+  "training_args": "Seq2SeqTrainingArguments(output_dir='/mnt/data/users/liamding/data/MMMT/lora/llava_ov-lora/v1-20250528-230243', overwrite_output_dir=False, do_train=False, do_eval=True, do_predict=False, eval_strategy=<IntervalStrategy.EPOCH: 'epoch'>, prediction_loss_only=False, per_device_train_batch_size=4, per_device_eval_batch_size=4, per_gpu_train_batch_size=None, per_gpu_eval_batch_size=None, gradient_accumulation_steps=2, eval_accumulation_steps=None, eval_delay=0, torch_empty_cache_steps=None, learning_rate=2e-05, weight_decay=0.1, adam_beta1=0.9, adam_beta2=0.95, adam_epsilon=1e-08, max_grad_norm=1.0, num_train_epochs=10.0, max_steps=-1, lr_scheduler_type=<SchedulerType.COSINE: 'cosine'>, lr_scheduler_kwargs=None, warmup_ratio=0.05, warmup_steps=0, log_level='passive', log_level_replica='warning', log_on_each_node=True, logging_dir='/mnt/data/users/liamding/data/MMMT/lora/llava_ov-lora/v1-20250528-230243/runs', logging_strategy=<IntervalStrategy.STEPS: 'steps'>, logging_first_step=True, logging_steps=1, logging_nan_inf_filter=True, save_strategy=<SaveStrategy.STEPS: 'steps'>, save_steps=500, save_total_limit=5, save_safetensors=True, save_on_each_node=False, save_only_model=False, restore_callback_states_from_checkpoint=False, no_cuda=False, use_cpu=False, use_mps_device=False, seed=42, data_seed=42, jit_mode_eval=False, use_ipex=False, bf16=False, fp16=True, fp16_opt_level='O1', half_precision_backend='auto', bf16_full_eval=False, fp16_full_eval=False, tf32=None, local_rank=0, ddp_backend=None, tpu_num_cores=None, tpu_metrics_debug=False, debug=[], dataloader_drop_last=False, eval_steps=None, dataloader_num_workers=4, dataloader_prefetch_factor=10, past_index=-1, run_name='/mnt/data/users/liamding/data/MMMT/lora/llava_ov-lora/v1-20250528-230243', disable_tqdm=False, remove_unused_columns=False, label_names=None, load_best_model_at_end=False, metric_for_best_model='loss', greater_is_better=False, ignore_data_skip=False, fsdp=[], fsdp_min_num_params=0, fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, tp_size=0, fsdp_transformer_layer_cls_to_wrap=None, accelerator_config=AcceleratorConfig(split_batches=False, dispatch_batches=False, even_batches=True, use_seedable_sampler=True, non_blocking=False, gradient_accumulation_kwargs=None, use_configured_state=False), deepspeed={'fp16': {'enabled': 'auto', 'loss_scale': 0, 'loss_scale_window': 1000, 'initial_scale_power': 16, 'hysteresis': 2, 'min_loss_scale': 1}, 'bf16': {'enabled': 'auto'}, 'zero_optimization': {'stage': 2, 'offload_optimizer': {'device': 'none', 'pin_memory': True}, 'allgather_partitions': True, 'allgather_bucket_size': 200000000.0, 'overlap_comm': False, 'reduce_scatter': True, 'reduce_bucket_size': 200000000.0, 'contiguous_gradients': True}, 'gradient_accumulation_steps': 'auto', 'gradient_clipping': 'auto', 'steps_per_print': 2000, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'wall_clock_breakdown': False}, label_smoothing_factor=0.0, optim=<OptimizerNames.ADAMW_TORCH: 'adamw_torch'>, optim_args=None, adafactor=False, group_by_length=False, length_column_name='length', report_to=['wandb'], ddp_find_unused_parameters=None, ddp_bucket_cap_mb=None, ddp_broadcast_buffers=None, dataloader_pin_memory=True, dataloader_persistent_workers=False, skip_memory_metrics=True, use_legacy_prediction_loop=False, push_to_hub=False, resume_from_checkpoint=None, hub_model_id=None, hub_strategy=<HubStrategy.EVERY_SAVE: 'every_save'>, hub_token=None, hub_private_repo=None, hub_always_push=False, gradient_checkpointing=True, gradient_checkpointing_kwargs=None, include_inputs_for_metrics=False, include_for_metrics=[], eval_do_concat_batches=True, fp16_backend='auto', push_to_hub_model_id=None, push_to_hub_organization=None, push_to_hub_token=None, mp_parameters='', auto_find_batch_size=False, full_determinism=False, torchdynamo=None, ray_scope='last', ddp_timeout=1800, torch_compile=False, torch_compile_backend=None, torch_compile_mode=None, include_tokens_per_second=None, include_num_input_tokens_seen=None, neftune_noise_alpha=None, optim_target_modules=None, batch_eval_metrics=False, eval_on_start=False, use_liger_kernel=False, eval_use_gather_object=False, average_tokens_across_devices=None, sortish_sampler=False, predict_with_generate=False, generation_max_length=None, generation_num_beams=None, generation_config=None, check_model=True, acc_strategy='token', train_dataloader_shuffle=True, metric_warmup_step=0, fsdp_num=1, acc_steps=1, eval_use_evalscope=False, eval_datasets=[], eval_limit=None, eval_datasets_args=None, eval_generation_config=None, train_type='lora', optimizer=None, local_repo_path=None, galore_config=None)"
+}
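args.json is the ms-swift record of this run: LoRA (rank 8, alpha 16, dropout 0.1) over all linear layers, with the vision tower and projector frozen, fp16 under DeepSpeed ZeRO-2, cosine schedule at lr 2e-5 for 10 epochs on 4 GPUs. A minimal sketch (stdlib only) for pulling the key hyperparameters back out of the file:

# Hedged sketch: summarize the training run from args.json.
import json

with open("llava-ov-lora/args.json") as f:
    args = json.load(f)

for k in ["train_type", "lora_rank", "lora_alpha", "lora_dropout",
          "learning_rate", "num_train_epochs", "per_device_train_batch_size",
          "gradient_accumulation_steps", "global_world_size"]:
    print(f"{k:32s} {args[k]}")

# Effective batch size = per-device batch * grad accumulation * world size
eff = (args["per_device_train_batch_size"]
       * args["gradient_accumulation_steps"]
       * args["global_world_size"])
print("effective batch size:", eff)  # 4 * 2 * 4 = 32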
llava-ov-lora/chat_template.json
ADDED
@@ -0,0 +1,3 @@
+{
+  "chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + ' '}}{# Render all images first #}{% for content in message['content'] | selectattr('type', 'equalto', 'image') %}{{ '<image>' }}{% endfor %}{# Render all video then #}{% for content in message['content'] | selectattr('type', 'equalto', 'video') %}{{ '<video>' }}{% endfor %}{# Render all text next #}{% if message['role'] != 'assistant' %}{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}{{ '\n' + content['text'] }}{% endfor %}{% else %}{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}{% generation %}{{ '\n' + content['text'] }}{% endgeneration %}{% endfor %}{% endif %}{{'<|im_end|>'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"
+}
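The Jinja template above renders `<image>`/`<video>` placeholders before the text of each turn and wraps assistant text in `{% generation %}` tags, which transformers uses to build assistant-only loss masks. A minimal sketch (assuming a recent `transformers` whose processors support `apply_chat_template`) of how one image-plus-text turn renders:

# Hedged sketch: render the chat template for a single multimodal turn.
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("llava-ov-lora")  # local path
messages = [{
    "role": "user",
    "content": [
        {"type": "image"},
        {"type": "text", "text": "Describe the image."},
    ],
}]
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
print(prompt)
# Roughly: "<|im_start|>user <image>\nDescribe the image.<|im_end|><|im_start|>assistant\n"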
llava-ov-lora/config.json
ADDED
@@ -0,0 +1,211 @@
+{
+  "architectures": [
+    "LlavaOnevisionForConditionalGeneration"
+  ],
+  "hidden_size": 3584,
+  "ignore_index": -100,
+  "image_grid_pinpoints": [
+    [
+      384,
+      384
+    ],
+    [
+      384,
+      768
+    ],
+    [
+      384,
+      1152
+    ],
+    [
+      384,
+      1536
+    ],
+    [
+      384,
+      1920
+    ],
+    [
+      384,
+      2304
+    ],
+    [
+      768,
+      384
+    ],
+    [
+      768,
+      768
+    ],
+    [
+      768,
+      1152
+    ],
+    [
+      768,
+      1536
+    ],
+    [
+      768,
+      1920
+    ],
+    [
+      768,
+      2304
+    ],
+    [
+      1152,
+      384
+    ],
+    [
+      1152,
+      768
+    ],
+    [
+      1152,
+      1152
+    ],
+    [
+      1152,
+      1536
+    ],
+    [
+      1152,
+      1920
+    ],
+    [
+      1152,
+      2304
+    ],
+    [
+      1536,
+      384
+    ],
+    [
+      1536,
+      768
+    ],
+    [
+      1536,
+      1152
+    ],
+    [
+      1536,
+      1536
+    ],
+    [
+      1536,
+      1920
+    ],
+    [
+      1536,
+      2304
+    ],
+    [
+      1920,
+      384
+    ],
+    [
+      1920,
+      768
+    ],
+    [
+      1920,
+      1152
+    ],
+    [
+      1920,
+      1536
+    ],
+    [
+      1920,
+      1920
+    ],
+    [
+      1920,
+      2304
+    ],
+    [
+      2304,
+      384
+    ],
+    [
+      2304,
+      768
+    ],
+    [
+      2304,
+      1152
+    ],
+    [
+      2304,
+      1536
+    ],
+    [
+      2304,
+      1920
+    ],
+    [
+      2304,
+      2304
+    ]
+  ],
+  "image_token_index": 151646,
+  "keys_to_ignore_at_inference": [
+    "past_key_values"
+  ],
+  "model_type": "llava_onevision",
+  "multimodal_projector_bias": true,
+  "pad_token_id": 151643,
+  "projector_hidden_act": "gelu",
+  "text_config": {
+    "_name_or_path": "Qwen/Qwen2-7B-Instruct",
+    "architectures": [
+      "Qwen2ForCausalLM"
+    ],
+    "attention_dropout": 0.0,
+    "bos_token_id": 151643,
+    "eos_token_id": 151645,
+    "hidden_act": "silu",
+    "hidden_size": 3584,
+    "initializer_range": 0.02,
+    "intermediate_size": 18944,
+    "max_position_embeddings": 32768,
+    "max_window_layers": 28,
+    "model_type": "qwen2",
+    "num_attention_heads": 28,
+    "num_hidden_layers": 28,
+    "num_key_value_heads": 4,
+    "rms_norm_eps": 1e-06,
+    "rope_scaling": null,
+    "rope_theta": 1000000.0,
+    "sliding_window": 4096,
+    "torch_dtype": "float16",
+    "use_cache": true,
+    "use_sliding_window": false,
+    "vocab_size": 152128
+  },
+  "tie_word_embeddings": false,
+  "torch_dtype": "float16",
+  "transformers_version": "4.51.3",
+  "use_image_newline_parameter": true,
+  "video_token_index": 151647,
+  "vision_aspect_ratio": "anyres_max_9",
+  "vision_config": {
+    "attention_dropout": 0.0,
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 1152,
+    "image_size": 384,
+    "intermediate_size": 4304,
+    "layer_norm_eps": 1e-06,
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 16,
+    "num_channels": 3,
+    "num_hidden_layers": 26,
+    "patch_size": 14,
+    "torch_dtype": "float16",
+    "vision_use_head": false
+  },
+  "vision_feature_layer": -1,
+  "vision_feature_select_strategy": "full"
+}
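config.json composes a Qwen2-7B text backbone (28 layers, GQA with 4 KV heads) with a SigLIP vision tower (384 px input, patch size 14) under the `anyres_max_9` tiling scheme; `image_grid_pinpoints` enumerates the 6 x 6 grid of supported resolutions from 384 to 2304 px per side. A minimal sketch (assuming `transformers` is installed) for loading the composite config and reading both sub-configs:

# Hedged sketch: inspect the composite LLaVA-OneVision config.
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("llava-ov-lora")  # local path
print(cfg.model_type)                        # llava_onevision
print(cfg.text_config.model_type,            # qwen2
      cfg.text_config.num_hidden_layers,     # 28
      cfg.text_config.num_key_value_heads)   # 4
print(cfg.vision_config.model_type,          # siglip_vision_model
      cfg.vision_config.image_size,          # 384
      cfg.vision_config.patch_size)          # 14
print(len(cfg.image_grid_pinpoints))         # 36 resolutions (6 x 6 grid)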
llava-ov-lora/generation_config.json
ADDED
@@ -0,0 +1,6 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 151643,
+  "eos_token_id": 151645,
+  "transformers_version": "4.51.3"
+}
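These generation defaults only pin the BOS/EOS ids (matching `<|endoftext|>` and `<|im_end|>` in added_tokens.json above); sampling settings are supplied by the caller. A minimal sketch (assuming `transformers`) of how they load and get overridden:

# Hedged sketch: load generation defaults and override per call.
from transformers import GenerationConfig

gen = GenerationConfig.from_pretrained("llava-ov-lora")  # local path
print(gen.bos_token_id, gen.eos_token_id)  # 151643 151645
# Per-call settings such as max_new_tokens=64 (as recorded in args.json) are
# passed to model.generate(..., max_new_tokens=64) rather than stored here.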
llava-ov-lora/merges.txt
ADDED
The diff for this file is too large to render.
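merges.txt is the BPE merge list and is skipped by the renderer. The model.safetensors.index.json shown next maps every tensor name to one of four shards; a minimal sketch (stdlib only) for resolving a tensor to its shard:

# Hedged sketch: look up which shard holds a tensor via the safetensors index.
import json
from collections import Counter

with open("llava-ov-lora/model.safetensors.index.json") as f:
    index = json.load(f)

weight_map = index["weight_map"]
print(index["metadata"]["total_size"])               # 16061615168 bytes (~16 GB, fp16)
print(weight_map["language_model.lm_head.weight"])   # model-00004-of-00004.safetensors

# Group tensors by shard to see how the checkpoint is partitioned.
print(Counter(weight_map.values()))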
llava-ov-lora/model.safetensors.index.json
ADDED
@@ -0,0 +1,772 @@
+{
+  "metadata": {
+    "total_size": 16061615168
+  },
+  "weight_map": {
+    "image_newline": "model-00001-of-00004.safetensors",
+    "language_model.lm_head.weight": "model-00004-of-00004.safetensors",
+    "language_model.model.embed_tokens.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.0.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.0.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.0.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.1.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.1.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.1.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.10.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.10.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.10.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.11.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.11.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.11.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.12.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.12.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.12.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.13.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.13.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.13.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.14.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.14.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.14.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.15.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.15.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.15.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.16.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.16.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.16.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.17.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.17.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.17.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.17.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.17.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.17.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.17.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.17.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.18.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.18.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.18.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.18.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.18.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.18.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.18.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.18.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.18.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.18.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.18.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.18.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.19.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.19.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.19.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.19.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.19.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.19.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.19.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.2.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.2.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.2.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.20.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.20.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.20.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.20.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.20.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.21.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.21.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.21.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.22.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.22.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.22.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.23.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.23.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.23.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 216 |
+
"language_model.model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 217 |
+
"language_model.model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 218 |
+
"language_model.model.layers.24.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 219 |
+
"language_model.model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 220 |
+
"language_model.model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 221 |
+
"language_model.model.layers.24.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 222 |
+
"language_model.model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 223 |
+
"language_model.model.layers.24.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 224 |
+
"language_model.model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 225 |
+
"language_model.model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 226 |
+
"language_model.model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 227 |
+
"language_model.model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 228 |
+
"language_model.model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 229 |
+
"language_model.model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 230 |
+
"language_model.model.layers.25.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 231 |
+
"language_model.model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 232 |
+
"language_model.model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 233 |
+
"language_model.model.layers.25.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 234 |
+
"language_model.model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 235 |
+
"language_model.model.layers.25.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 236 |
+
"language_model.model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 237 |
+
"language_model.model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 238 |
+
"language_model.model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 239 |
+
"language_model.model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 240 |
+
"language_model.model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 241 |
+
"language_model.model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 242 |
+
"language_model.model.layers.26.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 243 |
+
"language_model.model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 244 |
+
"language_model.model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 245 |
+
"language_model.model.layers.26.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 246 |
+
"language_model.model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 247 |
+
"language_model.model.layers.26.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 248 |
+
"language_model.model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 249 |
+
"language_model.model.layers.27.input_layernorm.weight": "model-00004-of-00004.safetensors",
|
| 250 |
+
"language_model.model.layers.27.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
|
| 251 |
+
"language_model.model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 252 |
+
"language_model.model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 253 |
+
"language_model.model.layers.27.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
|
| 254 |
+
"language_model.model.layers.27.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 255 |
+
"language_model.model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 256 |
+
"language_model.model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 257 |
+
"language_model.model.layers.27.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 258 |
+
"language_model.model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 259 |
+
"language_model.model.layers.27.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 260 |
+
"language_model.model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 261 |
+
"language_model.model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 262 |
+
"language_model.model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 263 |
+
"language_model.model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 264 |
+
"language_model.model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 265 |
+
"language_model.model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 266 |
+
"language_model.model.layers.3.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 267 |
+
"language_model.model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 268 |
+
"language_model.model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
| 269 |
+
"language_model.model.layers.3.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 270 |
+
"language_model.model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 271 |
+
"language_model.model.layers.3.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 272 |
+
"language_model.model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 273 |
+
"language_model.model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 274 |
+
"language_model.model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 275 |
+
"language_model.model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 276 |
+
"language_model.model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 277 |
+
"language_model.model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 278 |
+
"language_model.model.layers.4.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 279 |
+
"language_model.model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 280 |
+
"language_model.model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
| 281 |
+
"language_model.model.layers.4.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 282 |
+
"language_model.model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 283 |
+
"language_model.model.layers.4.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 284 |
+
"language_model.model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 285 |
+
"language_model.model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 286 |
+
"language_model.model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 287 |
+
"language_model.model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 288 |
+
"language_model.model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 289 |
+
"language_model.model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 290 |
+
"language_model.model.layers.5.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 291 |
+
"language_model.model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 292 |
+
"language_model.model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
| 293 |
+
"language_model.model.layers.5.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 294 |
+
"language_model.model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 295 |
+
"language_model.model.layers.5.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 296 |
+
"language_model.model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 297 |
+
"language_model.model.layers.6.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 298 |
+
"language_model.model.layers.6.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
| 299 |
+
"language_model.model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 300 |
+
"language_model.model.layers.6.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
| 301 |
+
"language_model.model.layers.6.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 302 |
+
"language_model.model.layers.6.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 303 |
+
"language_model.model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 304 |
+
"language_model.model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
| 305 |
+
"language_model.model.layers.6.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 306 |
+
"language_model.model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 307 |
+
"language_model.model.layers.6.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 308 |
+
"language_model.model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 309 |
+
"language_model.model.layers.7.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 310 |
+
"language_model.model.layers.7.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
| 311 |
+
"language_model.model.layers.7.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
| 312 |
+
"language_model.model.layers.7.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
| 313 |
+
"language_model.model.layers.7.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 314 |
+
"language_model.model.layers.7.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
|
| 315 |
+
"language_model.model.layers.7.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
| 316 |
+
"language_model.model.layers.7.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
| 317 |
+
"language_model.model.layers.7.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
|
| 318 |
+
"language_model.model.layers.7.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
| 319 |
+
"language_model.model.layers.7.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
| 320 |
+
"language_model.model.layers.7.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
| 321 |
+
"language_model.model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 322 |
+
"language_model.model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
| 323 |
+
"language_model.model.layers.8.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
| 324 |
+
"language_model.model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
| 325 |
+
"language_model.model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 326 |
+
"language_model.model.layers.8.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
|
| 327 |
+
"language_model.model.layers.8.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
| 328 |
+
"language_model.model.layers.8.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
| 329 |
+
"language_model.model.layers.8.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
|
| 330 |
+
"language_model.model.layers.8.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
| 331 |
+
"language_model.model.layers.8.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
| 332 |
+
"language_model.model.layers.8.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
| 333 |
+
"language_model.model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 334 |
+
"language_model.model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
| 335 |
+
"language_model.model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
| 336 |
+
"language_model.model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
| 337 |
+
"language_model.model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 338 |
+
"language_model.model.layers.9.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
|
| 339 |
+
"language_model.model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
| 340 |
+
"language_model.model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
| 341 |
+
"language_model.model.layers.9.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
|
| 342 |
+
"language_model.model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
| 343 |
+
"language_model.model.layers.9.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
| 344 |
+
"language_model.model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
| 345 |
+
"language_model.model.norm.weight": "model-00004-of-00004.safetensors",
|
| 346 |
+
"multi_modal_projector.linear_1.bias": "model-00001-of-00004.safetensors",
|
| 347 |
+
"multi_modal_projector.linear_1.weight": "model-00001-of-00004.safetensors",
|
| 348 |
+
"multi_modal_projector.linear_2.bias": "model-00001-of-00004.safetensors",
|
| 349 |
+
"multi_modal_projector.linear_2.weight": "model-00001-of-00004.safetensors",
|
| 350 |
+
"vision_tower.vision_model.embeddings.patch_embedding.bias": "model-00001-of-00004.safetensors",
|
| 351 |
+
"vision_tower.vision_model.embeddings.patch_embedding.weight": "model-00001-of-00004.safetensors",
|
| 352 |
+
"vision_tower.vision_model.embeddings.position_embedding.weight": "model-00001-of-00004.safetensors",
|
| 353 |
+
"vision_tower.vision_model.encoder.layers.0.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
| 354 |
+
"vision_tower.vision_model.encoder.layers.0.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
| 355 |
+
"vision_tower.vision_model.encoder.layers.0.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
| 356 |
+
"vision_tower.vision_model.encoder.layers.0.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
| 357 |
+
"vision_tower.vision_model.encoder.layers.0.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 358 |
+
"vision_tower.vision_model.encoder.layers.0.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 359 |
+
"vision_tower.vision_model.encoder.layers.0.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 360 |
+
"vision_tower.vision_model.encoder.layers.0.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 361 |
+
"vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 362 |
+
"vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 363 |
+
"vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
| 364 |
+
"vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
| 365 |
+
"vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 366 |
+
"vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 367 |
+
"vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 368 |
+
"vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 369 |
+
"vision_tower.vision_model.encoder.layers.1.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
| 370 |
+
"vision_tower.vision_model.encoder.layers.1.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
| 371 |
+
"vision_tower.vision_model.encoder.layers.1.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
| 372 |
+
"vision_tower.vision_model.encoder.layers.1.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
| 373 |
+
"vision_tower.vision_model.encoder.layers.1.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 374 |
+
"vision_tower.vision_model.encoder.layers.1.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 375 |
+
"vision_tower.vision_model.encoder.layers.1.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 376 |
+
"vision_tower.vision_model.encoder.layers.1.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 377 |
+
"vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 378 |
+
"vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 379 |
+
"vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
| 380 |
+
"vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
| 381 |
+
"vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 382 |
+
"vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 383 |
+
"vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 384 |
+
"vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 385 |
+
"vision_tower.vision_model.encoder.layers.10.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
| 386 |
+
"vision_tower.vision_model.encoder.layers.10.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
| 387 |
+
"vision_tower.vision_model.encoder.layers.10.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
| 388 |
+
"vision_tower.vision_model.encoder.layers.10.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
| 389 |
+
"vision_tower.vision_model.encoder.layers.10.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 390 |
+
"vision_tower.vision_model.encoder.layers.10.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 391 |
+
"vision_tower.vision_model.encoder.layers.10.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 392 |
+
"vision_tower.vision_model.encoder.layers.10.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 393 |
+
"vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 394 |
+
"vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 395 |
+
"vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
| 396 |
+
"vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
| 397 |
+
"vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 398 |
+
"vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 399 |
+
"vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 400 |
+
"vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 401 |
+
"vision_tower.vision_model.encoder.layers.11.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
| 402 |
+
"vision_tower.vision_model.encoder.layers.11.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
| 403 |
+
"vision_tower.vision_model.encoder.layers.11.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
| 404 |
+
"vision_tower.vision_model.encoder.layers.11.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
| 405 |
+
"vision_tower.vision_model.encoder.layers.11.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 406 |
+
"vision_tower.vision_model.encoder.layers.11.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 407 |
+
"vision_tower.vision_model.encoder.layers.11.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 408 |
+
"vision_tower.vision_model.encoder.layers.11.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 409 |
+
"vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 410 |
+
"vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 411 |
+
"vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
| 412 |
+
"vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
| 413 |
+
"vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 414 |
+
"vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 415 |
+
"vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 416 |
+
"vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 417 |
+
"vision_tower.vision_model.encoder.layers.12.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
| 418 |
+
"vision_tower.vision_model.encoder.layers.12.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
| 419 |
+
"vision_tower.vision_model.encoder.layers.12.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
| 420 |
+
"vision_tower.vision_model.encoder.layers.12.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
| 421 |
+
"vision_tower.vision_model.encoder.layers.12.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 422 |
+
"vision_tower.vision_model.encoder.layers.12.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 423 |
+
"vision_tower.vision_model.encoder.layers.12.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 424 |
+
"vision_tower.vision_model.encoder.layers.12.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 425 |
+
"vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 426 |
+
"vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 427 |
+
"vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
| 428 |
+
"vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
| 429 |
+
"vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 430 |
+
"vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 431 |
+
"vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 432 |
+
"vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 433 |
+
"vision_tower.vision_model.encoder.layers.13.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
| 434 |
+
"vision_tower.vision_model.encoder.layers.13.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
| 435 |
+
"vision_tower.vision_model.encoder.layers.13.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
| 436 |
+
"vision_tower.vision_model.encoder.layers.13.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
| 437 |
+
"vision_tower.vision_model.encoder.layers.13.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 438 |
+
"vision_tower.vision_model.encoder.layers.13.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 439 |
+
"vision_tower.vision_model.encoder.layers.13.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 440 |
+
"vision_tower.vision_model.encoder.layers.13.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 441 |
+
"vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 442 |
+
"vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 443 |
+
"vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
| 444 |
+
"vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
| 445 |
+
"vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 446 |
+
"vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 447 |
+
"vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 448 |
+
"vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 449 |
+
"vision_tower.vision_model.encoder.layers.14.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
| 450 |
+
"vision_tower.vision_model.encoder.layers.14.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
| 451 |
+
"vision_tower.vision_model.encoder.layers.14.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
| 452 |
+
"vision_tower.vision_model.encoder.layers.14.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
| 453 |
+
"vision_tower.vision_model.encoder.layers.14.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 454 |
+
"vision_tower.vision_model.encoder.layers.14.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 455 |
+
"vision_tower.vision_model.encoder.layers.14.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 456 |
+
"vision_tower.vision_model.encoder.layers.14.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 457 |
+
"vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 458 |
+
"vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 459 |
+
"vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
| 460 |
+
"vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
| 461 |
+
"vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 462 |
+
"vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 463 |
+
"vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 464 |
+
"vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 465 |
+
"vision_tower.vision_model.encoder.layers.15.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
| 466 |
+
"vision_tower.vision_model.encoder.layers.15.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
| 467 |
+
"vision_tower.vision_model.encoder.layers.15.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
| 468 |
+
"vision_tower.vision_model.encoder.layers.15.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
| 469 |
+
"vision_tower.vision_model.encoder.layers.15.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 470 |
+
"vision_tower.vision_model.encoder.layers.15.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 471 |
+
"vision_tower.vision_model.encoder.layers.15.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 472 |
+
"vision_tower.vision_model.encoder.layers.15.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 473 |
+
"vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 474 |
+
"vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 475 |
+
"vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
| 476 |
+
"vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
| 477 |
+
"vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 478 |
+
"vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 479 |
+
"vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 480 |
+
"vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 481 |
+
"vision_tower.vision_model.encoder.layers.16.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
| 482 |
+
"vision_tower.vision_model.encoder.layers.16.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
| 483 |
+
"vision_tower.vision_model.encoder.layers.16.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
| 484 |
+
"vision_tower.vision_model.encoder.layers.16.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
| 485 |
+
"vision_tower.vision_model.encoder.layers.16.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 486 |
+
"vision_tower.vision_model.encoder.layers.16.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 487 |
+
"vision_tower.vision_model.encoder.layers.16.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 488 |
+
"vision_tower.vision_model.encoder.layers.16.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 489 |
+
"vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 490 |
+
"vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 491 |
+
"vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
| 492 |
+
"vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
| 493 |
+
"vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 494 |
+
"vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 495 |
+
"vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 496 |
+
"vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 497 |
+
"vision_tower.vision_model.encoder.layers.17.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
| 498 |
+
"vision_tower.vision_model.encoder.layers.17.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
| 499 |
+
"vision_tower.vision_model.encoder.layers.17.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
| 500 |
+
"vision_tower.vision_model.encoder.layers.17.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
| 501 |
+
"vision_tower.vision_model.encoder.layers.17.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 502 |
+
"vision_tower.vision_model.encoder.layers.17.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 503 |
+
"vision_tower.vision_model.encoder.layers.17.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 504 |
+
"vision_tower.vision_model.encoder.layers.17.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 505 |
+
"vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 506 |
+
"vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 507 |
+
"vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
| 508 |
+
"vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
| 509 |
+
"vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 510 |
+
"vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 511 |
+
"vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 512 |
+
"vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 513 |
+
"vision_tower.vision_model.encoder.layers.18.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
| 514 |
+
"vision_tower.vision_model.encoder.layers.18.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
| 515 |
+
"vision_tower.vision_model.encoder.layers.18.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
| 516 |
+
"vision_tower.vision_model.encoder.layers.18.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
| 517 |
+
"vision_tower.vision_model.encoder.layers.18.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 518 |
+
"vision_tower.vision_model.encoder.layers.18.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 519 |
+
"vision_tower.vision_model.encoder.layers.18.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 520 |
+
"vision_tower.vision_model.encoder.layers.18.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 521 |
+
"vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 522 |
+
"vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 523 |
+
"vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
| 524 |
+
"vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
| 525 |
+
"vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 526 |
+
"vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 527 |
+
"vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 528 |
+
"vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 529 |
+
"vision_tower.vision_model.encoder.layers.19.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
| 530 |
+
"vision_tower.vision_model.encoder.layers.19.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
| 531 |
+
"vision_tower.vision_model.encoder.layers.19.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
| 532 |
+
"vision_tower.vision_model.encoder.layers.19.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
| 533 |
+
"vision_tower.vision_model.encoder.layers.19.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 534 |
+
"vision_tower.vision_model.encoder.layers.19.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 535 |
+
"vision_tower.vision_model.encoder.layers.19.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 536 |
+
"vision_tower.vision_model.encoder.layers.19.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 537 |
+
"vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 538 |
+
"vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 539 |
+
"vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
| 540 |
+
"vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
| 541 |
+
"vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 542 |
+
"vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 543 |
+
"vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 544 |
+
"vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 545 |
+
"vision_tower.vision_model.encoder.layers.2.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
| 546 |
+
"vision_tower.vision_model.encoder.layers.2.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
| 547 |
+
"vision_tower.vision_model.encoder.layers.2.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
| 548 |
+
"vision_tower.vision_model.encoder.layers.2.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
| 549 |
+
"vision_tower.vision_model.encoder.layers.2.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 550 |
+
"vision_tower.vision_model.encoder.layers.2.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 551 |
+
"vision_tower.vision_model.encoder.layers.2.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 552 |
+
"vision_tower.vision_model.encoder.layers.2.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 553 |
+
"vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 554 |
+
"vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 555 |
+
"vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
| 556 |
+
"vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
| 557 |
+
"vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 558 |
+
"vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 559 |
+
"vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 560 |
+
"vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 561 |
+
"vision_tower.vision_model.encoder.layers.20.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
| 562 |
+
"vision_tower.vision_model.encoder.layers.20.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
| 563 |
+
"vision_tower.vision_model.encoder.layers.20.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
| 564 |
+
"vision_tower.vision_model.encoder.layers.20.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
| 565 |
+
"vision_tower.vision_model.encoder.layers.20.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 566 |
+
"vision_tower.vision_model.encoder.layers.20.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 567 |
+
"vision_tower.vision_model.encoder.layers.20.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 568 |
+
"vision_tower.vision_model.encoder.layers.20.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 569 |
+
"vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 570 |
+
"vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 571 |
+
"vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
| 572 |
+
"vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
| 573 |
+
"vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 574 |
+
"vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 575 |
+
"vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 576 |
+
"vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 577 |
+
"vision_tower.vision_model.encoder.layers.21.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
| 578 |
+
"vision_tower.vision_model.encoder.layers.21.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
| 579 |
+
"vision_tower.vision_model.encoder.layers.21.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
| 580 |
+
"vision_tower.vision_model.encoder.layers.21.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
| 581 |
+
"vision_tower.vision_model.encoder.layers.21.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 582 |
+
"vision_tower.vision_model.encoder.layers.21.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 583 |
+
"vision_tower.vision_model.encoder.layers.21.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 584 |
+
"vision_tower.vision_model.encoder.layers.21.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 585 |
+
"vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 586 |
+
"vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 587 |
+
"vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
| 588 |
+
"vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
| 589 |
+
"vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 590 |
+
"vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 591 |
+
"vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 592 |
+
"vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 593 |
+
"vision_tower.vision_model.encoder.layers.22.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
| 594 |
+
"vision_tower.vision_model.encoder.layers.22.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
| 595 |
+
"vision_tower.vision_model.encoder.layers.22.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
| 596 |
+
"vision_tower.vision_model.encoder.layers.22.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
| 597 |
+
"vision_tower.vision_model.encoder.layers.22.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 598 |
+
"vision_tower.vision_model.encoder.layers.22.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 599 |
+
"vision_tower.vision_model.encoder.layers.22.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 600 |
+
"vision_tower.vision_model.encoder.layers.22.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 601 |
+
"vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 602 |
+
"vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 603 |
+
"vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
| 604 |
+
"vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
| 605 |
+
"vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 606 |
+
"vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 607 |
+
"vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 608 |
+
"vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 609 |
+
"vision_tower.vision_model.encoder.layers.23.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
| 610 |
+
"vision_tower.vision_model.encoder.layers.23.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
| 611 |
+
"vision_tower.vision_model.encoder.layers.23.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
| 612 |
+
"vision_tower.vision_model.encoder.layers.23.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
| 613 |
+
"vision_tower.vision_model.encoder.layers.23.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 614 |
+
"vision_tower.vision_model.encoder.layers.23.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 615 |
+
"vision_tower.vision_model.encoder.layers.23.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 616 |
+
"vision_tower.vision_model.encoder.layers.23.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 617 |
+
"vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 618 |
+
"vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 619 |
+
"vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
| 620 |
+
"vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
| 621 |
+
"vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 622 |
+
"vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 623 |
+
"vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 624 |
+
"vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 625 |
+
"vision_tower.vision_model.encoder.layers.24.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
| 626 |
+
"vision_tower.vision_model.encoder.layers.24.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
| 627 |
+
"vision_tower.vision_model.encoder.layers.24.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
| 628 |
+
"vision_tower.vision_model.encoder.layers.24.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
| 629 |
+
"vision_tower.vision_model.encoder.layers.24.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 630 |
+
"vision_tower.vision_model.encoder.layers.24.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 631 |
+
"vision_tower.vision_model.encoder.layers.24.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 632 |
+
"vision_tower.vision_model.encoder.layers.24.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 633 |
+
"vision_tower.vision_model.encoder.layers.24.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 634 |
+
"vision_tower.vision_model.encoder.layers.24.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 635 |
+
"vision_tower.vision_model.encoder.layers.24.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
| 636 |
+
"vision_tower.vision_model.encoder.layers.24.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
| 637 |
+
"vision_tower.vision_model.encoder.layers.24.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 638 |
+
"vision_tower.vision_model.encoder.layers.24.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 639 |
+
"vision_tower.vision_model.encoder.layers.24.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 640 |
+
"vision_tower.vision_model.encoder.layers.24.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 641 |
+
"vision_tower.vision_model.encoder.layers.25.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
| 642 |
+
"vision_tower.vision_model.encoder.layers.25.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
| 643 |
+
"vision_tower.vision_model.encoder.layers.25.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
| 644 |
+
"vision_tower.vision_model.encoder.layers.25.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
| 645 |
+
"vision_tower.vision_model.encoder.layers.25.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 646 |
+
"vision_tower.vision_model.encoder.layers.25.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 647 |
+
"vision_tower.vision_model.encoder.layers.25.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 648 |
+
"vision_tower.vision_model.encoder.layers.25.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 649 |
+
"vision_tower.vision_model.encoder.layers.25.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 650 |
+
"vision_tower.vision_model.encoder.layers.25.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 651 |
+
"vision_tower.vision_model.encoder.layers.25.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
| 652 |
+
"vision_tower.vision_model.encoder.layers.25.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
| 653 |
+
"vision_tower.vision_model.encoder.layers.25.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 654 |
+
"vision_tower.vision_model.encoder.layers.25.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 655 |
+
"vision_tower.vision_model.encoder.layers.25.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 656 |
+
"vision_tower.vision_model.encoder.layers.25.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 657 |
+
"vision_tower.vision_model.encoder.layers.3.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
| 658 |
+
"vision_tower.vision_model.encoder.layers.3.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
| 659 |
+
"vision_tower.vision_model.encoder.layers.3.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
| 660 |
+
"vision_tower.vision_model.encoder.layers.3.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
| 661 |
+
"vision_tower.vision_model.encoder.layers.3.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 662 |
+
"vision_tower.vision_model.encoder.layers.3.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 663 |
+
"vision_tower.vision_model.encoder.layers.3.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 664 |
+
"vision_tower.vision_model.encoder.layers.3.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 665 |
+
"vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 666 |
+
"vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 667 |
+
"vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
| 668 |
+
"vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
| 669 |
+
"vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 670 |
+
"vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 671 |
+
"vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 672 |
+
"vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 673 |
+
"vision_tower.vision_model.encoder.layers.4.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
| 674 |
+
"vision_tower.vision_model.encoder.layers.4.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
| 675 |
+
"vision_tower.vision_model.encoder.layers.4.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
| 676 |
+
"vision_tower.vision_model.encoder.layers.4.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
| 677 |
+
"vision_tower.vision_model.encoder.layers.4.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 678 |
+
"vision_tower.vision_model.encoder.layers.4.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 679 |
+
"vision_tower.vision_model.encoder.layers.4.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 680 |
+
"vision_tower.vision_model.encoder.layers.4.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 681 |
+
"vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 682 |
+
"vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 683 |
+
"vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
| 684 |
+
"vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
| 685 |
+
"vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 686 |
+
"vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 687 |
+
"vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 688 |
+
"vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 689 |
+
"vision_tower.vision_model.encoder.layers.5.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
| 690 |
+
"vision_tower.vision_model.encoder.layers.5.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
| 691 |
+
"vision_tower.vision_model.encoder.layers.5.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
| 692 |
+
"vision_tower.vision_model.encoder.layers.5.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
| 693 |
+
"vision_tower.vision_model.encoder.layers.5.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 694 |
+
"vision_tower.vision_model.encoder.layers.5.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 695 |
+
"vision_tower.vision_model.encoder.layers.5.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 696 |
+
"vision_tower.vision_model.encoder.layers.5.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 697 |
+
"vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 698 |
+
"vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 699 |
+
"vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
| 700 |
+
"vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
| 701 |
+
"vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 702 |
+
"vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 703 |
+
"vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 704 |
+
"vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 705 |
+
"vision_tower.vision_model.encoder.layers.6.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
| 706 |
+
"vision_tower.vision_model.encoder.layers.6.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
| 707 |
+
"vision_tower.vision_model.encoder.layers.6.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
| 708 |
+
"vision_tower.vision_model.encoder.layers.6.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
| 709 |
+
"vision_tower.vision_model.encoder.layers.6.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 710 |
+
"vision_tower.vision_model.encoder.layers.6.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 711 |
+
"vision_tower.vision_model.encoder.layers.6.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 712 |
+
"vision_tower.vision_model.encoder.layers.6.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 713 |
+
"vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 714 |
+
"vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 715 |
+
"vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
| 716 |
+
"vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
| 717 |
+
"vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 718 |
+
"vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 719 |
+
"vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 720 |
+
"vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 721 |
+
"vision_tower.vision_model.encoder.layers.7.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
| 722 |
+
"vision_tower.vision_model.encoder.layers.7.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
| 723 |
+
"vision_tower.vision_model.encoder.layers.7.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
| 724 |
+
"vision_tower.vision_model.encoder.layers.7.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
| 725 |
+
"vision_tower.vision_model.encoder.layers.7.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 726 |
+
"vision_tower.vision_model.encoder.layers.7.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 727 |
+
"vision_tower.vision_model.encoder.layers.7.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 728 |
+
"vision_tower.vision_model.encoder.layers.7.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 729 |
+
"vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 730 |
+
"vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 731 |
+
"vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
| 732 |
+
"vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
| 733 |
+
"vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 734 |
+
"vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 735 |
+
"vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 736 |
+
"vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 737 |
+
"vision_tower.vision_model.encoder.layers.8.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
| 738 |
+
"vision_tower.vision_model.encoder.layers.8.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
| 739 |
+
"vision_tower.vision_model.encoder.layers.8.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
| 740 |
+
"vision_tower.vision_model.encoder.layers.8.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
| 741 |
+
"vision_tower.vision_model.encoder.layers.8.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 742 |
+
"vision_tower.vision_model.encoder.layers.8.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 743 |
+
"vision_tower.vision_model.encoder.layers.8.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 744 |
+
"vision_tower.vision_model.encoder.layers.8.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 745 |
+
"vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 746 |
+
"vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 747 |
+
"vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
| 748 |
+
"vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
| 749 |
+
"vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 750 |
+
"vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 751 |
+
"vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 752 |
+
"vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 753 |
+
"vision_tower.vision_model.encoder.layers.9.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
| 754 |
+
"vision_tower.vision_model.encoder.layers.9.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
| 755 |
+
"vision_tower.vision_model.encoder.layers.9.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
| 756 |
+
"vision_tower.vision_model.encoder.layers.9.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
| 757 |
+
"vision_tower.vision_model.encoder.layers.9.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 758 |
+
"vision_tower.vision_model.encoder.layers.9.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 759 |
+
"vision_tower.vision_model.encoder.layers.9.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 760 |
+
"vision_tower.vision_model.encoder.layers.9.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 761 |
+
"vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 762 |
+
"vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 763 |
+
"vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
| 764 |
+
"vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
| 765 |
+
"vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 766 |
+
"vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 767 |
+
"vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 768 |
+
"vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 769 |
+
"vision_tower.vision_model.post_layernorm.bias": "model-00001-of-00004.safetensors",
|
| 770 |
+
"vision_tower.vision_model.post_layernorm.weight": "model-00001-of-00004.safetensors"
|
| 771 |
+
}
|
| 772 |
+
}
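Each model.safetensors.index.json in this upload follows the standard sharded-checkpoint schema: a "metadata" block with size counts (the InternVL3 index below reports 15888747520 bytes over 7944373760 parameters, i.e. 2 bytes per parameter, consistent with 16-bit weights) and a "weight_map" from tensor name to the shard file that stores it. As a minimal sketch of how such an index is typically consumed, assuming the safetensors Python package and the repo-relative path from this upload (not part of the uploaded files themselves):

    import json
    import os

    from safetensors.torch import load_file

    # Path from this upload; every index file here follows the same schema.
    index_path = "ood/internvl3-8b-instruct-lora-ood-210/model.safetensors.index.json"
    with open(index_path) as f:
        index = json.load(f)

    # Group tensor names by shard so each shard file is read only once,
    # rather than once per tensor.
    by_shard = {}
    for name, shard in index["weight_map"].items():
        by_shard.setdefault(shard, []).append(name)

    # Assemble the full state dict from the shards next to the index file.
    state_dict = {}
    base_dir = os.path.dirname(index_path)
    for shard, names in by_shard.items():
        tensors = load_file(os.path.join(base_dir, shard))
        state_dict.update({name: tensors[name] for name in names})

Grouping by shard is the usual design choice here: the index lists tensors alphabetically, but loading shard-by-shard avoids reopening the same multi-gigabyte file hundreds of times.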
ood/internvl3-8b-instruct-lora-ood-210/model.safetensors.index.json
ADDED
@@ -0,0 +1,693 @@
+{
+  "metadata": {
+    "total_parameters": 7944373760,
+    "total_size": 15888747520
+  },
+  "weight_map": {
+    "language_model.lm_head.weight": "model-00004-of-00004.safetensors",
+    "language_model.model.embed_tokens.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.0.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.0.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.0.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.1.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.1.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.1.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.10.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.10.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.10.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.11.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.11.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.11.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.12.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.12.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.12.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.13.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.13.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.13.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.14.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.14.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.14.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.15.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.15.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.15.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.16.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.16.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.16.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.17.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.17.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.17.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.17.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.17.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.17.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.18.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.18.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.18.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.18.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.18.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.18.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.18.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.18.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.18.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.18.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.18.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.18.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.19.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.19.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.19.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.19.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.19.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.19.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.19.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.2.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.2.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.2.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.20.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.20.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.20.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.20.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.20.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.21.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.21.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.21.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.22.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.22.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.22.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.23.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.23.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.23.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.24.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.24.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.24.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.25.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.25.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.25.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.26.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.26.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.26.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.27.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.27.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.27.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.3.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.3.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.3.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.4.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.4.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.4.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.5.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.5.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.5.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.6.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.6.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.6.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.7.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.7.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.7.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.7.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.7.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.7.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.7.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.7.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.7.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.8.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.8.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.8.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.8.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.8.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.8.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.8.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.8.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.9.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.9.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.9.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.norm.weight": "model-00003-of-00004.safetensors",
+    "mlp1.0.bias": "model-00004-of-00004.safetensors",
+    "mlp1.0.weight": "model-00004-of-00004.safetensors",
+    "mlp1.1.bias": "model-00004-of-00004.safetensors",
+    "mlp1.1.weight": "model-00004-of-00004.safetensors",
+    "mlp1.3.bias": "model-00004-of-00004.safetensors",
+    "mlp1.3.weight": "model-00004-of-00004.safetensors",
+    "vision_model.embeddings.class_embedding": "model-00001-of-00004.safetensors",
+    "vision_model.embeddings.patch_embedding.bias": "model-00001-of-00004.safetensors",
+    "vision_model.embeddings.patch_embedding.weight": "model-00001-of-00004.safetensors",
+    "vision_model.embeddings.position_embedding": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.0.attn.proj.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.0.attn.proj.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.0.attn.qkv.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.0.attn.qkv.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.0.ls1": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.0.ls2": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.0.mlp.fc1.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.0.mlp.fc1.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.0.mlp.fc2.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.0.mlp.fc2.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.0.norm1.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.0.norm1.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.0.norm2.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.0.norm2.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.1.attn.proj.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.1.attn.proj.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.1.attn.qkv.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.1.attn.qkv.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.1.ls1": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.1.ls2": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.1.mlp.fc1.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.1.mlp.fc1.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.1.mlp.fc2.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.1.mlp.fc2.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.1.norm1.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.1.norm1.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.1.norm2.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.1.norm2.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.10.attn.proj.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.10.attn.proj.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.10.attn.qkv.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.10.attn.qkv.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.10.ls1": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.10.ls2": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.10.mlp.fc1.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.10.mlp.fc1.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.10.mlp.fc2.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.10.mlp.fc2.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.10.norm1.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.10.norm1.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.10.norm2.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.10.norm2.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.11.attn.proj.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.11.attn.proj.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.11.attn.qkv.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.11.attn.qkv.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.11.ls1": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.11.ls2": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.11.mlp.fc1.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.11.mlp.fc1.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.11.mlp.fc2.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.11.mlp.fc2.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.11.norm1.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.11.norm1.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.11.norm2.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.11.norm2.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.12.attn.proj.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.12.attn.proj.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.12.attn.qkv.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.12.attn.qkv.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.12.ls1": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.12.ls2": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.12.mlp.fc1.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.12.mlp.fc1.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.12.mlp.fc2.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.12.mlp.fc2.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.12.norm1.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.12.norm1.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.12.norm2.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.12.norm2.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.13.attn.proj.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.13.attn.proj.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.13.attn.qkv.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.13.attn.qkv.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.13.ls1": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.13.ls2": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.13.mlp.fc1.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.13.mlp.fc1.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.13.mlp.fc2.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.13.mlp.fc2.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.13.norm1.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.13.norm1.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.13.norm2.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.13.norm2.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.14.attn.proj.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.14.attn.proj.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.14.attn.qkv.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.14.attn.qkv.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.14.ls1": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.14.ls2": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.14.mlp.fc1.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.14.mlp.fc1.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.14.mlp.fc2.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.14.mlp.fc2.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.14.norm1.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.14.norm1.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.14.norm2.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.14.norm2.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.15.attn.proj.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.15.attn.proj.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.15.attn.qkv.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.15.attn.qkv.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.15.ls1": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.15.ls2": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.15.mlp.fc1.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.15.mlp.fc1.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.15.mlp.fc2.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.15.mlp.fc2.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.15.norm1.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.15.norm1.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.15.norm2.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.15.norm2.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.16.attn.proj.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.16.attn.proj.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.16.attn.qkv.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.16.attn.qkv.weight": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.16.ls1": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.16.ls2": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.16.mlp.fc1.bias": "model-00001-of-00004.safetensors",
+    "vision_model.encoder.layers.16.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 476 |
+
"vision_model.encoder.layers.16.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 477 |
+
"vision_model.encoder.layers.16.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 478 |
+
"vision_model.encoder.layers.16.norm1.bias": "model-00001-of-00004.safetensors",
|
| 479 |
+
"vision_model.encoder.layers.16.norm1.weight": "model-00001-of-00004.safetensors",
|
| 480 |
+
"vision_model.encoder.layers.16.norm2.bias": "model-00001-of-00004.safetensors",
|
| 481 |
+
"vision_model.encoder.layers.16.norm2.weight": "model-00001-of-00004.safetensors",
|
| 482 |
+
"vision_model.encoder.layers.17.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 483 |
+
"vision_model.encoder.layers.17.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 484 |
+
"vision_model.encoder.layers.17.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 485 |
+
"vision_model.encoder.layers.17.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 486 |
+
"vision_model.encoder.layers.17.ls1": "model-00001-of-00004.safetensors",
|
| 487 |
+
"vision_model.encoder.layers.17.ls2": "model-00001-of-00004.safetensors",
|
| 488 |
+
"vision_model.encoder.layers.17.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 489 |
+
"vision_model.encoder.layers.17.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 490 |
+
"vision_model.encoder.layers.17.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 491 |
+
"vision_model.encoder.layers.17.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 492 |
+
"vision_model.encoder.layers.17.norm1.bias": "model-00001-of-00004.safetensors",
|
| 493 |
+
"vision_model.encoder.layers.17.norm1.weight": "model-00001-of-00004.safetensors",
|
| 494 |
+
"vision_model.encoder.layers.17.norm2.bias": "model-00001-of-00004.safetensors",
|
| 495 |
+
"vision_model.encoder.layers.17.norm2.weight": "model-00001-of-00004.safetensors",
|
| 496 |
+
"vision_model.encoder.layers.18.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 497 |
+
"vision_model.encoder.layers.18.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 498 |
+
"vision_model.encoder.layers.18.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 499 |
+
"vision_model.encoder.layers.18.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 500 |
+
"vision_model.encoder.layers.18.ls1": "model-00001-of-00004.safetensors",
|
| 501 |
+
"vision_model.encoder.layers.18.ls2": "model-00001-of-00004.safetensors",
|
| 502 |
+
"vision_model.encoder.layers.18.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 503 |
+
"vision_model.encoder.layers.18.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 504 |
+
"vision_model.encoder.layers.18.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 505 |
+
"vision_model.encoder.layers.18.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 506 |
+
"vision_model.encoder.layers.18.norm1.bias": "model-00001-of-00004.safetensors",
|
| 507 |
+
"vision_model.encoder.layers.18.norm1.weight": "model-00001-of-00004.safetensors",
|
| 508 |
+
"vision_model.encoder.layers.18.norm2.bias": "model-00001-of-00004.safetensors",
|
| 509 |
+
"vision_model.encoder.layers.18.norm2.weight": "model-00001-of-00004.safetensors",
|
| 510 |
+
"vision_model.encoder.layers.19.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 511 |
+
"vision_model.encoder.layers.19.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 512 |
+
"vision_model.encoder.layers.19.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 513 |
+
"vision_model.encoder.layers.19.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 514 |
+
"vision_model.encoder.layers.19.ls1": "model-00001-of-00004.safetensors",
|
| 515 |
+
"vision_model.encoder.layers.19.ls2": "model-00001-of-00004.safetensors",
|
| 516 |
+
"vision_model.encoder.layers.19.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 517 |
+
"vision_model.encoder.layers.19.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 518 |
+
"vision_model.encoder.layers.19.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 519 |
+
"vision_model.encoder.layers.19.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 520 |
+
"vision_model.encoder.layers.19.norm1.bias": "model-00001-of-00004.safetensors",
|
| 521 |
+
"vision_model.encoder.layers.19.norm1.weight": "model-00001-of-00004.safetensors",
|
| 522 |
+
"vision_model.encoder.layers.19.norm2.bias": "model-00001-of-00004.safetensors",
|
| 523 |
+
"vision_model.encoder.layers.19.norm2.weight": "model-00001-of-00004.safetensors",
|
| 524 |
+
"vision_model.encoder.layers.2.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 525 |
+
"vision_model.encoder.layers.2.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 526 |
+
"vision_model.encoder.layers.2.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 527 |
+
"vision_model.encoder.layers.2.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 528 |
+
"vision_model.encoder.layers.2.ls1": "model-00001-of-00004.safetensors",
|
| 529 |
+
"vision_model.encoder.layers.2.ls2": "model-00001-of-00004.safetensors",
|
| 530 |
+
"vision_model.encoder.layers.2.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 531 |
+
"vision_model.encoder.layers.2.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 532 |
+
"vision_model.encoder.layers.2.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 533 |
+
"vision_model.encoder.layers.2.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 534 |
+
"vision_model.encoder.layers.2.norm1.bias": "model-00001-of-00004.safetensors",
|
| 535 |
+
"vision_model.encoder.layers.2.norm1.weight": "model-00001-of-00004.safetensors",
|
| 536 |
+
"vision_model.encoder.layers.2.norm2.bias": "model-00001-of-00004.safetensors",
|
| 537 |
+
"vision_model.encoder.layers.2.norm2.weight": "model-00001-of-00004.safetensors",
|
| 538 |
+
"vision_model.encoder.layers.20.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 539 |
+
"vision_model.encoder.layers.20.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 540 |
+
"vision_model.encoder.layers.20.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 541 |
+
"vision_model.encoder.layers.20.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 542 |
+
"vision_model.encoder.layers.20.ls1": "model-00001-of-00004.safetensors",
|
| 543 |
+
"vision_model.encoder.layers.20.ls2": "model-00001-of-00004.safetensors",
|
| 544 |
+
"vision_model.encoder.layers.20.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 545 |
+
"vision_model.encoder.layers.20.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 546 |
+
"vision_model.encoder.layers.20.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 547 |
+
"vision_model.encoder.layers.20.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 548 |
+
"vision_model.encoder.layers.20.norm1.bias": "model-00001-of-00004.safetensors",
|
| 549 |
+
"vision_model.encoder.layers.20.norm1.weight": "model-00001-of-00004.safetensors",
|
| 550 |
+
"vision_model.encoder.layers.20.norm2.bias": "model-00001-of-00004.safetensors",
|
| 551 |
+
"vision_model.encoder.layers.20.norm2.weight": "model-00001-of-00004.safetensors",
|
| 552 |
+
"vision_model.encoder.layers.21.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 553 |
+
"vision_model.encoder.layers.21.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 554 |
+
"vision_model.encoder.layers.21.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 555 |
+
"vision_model.encoder.layers.21.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 556 |
+
"vision_model.encoder.layers.21.ls1": "model-00001-of-00004.safetensors",
|
| 557 |
+
"vision_model.encoder.layers.21.ls2": "model-00001-of-00004.safetensors",
|
| 558 |
+
"vision_model.encoder.layers.21.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 559 |
+
"vision_model.encoder.layers.21.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 560 |
+
"vision_model.encoder.layers.21.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 561 |
+
"vision_model.encoder.layers.21.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 562 |
+
"vision_model.encoder.layers.21.norm1.bias": "model-00001-of-00004.safetensors",
|
| 563 |
+
"vision_model.encoder.layers.21.norm1.weight": "model-00001-of-00004.safetensors",
|
| 564 |
+
"vision_model.encoder.layers.21.norm2.bias": "model-00001-of-00004.safetensors",
|
| 565 |
+
"vision_model.encoder.layers.21.norm2.weight": "model-00001-of-00004.safetensors",
|
| 566 |
+
"vision_model.encoder.layers.22.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 567 |
+
"vision_model.encoder.layers.22.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 568 |
+
"vision_model.encoder.layers.22.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 569 |
+
"vision_model.encoder.layers.22.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 570 |
+
"vision_model.encoder.layers.22.ls1": "model-00001-of-00004.safetensors",
|
| 571 |
+
"vision_model.encoder.layers.22.ls2": "model-00001-of-00004.safetensors",
|
| 572 |
+
"vision_model.encoder.layers.22.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 573 |
+
"vision_model.encoder.layers.22.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 574 |
+
"vision_model.encoder.layers.22.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 575 |
+
"vision_model.encoder.layers.22.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 576 |
+
"vision_model.encoder.layers.22.norm1.bias": "model-00001-of-00004.safetensors",
|
| 577 |
+
"vision_model.encoder.layers.22.norm1.weight": "model-00001-of-00004.safetensors",
|
| 578 |
+
"vision_model.encoder.layers.22.norm2.bias": "model-00001-of-00004.safetensors",
|
| 579 |
+
"vision_model.encoder.layers.22.norm2.weight": "model-00001-of-00004.safetensors",
|
| 580 |
+
"vision_model.encoder.layers.23.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 581 |
+
"vision_model.encoder.layers.23.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 582 |
+
"vision_model.encoder.layers.23.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 583 |
+
"vision_model.encoder.layers.23.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 584 |
+
"vision_model.encoder.layers.23.ls1": "model-00001-of-00004.safetensors",
|
| 585 |
+
"vision_model.encoder.layers.23.ls2": "model-00001-of-00004.safetensors",
|
| 586 |
+
"vision_model.encoder.layers.23.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 587 |
+
"vision_model.encoder.layers.23.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 588 |
+
"vision_model.encoder.layers.23.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 589 |
+
"vision_model.encoder.layers.23.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 590 |
+
"vision_model.encoder.layers.23.norm1.bias": "model-00001-of-00004.safetensors",
|
| 591 |
+
"vision_model.encoder.layers.23.norm1.weight": "model-00001-of-00004.safetensors",
|
| 592 |
+
"vision_model.encoder.layers.23.norm2.bias": "model-00001-of-00004.safetensors",
|
| 593 |
+
"vision_model.encoder.layers.23.norm2.weight": "model-00001-of-00004.safetensors",
|
| 594 |
+
"vision_model.encoder.layers.3.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 595 |
+
"vision_model.encoder.layers.3.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 596 |
+
"vision_model.encoder.layers.3.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 597 |
+
"vision_model.encoder.layers.3.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 598 |
+
"vision_model.encoder.layers.3.ls1": "model-00001-of-00004.safetensors",
|
| 599 |
+
"vision_model.encoder.layers.3.ls2": "model-00001-of-00004.safetensors",
|
| 600 |
+
"vision_model.encoder.layers.3.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 601 |
+
"vision_model.encoder.layers.3.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 602 |
+
"vision_model.encoder.layers.3.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 603 |
+
"vision_model.encoder.layers.3.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 604 |
+
"vision_model.encoder.layers.3.norm1.bias": "model-00001-of-00004.safetensors",
|
| 605 |
+
"vision_model.encoder.layers.3.norm1.weight": "model-00001-of-00004.safetensors",
|
| 606 |
+
"vision_model.encoder.layers.3.norm2.bias": "model-00001-of-00004.safetensors",
|
| 607 |
+
"vision_model.encoder.layers.3.norm2.weight": "model-00001-of-00004.safetensors",
|
| 608 |
+
"vision_model.encoder.layers.4.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 609 |
+
"vision_model.encoder.layers.4.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 610 |
+
"vision_model.encoder.layers.4.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 611 |
+
"vision_model.encoder.layers.4.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 612 |
+
"vision_model.encoder.layers.4.ls1": "model-00001-of-00004.safetensors",
|
| 613 |
+
"vision_model.encoder.layers.4.ls2": "model-00001-of-00004.safetensors",
|
| 614 |
+
"vision_model.encoder.layers.4.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 615 |
+
"vision_model.encoder.layers.4.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 616 |
+
"vision_model.encoder.layers.4.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 617 |
+
"vision_model.encoder.layers.4.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 618 |
+
"vision_model.encoder.layers.4.norm1.bias": "model-00001-of-00004.safetensors",
|
| 619 |
+
"vision_model.encoder.layers.4.norm1.weight": "model-00001-of-00004.safetensors",
|
| 620 |
+
"vision_model.encoder.layers.4.norm2.bias": "model-00001-of-00004.safetensors",
|
| 621 |
+
"vision_model.encoder.layers.4.norm2.weight": "model-00001-of-00004.safetensors",
|
| 622 |
+
"vision_model.encoder.layers.5.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 623 |
+
"vision_model.encoder.layers.5.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 624 |
+
"vision_model.encoder.layers.5.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 625 |
+
"vision_model.encoder.layers.5.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 626 |
+
"vision_model.encoder.layers.5.ls1": "model-00001-of-00004.safetensors",
|
| 627 |
+
"vision_model.encoder.layers.5.ls2": "model-00001-of-00004.safetensors",
|
| 628 |
+
"vision_model.encoder.layers.5.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 629 |
+
"vision_model.encoder.layers.5.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 630 |
+
"vision_model.encoder.layers.5.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 631 |
+
"vision_model.encoder.layers.5.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 632 |
+
"vision_model.encoder.layers.5.norm1.bias": "model-00001-of-00004.safetensors",
|
| 633 |
+
"vision_model.encoder.layers.5.norm1.weight": "model-00001-of-00004.safetensors",
|
| 634 |
+
"vision_model.encoder.layers.5.norm2.bias": "model-00001-of-00004.safetensors",
|
| 635 |
+
"vision_model.encoder.layers.5.norm2.weight": "model-00001-of-00004.safetensors",
|
| 636 |
+
"vision_model.encoder.layers.6.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 637 |
+
"vision_model.encoder.layers.6.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 638 |
+
"vision_model.encoder.layers.6.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 639 |
+
"vision_model.encoder.layers.6.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 640 |
+
"vision_model.encoder.layers.6.ls1": "model-00001-of-00004.safetensors",
|
| 641 |
+
"vision_model.encoder.layers.6.ls2": "model-00001-of-00004.safetensors",
|
| 642 |
+
"vision_model.encoder.layers.6.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 643 |
+
"vision_model.encoder.layers.6.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 644 |
+
"vision_model.encoder.layers.6.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 645 |
+
"vision_model.encoder.layers.6.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 646 |
+
"vision_model.encoder.layers.6.norm1.bias": "model-00001-of-00004.safetensors",
|
| 647 |
+
"vision_model.encoder.layers.6.norm1.weight": "model-00001-of-00004.safetensors",
|
| 648 |
+
"vision_model.encoder.layers.6.norm2.bias": "model-00001-of-00004.safetensors",
|
| 649 |
+
"vision_model.encoder.layers.6.norm2.weight": "model-00001-of-00004.safetensors",
|
| 650 |
+
"vision_model.encoder.layers.7.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 651 |
+
"vision_model.encoder.layers.7.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 652 |
+
"vision_model.encoder.layers.7.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 653 |
+
"vision_model.encoder.layers.7.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 654 |
+
"vision_model.encoder.layers.7.ls1": "model-00001-of-00004.safetensors",
|
| 655 |
+
"vision_model.encoder.layers.7.ls2": "model-00001-of-00004.safetensors",
|
| 656 |
+
"vision_model.encoder.layers.7.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 657 |
+
"vision_model.encoder.layers.7.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 658 |
+
"vision_model.encoder.layers.7.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 659 |
+
"vision_model.encoder.layers.7.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 660 |
+
"vision_model.encoder.layers.7.norm1.bias": "model-00001-of-00004.safetensors",
|
| 661 |
+
"vision_model.encoder.layers.7.norm1.weight": "model-00001-of-00004.safetensors",
|
| 662 |
+
"vision_model.encoder.layers.7.norm2.bias": "model-00001-of-00004.safetensors",
|
| 663 |
+
"vision_model.encoder.layers.7.norm2.weight": "model-00001-of-00004.safetensors",
|
| 664 |
+
"vision_model.encoder.layers.8.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 665 |
+
"vision_model.encoder.layers.8.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 666 |
+
"vision_model.encoder.layers.8.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 667 |
+
"vision_model.encoder.layers.8.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 668 |
+
"vision_model.encoder.layers.8.ls1": "model-00001-of-00004.safetensors",
|
| 669 |
+
"vision_model.encoder.layers.8.ls2": "model-00001-of-00004.safetensors",
|
| 670 |
+
"vision_model.encoder.layers.8.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 671 |
+
"vision_model.encoder.layers.8.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 672 |
+
"vision_model.encoder.layers.8.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 673 |
+
"vision_model.encoder.layers.8.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 674 |
+
"vision_model.encoder.layers.8.norm1.bias": "model-00001-of-00004.safetensors",
|
| 675 |
+
"vision_model.encoder.layers.8.norm1.weight": "model-00001-of-00004.safetensors",
|
| 676 |
+
"vision_model.encoder.layers.8.norm2.bias": "model-00001-of-00004.safetensors",
|
| 677 |
+
"vision_model.encoder.layers.8.norm2.weight": "model-00001-of-00004.safetensors",
|
| 678 |
+
"vision_model.encoder.layers.9.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 679 |
+
"vision_model.encoder.layers.9.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 680 |
+
"vision_model.encoder.layers.9.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 681 |
+
"vision_model.encoder.layers.9.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 682 |
+
"vision_model.encoder.layers.9.ls1": "model-00001-of-00004.safetensors",
|
| 683 |
+
"vision_model.encoder.layers.9.ls2": "model-00001-of-00004.safetensors",
|
| 684 |
+
"vision_model.encoder.layers.9.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
| 685 |
+
"vision_model.encoder.layers.9.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
| 686 |
+
"vision_model.encoder.layers.9.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
| 687 |
+
"vision_model.encoder.layers.9.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
| 688 |
+
"vision_model.encoder.layers.9.norm1.bias": "model-00001-of-00004.safetensors",
|
| 689 |
+
"vision_model.encoder.layers.9.norm1.weight": "model-00001-of-00004.safetensors",
|
| 690 |
+
"vision_model.encoder.layers.9.norm2.bias": "model-00001-of-00004.safetensors",
|
| 691 |
+
"vision_model.encoder.layers.9.norm2.weight": "model-00001-of-00004.safetensors"
|
| 692 |
+
}
|
| 693 |
+
}
|
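The `weight_map` above is the lookup a loader consults to find which shard file holds each tensor. Below is a minimal sketch of resolving one parameter by hand; the directory path is a placeholder for a local checkout of this folder, and the parameter name is taken from the entries above:

import json
from safetensors.torch import load_file

ckpt_dir = 'ood/internvl3-8b-instruct-lora-ood-210'  # placeholder: local copy of this folder
with open(f'{ckpt_dir}/model.safetensors.index.json') as f:
    index = json.load(f)

name = 'vision_model.encoder.layers.11.attn.qkv.weight'
shard = index['weight_map'][name]                  # -> 'model-00001-of-00004.safetensors'
tensor = load_file(f'{ckpt_dir}/{shard}')[name]    # load only that shard, then pick the tensor
print(name, tuple(tensor.shape))

In practice `from_pretrained` performs this shard resolution automatically; the manual route is mainly useful for inspecting individual tensors without materializing the whole model.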
ood/internvl3-8b-instruct-lora-ood-210/modeling_intern_vit.py ADDED
@@ -0,0 +1,431 @@
# --------------------------------------------------------
# InternVL
# Copyright (c) 2024 OpenGVLab
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------

from typing import Optional, Tuple, Union

import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from einops import rearrange
from timm.layers import DropPath
from torch import nn
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (BaseModelOutput,
                                           BaseModelOutputWithPooling)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging

from .configuration_intern_vit import InternVisionConfig

try:
    from flash_attn.bert_padding import pad_input, unpad_input
    from flash_attn.flash_attn_interface import \
        flash_attn_varlen_qkvpacked_func
    has_flash_attn = True
except:
    print('FlashAttention2 is not installed.')
    has_flash_attn = False

logger = logging.get_logger(__name__)


class FlashAttention(nn.Module):
    """Implement the scaled dot product attention with softmax.
    Arguments
    ---------
        softmax_scale: The temperature to use for the softmax attention.
                      (default: 1/sqrt(d_keys) where d_keys is computed at
                      runtime)
        attention_dropout: The dropout rate to apply to the attention
                           (default: 0.0)
    """

    def __init__(self, softmax_scale=None, attention_dropout=0.0, device=None, dtype=None):
        super().__init__()
        self.softmax_scale = softmax_scale
        self.dropout_p = attention_dropout

    def forward(self, qkv, key_padding_mask=None, causal=False, cu_seqlens=None,
                max_s=None, need_weights=False):
        """Implements the multihead softmax attention.
        Arguments
        ---------
            qkv: The tensor containing the query, key, and value. (B, S, 3, H, D) if key_padding_mask is None
                if unpadded: (nnz, 3, h, d)
            key_padding_mask: a bool tensor of shape (B, S)
        """
        assert not need_weights
        assert qkv.dtype in [torch.float16, torch.bfloat16]
        assert qkv.is_cuda

        if cu_seqlens is None:
            batch_size = qkv.shape[0]
            seqlen = qkv.shape[1]
            if key_padding_mask is None:
                qkv = rearrange(qkv, 'b s ... -> (b s) ...')
                max_s = seqlen
                cu_seqlens = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32,
                                          device=qkv.device)
                output = flash_attn_varlen_qkvpacked_func(
                    qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
                    softmax_scale=self.softmax_scale, causal=causal
                )
                output = rearrange(output, '(b s) ... -> b s ...', b=batch_size)
            else:
                nheads = qkv.shape[-2]
                x = rearrange(qkv, 'b s three h d -> b s (three h d)')
                x_unpad, indices, cu_seqlens, max_s = unpad_input(x, key_padding_mask)
                x_unpad = rearrange(x_unpad, 'nnz (three h d) -> nnz three h d', three=3, h=nheads)
                output_unpad = flash_attn_varlen_qkvpacked_func(
                    x_unpad, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
                    softmax_scale=self.softmax_scale, causal=causal
                )
                output = rearrange(pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'),
                                             indices, batch_size, seqlen),
                                   'b s (h d) -> b s h d', h=nheads)
        else:
            assert max_s is not None
            output = flash_attn_varlen_qkvpacked_func(
                qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
                softmax_scale=self.softmax_scale, causal=causal
            )

        return output, None


class InternRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)


try:
    from apex.normalization import FusedRMSNorm

    InternRMSNorm = FusedRMSNorm  # noqa

    logger.info('Discovered apex.normalization.FusedRMSNorm - will use it instead of InternRMSNorm')
except ImportError:
    # using the normal InternRMSNorm
    pass
except Exception:
    logger.warning('discovered apex but it failed to load, falling back to InternRMSNorm')
    pass


NORM2FN = {
    'rms_norm': InternRMSNorm,
    'layer_norm': nn.LayerNorm,
}


class InternVisionEmbeddings(nn.Module):
    def __init__(self, config: InternVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size

        self.class_embedding = nn.Parameter(
            torch.randn(1, 1, self.embed_dim),
        )

        self.patch_embedding = nn.Conv2d(
            in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size
        )

        self.num_patches = (self.image_size // self.patch_size) ** 2
        self.num_positions = self.num_patches + 1

        self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim))

    def _get_pos_embed(self, pos_embed, H, W):
        target_dtype = pos_embed.dtype
        pos_embed = pos_embed.float().reshape(
            1, self.image_size // self.patch_size, self.image_size // self.patch_size, -1).permute(0, 3, 1, 2)
        pos_embed = F.interpolate(pos_embed, size=(H, W), mode='bicubic', align_corners=False). \
            reshape(1, -1, H * W).permute(0, 2, 1).to(target_dtype)
        return pos_embed

    def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
        target_dtype = self.patch_embedding.weight.dtype
        patch_embeds = self.patch_embedding(pixel_values)  # shape = [*, channel, width, height]
        batch_size, _, height, width = patch_embeds.shape
        patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
        class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype)
        embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
        position_embedding = torch.cat([
            self.position_embedding[:, :1, :],
            self._get_pos_embed(self.position_embedding[:, 1:, :], height, width)
        ], dim=1)
        embeddings = embeddings + position_embedding.to(target_dtype)
        return embeddings


class InternAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: InternVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.use_flash_attn = config.use_flash_attn and has_flash_attn
        if config.use_flash_attn and not has_flash_attn:
            print('Warning: Flash Attention is not available, use_flash_attn is set to False.')
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:'
                f' {self.num_heads}).'
            )

        self.scale = self.head_dim ** -0.5
        self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=config.qkv_bias)
        self.attn_drop = nn.Dropout(config.attention_dropout)
        self.proj_drop = nn.Dropout(config.dropout)

        self.qk_normalization = config.qk_normalization

        if self.qk_normalization:
            self.q_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
            self.k_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)

        if self.use_flash_attn:
            self.inner_attn = FlashAttention(attention_dropout=config.attention_dropout)
        self.proj = nn.Linear(self.embed_dim, self.embed_dim)

    def _naive_attn(self, x):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)  # make torchscript happy (cannot use tensor as tuple)

        if self.qk_normalization:
            B_, H_, N_, D_ = q.shape
            q = self.q_norm(q.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)
            k = self.k_norm(k.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)

        attn = ((q * self.scale) @ k.transpose(-2, -1))
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x

    def _flash_attn(self, x, key_padding_mask=None, need_weights=False):
        qkv = self.qkv(x)
        qkv = rearrange(qkv, 'b s (three h d) -> b s three h d', three=3, h=self.num_heads)

        if self.qk_normalization:
            q, k, v = qkv.unbind(2)
            q = self.q_norm(q.flatten(-2, -1)).view(q.shape)
            k = self.k_norm(k.flatten(-2, -1)).view(k.shape)
            qkv = torch.stack([q, k, v], dim=2)

        context, _ = self.inner_attn(
            qkv, key_padding_mask=key_padding_mask, need_weights=need_weights, causal=False
        )
        outs = self.proj(rearrange(context, 'b s h d -> b s (h d)'))
        outs = self.proj_drop(outs)
        return outs

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        x = self._naive_attn(hidden_states) if not self.use_flash_attn else self._flash_attn(hidden_states)
        return x


class InternMLP(nn.Module):
    def __init__(self, config: InternVisionConfig):
        super().__init__()
        self.config = config
        self.act = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states


class InternVisionEncoderLayer(nn.Module):
    def __init__(self, config: InternVisionConfig, drop_path_rate: float):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.norm_type = config.norm_type

        self.attn = InternAttention(config)
        self.mlp = InternMLP(config)
        self.norm1 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)
        self.norm2 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)

        self.ls1 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
        self.ls2 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
        self.drop_path1 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
        self.drop_path2 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()

    def forward(
            self,
            hidden_states: torch.Tensor,
    ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor], Optional[Tuple[torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]`): input to the layer of shape `(batch, seq_len, embed_dim)`
        """
        hidden_states = hidden_states + self.drop_path1(self.attn(self.norm1(hidden_states).to(hidden_states.dtype)) * self.ls1)

        hidden_states = hidden_states + self.drop_path2(self.mlp(self.norm2(hidden_states).to(hidden_states.dtype)) * self.ls2)

        return hidden_states


class InternVisionEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`InternEncoderLayer`].

    Args:
        config (`InternConfig`):
            The corresponding vision configuration for the `InternEncoder`.
    """

    def __init__(self, config: InternVisionConfig):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers)]
        self.layers = nn.ModuleList([
            InternVisionEncoderLayer(config, dpr[idx]) for idx in range(config.num_hidden_layers)])
        self.gradient_checkpointing = True

    def forward(
            self,
            inputs_embeds,
            output_hidden_states: Optional[bool] = None,
            return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Embedded representation of the inputs. Should be float, not int tokens.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_states = () if output_hidden_states else None
        hidden_states = inputs_embeds

        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    encoder_layer,
                    hidden_states)
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                )
            hidden_states = layer_outputs

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states
        )


class InternVisionModel(PreTrainedModel):
    main_input_name = 'pixel_values'
    _supports_flash_attn_2 = True
    supports_gradient_checkpointing = True
    config_class = InternVisionConfig
    _no_split_modules = ['InternVisionEncoderLayer']

    def __init__(self, config: InternVisionConfig):
        super().__init__(config)
        self.config = config

        self.embeddings = InternVisionEmbeddings(config)
        self.encoder = InternVisionEncoder(config)

    def resize_pos_embeddings(self, old_size, new_size, patch_size):
        pos_emb = self.embeddings.position_embedding
        _, num_positions, embed_dim = pos_emb.shape
        cls_emb = pos_emb[:, :1, :]
        pos_emb = pos_emb[:, 1:, :].reshape(1, old_size // patch_size, old_size // patch_size, -1).permute(0, 3, 1, 2)
        pos_emb = F.interpolate(pos_emb.float(), size=new_size // patch_size, mode='bicubic', align_corners=False)
        pos_emb = pos_emb.to(cls_emb.dtype).reshape(1, embed_dim, -1).permute(0, 2, 1)
        pos_emb = torch.cat([cls_emb, pos_emb], dim=1)
        self.embeddings.position_embedding = nn.Parameter(pos_emb)
        self.embeddings.image_size = new_size
        logger.info('Resized position embeddings from {} to {}'.format(old_size, new_size))

    def get_input_embeddings(self):
        return self.embeddings

    def forward(
            self,
            pixel_values: Optional[torch.FloatTensor] = None,
            output_hidden_states: Optional[bool] = None,
            return_dict: Optional[bool] = None,
            pixel_embeds: Optional[torch.FloatTensor] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None and pixel_embeds is None:
            raise ValueError('You have to specify pixel_values or pixel_embeds')

        if pixel_embeds is not None:
            hidden_states = pixel_embeds
        else:
            if len(pixel_values.shape) == 4:
                hidden_states = self.embeddings(pixel_values)
            else:
                raise ValueError(f'wrong pixel_values size: {pixel_values.shape}')
        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        last_hidden_state = encoder_outputs.last_hidden_state
        pooled_output = last_hidden_state[:, 0, :]

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
ood/internvl3-8b-instruct-lora-ood-210/modeling_internvl_chat.py ADDED
@@ -0,0 +1,359 @@
| 1 |
+
# --------------------------------------------------------
|
| 2 |
+
# InternVL
|
| 3 |
+
# Copyright (c) 2024 OpenGVLab
|
| 4 |
+
# Licensed under The MIT License [see LICENSE for details]
|
| 5 |
+
# --------------------------------------------------------
|
| 6 |
+
|
| 7 |
+
import warnings
|
| 8 |
+
from typing import List, Optional, Tuple, Union
|
| 9 |
+
|
| 10 |
+
import torch.utils.checkpoint
|
| 11 |
+
import transformers
|
| 12 |
+
from torch import nn
|
| 13 |
+
from torch.nn import CrossEntropyLoss
|
| 14 |
+
from transformers import (AutoModel, GenerationConfig, LlamaForCausalLM,
|
| 15 |
+
Qwen2ForCausalLM)
|
| 16 |
+
from transformers.modeling_outputs import CausalLMOutputWithPast
|
| 17 |
+
from transformers.modeling_utils import PreTrainedModel
|
| 18 |
+
from transformers.utils import ModelOutput, logging
|
| 19 |
+
|
| 20 |
+
from .configuration_internvl_chat import InternVLChatConfig
|
| 21 |
+
from .conversation import get_conv_template
|
| 22 |
+
from .modeling_intern_vit import InternVisionModel, has_flash_attn
|
| 23 |
+
|
| 24 |
+
logger = logging.get_logger(__name__)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def version_cmp(v1, v2, op='eq'):
|
| 28 |
+
import operator
|
| 29 |
+
|
| 30 |
+
from packaging import version
|
| 31 |
+
op_func = getattr(operator, op)
|
| 32 |
+
return op_func(version.parse(v1), version.parse(v2))
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
class InternVLChatModel(PreTrainedModel):
|
| 36 |
+
config_class = InternVLChatConfig
|
| 37 |
+
main_input_name = 'pixel_values'
|
| 38 |
+
base_model_prefix = 'language_model'
|
| 39 |
+
_supports_flash_attn_2 = True
|
| 40 |
+
supports_gradient_checkpointing = True
|
| 41 |
+
_no_split_modules = ['InternVisionModel', 'LlamaDecoderLayer', 'Qwen2DecoderLayer']
|
| 42 |
+
|
| 43 |
+
def __init__(self, config: InternVLChatConfig, vision_model=None, language_model=None, use_flash_attn=True):
|
| 44 |
+
super().__init__(config)
|
| 45 |
+
|
| 46 |
+
assert version_cmp(transformers.__version__, '4.37.0', 'ge')
|
| 47 |
+
image_size = config.force_image_size or config.vision_config.image_size
|
| 48 |
+
patch_size = config.vision_config.patch_size
|
| 49 |
+
self.patch_size = patch_size
|
| 50 |
+
self.select_layer = config.select_layer
|
| 51 |
+
self.template = config.template
|
| 52 |
+
self.num_image_token = int((image_size // patch_size) ** 2 * (config.downsample_ratio ** 2))
|
| 53 |
+
self.downsample_ratio = config.downsample_ratio
|
| 54 |
+
self.ps_version = config.ps_version
|
| 55 |
+
use_flash_attn = use_flash_attn if has_flash_attn else False
|
| 56 |
+
config.vision_config.use_flash_attn = True if use_flash_attn else False
|
| 57 |
+
config.llm_config._attn_implementation = 'flash_attention_2' if use_flash_attn else 'eager'
|
| 58 |
+
|
| 59 |
+
logger.info(f'num_image_token: {self.num_image_token}')
|
| 60 |
+
logger.info(f'ps_version: {self.ps_version}')
|
| 61 |
+
if vision_model is not None:
|
| 62 |
+
self.vision_model = vision_model
|
| 63 |
+
else:
|
| 64 |
+
self.vision_model = InternVisionModel(config.vision_config)
|
| 65 |
+
if language_model is not None:
|
| 66 |
+
self.language_model = language_model
|
| 67 |
+
else:
|
| 68 |
+
if config.llm_config.architectures[0] == 'LlamaForCausalLM':
|
| 69 |
+
self.language_model = LlamaForCausalLM(config.llm_config)
|
| 70 |
+
elif config.llm_config.architectures[0] == 'Qwen2ForCausalLM':
|
| 71 |
+
self.language_model = Qwen2ForCausalLM(config.llm_config)
|
| 72 |
+
else:
|
| 73 |
+
raise NotImplementedError(f'{config.llm_config.architectures[0]} is not implemented.')
|
| 74 |
+
|
| 75 |
+
vit_hidden_size = config.vision_config.hidden_size
|
| 76 |
+
llm_hidden_size = config.llm_config.hidden_size
|
| 77 |
+
|
| 78 |
+
self.mlp1 = nn.Sequential(
|
| 79 |
+
nn.LayerNorm(vit_hidden_size * int(1 / self.downsample_ratio) ** 2),
|
| 80 |
+
nn.Linear(vit_hidden_size * int(1 / self.downsample_ratio) ** 2, llm_hidden_size),
|
| 81 |
+
nn.GELU(),
|
| 82 |
+
nn.Linear(llm_hidden_size, llm_hidden_size)
|
| 83 |
+
)
|
| 84 |
+
|
| 85 |
+
self.img_context_token_id = None
|
| 86 |
+
self.conv_template = get_conv_template(self.template)
|
| 87 |
+
self.system_message = self.conv_template.system_message
|
| 88 |
+
|
| 89 |
+
def forward(
|
| 90 |
+
self,
|
| 91 |
+
pixel_values: torch.FloatTensor,
|
| 92 |
+
input_ids: torch.LongTensor = None,
|
| 93 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 94 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 95 |
+
image_flags: Optional[torch.LongTensor] = None,
|
| 96 |
+
past_key_values: Optional[List[torch.FloatTensor]] = None,
|
| 97 |
+
labels: Optional[torch.LongTensor] = None,
|
| 98 |
+
use_cache: Optional[bool] = None,
|
| 99 |
+
output_attentions: Optional[bool] = None,
|
| 100 |
+
output_hidden_states: Optional[bool] = None,
|
| 101 |
+
return_dict: Optional[bool] = None,
|
| 102 |
+
) -> Union[Tuple, CausalLMOutputWithPast]:
|
| 103 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 104 |
+
|
| 105 |
+
image_flags = image_flags.squeeze(-1)
|
| 106 |
+
input_embeds = self.language_model.get_input_embeddings()(input_ids).clone()
|
| 107 |
+
|
| 108 |
+
vit_embeds = self.extract_feature(pixel_values)
|
| 109 |
+
vit_embeds = vit_embeds[image_flags == 1]
|
| 110 |
+
vit_batch_size = pixel_values.shape[0]
|
| 111 |
+
|
| 112 |
+
B, N, C = input_embeds.shape
|
| 113 |
+
input_embeds = input_embeds.reshape(B * N, C)
|
| 114 |
+
|
| 115 |
+
if torch.distributed.is_initialized() and torch.distributed.get_rank() == 0:
|
| 116 |
+
print(f'dynamic ViT batch size: {vit_batch_size}, images per sample: {vit_batch_size / B}, dynamic token length: {N}')
|
| 117 |
+
|
| 118 |
+
input_ids = input_ids.reshape(B * N)
|
| 119 |
+
selected = (input_ids == self.img_context_token_id)
|
| 120 |
+
try:
|
| 121 |
+
input_embeds[selected] = input_embeds[selected] * 0.0 + vit_embeds.reshape(-1, C)
|
| 122 |
+
except Exception as e:
|
| 123 |
+
vit_embeds = vit_embeds.reshape(-1, C)
|
| 124 |
+
print(f'warning: {e}, input_embeds[selected].shape={input_embeds[selected].shape}, '
|
| 125 |
+
f'vit_embeds.shape={vit_embeds.shape}')
|
| 126 |
+
n_token = min(selected.sum(), vit_embeds.size(0))
|
| 127 |
+
input_embeds[selected][:n_token] = input_embeds[selected][:n_token] * 0.0 + vit_embeds[:n_token]
|
| 128 |
+
|
| 129 |
+
input_embeds = input_embeds.reshape(B, N, C)
|
| 130 |
+
|
| 131 |
+
outputs = self.language_model(
|
| 132 |
+
inputs_embeds=input_embeds,
|
| 133 |
+
attention_mask=attention_mask,
|
| 134 |
+
position_ids=position_ids,
|
| 135 |
+
past_key_values=past_key_values,
|
| 136 |
+
use_cache=use_cache,
|
| 137 |
+
output_attentions=output_attentions,
|
| 138 |
+
output_hidden_states=output_hidden_states,
|
| 139 |
+
return_dict=return_dict,
|
| 140 |
+
)
|
| 141 |
+
logits = outputs.logits
|
| 142 |
+
|
| 143 |
+
loss = None
|
| 144 |
+
if labels is not None:
|
| 145 |
+
# Shift so that tokens < n predict n
|
| 146 |
+
shift_logits = logits[..., :-1, :].contiguous()
|
| 147 |
+
shift_labels = labels[..., 1:].contiguous()
|
| 148 |
+
# Flatten the tokens
|
| 149 |
+
loss_fct = CrossEntropyLoss()
|
| 150 |
+
shift_logits = shift_logits.view(-1, self.language_model.config.vocab_size)
|
| 151 |
+
shift_labels = shift_labels.view(-1)
|
| 152 |
+
# Enable model parallelism
|
| 153 |
+
shift_labels = shift_labels.to(shift_logits.device)
|
| 154 |
+
loss = loss_fct(shift_logits, shift_labels)
|
| 155 |
+
|
| 156 |
+
if not return_dict:
|
| 157 |
+
output = (logits,) + outputs[1:]
|
| 158 |
+
return (loss,) + output if loss is not None else output
|
| 159 |
+
|
| 160 |
+
return CausalLMOutputWithPast(
|
| 161 |
+
loss=loss,
|
| 162 |
+
logits=logits,
|
| 163 |
+
past_key_values=outputs.past_key_values,
|
| 164 |
+
hidden_states=outputs.hidden_states,
|
| 165 |
+
attentions=outputs.attentions,
|
| 166 |
+
)
|
| 167 |
+
|
| 168 |
+
    def pixel_shuffle(self, x, scale_factor=0.5):
        n, w, h, c = x.size()
        # N, W, H, C --> N, W, H * scale, C // scale
        x = x.view(n, w, int(h * scale_factor), int(c / scale_factor))
        # N, W, H * scale, C // scale --> N, H * scale, W, C // scale
        x = x.permute(0, 2, 1, 3).contiguous()
        # N, H * scale, W, C // scale --> N, H * scale, W * scale, C // (scale ** 2)
        x = x.view(n, int(h * scale_factor), int(w * scale_factor),
                   int(c / (scale_factor * scale_factor)))
        if self.ps_version == 'v1':
            warnings.warn("In ps_version 'v1', the height and width have not been swapped back, "
                          'which results in a transposed image.')
        else:
            x = x.permute(0, 2, 1, 3).contiguous()
        return x
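
As an aside, a minimal standalone sketch (not part of this checkpoint's code) of what the pixel_shuffle above does to tensor shapes, assuming the default scale_factor=0.5 and the ps_version != 'v1' branch:

    import torch

    def pixel_shuffle(x, scale_factor=0.5):
        n, w, h, c = x.size()
        x = x.view(n, w, int(h * scale_factor), int(c / scale_factor))
        x = x.permute(0, 2, 1, 3).contiguous()
        x = x.view(n, int(h * scale_factor), int(w * scale_factor),
                   int(c / (scale_factor * scale_factor)))
        return x.permute(0, 2, 1, 3).contiguous()

    x = torch.randn(1, 32, 32, 1024)   # 32x32 ViT patch grid, 1024 channels
    print(pixel_shuffle(x).shape)      # torch.Size([1, 16, 16, 4096])

Each 2x2 neighborhood of patch features is folded into the channel dimension, quartering the token count while quadrupling the channel width.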

    def extract_feature(self, pixel_values):
        if self.select_layer == -1:
            vit_embeds = self.vision_model(
                pixel_values=pixel_values,
                output_hidden_states=False,
                return_dict=True).last_hidden_state
        else:
            vit_embeds = self.vision_model(
                pixel_values=pixel_values,
                output_hidden_states=True,
                return_dict=True).hidden_states[self.select_layer]
        vit_embeds = vit_embeds[:, 1:, :]

        h = w = int(vit_embeds.shape[1] ** 0.5)
        vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], h, w, -1)
        vit_embeds = self.pixel_shuffle(vit_embeds, scale_factor=self.downsample_ratio)
        vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], -1, vit_embeds.shape[-1])
        vit_embeds = self.mlp1(vit_embeds)
        return vit_embeds
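
For intuition, the token arithmetic behind extract_feature, under the assumptions that tiles are 448x448 (matching the preprocessor_config.json below), the ViT uses 14-pixel patches, and downsample_ratio=0.5:

    patches_per_side = 448 // 14                 # 32
    vit_tokens = patches_per_side ** 2           # 1024 patch tokens (CLS token dropped above)
    after_shuffle = int(vit_tokens * 0.5 ** 2)   # 256 tokens per tile fed into mlp1
    print(patches_per_side, vit_tokens, after_shuffle)  # 32 1024 256

So each image tile would contribute 256 visual embeddings, which is what num_image_token in the chat helpers below gets multiplied by num_patches.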

    def batch_chat(self, tokenizer, pixel_values, questions, generation_config, num_patches_list=None,
                   history=None, return_history=False, IMG_START_TOKEN='<img>', IMG_END_TOKEN='</img>',
                   IMG_CONTEXT_TOKEN='<IMG_CONTEXT>', verbose=False, image_counts=None):
        if history is not None or return_history:
            print('Now multi-turn chat is not supported in batch_chat.')
            raise NotImplementedError

        if image_counts is not None:
            num_patches_list = image_counts
            print('Warning: `image_counts` is deprecated. Please use `num_patches_list` instead.')

        img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
        self.img_context_token_id = img_context_token_id

        if verbose and pixel_values is not None:
            image_bs = pixel_values.shape[0]
            print(f'dynamic ViT batch size: {image_bs}')

        queries = []
        for idx, num_patches in enumerate(num_patches_list):
            question = questions[idx]
            if pixel_values is not None and '<image>' not in question:
                question = '<image>\n' + question
            template = get_conv_template(self.template)
            template.system_message = self.system_message
            template.append_message(template.roles[0], question)
            template.append_message(template.roles[1], None)
            query = template.get_prompt()

            image_tokens = IMG_START_TOKEN + IMG_CONTEXT_TOKEN * self.num_image_token * num_patches + IMG_END_TOKEN
            query = query.replace('<image>', image_tokens, 1)
            queries.append(query)

        tokenizer.padding_side = 'left'
        model_inputs = tokenizer(queries, return_tensors='pt', padding=True)
        input_ids = model_inputs['input_ids'].to(self.device)
        attention_mask = model_inputs['attention_mask'].to(self.device)
        eos_token_id = tokenizer.convert_tokens_to_ids(template.sep.strip())
        generation_config['eos_token_id'] = eos_token_id
        generation_output = self.generate(
            pixel_values=pixel_values,
            input_ids=input_ids,
            attention_mask=attention_mask,
            **generation_config
        )
        responses = tokenizer.batch_decode(generation_output, skip_special_tokens=True)
        responses = [response.split(template.sep.strip())[0].strip() for response in responses]
        return responses

    def chat(self, tokenizer, pixel_values, question, generation_config, history=None, return_history=False,
             num_patches_list=None, IMG_START_TOKEN='<img>', IMG_END_TOKEN='</img>', IMG_CONTEXT_TOKEN='<IMG_CONTEXT>',
             verbose=False):

        if history is None and pixel_values is not None and '<image>' not in question:
            question = '<image>\n' + question

        if num_patches_list is None:
            num_patches_list = [pixel_values.shape[0]] if pixel_values is not None else []
        assert pixel_values is None or len(pixel_values) == sum(num_patches_list)

        img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
        self.img_context_token_id = img_context_token_id

        template = get_conv_template(self.template)
        template.system_message = self.system_message
        eos_token_id = tokenizer.convert_tokens_to_ids(template.sep.strip())

        history = [] if history is None else history
        for (old_question, old_answer) in history:
            template.append_message(template.roles[0], old_question)
            template.append_message(template.roles[1], old_answer)
        template.append_message(template.roles[0], question)
        template.append_message(template.roles[1], None)
        query = template.get_prompt()

        if verbose and pixel_values is not None:
            image_bs = pixel_values.shape[0]
            print(f'dynamic ViT batch size: {image_bs}')

        for num_patches in num_patches_list:
            image_tokens = IMG_START_TOKEN + IMG_CONTEXT_TOKEN * self.num_image_token * num_patches + IMG_END_TOKEN
            query = query.replace('<image>', image_tokens, 1)

        model_inputs = tokenizer(query, return_tensors='pt')
        input_ids = model_inputs['input_ids'].to(self.device)
        attention_mask = model_inputs['attention_mask'].to(self.device)
        generation_config['eos_token_id'] = eos_token_id
        generation_output = self.generate(
            pixel_values=pixel_values,
            input_ids=input_ids,
            attention_mask=attention_mask,
            **generation_config
        )
        response = tokenizer.batch_decode(generation_output, skip_special_tokens=True)[0]
        response = response.split(template.sep.strip())[0].strip()
        history.append((question, response))
        if return_history:
            return response, history
        else:
            query_to_print = query.replace(IMG_CONTEXT_TOKEN, '')
            query_to_print = query_to_print.replace(f'{IMG_START_TOKEN}{IMG_END_TOKEN}', '<image>')
            if verbose:
                print(query_to_print, response)
            return response

    @torch.no_grad()
    def generate(
            self,
            pixel_values: Optional[torch.FloatTensor] = None,
            input_ids: Optional[torch.FloatTensor] = None,
            attention_mask: Optional[torch.LongTensor] = None,
            visual_features: Optional[torch.FloatTensor] = None,
            generation_config: Optional[GenerationConfig] = None,
            output_hidden_states: Optional[bool] = None,
            **generate_kwargs,
    ) -> torch.LongTensor:

        assert self.img_context_token_id is not None
        if pixel_values is not None:
            if visual_features is not None:
                vit_embeds = visual_features
            else:
                vit_embeds = self.extract_feature(pixel_values)
            input_embeds = self.language_model.get_input_embeddings()(input_ids)
            B, N, C = input_embeds.shape
            input_embeds = input_embeds.reshape(B * N, C)

            input_ids = input_ids.reshape(B * N)
            selected = (input_ids == self.img_context_token_id)
            assert selected.sum() != 0
            input_embeds[selected] = vit_embeds.reshape(-1, C).to(input_embeds.device)

            input_embeds = input_embeds.reshape(B, N, C)
        else:
            input_embeds = self.language_model.get_input_embeddings()(input_ids)

        outputs = self.language_model.generate(
            inputs_embeds=input_embeds,
            attention_mask=attention_mask,
            generation_config=generation_config,
            output_hidden_states=output_hidden_states,
            use_cache=True,
            **generate_kwargs,
        )

        return outputs

    @property
    def lm_head(self):
        return self.language_model.get_output_embeddings()

    def get_input_embeddings(self):
        return self.language_model.get_input_embeddings()

    def get_output_embeddings(self):
        return self.language_model.get_output_embeddings()
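
A hypothetical end-to-end sketch of driving the chat() API defined above; the checkpoint directory and the random pixel_values stand in for a real image pipeline and are assumptions, not part of this repository:

    import torch
    from transformers import AutoModel, AutoTokenizer

    path = 'ood/internvl3-8b-instruct-lora-ood-210'  # assumed local checkout
    tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
    model = AutoModel.from_pretrained(path, torch_dtype=torch.bfloat16,
                                      trust_remote_code=True).eval().cuda()

    pixel_values = torch.randn(1, 3, 448, 448, dtype=torch.bfloat16).cuda()  # placeholder tile
    response = model.chat(tokenizer, pixel_values, 'Describe the image.',
                          generation_config=dict(max_new_tokens=64, do_sample=False))
    print(response)

Note that chat() prepends '<image>\n' to the question itself when no history is given, then expands each '<image>' into an <img>...<IMG_CONTEXT>...</img> span of num_image_token * num_patches context tokens before tokenization.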
ood/internvl3-8b-instruct-lora-ood-210/preprocessor_config.json
ADDED
@@ -0,0 +1,19 @@
{
  "crop_size": 448,
  "do_center_crop": true,
  "do_normalize": true,
  "do_resize": true,
  "feature_extractor_type": "CLIPFeatureExtractor",
  "image_mean": [
    0.485,
    0.456,
    0.406
  ],
  "image_std": [
    0.229,
    0.224,
    0.225
  ],
  "resample": 3,
  "size": 448
}
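These fields describe a standard CLIP-style pipeline; a rough torchvision equivalent (an approximation, not the exact CLIPFeatureExtractor code; resample=3 is PIL's BICUBIC):

    from torchvision import transforms

    preprocess = transforms.Compose([
        transforms.Resize(448, interpolation=transforms.InterpolationMode.BICUBIC),
        transforms.CenterCrop(448),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],   # image_mean above
                             std=[0.229, 0.224, 0.225]),   # image_std above
    ])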
ood/internvl3-8b-instruct-lora-ood-210/special_tokens_map.json
ADDED
@@ -0,0 +1,31 @@
{
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>",
    "<|object_ref_start|>",
    "<|object_ref_end|>",
    "<|box_start|>",
    "<|box_end|>",
    "<|quad_start|>",
    "<|quad_end|>",
    "<|vision_start|>",
    "<|vision_end|>",
    "<|vision_pad|>",
    "<|image_pad|>",
    "<|video_pad|>"
  ],
  "eos_token": {
    "content": "<|im_end|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
ood/internvl3-8b-instruct-lora-ood-210/tokenizer_config.json
ADDED
@@ -0,0 +1,280 @@
{
  "add_bos_token": false,
  "add_eos_token": false,
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "151643": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151644": {
      "content": "<|im_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151645": {
      "content": "<|im_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151646": {
      "content": "<|object_ref_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151647": {
      "content": "<|object_ref_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151648": {
      "content": "<|box_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151649": {
      "content": "<|box_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151650": {
      "content": "<|quad_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151651": {
      "content": "<|quad_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151652": {
      "content": "<|vision_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151653": {
      "content": "<|vision_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151654": {
      "content": "<|vision_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151655": {
      "content": "<|image_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151656": {
      "content": "<|video_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151657": {
      "content": "<tool_call>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151658": {
      "content": "</tool_call>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151659": {
      "content": "<|fim_prefix|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151660": {
      "content": "<|fim_middle|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151661": {
      "content": "<|fim_suffix|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151662": {
      "content": "<|fim_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151663": {
      "content": "<|repo_name|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151664": {
      "content": "<|file_sep|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151665": {
      "content": "<img>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151666": {
      "content": "</img>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151667": {
      "content": "<IMG_CONTEXT>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151668": {
      "content": "<quad>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151669": {
      "content": "</quad>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151670": {
      "content": "<ref>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151671": {
      "content": "</ref>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151672": {
      "content": "<box>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151673": {
      "content": "</box>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>",
    "<|object_ref_start|>",
    "<|object_ref_end|>",
    "<|box_start|>",
    "<|box_end|>",
    "<|quad_start|>",
    "<|quad_end|>",
    "<|vision_start|>",
    "<|vision_end|>",
    "<|vision_pad|>",
    "<|image_pad|>",
    "<|video_pad|>"
  ],
  "bos_token": null,
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|im_end|>",
  "errors": "replace",
  "extra_special_tokens": {},
  "model_max_length": 1000000,
  "pad_token": "<|endoftext|>",
  "split_special_tokens": false,
  "tokenizer_class": "Qwen2Tokenizer",
  "unk_token": null
}
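Per added_tokens_decoder above, the InternVL-specific image tokens sit just past the base Qwen2 vocabulary, at ids 151665-151673. A quick check (the directory path is an assumption about where this repo is checked out):

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained('ood/internvl3-8b-instruct-lora-ood-210',
                                        trust_remote_code=True)
    print(tok.convert_tokens_to_ids('<IMG_CONTEXT>'))  # expected: 151667
    print(tok.convert_tokens_to_ids('<img>'),
          tok.convert_tokens_to_ids('</img>'))         # expected: 151665 151666

This is the same lookup that chat() and batch_chat() perform to set self.img_context_token_id.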
ood/internvl3-8b-instruct-lora-ood-210/vocab.json
ADDED
The diff for this file is too large to render.
See raw diff
qwen2.5vl-7b-lora_epoch10_2e-5/added_tokens.json
ADDED
@@ -0,0 +1,24 @@
{
  "</tool_call>": 151658,
  "<tool_call>": 151657,
  "<|box_end|>": 151649,
  "<|box_start|>": 151648,
  "<|endoftext|>": 151643,
  "<|file_sep|>": 151664,
  "<|fim_middle|>": 151660,
  "<|fim_pad|>": 151662,
  "<|fim_prefix|>": 151659,
  "<|fim_suffix|>": 151661,
  "<|im_end|>": 151645,
  "<|im_start|>": 151644,
  "<|image_pad|>": 151655,
  "<|object_ref_end|>": 151647,
  "<|object_ref_start|>": 151646,
  "<|quad_end|>": 151651,
  "<|quad_start|>": 151650,
  "<|repo_name|>": 151663,
  "<|video_pad|>": 151656,
  "<|vision_end|>": 151653,
  "<|vision_pad|>": 151654,
  "<|vision_start|>": 151652
}
qwen2.5vl-7b-lora_epoch10_2e-5/args.json
ADDED
@@ -0,0 +1,362 @@
{
  "model": "/mnt/data/users/liamding/data/models/Qwen2.5-VL-7B-Instruct",
  "model_type": "qwen2_5_vl",
  "model_revision": null,
  "task_type": "causal_lm",
  "torch_dtype": "bfloat16",
  "attn_impl": null,
  "num_labels": null,
  "problem_type": null,
  "rope_scaling": null,
  "device_map": null,
  "max_memory": {},
  "local_repo_path": null,
  "template": "qwen2_5_vl",
  "system": null,
  "max_length": 32768,
  "truncation_strategy": "delete",
  "max_pixels": null,
  "agent_template": null,
  "norm_bbox": null,
  "response_prefix": null,
  "padding_side": "right",
  "loss_scale": "default",
  "sequence_parallel_size": 1,
  "use_chat_template": true,
  "template_backend": "swift",
  "dataset": [
    "/mnt/data/users/liamding/data/3AM_Plus/final/training/ambi_normal_772.json",
    "/mnt/data/users/liamding/data/3AM_Plus/final/training/mma_train_126.json",
    "/mnt/data/users/liamding/data/3AM_Plus/final/training/sp_train_102.json"
  ],
  "val_dataset": [],
  "split_dataset_ratio": 0.01,
  "data_seed": 42,
  "dataset_num_proc": 1,
  "dataset_shuffle": true,
  "val_dataset_shuffle": false,
  "streaming": false,
  "interleave_prob": null,
  "stopping_strategy": "first_exhausted",
  "shuffle_buffer_size": 1000,
  "enable_cache": false,
  "download_mode": "reuse_dataset_if_exists",
  "columns": {},
  "strict": false,
  "remove_unused_columns": true,
  "model_name": [
    null,
    null
  ],
  "model_author": [
    null,
    null
  ],
  "custom_dataset_info": [],
  "quant_method": null,
  "quant_bits": null,
  "hqq_axis": null,
  "bnb_4bit_compute_dtype": "bfloat16",
  "bnb_4bit_quant_type": "nf4",
  "bnb_4bit_use_double_quant": true,
  "bnb_4bit_quant_storage": null,
  "max_new_tokens": 64,
  "temperature": 0.0,
  "top_k": null,
  "top_p": null,
  "repetition_penalty": null,
  "num_beams": 1,
  "stream": false,
  "stop_words": [],
  "logprobs": false,
  "top_logprobs": null,
  "ckpt_dir": null,
  "load_dataset_config": null,
  "lora_modules": [],
  "tuner_backend": "peft",
  "train_type": "lora",
  "adapters": [],
  "external_plugins": [],
  "seed": 42,
  "model_kwargs": {},
  "load_args": false,
  "load_data_args": false,
  "use_hf": false,
  "hub_token": null,
  "custom_register_path": [],
  "ignore_args_error": false,
  "use_swift_lora": false,
  "output_dir": "/mnt/data/users/liamding/data/MMMT/lora/qwen2.5vl-7b-lora/v0-20250528-085522",
  "overwrite_output_dir": false,
  "do_train": false,
  "do_eval": false,
  "do_predict": false,
  "eval_strategy": "epoch",
  "prediction_loss_only": false,
  "per_device_train_batch_size": 8,
  "per_device_eval_batch_size": 8,
  "per_gpu_train_batch_size": null,
  "per_gpu_eval_batch_size": null,
  "gradient_accumulation_steps": 2,
  "eval_accumulation_steps": null,
  "eval_delay": 0,
  "torch_empty_cache_steps": null,
  "learning_rate": 2e-05,
  "weight_decay": 0.1,
  "adam_beta1": 0.9,
  "adam_beta2": 0.95,
  "adam_epsilon": 1e-08,
  "max_grad_norm": 1.0,
  "num_train_epochs": 10.0,
  "max_steps": -1,
  "lr_scheduler_type": "cosine",
  "lr_scheduler_kwargs": null,
  "warmup_ratio": 0.05,
  "warmup_steps": 0,
  "log_level": "passive",
  "log_level_replica": "warning",
  "log_on_each_node": true,
  "logging_dir": "/mnt/data/users/liamding/data/MMMT/lora/qwen2.5vl-7b-lora/v0-20250528-085522/runs",
  "logging_strategy": "steps",
  "logging_first_step": true,
  "logging_steps": 1,
  "logging_nan_inf_filter": true,
  "save_strategy": "steps",
  "save_steps": 500,
  "save_total_limit": 5,
  "save_safetensors": true,
  "save_on_each_node": false,
  "save_only_model": false,
  "restore_callback_states_from_checkpoint": false,
  "no_cuda": false,
  "use_cpu": false,
  "use_mps_device": false,
  "jit_mode_eval": false,
  "use_ipex": false,
  "bf16": true,
  "fp16": false,
  "fp16_opt_level": "O1",
  "half_precision_backend": "auto",
  "bf16_full_eval": false,
  "fp16_full_eval": false,
  "tf32": null,
  "local_rank": 0,
  "ddp_backend": null,
  "tpu_num_cores": null,
  "tpu_metrics_debug": false,
  "debug": null,
  "dataloader_drop_last": false,
  "eval_steps": null,
  "dataloader_num_workers": 4,
  "dataloader_prefetch_factor": null,
  "past_index": -1,
  "run_name": null,
  "disable_tqdm": null,
  "label_names": null,
  "load_best_model_at_end": false,
  "metric_for_best_model": "loss",
  "greater_is_better": false,
  "ignore_data_skip": false,
  "fsdp": "",
  "fsdp_min_num_params": 0,
  "fsdp_config": null,
  "tp_size": 0,
  "fsdp_transformer_layer_cls_to_wrap": null,
  "accelerator_config": {
    "dispatch_batches": false
  },
  "deepspeed": {
    "fp16": {
      "enabled": "auto",
      "loss_scale": 0,
      "loss_scale_window": 1000,
      "initial_scale_power": 16,
      "hysteresis": 2,
      "min_loss_scale": 1
    },
    "bf16": {
      "enabled": "auto"
    },
    "zero_optimization": {
      "stage": 2,
      "offload_optimizer": {
        "device": "none",
        "pin_memory": true
      },
      "allgather_partitions": true,
      "allgather_bucket_size": 200000000.0,
      "overlap_comm": false,
      "reduce_scatter": true,
      "reduce_bucket_size": 200000000.0,
      "contiguous_gradients": true
    },
    "gradient_accumulation_steps": "auto",
    "gradient_clipping": "auto",
    "steps_per_print": 2000,
    "train_batch_size": "auto",
    "train_micro_batch_size_per_gpu": "auto",
    "wall_clock_breakdown": false
  },
  "label_smoothing_factor": 0.0,
  "optim": "adamw_torch",
  "optim_args": null,
  "adafactor": false,
  "group_by_length": false,
  "length_column_name": "length",
  "report_to": [
    "wandb"
  ],
  "ddp_find_unused_parameters": null,
  "ddp_bucket_cap_mb": null,
  "ddp_broadcast_buffers": null,
  "dataloader_pin_memory": true,
  "dataloader_persistent_workers": false,
  "skip_memory_metrics": true,
  "use_legacy_prediction_loop": false,
  "push_to_hub": false,
  "resume_from_checkpoint": null,
  "hub_model_id": null,
  "hub_strategy": "every_save",
  "hub_private_repo": null,
  "hub_always_push": false,
  "gradient_checkpointing": true,
  "gradient_checkpointing_kwargs": null,
  "include_inputs_for_metrics": false,
  "include_for_metrics": [],
  "eval_do_concat_batches": true,
  "fp16_backend": "auto",
  "push_to_hub_model_id": null,
  "push_to_hub_organization": null,
  "push_to_hub_token": null,
  "mp_parameters": "",
  "auto_find_batch_size": false,
  "full_determinism": false,
  "torchdynamo": null,
  "ray_scope": "last",
  "ddp_timeout": 1800,
  "torch_compile": false,
  "torch_compile_backend": null,
  "torch_compile_mode": null,
  "include_tokens_per_second": false,
  "include_num_input_tokens_seen": false,
  "neftune_noise_alpha": null,
  "optim_target_modules": null,
  "batch_eval_metrics": false,
  "eval_on_start": false,
  "use_liger_kernel": false,
  "eval_use_gather_object": false,
  "average_tokens_across_devices": false,
  "sortish_sampler": false,
  "predict_with_generate": false,
  "generation_max_length": null,
  "generation_num_beams": null,
  "generation_config": null,
  "check_model": true,
  "acc_strategy": "token",
  "train_dataloader_shuffle": true,
  "metric_warmup_step": 0,
  "fsdp_num": 1,
  "acc_steps": 1,
  "eval_use_evalscope": false,
  "eval_datasets": [],
  "eval_limit": null,
  "eval_datasets_args": null,
  "eval_generation_config": null,
  "freeze_parameters": [
    "visual",
    "visual.merger"
  ],
  "freeze_parameters_ratio": 0.0,
  "trainable_parameters": [],
  "freeze_llm": false,
  "freeze_vit": true,
  "freeze_aligner": true,
  "target_modules": [
    "all-linear"
  ],
  "target_regex": null,
  "modules_to_save": [],
  "lora_rank": 8,
  "lora_alpha": 16,
  "lora_dropout": 0.1,
  "lora_bias": "none",
  "lora_dtype": null,
  "lorap_lr_ratio": null,
  "use_rslora": false,
  "use_dora": false,
  "lora_ga_batch_size": 2,
  "lora_ga_iters": 2,
  "lora_ga_max_length": 1024,
  "lora_ga_direction": "ArB2r",
  "lora_ga_scale": "stable",
  "lora_ga_stable_gamma": 16,
  "init_weights": true,
  "fourier_n_frequency": 2000,
  "fourier_scaling": 300.0,
  "boft_block_size": 4,
  "boft_block_num": 0,
  "boft_n_butterfly_factor": 1,
  "boft_dropout": 0.0,
  "vera_rank": 256,
  "vera_projection_prng_key": 0,
  "vera_dropout": 0.0,
  "vera_d_initial": 0.1,
  "adapter_act": "gelu",
  "adapter_length": 128,
  "use_galore": false,
  "galore_target_modules": null,
  "galore_rank": 128,
  "galore_update_proj_gap": 50,
  "galore_scale": 1.0,
  "galore_proj_type": "std",
  "galore_optim_per_parameter": false,
  "galore_with_embedding": false,
  "galore_quantization": false,
  "galore_proj_quant": false,
  "galore_proj_bits": 4,
  "galore_proj_group_size": 256,
  "galore_cos_threshold": 0.4,
  "galore_gamma_proj": 2,
  "galore_queue_size": 5,
  "adalora_target_r": 8,
  "adalora_init_r": 12,
  "adalora_tinit": 0,
  "adalora_tfinal": 0,
  "adalora_deltaT": 1,
  "adalora_beta1": 0.85,
  "adalora_beta2": 0.85,
  "adalora_orth_reg_weight": 0.5,
  "llamapro_num_new_blocks": 4,
  "llamapro_num_groups": null,
  "lisa_activated_layers": 0,
  "lisa_step_interval": 20,
  "reft_layer_key": null,
  "reft_layers": null,
  "reft_rank": 4,
  "reft_intervention_type": "LoreftIntervention",
  "reft_args": null,
  "swanlab_token": null,
  "swanlab_project": null,
  "swanlab_workspace": null,
  "swanlab_exp_name": null,
  "swanlab_mode": "cloud",
  "add_version": true,
  "resume_only_model": false,
  "create_checkpoint_symlink": false,
  "packing": false,
  "lazy_tokenize": true,
  "loss_type": null,
  "optimizer": null,
  "metric": null,
  "zero_hpz_partition_size": null,
  "rank": 0,
  "global_world_size": 2,
  "local_world_size": 2,
  "model_suffix": "Qwen2.5-VL-7B-Instruct",
  "model_info": "ModelInfo(model_type='qwen2_5_vl', model_dir='/mnt/data/users/liamding/data/models/Qwen2.5-VL-7B-Instruct', torch_dtype=torch.bfloat16, max_model_len=128000, quant_method=None, quant_bits=None, rope_scaling={'type': 'default', 'mrope_section': [16, 24, 24], 'rope_type': 'default'}, config=None, task_type='causal_lm', num_labels=None)",
  "model_meta": "ModelMeta(model_type='qwen2_5_vl', model_groups=[ModelGroup(models=[Model(ms_model_id='Qwen/Qwen2.5-VL-3B-Instruct', hf_model_id='Qwen/Qwen2.5-VL-3B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-7B-Instruct', hf_model_id='Qwen/Qwen2.5-VL-7B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-32B-Instruct', hf_model_id='Qwen/Qwen2.5-VL-32B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-72B-Instruct', hf_model_id='Qwen/Qwen2.5-VL-72B-Instruct', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[]), ModelGroup(models=[Model(ms_model_id='Qwen/Qwen2.5-VL-3B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-VL-3B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-7B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-VL-7B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-32B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-VL-32B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-72B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-VL-72B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[])], template='qwen2_5_vl', get_function=<function get_model_tokenizer_qwen2_5_vl at 0x7f0ac0ba9ea0>, model_arch='qwen2_vl', architectures=['Qwen2_5_VLForConditionalGeneration'], additional_saved_files=[], torch_dtype=None, is_multimodal=True, is_reward=False, task_type=None, ignore_patterns=None, requires=['transformers>=4.49', 'qwen_vl_utils>=0.0.6', 'decord'], tags=[])",
  "model_dir": "/mnt/data/users/liamding/data/models/Qwen2.5-VL-7B-Instruct",
  "hub": "<class 'swift.hub.hub.MSHub'>",
  "evaluation_strategy": "epoch",
  "training_args": "Seq2SeqTrainingArguments(output_dir='/mnt/data/users/liamding/data/MMMT/lora/qwen2.5vl-7b-lora/v0-20250528-085522', overwrite_output_dir=False, do_train=False, do_eval=True, do_predict=False, eval_strategy=<IntervalStrategy.EPOCH: 'epoch'>, prediction_loss_only=False, per_device_train_batch_size=8, per_device_eval_batch_size=8, per_gpu_train_batch_size=None, per_gpu_eval_batch_size=None, gradient_accumulation_steps=2, eval_accumulation_steps=None, eval_delay=0, torch_empty_cache_steps=None, learning_rate=2e-05, weight_decay=0.1, adam_beta1=0.9, adam_beta2=0.95, adam_epsilon=1e-08, max_grad_norm=1.0, num_train_epochs=10.0, max_steps=-1, lr_scheduler_type=<SchedulerType.COSINE: 'cosine'>, lr_scheduler_kwargs=None, warmup_ratio=0.05, warmup_steps=0, log_level='passive', log_level_replica='warning', log_on_each_node=True, logging_dir='/mnt/data/users/liamding/data/MMMT/lora/qwen2.5vl-7b-lora/v0-20250528-085522/runs', logging_strategy=<IntervalStrategy.STEPS: 'steps'>, logging_first_step=True, logging_steps=1, logging_nan_inf_filter=True, save_strategy=<SaveStrategy.STEPS: 'steps'>, save_steps=500, save_total_limit=5, save_safetensors=True, save_on_each_node=False, save_only_model=False, restore_callback_states_from_checkpoint=False, no_cuda=False, use_cpu=False, use_mps_device=False, seed=42, data_seed=42, jit_mode_eval=False, use_ipex=False, bf16=True, fp16=False, fp16_opt_level='O1', half_precision_backend='auto', bf16_full_eval=False, fp16_full_eval=False, tf32=None, local_rank=0, ddp_backend=None, tpu_num_cores=None, tpu_metrics_debug=False, debug=[], dataloader_drop_last=False, eval_steps=None, dataloader_num_workers=4, dataloader_prefetch_factor=10, past_index=-1, run_name='/mnt/data/users/liamding/data/MMMT/lora/qwen2.5vl-7b-lora/v0-20250528-085522', disable_tqdm=False, remove_unused_columns=False, label_names=None, load_best_model_at_end=False, metric_for_best_model='loss', greater_is_better=False, ignore_data_skip=False, fsdp=[], fsdp_min_num_params=0, fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, tp_size=0, fsdp_transformer_layer_cls_to_wrap=None, accelerator_config=AcceleratorConfig(split_batches=False, dispatch_batches=False, even_batches=True, use_seedable_sampler=True, non_blocking=False, gradient_accumulation_kwargs=None, use_configured_state=False), deepspeed={'fp16': {'enabled': 'auto', 'loss_scale': 0, 'loss_scale_window': 1000, 'initial_scale_power': 16, 'hysteresis': 2, 'min_loss_scale': 1}, 'bf16': {'enabled': 'auto'}, 'zero_optimization': {'stage': 2, 'offload_optimizer': {'device': 'none', 'pin_memory': True}, 'allgather_partitions': True, 'allgather_bucket_size': 200000000.0, 'overlap_comm': False, 'reduce_scatter': True, 'reduce_bucket_size': 200000000.0, 'contiguous_gradients': True}, 'gradient_accumulation_steps': 'auto', 'gradient_clipping': 'auto', 'steps_per_print': 2000, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'wall_clock_breakdown': False}, label_smoothing_factor=0.0, optim=<OptimizerNames.ADAMW_TORCH: 'adamw_torch'>, optim_args=None, adafactor=False, group_by_length=False, length_column_name='length', report_to=['wandb'], ddp_find_unused_parameters=None, ddp_bucket_cap_mb=None, ddp_broadcast_buffers=None, dataloader_pin_memory=True, dataloader_persistent_workers=False, skip_memory_metrics=True, use_legacy_prediction_loop=False, push_to_hub=False, resume_from_checkpoint=None, hub_model_id=None, hub_strategy=<HubStrategy.EVERY_SAVE: 'every_save'>, hub_token=None, hub_private_repo=None, hub_always_push=False, gradient_checkpointing=True, gradient_checkpointing_kwargs=None, include_inputs_for_metrics=False, include_for_metrics=[], eval_do_concat_batches=True, fp16_backend='auto', push_to_hub_model_id=None, push_to_hub_organization=None, push_to_hub_token=None, mp_parameters='', auto_find_batch_size=False, full_determinism=False, torchdynamo=None, ray_scope='last', ddp_timeout=1800, torch_compile=False, torch_compile_backend=None, torch_compile_mode=None, include_tokens_per_second=None, include_num_input_tokens_seen=None, neftune_noise_alpha=None, optim_target_modules=None, batch_eval_metrics=False, eval_on_start=False, use_liger_kernel=False, eval_use_gather_object=False, average_tokens_across_devices=None, sortish_sampler=False, predict_with_generate=False, generation_max_length=None, generation_num_beams=None, generation_config=None, check_model=True, acc_strategy='token', train_dataloader_shuffle=True, metric_warmup_step=0, fsdp_num=1, acc_steps=1, eval_use_evalscope=False, eval_datasets=[], eval_limit=None, eval_datasets_args=None, eval_generation_config=None, train_type='lora', optimizer=None, local_repo_path=None, galore_config=None)"
}
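From the hyperparameters above, the effective optimization batch size works out as follows (a simple derivation, not an extra config field):

    per_device = 8     # per_device_train_batch_size
    grad_accum = 2     # gradient_accumulation_steps
    world_size = 2     # global_world_size
    print(per_device * grad_accum * world_size)  # 32 samples per optimizer step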
qwen2.5vl-7b-lora_epoch10_2e-5/chat_template.json
ADDED
@@ -0,0 +1,3 @@
{
  "chat_template": "{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}"
}
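The stored template is plain Jinja2, so it can be rendered directly; a minimal sketch (normally transformers' apply_chat_template does this for you, and the relative path here is an assumption):

    import json
    from jinja2 import Template

    tpl = json.load(open('qwen2.5vl-7b-lora_epoch10_2e-5/chat_template.json'))['chat_template']
    messages = [{'role': 'user',
                 'content': [{'type': 'image'},
                             {'type': 'text', 'text': 'What is shown here?'}]}]
    print(Template(tpl).render(messages=messages, add_generation_prompt=True))

This emits the default system block, then the user turn with '<|vision_start|><|image_pad|><|vision_end|>' in place of the image, and finally the '<|im_start|>assistant' generation prompt.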
qwen2.5vl-7b-lora_epoch10_2e-5/config.json
ADDED
@@ -0,0 +1,66 @@
{
  "architectures": [
    "Qwen2_5_VLForConditionalGeneration"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 151643,
  "eos_token_id": 151645,
  "hidden_act": "silu",
  "hidden_size": 3584,
  "image_token_id": 151655,
  "initializer_range": 0.02,
  "intermediate_size": 18944,
  "max_position_embeddings": 128000,
  "max_window_layers": 28,
  "model_type": "qwen2_5_vl",
  "num_attention_heads": 28,
  "num_hidden_layers": 28,
  "num_key_value_heads": 4,
  "pad_token_id": 151643,
  "rms_norm_eps": 1e-06,
  "rope_scaling": {
    "mrope_section": [
      16,
      24,
      24
    ],
    "rope_type": "default",
    "type": "default"
  },
  "rope_theta": 1000000.0,
  "sliding_window": 32768,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.51.3",
  "use_cache": true,
  "use_sliding_window": false,
  "video_token_id": 151656,
  "vision_config": {
    "depth": 32,
    "fullatt_block_indexes": [
      7,
      15,
      23,
      31
    ],
    "hidden_act": "silu",
    "hidden_size": 1280,
    "in_channels": 3,
    "in_chans": 3,
    "intermediate_size": 3420,
    "model_type": "qwen2_5_vl",
    "num_heads": 16,
    "out_hidden_size": 3584,
    "patch_size": 14,
    "spatial_merge_size": 2,
    "spatial_patch_size": 14,
    "temporal_patch_size": 2,
    "tokens_per_second": 2,
    "torch_dtype": "bfloat16",
    "window_size": 112
  },
  "vision_end_token_id": 151653,
  "vision_start_token_id": 151652,
  "vision_token_id": 151654,
  "vocab_size": 152064
}
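From vision_config above one can estimate how many <|image_pad|> positions an image occupies: 14-pixel patches followed by a 2x2 spatial merge. An illustrative calculation for a 448x448 input (actual image sizes vary at runtime):

    patch_size, merge = 14, 2
    h = w = 448
    tokens = (h // patch_size) * (w // patch_size) // (merge * merge)
    print(tokens)  # 256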
qwen2.5vl-7b-lora_epoch10_2e-5/generation_config.json
ADDED
@@ -0,0 +1,12 @@
{
  "bos_token_id": 151643,
  "do_sample": true,
  "eos_token_id": [
    151645,
    151643
  ],
  "pad_token_id": 151643,
  "repetition_penalty": 1.05,
  "temperature": 1e-06,
  "transformers_version": "4.51.3"
}
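Note that do_sample=true combined with temperature=1e-06 makes decoding effectively greedy while keeping the sampling code path. Loading these defaults is a one-liner (the relative path is an assumption):

    from transformers import GenerationConfig

    gen = GenerationConfig.from_pretrained('qwen2.5vl-7b-lora_epoch10_2e-5')
    print(gen.temperature, gen.repetition_penalty)  # 1e-06 1.05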
qwen2.5vl-7b-lora_epoch10_2e-5/merges.txt
ADDED
The diff for this file is too large to render.
See raw diff
qwen2.5vl-7b-lora_epoch10_2e-5/model.safetensors.index.json
ADDED
@@ -0,0 +1,736 @@
{
  "metadata": {
    "total_size": 16584333312
  },
  "weight_map": {
    "lm_head.weight": "model-00004-of-00004.safetensors",
    "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
| 90 |
+
"model.layers.14.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
| 91 |
+
"model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
| 92 |
+
"model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 93 |
+
"model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
| 94 |
+
"model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
| 95 |
+
"model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
| 96 |
+
"model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 97 |
+
"model.layers.15.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
|
| 98 |
+
"model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
| 99 |
+
"model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
| 100 |
+
"model.layers.15.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
|
| 101 |
+
"model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
| 102 |
+
"model.layers.15.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
| 103 |
+
"model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
| 104 |
+
"model.layers.16.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 105 |
+
"model.layers.16.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 106 |
+
"model.layers.16.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 107 |
+
"model.layers.16.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 108 |
+
"model.layers.16.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 109 |
+
"model.layers.16.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
|
| 110 |
+
"model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
| 111 |
+
"model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
| 112 |
+
"model.layers.16.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
|
| 113 |
+
"model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
| 114 |
+
"model.layers.16.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
| 115 |
+
"model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
| 116 |
+
"model.layers.17.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 117 |
+
"model.layers.17.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 118 |
+
"model.layers.17.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 119 |
+
"model.layers.17.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 120 |
+
"model.layers.17.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 121 |
+
"model.layers.17.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 122 |
+
"model.layers.17.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 123 |
+
"model.layers.17.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 124 |
+
"model.layers.17.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 125 |
+
"model.layers.17.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 126 |
+
"model.layers.17.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 127 |
+
"model.layers.17.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 128 |
+
"model.layers.18.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 129 |
+
"model.layers.18.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 130 |
+
"model.layers.18.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 131 |
+
"model.layers.18.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 132 |
+
"model.layers.18.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 133 |
+
"model.layers.18.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 134 |
+
"model.layers.18.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 135 |
+
"model.layers.18.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 136 |
+
"model.layers.18.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 137 |
+
"model.layers.18.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 138 |
+
"model.layers.18.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 139 |
+
"model.layers.18.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 140 |
+
"model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 141 |
+
"model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 142 |
+
"model.layers.19.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 143 |
+
"model.layers.19.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 144 |
+
"model.layers.19.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 145 |
+
"model.layers.19.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 146 |
+
"model.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 147 |
+
"model.layers.19.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 148 |
+
"model.layers.19.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 149 |
+
"model.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 150 |
+
"model.layers.19.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 151 |
+
"model.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 152 |
+
"model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 153 |
+
"model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 154 |
+
"model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 155 |
+
"model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 156 |
+
"model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 157 |
+
"model.layers.2.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 158 |
+
"model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 159 |
+
"model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
| 160 |
+
"model.layers.2.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 161 |
+
"model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 162 |
+
"model.layers.2.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 163 |
+
"model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 164 |
+
"model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 165 |
+
"model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 166 |
+
"model.layers.20.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 167 |
+
"model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 168 |
+
"model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 169 |
+
"model.layers.20.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 170 |
+
"model.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 171 |
+
"model.layers.20.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 172 |
+
"model.layers.20.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 173 |
+
"model.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 174 |
+
"model.layers.20.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 175 |
+
"model.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 176 |
+
"model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 177 |
+
"model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 178 |
+
"model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 179 |
+
"model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 180 |
+
"model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 181 |
+
"model.layers.21.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 182 |
+
"model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 183 |
+
"model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 184 |
+
"model.layers.21.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 185 |
+
"model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 186 |
+
"model.layers.21.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 187 |
+
"model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 188 |
+
"model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 189 |
+
"model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 190 |
+
"model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 191 |
+
"model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 192 |
+
"model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 193 |
+
"model.layers.22.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 194 |
+
"model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 195 |
+
"model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 196 |
+
"model.layers.22.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 197 |
+
"model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 198 |
+
"model.layers.22.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 199 |
+
"model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 200 |
+
"model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 201 |
+
"model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 202 |
+
"model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 203 |
+
"model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 204 |
+
"model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 205 |
+
"model.layers.23.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 206 |
+
"model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 207 |
+
"model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 208 |
+
"model.layers.23.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 209 |
+
"model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 210 |
+
"model.layers.23.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 211 |
+
"model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 212 |
+
"model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 213 |
+
"model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 214 |
+
"model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 215 |
+
"model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 216 |
+
"model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 217 |
+
"model.layers.24.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 218 |
+
"model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 219 |
+
"model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 220 |
+
"model.layers.24.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 221 |
+
"model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 222 |
+
"model.layers.24.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 223 |
+
"model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 224 |
+
"model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 225 |
+
"model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
| 226 |
+
"model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 227 |
+
"model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 228 |
+
"model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 229 |
+
"model.layers.25.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 230 |
+
"model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 231 |
+
"model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 232 |
+
"model.layers.25.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 233 |
+
"model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 234 |
+
"model.layers.25.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 235 |
+
"model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 236 |
+
"model.layers.26.input_layernorm.weight": "model-00004-of-00004.safetensors",
|
| 237 |
+
"model.layers.26.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
|
| 238 |
+
"model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
| 239 |
+
"model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
| 240 |
+
"model.layers.26.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
|
| 241 |
+
"model.layers.26.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
|
| 242 |
+
"model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 243 |
+
"model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 244 |
+
"model.layers.26.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
|
| 245 |
+
"model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 246 |
+
"model.layers.26.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
|
| 247 |
+
"model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 248 |
+
"model.layers.27.input_layernorm.weight": "model-00004-of-00004.safetensors",
|
| 249 |
+
"model.layers.27.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
|
| 250 |
+
"model.layers.27.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
|
| 251 |
+
"model.layers.27.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
|
| 252 |
+
"model.layers.27.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
|
| 253 |
+
"model.layers.27.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
|
| 254 |
+
"model.layers.27.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
|
| 255 |
+
"model.layers.27.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
|
| 256 |
+
"model.layers.27.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
|
| 257 |
+
"model.layers.27.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
|
| 258 |
+
"model.layers.27.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
|
| 259 |
+
"model.layers.27.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
|
| 260 |
+
"model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 261 |
+
"model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 262 |
+
"model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 263 |
+
"model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 264 |
+
"model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 265 |
+
"model.layers.3.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 266 |
+
"model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 267 |
+
"model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
| 268 |
+
"model.layers.3.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 269 |
+
"model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 270 |
+
"model.layers.3.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 271 |
+
"model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 272 |
+
"model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 273 |
+
"model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 274 |
+
"model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 275 |
+
"model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 276 |
+
"model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 277 |
+
"model.layers.4.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 278 |
+
"model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 279 |
+
"model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
| 280 |
+
"model.layers.4.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 281 |
+
"model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 282 |
+
"model.layers.4.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 283 |
+
"model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 284 |
+
"model.layers.5.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 285 |
+
"model.layers.5.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
| 286 |
+
"model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 287 |
+
"model.layers.5.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
| 288 |
+
"model.layers.5.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 289 |
+
"model.layers.5.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
| 290 |
+
"model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 291 |
+
"model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
| 292 |
+
"model.layers.5.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
| 293 |
+
"model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 294 |
+
"model.layers.5.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
| 295 |
+
"model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 296 |
+
"model.layers.6.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 297 |
+
"model.layers.6.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
| 298 |
+
"model.layers.6.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
| 299 |
+
"model.layers.6.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
| 300 |
+
"model.layers.6.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 301 |
+
"model.layers.6.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
|
| 302 |
+
"model.layers.6.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
| 303 |
+
"model.layers.6.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
| 304 |
+
"model.layers.6.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
|
| 305 |
+
"model.layers.6.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
| 306 |
+
"model.layers.6.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
| 307 |
+
"model.layers.6.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
| 308 |
+
"model.layers.7.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 309 |
+
"model.layers.7.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
| 310 |
+
"model.layers.7.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
| 311 |
+
"model.layers.7.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
| 312 |
+
"model.layers.7.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 313 |
+
"model.layers.7.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
|
| 314 |
+
"model.layers.7.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
| 315 |
+
"model.layers.7.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
| 316 |
+
"model.layers.7.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
|
| 317 |
+
"model.layers.7.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
| 318 |
+
"model.layers.7.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
| 319 |
+
"model.layers.7.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
| 320 |
+
"model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 321 |
+
"model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
| 322 |
+
"model.layers.8.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
| 323 |
+
"model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
| 324 |
+
"model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 325 |
+
"model.layers.8.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
|
| 326 |
+
"model.layers.8.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
| 327 |
+
"model.layers.8.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
| 328 |
+
"model.layers.8.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
|
| 329 |
+
"model.layers.8.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
| 330 |
+
"model.layers.8.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
| 331 |
+
"model.layers.8.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
| 332 |
+
"model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 333 |
+
"model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
| 334 |
+
"model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
| 335 |
+
"model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
| 336 |
+
"model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 337 |
+
"model.layers.9.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
|
| 338 |
+
"model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
| 339 |
+
"model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
| 340 |
+
"model.layers.9.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
|
| 341 |
+
"model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
| 342 |
+
"model.layers.9.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
|
| 343 |
+
"model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
| 344 |
+
"model.norm.weight": "model-00004-of-00004.safetensors",
|
| 345 |
+
"visual.blocks.0.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 346 |
+
"visual.blocks.0.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 347 |
+
"visual.blocks.0.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 348 |
+
"visual.blocks.0.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 349 |
+
"visual.blocks.0.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 350 |
+
"visual.blocks.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 351 |
+
"visual.blocks.0.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 352 |
+
"visual.blocks.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 353 |
+
"visual.blocks.0.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 354 |
+
"visual.blocks.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 355 |
+
"visual.blocks.0.norm1.weight": "model-00001-of-00004.safetensors",
|
| 356 |
+
"visual.blocks.0.norm2.weight": "model-00001-of-00004.safetensors",
|
| 357 |
+
"visual.blocks.1.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 358 |
+
"visual.blocks.1.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 359 |
+
"visual.blocks.1.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 360 |
+
"visual.blocks.1.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 361 |
+
"visual.blocks.1.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 362 |
+
"visual.blocks.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 363 |
+
"visual.blocks.1.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 364 |
+
"visual.blocks.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 365 |
+
"visual.blocks.1.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 366 |
+
"visual.blocks.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 367 |
+
"visual.blocks.1.norm1.weight": "model-00001-of-00004.safetensors",
|
| 368 |
+
"visual.blocks.1.norm2.weight": "model-00001-of-00004.safetensors",
|
| 369 |
+
"visual.blocks.10.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 370 |
+
"visual.blocks.10.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 371 |
+
"visual.blocks.10.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 372 |
+
"visual.blocks.10.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 373 |
+
"visual.blocks.10.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 374 |
+
"visual.blocks.10.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 375 |
+
"visual.blocks.10.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 376 |
+
"visual.blocks.10.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 377 |
+
"visual.blocks.10.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 378 |
+
"visual.blocks.10.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 379 |
+
"visual.blocks.10.norm1.weight": "model-00001-of-00004.safetensors",
|
| 380 |
+
"visual.blocks.10.norm2.weight": "model-00001-of-00004.safetensors",
|
| 381 |
+
"visual.blocks.11.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 382 |
+
"visual.blocks.11.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 383 |
+
"visual.blocks.11.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 384 |
+
"visual.blocks.11.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 385 |
+
"visual.blocks.11.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 386 |
+
"visual.blocks.11.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 387 |
+
"visual.blocks.11.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 388 |
+
"visual.blocks.11.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 389 |
+
"visual.blocks.11.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 390 |
+
"visual.blocks.11.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 391 |
+
"visual.blocks.11.norm1.weight": "model-00001-of-00004.safetensors",
|
| 392 |
+
"visual.blocks.11.norm2.weight": "model-00001-of-00004.safetensors",
|
| 393 |
+
"visual.blocks.12.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 394 |
+
"visual.blocks.12.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 395 |
+
"visual.blocks.12.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 396 |
+
"visual.blocks.12.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 397 |
+
"visual.blocks.12.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 398 |
+
"visual.blocks.12.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 399 |
+
"visual.blocks.12.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 400 |
+
"visual.blocks.12.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 401 |
+
"visual.blocks.12.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 402 |
+
"visual.blocks.12.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 403 |
+
"visual.blocks.12.norm1.weight": "model-00001-of-00004.safetensors",
|
| 404 |
+
"visual.blocks.12.norm2.weight": "model-00001-of-00004.safetensors",
|
| 405 |
+
"visual.blocks.13.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 406 |
+
"visual.blocks.13.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 407 |
+
"visual.blocks.13.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 408 |
+
"visual.blocks.13.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 409 |
+
"visual.blocks.13.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 410 |
+
"visual.blocks.13.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 411 |
+
"visual.blocks.13.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 412 |
+
"visual.blocks.13.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 413 |
+
"visual.blocks.13.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 414 |
+
"visual.blocks.13.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 415 |
+
"visual.blocks.13.norm1.weight": "model-00001-of-00004.safetensors",
|
| 416 |
+
"visual.blocks.13.norm2.weight": "model-00001-of-00004.safetensors",
|
| 417 |
+
"visual.blocks.14.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 418 |
+
"visual.blocks.14.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 419 |
+
"visual.blocks.14.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 420 |
+
"visual.blocks.14.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 421 |
+
"visual.blocks.14.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 422 |
+
"visual.blocks.14.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 423 |
+
"visual.blocks.14.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 424 |
+
"visual.blocks.14.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 425 |
+
"visual.blocks.14.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 426 |
+
"visual.blocks.14.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 427 |
+
"visual.blocks.14.norm1.weight": "model-00001-of-00004.safetensors",
|
| 428 |
+
"visual.blocks.14.norm2.weight": "model-00001-of-00004.safetensors",
|
| 429 |
+
"visual.blocks.15.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 430 |
+
"visual.blocks.15.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 431 |
+
"visual.blocks.15.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 432 |
+
"visual.blocks.15.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 433 |
+
"visual.blocks.15.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 434 |
+
"visual.blocks.15.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 435 |
+
"visual.blocks.15.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 436 |
+
"visual.blocks.15.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 437 |
+
"visual.blocks.15.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 438 |
+
"visual.blocks.15.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 439 |
+
"visual.blocks.15.norm1.weight": "model-00001-of-00004.safetensors",
|
| 440 |
+
"visual.blocks.15.norm2.weight": "model-00001-of-00004.safetensors",
|
| 441 |
+
"visual.blocks.16.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 442 |
+
"visual.blocks.16.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 443 |
+
"visual.blocks.16.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 444 |
+
"visual.blocks.16.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 445 |
+
"visual.blocks.16.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 446 |
+
"visual.blocks.16.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 447 |
+
"visual.blocks.16.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 448 |
+
"visual.blocks.16.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 449 |
+
"visual.blocks.16.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 450 |
+
"visual.blocks.16.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 451 |
+
"visual.blocks.16.norm1.weight": "model-00001-of-00004.safetensors",
|
| 452 |
+
"visual.blocks.16.norm2.weight": "model-00001-of-00004.safetensors",
|
| 453 |
+
"visual.blocks.17.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 454 |
+
"visual.blocks.17.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 455 |
+
"visual.blocks.17.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 456 |
+
"visual.blocks.17.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 457 |
+
"visual.blocks.17.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 458 |
+
"visual.blocks.17.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 459 |
+
"visual.blocks.17.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 460 |
+
"visual.blocks.17.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 461 |
+
"visual.blocks.17.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 462 |
+
"visual.blocks.17.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 463 |
+
"visual.blocks.17.norm1.weight": "model-00001-of-00004.safetensors",
|
| 464 |
+
"visual.blocks.17.norm2.weight": "model-00001-of-00004.safetensors",
|
| 465 |
+
"visual.blocks.18.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 466 |
+
"visual.blocks.18.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 467 |
+
"visual.blocks.18.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 468 |
+
"visual.blocks.18.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 469 |
+
"visual.blocks.18.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 470 |
+
"visual.blocks.18.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 471 |
+
"visual.blocks.18.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 472 |
+
"visual.blocks.18.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 473 |
+
"visual.blocks.18.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 474 |
+
"visual.blocks.18.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 475 |
+
"visual.blocks.18.norm1.weight": "model-00001-of-00004.safetensors",
|
| 476 |
+
"visual.blocks.18.norm2.weight": "model-00001-of-00004.safetensors",
|
| 477 |
+
"visual.blocks.19.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 478 |
+
"visual.blocks.19.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 479 |
+
"visual.blocks.19.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 480 |
+
"visual.blocks.19.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 481 |
+
"visual.blocks.19.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 482 |
+
"visual.blocks.19.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 483 |
+
"visual.blocks.19.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 484 |
+
"visual.blocks.19.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 485 |
+
"visual.blocks.19.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 486 |
+
"visual.blocks.19.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 487 |
+
"visual.blocks.19.norm1.weight": "model-00001-of-00004.safetensors",
|
| 488 |
+
"visual.blocks.19.norm2.weight": "model-00001-of-00004.safetensors",
|
| 489 |
+
"visual.blocks.2.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 490 |
+
"visual.blocks.2.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 491 |
+
"visual.blocks.2.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 492 |
+
"visual.blocks.2.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 493 |
+
"visual.blocks.2.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 494 |
+
"visual.blocks.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 495 |
+
"visual.blocks.2.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 496 |
+
"visual.blocks.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 497 |
+
"visual.blocks.2.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 498 |
+
"visual.blocks.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 499 |
+
"visual.blocks.2.norm1.weight": "model-00001-of-00004.safetensors",
|
| 500 |
+
"visual.blocks.2.norm2.weight": "model-00001-of-00004.safetensors",
|
| 501 |
+
"visual.blocks.20.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 502 |
+
"visual.blocks.20.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 503 |
+
"visual.blocks.20.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 504 |
+
"visual.blocks.20.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 505 |
+
"visual.blocks.20.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 506 |
+
"visual.blocks.20.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 507 |
+
"visual.blocks.20.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 508 |
+
"visual.blocks.20.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 509 |
+
"visual.blocks.20.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 510 |
+
"visual.blocks.20.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 511 |
+
"visual.blocks.20.norm1.weight": "model-00001-of-00004.safetensors",
|
| 512 |
+
"visual.blocks.20.norm2.weight": "model-00001-of-00004.safetensors",
|
| 513 |
+
"visual.blocks.21.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 514 |
+
"visual.blocks.21.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 515 |
+
"visual.blocks.21.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 516 |
+
"visual.blocks.21.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 517 |
+
"visual.blocks.21.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 518 |
+
"visual.blocks.21.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 519 |
+
"visual.blocks.21.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 520 |
+
"visual.blocks.21.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 521 |
+
"visual.blocks.21.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 522 |
+
"visual.blocks.21.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 523 |
+
"visual.blocks.21.norm1.weight": "model-00001-of-00004.safetensors",
|
| 524 |
+
"visual.blocks.21.norm2.weight": "model-00001-of-00004.safetensors",
|
| 525 |
+
"visual.blocks.22.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 526 |
+
"visual.blocks.22.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 527 |
+
"visual.blocks.22.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 528 |
+
"visual.blocks.22.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 529 |
+
"visual.blocks.22.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 530 |
+
"visual.blocks.22.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 531 |
+
"visual.blocks.22.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 532 |
+
"visual.blocks.22.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 533 |
+
"visual.blocks.22.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 534 |
+
"visual.blocks.22.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 535 |
+
"visual.blocks.22.norm1.weight": "model-00001-of-00004.safetensors",
|
| 536 |
+
"visual.blocks.22.norm2.weight": "model-00001-of-00004.safetensors",
|
| 537 |
+
"visual.blocks.23.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 538 |
+
"visual.blocks.23.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 539 |
+
"visual.blocks.23.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 540 |
+
"visual.blocks.23.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 541 |
+
"visual.blocks.23.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 542 |
+
"visual.blocks.23.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 543 |
+
"visual.blocks.23.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 544 |
+
"visual.blocks.23.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 545 |
+
"visual.blocks.23.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 546 |
+
"visual.blocks.23.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 547 |
+
"visual.blocks.23.norm1.weight": "model-00001-of-00004.safetensors",
|
| 548 |
+
"visual.blocks.23.norm2.weight": "model-00001-of-00004.safetensors",
|
| 549 |
+
"visual.blocks.24.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 550 |
+
"visual.blocks.24.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 551 |
+
"visual.blocks.24.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 552 |
+
"visual.blocks.24.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 553 |
+
"visual.blocks.24.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 554 |
+
"visual.blocks.24.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 555 |
+
"visual.blocks.24.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 556 |
+
"visual.blocks.24.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 557 |
+
"visual.blocks.24.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 558 |
+
"visual.blocks.24.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 559 |
+
"visual.blocks.24.norm1.weight": "model-00001-of-00004.safetensors",
|
| 560 |
+
"visual.blocks.24.norm2.weight": "model-00001-of-00004.safetensors",
|
| 561 |
+
"visual.blocks.25.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 562 |
+
"visual.blocks.25.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 563 |
+
"visual.blocks.25.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 564 |
+
"visual.blocks.25.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 565 |
+
"visual.blocks.25.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 566 |
+
"visual.blocks.25.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 567 |
+
"visual.blocks.25.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 568 |
+
"visual.blocks.25.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 569 |
+
"visual.blocks.25.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 570 |
+
"visual.blocks.25.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 571 |
+
"visual.blocks.25.norm1.weight": "model-00001-of-00004.safetensors",
|
| 572 |
+
"visual.blocks.25.norm2.weight": "model-00001-of-00004.safetensors",
|
| 573 |
+
"visual.blocks.26.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 574 |
+
"visual.blocks.26.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 575 |
+
"visual.blocks.26.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 576 |
+
"visual.blocks.26.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 577 |
+
"visual.blocks.26.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 578 |
+
"visual.blocks.26.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 579 |
+
"visual.blocks.26.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 580 |
+
"visual.blocks.26.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 581 |
+
"visual.blocks.26.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 582 |
+
"visual.blocks.26.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 583 |
+
"visual.blocks.26.norm1.weight": "model-00001-of-00004.safetensors",
|
| 584 |
+
"visual.blocks.26.norm2.weight": "model-00001-of-00004.safetensors",
|
| 585 |
+
"visual.blocks.27.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 586 |
+
"visual.blocks.27.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 587 |
+
"visual.blocks.27.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 588 |
+
"visual.blocks.27.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 589 |
+
"visual.blocks.27.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 590 |
+
"visual.blocks.27.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 591 |
+
"visual.blocks.27.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 592 |
+
"visual.blocks.27.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 593 |
+
"visual.blocks.27.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 594 |
+
"visual.blocks.27.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 595 |
+
"visual.blocks.27.norm1.weight": "model-00001-of-00004.safetensors",
|
| 596 |
+
"visual.blocks.27.norm2.weight": "model-00001-of-00004.safetensors",
|
| 597 |
+
"visual.blocks.28.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 598 |
+
"visual.blocks.28.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 599 |
+
"visual.blocks.28.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 600 |
+
"visual.blocks.28.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 601 |
+
"visual.blocks.28.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 602 |
+
"visual.blocks.28.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 603 |
+
"visual.blocks.28.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 604 |
+
"visual.blocks.28.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 605 |
+
"visual.blocks.28.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 606 |
+
"visual.blocks.28.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 607 |
+
"visual.blocks.28.norm1.weight": "model-00001-of-00004.safetensors",
|
| 608 |
+
"visual.blocks.28.norm2.weight": "model-00001-of-00004.safetensors",
|
| 609 |
+
"visual.blocks.29.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 610 |
+
"visual.blocks.29.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 611 |
+
"visual.blocks.29.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 612 |
+
"visual.blocks.29.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 613 |
+
"visual.blocks.29.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 614 |
+
"visual.blocks.29.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 615 |
+
"visual.blocks.29.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 616 |
+
"visual.blocks.29.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 617 |
+
"visual.blocks.29.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 618 |
+
"visual.blocks.29.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 619 |
+
"visual.blocks.29.norm1.weight": "model-00001-of-00004.safetensors",
|
| 620 |
+
"visual.blocks.29.norm2.weight": "model-00001-of-00004.safetensors",
|
| 621 |
+
"visual.blocks.3.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 622 |
+
"visual.blocks.3.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 623 |
+
"visual.blocks.3.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 624 |
+
"visual.blocks.3.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 625 |
+
"visual.blocks.3.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 626 |
+
"visual.blocks.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 627 |
+
"visual.blocks.3.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 628 |
+
"visual.blocks.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 629 |
+
"visual.blocks.3.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 630 |
+
"visual.blocks.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 631 |
+
"visual.blocks.3.norm1.weight": "model-00001-of-00004.safetensors",
|
| 632 |
+
"visual.blocks.3.norm2.weight": "model-00001-of-00004.safetensors",
|
| 633 |
+
"visual.blocks.30.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 634 |
+
"visual.blocks.30.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 635 |
+
"visual.blocks.30.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 636 |
+
"visual.blocks.30.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 637 |
+
"visual.blocks.30.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 638 |
+
"visual.blocks.30.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 639 |
+
"visual.blocks.30.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 640 |
+
"visual.blocks.30.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 641 |
+
"visual.blocks.30.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 642 |
+
"visual.blocks.30.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 643 |
+
"visual.blocks.30.norm1.weight": "model-00001-of-00004.safetensors",
|
| 644 |
+
"visual.blocks.30.norm2.weight": "model-00001-of-00004.safetensors",
|
| 645 |
+
"visual.blocks.31.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 646 |
+
"visual.blocks.31.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 647 |
+
"visual.blocks.31.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 648 |
+
"visual.blocks.31.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 649 |
+
"visual.blocks.31.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 650 |
+
"visual.blocks.31.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 651 |
+
"visual.blocks.31.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 652 |
+
"visual.blocks.31.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 653 |
+
"visual.blocks.31.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 654 |
+
"visual.blocks.31.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 655 |
+
"visual.blocks.31.norm1.weight": "model-00001-of-00004.safetensors",
|
| 656 |
+
"visual.blocks.31.norm2.weight": "model-00001-of-00004.safetensors",
|
| 657 |
+
"visual.blocks.4.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 658 |
+
"visual.blocks.4.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 659 |
+
"visual.blocks.4.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 660 |
+
"visual.blocks.4.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 661 |
+
"visual.blocks.4.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 662 |
+
"visual.blocks.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 663 |
+
"visual.blocks.4.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 664 |
+
"visual.blocks.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 665 |
+
"visual.blocks.4.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 666 |
+
"visual.blocks.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 667 |
+
"visual.blocks.4.norm1.weight": "model-00001-of-00004.safetensors",
|
| 668 |
+
"visual.blocks.4.norm2.weight": "model-00001-of-00004.safetensors",
|
| 669 |
+
"visual.blocks.5.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 670 |
+
"visual.blocks.5.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 671 |
+
"visual.blocks.5.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 672 |
+
"visual.blocks.5.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 673 |
+
"visual.blocks.5.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 674 |
+
"visual.blocks.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 675 |
+
"visual.blocks.5.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 676 |
+
"visual.blocks.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 677 |
+
"visual.blocks.5.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 678 |
+
"visual.blocks.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 679 |
+
"visual.blocks.5.norm1.weight": "model-00001-of-00004.safetensors",
|
| 680 |
+
"visual.blocks.5.norm2.weight": "model-00001-of-00004.safetensors",
|
| 681 |
+
"visual.blocks.6.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 682 |
+
"visual.blocks.6.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 683 |
+
"visual.blocks.6.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 684 |
+
"visual.blocks.6.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 685 |
+
"visual.blocks.6.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 686 |
+
"visual.blocks.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 687 |
+
"visual.blocks.6.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 688 |
+
"visual.blocks.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 689 |
+
"visual.blocks.6.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 690 |
+
"visual.blocks.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 691 |
+
"visual.blocks.6.norm1.weight": "model-00001-of-00004.safetensors",
|
| 692 |
+
"visual.blocks.6.norm2.weight": "model-00001-of-00004.safetensors",
|
| 693 |
+
"visual.blocks.7.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 694 |
+
"visual.blocks.7.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 695 |
+
"visual.blocks.7.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 696 |
+
"visual.blocks.7.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 697 |
+
"visual.blocks.7.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 698 |
+
"visual.blocks.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 699 |
+
"visual.blocks.7.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 700 |
+
"visual.blocks.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 701 |
+
"visual.blocks.7.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 702 |
+
"visual.blocks.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 703 |
+
"visual.blocks.7.norm1.weight": "model-00001-of-00004.safetensors",
|
| 704 |
+
"visual.blocks.7.norm2.weight": "model-00001-of-00004.safetensors",
|
| 705 |
+
"visual.blocks.8.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 706 |
+
"visual.blocks.8.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 707 |
+
"visual.blocks.8.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 708 |
+
"visual.blocks.8.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 709 |
+
"visual.blocks.8.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 710 |
+
"visual.blocks.8.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 711 |
+
"visual.blocks.8.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 712 |
+
"visual.blocks.8.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 713 |
+
"visual.blocks.8.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 714 |
+
"visual.blocks.8.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 715 |
+
"visual.blocks.8.norm1.weight": "model-00001-of-00004.safetensors",
|
| 716 |
+
"visual.blocks.8.norm2.weight": "model-00001-of-00004.safetensors",
|
| 717 |
+
"visual.blocks.9.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 718 |
+
"visual.blocks.9.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 719 |
+
"visual.blocks.9.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 720 |
+
"visual.blocks.9.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 721 |
+
"visual.blocks.9.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 722 |
+
"visual.blocks.9.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 723 |
+
"visual.blocks.9.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 724 |
+
"visual.blocks.9.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 725 |
+
"visual.blocks.9.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 726 |
+
"visual.blocks.9.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 727 |
+
"visual.blocks.9.norm1.weight": "model-00001-of-00004.safetensors",
|
| 728 |
+
"visual.blocks.9.norm2.weight": "model-00001-of-00004.safetensors",
|
| 729 |
+
"visual.merger.ln_q.weight": "model-00001-of-00004.safetensors",
|
| 730 |
+
"visual.merger.mlp.0.bias": "model-00001-of-00004.safetensors",
|
| 731 |
+
"visual.merger.mlp.0.weight": "model-00001-of-00004.safetensors",
|
| 732 |
+
"visual.merger.mlp.2.bias": "model-00001-of-00004.safetensors",
|
| 733 |
+
"visual.merger.mlp.2.weight": "model-00001-of-00004.safetensors",
|
| 734 |
+
"visual.patch_embed.proj.weight": "model-00001-of-00004.safetensors"
|
| 735 |
+
}
|
| 736 |
+
}
|
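The weight_map above is the standard safetensors sharding index: every tensor name points at the shard file that stores it, so a loader only needs to open the one shard containing a given tensor. A minimal sketch of that lookup, assuming the checkpoint directory from this commit is available locally (the path variable is a placeholder):

import json
import os

from safetensors import safe_open

# Hypothetical local path to the uploaded checkpoint directory.
ckpt_dir = "qwen2.5vl-7b-lora_epoch10_2e-5"

# The index maps each tensor name to the shard that contains it.
with open(os.path.join(ckpt_dir, "model.safetensors.index.json")) as f:
    index = json.load(f)

name = "visual.merger.ln_q.weight"       # per the index: model-00001-of-00004.safetensors
shard = index["weight_map"][name]

# Open only the shard that holds the tensor and read it lazily.
with safe_open(os.path.join(ckpt_dir, shard), framework="pt") as f:
    tensor = f.get_tensor(name)

print(name, tuple(tensor.shape))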
qwen2.5vl-7b-lora_epoch10_2e-5/preprocessor_config.json
ADDED
@@ -0,0 +1,19 @@
+{
+  "min_pixels": 3136,
+  "max_pixels": 12845056,
+  "patch_size": 14,
+  "temporal_patch_size": 2,
+  "merge_size": 2,
+  "image_mean": [
+    0.48145466,
+    0.4578275,
+    0.40821073
+  ],
+  "image_std": [
+    0.26862954,
+    0.26130258,
+    0.27577711
+  ],
+  "image_processor_type": "Qwen2VLImageProcessor",
+  "processor_class": "Qwen2_5_VLProcessor"
+}
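These fields fix the vision token budget: images are resized so both sides are multiples of patch_size * merge_size = 28 and the pixel area lands in [min_pixels, max_pixels], and each 28x28 cell then becomes one merged vision token. A rough sketch of that arithmetic follows; it is a simplified re-derivation of the resize logic these fields imply, not the exact code shipped with the processor:

import math

# Values from preprocessor_config.json above.
PATCH = 14            # patch_size
MERGE = 2             # merge_size
MIN_PIXELS = 3136     # = 4 * 28 * 28
MAX_PIXELS = 12845056 # = 16384 * 28 * 28

def vision_tokens(height: int, width: int) -> int:
    """Approximate merged-token count after rounding to the 28-pixel grid
    and clamping the area into [MIN_PIXELS, MAX_PIXELS]."""
    unit = PATCH * MERGE
    h = max(unit, round(height / unit) * unit)
    w = max(unit, round(width / unit) * unit)
    if h * w > MAX_PIXELS:            # scale large images down to the budget
        s = math.sqrt(MAX_PIXELS / (h * w))
        h = math.floor(h * s / unit) * unit
        w = math.floor(w * s / unit) * unit
    elif h * w < MIN_PIXELS:          # scale tiny images up to the floor
        s = math.sqrt(MIN_PIXELS / (h * w))
        h = math.ceil(h * s / unit) * unit
        w = math.ceil(w * s / unit) * unit
    return (h // PATCH) * (w // PATCH) // (MERGE * MERGE)

print(vision_tokens(1080, 1920))  # a 1080p frame stays well under the cap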
qwen2.5vl-7b-lora_epoch10_2e-5/special_tokens_map.json
ADDED
@@ -0,0 +1,31 @@
+{
+  "additional_special_tokens": [
+    "<|im_start|>",
+    "<|im_end|>",
+    "<|object_ref_start|>",
+    "<|object_ref_end|>",
+    "<|box_start|>",
+    "<|box_end|>",
+    "<|quad_start|>",
+    "<|quad_end|>",
+    "<|vision_start|>",
+    "<|vision_end|>",
+    "<|vision_pad|>",
+    "<|image_pad|>",
+    "<|video_pad|>"
+  ],
+  "eos_token": {
+    "content": "<|im_end|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
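This map pins <|im_end|> as the EOS token and <|endoftext|> as padding, which matters both for generation stopping and for masking padded positions in batched training. A quick sanity check after loading, assuming transformers is installed and the directory above is available locally:

from transformers import AutoTokenizer

# Hypothetical local path to the uploaded tokenizer files.
tok = AutoTokenizer.from_pretrained("qwen2.5vl-7b-lora_epoch10_2e-5")

assert tok.eos_token == "<|im_end|>"
assert tok.pad_token == "<|endoftext|>"
# The IDs should match added_tokens.json elsewhere in this commit.
print(tok.eos_token_id, tok.pad_token_id)  # expected: 151645 151643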
qwen2.5vl-7b-lora_epoch10_2e-5/tokenizer_config.json
ADDED
@@ -0,0 +1,209 @@
+{
+  "add_bos_token": false,
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "151643": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151644": {
+      "content": "<|im_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151645": {
+      "content": "<|im_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151646": {
+      "content": "<|object_ref_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151647": {
+      "content": "<|object_ref_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151648": {
+      "content": "<|box_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151649": {
+      "content": "<|box_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151650": {
+      "content": "<|quad_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151651": {
+      "content": "<|quad_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151652": {
+      "content": "<|vision_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151653": {
+      "content": "<|vision_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151654": {
+      "content": "<|vision_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151655": {
+      "content": "<|image_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151656": {
+      "content": "<|video_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151657": {
+      "content": "<tool_call>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151658": {
+      "content": "</tool_call>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151659": {
+      "content": "<|fim_prefix|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151660": {
+      "content": "<|fim_middle|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151661": {
+      "content": "<|fim_suffix|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151662": {
+      "content": "<|fim_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151663": {
+      "content": "<|repo_name|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151664": {
+      "content": "<|file_sep|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    }
+  },
+  "additional_special_tokens": [
+    "<|im_start|>",
+    "<|im_end|>",
+    "<|object_ref_start|>",
+    "<|object_ref_end|>",
+    "<|box_start|>",
+    "<|box_end|>",
+    "<|quad_start|>",
+    "<|quad_end|>",
+    "<|vision_start|>",
+    "<|vision_end|>",
+    "<|vision_pad|>",
+    "<|image_pad|>",
+    "<|video_pad|>"
+  ],
+  "bos_token": null,
+  "chat_template": "{%- if tools %}\n    {{- '<|im_start|>system\\n' }}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- messages[0]['content'] }}\n    {%- else %}\n        {{- 'You are a helpful assistant.' }}\n    {%- endif %}\n    {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n    {%- for tool in tools %}\n        {{- \"\\n\" }}\n        {{- tool | tojson }}\n    {%- endfor %}\n    {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n    {%- else %}\n        {{- '<|im_start|>system\\nYou are a helpful assistant.<|im_end|>\\n' }}\n    {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n    {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n        {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n    {%- elif message.role == \"assistant\" %}\n        {{- '<|im_start|>' + message.role }}\n        {%- if message.content %}\n            {{- '\\n' + message.content }}\n        {%- endif %}\n        {%- for tool_call in message.tool_calls %}\n            {%- if tool_call.function is defined %}\n                {%- set tool_call = tool_call.function %}\n            {%- endif %}\n            {{- '\\n<tool_call>\\n{\"name\": \"' }}\n            {{- tool_call.name }}\n            {{- '\", \"arguments\": ' }}\n            {{- tool_call.arguments | tojson }}\n            {{- '}\\n</tool_call>' }}\n        {%- endfor %}\n        {{- '<|im_end|>\\n' }}\n    {%- elif message.role == \"tool\" %}\n        {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n            {{- '<|im_start|>user' }}\n        {%- endif %}\n        {{- '\\n<tool_response>\\n' }}\n        {{- message.content }}\n        {{- '\\n</tool_response>' }}\n        {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n            {{- '<|im_end|>\\n' }}\n        {%- endif %}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|im_end|>",
+  "errors": "replace",
+  "extra_special_tokens": {},
+  "model_max_length": 131072,
+  "pad_token": "<|endoftext|>",
+  "processor_class": "Qwen2_5_VLProcessor",
+  "split_special_tokens": false,
+  "tokenizer_class": "Qwen2Tokenizer",
+  "unk_token": null
+}
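The long chat_template value is a Jinja template that renders a message list into the ChatML-style "<|im_start|>role ... <|im_end|>" format, with optional tool-call handling. A minimal sketch of rendering it through the tokenizer (the path is a placeholder for the uploaded directory):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("qwen2.5vl-7b-lora_epoch10_2e-5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Describe the image."},
]

# add_generation_prompt=True appends the trailing "<|im_start|>assistant\n"
# so the model continues from the assistant turn.
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)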
qwen2.5vl-7b-lora_epoch10_2e-5/vocab.json
ADDED
The diff for this file is too large to render.
See raw diff
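vocab.json, together with the merges.txt alongside it, defines the byte-level BPE vocabulary that the added tokens above extend. One way to inspect the raw file without going through the tokenizer class (file name as uploaded; the size is printed rather than assumed):

import json

with open("qwen2.5vl-7b-lora_epoch10_2e-5/vocab.json") as f:
    vocab = json.load(f)  # token string -> id for the base BPE vocabulary

print(len(vocab))               # base vocabulary size, before added tokens
print(vocab.get("hello"))       # id of an ordinary BPE token, if present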
selective_loss/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/args.json
ADDED
@@ -0,0 +1,375 @@
+{
+  "output_dir": "/mnt/data/users/liamding/data/MMMT/lora/selective_mask_plugin/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148",
+  "overwrite_output_dir": false,
+  "do_train": false,
+  "do_eval": false,
+  "do_predict": false,
+  "eval_strategy": "epoch",
+  "prediction_loss_only": false,
+  "per_device_train_batch_size": 2,
+  "per_device_eval_batch_size": 2,
+  "per_gpu_train_batch_size": null,
+  "per_gpu_eval_batch_size": null,
+  "gradient_accumulation_steps": 2,
+  "eval_accumulation_steps": null,
+  "eval_delay": 0,
+  "torch_empty_cache_steps": null,
+  "learning_rate": 2e-06,
+  "weight_decay": 0.0001,
+  "adam_beta1": 0.9,
+  "adam_beta2": 0.95,
+  "adam_epsilon": 1e-08,
+  "max_grad_norm": 1.0,
+  "num_train_epochs": 5.0,
+  "max_steps": -1,
+  "lr_scheduler_type": "cosine",
+  "lr_scheduler_kwargs": null,
+  "warmup_ratio": 0.1,
+  "warmup_steps": 0,
+  "log_level": "passive",
+  "log_level_replica": "warning",
+  "log_on_each_node": true,
+  "logging_dir": "/mnt/data/users/liamding/data/MMMT/lora/selective_mask_plugin/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/runs",
+  "logging_strategy": "steps",
+  "logging_first_step": true,
+  "logging_steps": 5,
+  "logging_nan_inf_filter": true,
+  "save_strategy": "epoch",
+  "save_steps": 500,
+  "save_total_limit": 5,
+  "save_safetensors": true,
+  "save_on_each_node": false,
+  "save_only_model": false,
+  "restore_callback_states_from_checkpoint": false,
+  "no_cuda": false,
+  "use_cpu": false,
+  "use_mps_device": false,
+  "seed": 42,
+  "data_seed": 42,
+  "jit_mode_eval": false,
+  "use_ipex": false,
+  "bf16": true,
+  "fp16": false,
+  "fp16_opt_level": "O1",
+  "half_precision_backend": "auto",
+  "bf16_full_eval": false,
+  "fp16_full_eval": false,
+  "tf32": null,
+  "local_rank": 0,
+  "ddp_backend": null,
+  "tpu_num_cores": null,
+  "tpu_metrics_debug": false,
+  "debug": null,
+  "dataloader_drop_last": false,
+  "eval_steps": null,
+  "dataloader_num_workers": 4,
+  "dataloader_prefetch_factor": null,
+  "past_index": -1,
+  "run_name": "/mnt/data/users/liamding/data/MMMT/lora/selective_mask_plugin/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148",
+  "disable_tqdm": null,
+  "remove_unused_columns": true,
+  "label_names": null,
+  "load_best_model_at_end": false,
+  "metric_for_best_model": "loss",
+  "greater_is_better": false,
+  "ignore_data_skip": false,
+  "fsdp": "",
+  "fsdp_min_num_params": 0,
+  "fsdp_config": null,
+  "tp_size": 0,
+  "fsdp_transformer_layer_cls_to_wrap": null,
+  "accelerator_config": {
+    "dispatch_batches": false
+  },
+  "deepspeed": {
+    "fp16": {
+      "enabled": "auto",
+      "loss_scale": 0,
+      "loss_scale_window": 1000,
+      "initial_scale_power": 16,
+      "hysteresis": 2,
+      "min_loss_scale": 1
+    },
+    "bf16": {
+      "enabled": "auto"
+    },
+    "zero_optimization": {
+      "stage": 3,
+      "offload_optimizer": {
+        "device": "none",
+        "pin_memory": true
+      },
+      "offload_param": {
+        "device": "none",
+        "pin_memory": true
+      },
+      "overlap_comm": false,
+      "contiguous_gradients": true,
+      "sub_group_size": 1000000000.0,
+      "reduce_bucket_size": "auto",
+      "zero_quantized_weights": false,
+      "zero_quantized_gradients": false,
+      "stage3_prefetch_bucket_size": "auto",
+      "stage3_param_persistence_threshold": "auto",
+      "stage3_max_live_parameters": 1000000000.0,
+      "stage3_max_reuse_distance": 1000000000.0,
+      "stage3_gather_16bit_weights_on_model_save": true
+    },
+    "gradient_accumulation_steps": "auto",
+    "gradient_clipping": "auto",
+    "steps_per_print": 2000,
+    "train_batch_size": "auto",
+    "train_micro_batch_size_per_gpu": "auto",
+    "wall_clock_breakdown": false
+  },
+  "label_smoothing_factor": 0.0,
+  "optim": "adamw_torch",
+  "optim_args": null,
+  "adafactor": false,
+  "group_by_length": false,
+  "length_column_name": "length",
+  "report_to": [
+    "swanlab"
+  ],
+  "ddp_find_unused_parameters": null,
+  "ddp_bucket_cap_mb": null,
+  "ddp_broadcast_buffers": null,
+  "dataloader_pin_memory": true,
+  "dataloader_persistent_workers": false,
+  "skip_memory_metrics": true,
+  "use_legacy_prediction_loop": false,
+  "push_to_hub": false,
+  "resume_from_checkpoint": null,
+  "hub_model_id": null,
+  "hub_strategy": "every_save",
+  "hub_token": null,
+  "hub_private_repo": null,
+  "hub_always_push": false,
+  "gradient_checkpointing": true,
+  "gradient_checkpointing_kwargs": null,
+  "include_inputs_for_metrics": false,
+  "include_for_metrics": [],
+  "eval_do_concat_batches": true,
+  "fp16_backend": "auto",
+  "push_to_hub_model_id": null,
+  "push_to_hub_organization": null,
+  "push_to_hub_token": null,
+  "mp_parameters": "",
+  "auto_find_batch_size": false,
+  "full_determinism": false,
+  "torchdynamo": null,
+  "ray_scope": "last",
+  "ddp_timeout": 18000000,
+  "torch_compile": false,
+  "torch_compile_backend": null,
+  "torch_compile_mode": null,
+  "include_tokens_per_second": false,
+  "include_num_input_tokens_seen": false,
+  "neftune_noise_alpha": null,
+  "optim_target_modules": null,
+  "batch_eval_metrics": false,
+  "eval_on_start": false,
+  "use_liger_kernel": false,
+  "eval_use_gather_object": false,
+  "average_tokens_across_devices": false,
+  "sortish_sampler": false,
+  "predict_with_generate": false,
+  "generation_max_length": null,
+  "generation_num_beams": null,
+  "generation_config": null,
+  "vit_gradient_checkpointing": null,
+  "check_model": true,
+  "acc_strategy": "token",
+  "train_dataloader_shuffle": true,
+  "max_epochs": null,
+  "aligner_lr": null,
+  "vit_lr": null,
+  "optimizer": null,
+  "use_logits_to_keep": null,
+  "channels": null,
+  "metric_warmup_step": 0,
+  "fsdp_num": 1,
+  "acc_steps": 1,
+  "eval_use_evalscope": false,
+  "eval_datasets": [],
+  "eval_limit": null,
+  "eval_datasets_args": null,
+  "eval_generation_config": null,
+  "model": "/mnt/data/users/liamding/data/models/Qwen2.5-VL-7B-Instruct",
+  "model_type": "qwen2_5_vl",
+  "model_revision": null,
+  "task_type": "causal_lm",
+  "torch_dtype": "bfloat16",
+  "attn_impl": null,
+  "num_labels": null,
+  "problem_type": null,
+  "rope_scaling": null,
+  "device_map": null,
+  "max_memory": {},
+  "local_repo_path": null,
+  "init_strategy": null,
+  "template": "qwen2_5_vl",
+  "system": null,
+  "max_length": 32768,
+  "truncation_strategy": "delete",
+  "max_pixels": null,
+  "agent_template": null,
+  "norm_bbox": null,
+  "use_chat_template": true,
+  "padding_free": false,
+  "padding_side": "right",
+  "loss_scale": "default",
+  "sequence_parallel_size": 1,
+  "response_prefix": null,
+  "template_backend": "swift",
+  "dataset": [
+    "/mnt/data/users/liamding/data/3AM_Plus/final/training/thinking_v2/ambi_normal_train_thinking_772.json",
+    "/mnt/data/users/liamding/data/3AM_Plus/final/training/thinking_v2/mma_train_thinking_126.json",
+    "/mnt/data/users/liamding/data/3AM_Plus/final/training/thinking_v2/sp_train_thinking_102.json"
+  ],
+  "val_dataset": [],
+  "split_dataset_ratio": 0.1,
+  "dataset_num_proc": 1,
+  "load_from_cache_file": true,
+  "dataset_shuffle": true,
+  "val_dataset_shuffle": false,
+  "streaming": false,
+  "interleave_prob": null,
+  "stopping_strategy": "first_exhausted",
+  "shuffle_buffer_size": 1000,
+  "download_mode": "reuse_dataset_if_exists",
+  "columns": {},
+  "strict": false,
+  "model_name": null,
+  "model_author": null,
+  "custom_dataset_info": [],
+  "quant_method": null,
+  "quant_bits": null,
+  "hqq_axis": null,
+  "bnb_4bit_compute_dtype": "bfloat16",
+  "bnb_4bit_quant_type": "nf4",
+  "bnb_4bit_use_double_quant": true,
+  "bnb_4bit_quant_storage": null,
+  "max_new_tokens": 64,
+  "temperature": 0.0,
+  "top_k": null,
+  "top_p": null,
+  "repetition_penalty": null,
+  "num_beams": 1,
+  "stream": false,
+  "stop_words": [],
+  "logprobs": false,
+  "top_logprobs": null,
+  "ckpt_dir": null,
+  "lora_modules": [],
+  "tuner_backend": "peft",
+  "train_type": "full",
+  "adapters": [],
+  "external_plugins": [],
+  "model_kwargs": {},
+  "load_args": false,
+  "load_data_args": false,
+  "packing": false,
+  "packing_cache": null,
+  "custom_register_path": [],
+  "use_hf": false,
+  "ignore_args_error": false,
+  "use_swift_lora": false,
+  "freeze_parameters": [
+    "visual",
+    "visual.merger"
+  ],
+  "freeze_parameters_regex": null,
+  "freeze_parameters_ratio": 0.0,
+  "trainable_parameters": [],
+  "trainable_parameters_regex": null,
+  "freeze_llm": false,
+  "freeze_vit": true,
+  "freeze_aligner": true,
+  "target_modules": [
+    "all-linear"
+  ],
+  "target_regex": null,
+  "modules_to_save": [],
+  "lora_rank": 8,
+  "lora_alpha": 32,
+  "lora_dropout": 0.05,
+  "lora_bias": "none",
+  "lora_dtype": null,
+  "lorap_lr_ratio": null,
+  "use_rslora": false,
+  "use_dora": false,
+  "lora_ga_batch_size": 2,
+  "lora_ga_iters": 2,
+  "lora_ga_max_length": 1024,
+  "lora_ga_direction": "ArB2r",
+  "lora_ga_scale": "stable",
+  "lora_ga_stable_gamma": 16,
+  "init_weights": true,
+  "fourier_n_frequency": 2000,
+  "fourier_scaling": 300.0,
+  "boft_block_size": 4,
+  "boft_block_num": 0,
+  "boft_n_butterfly_factor": 1,
+  "boft_dropout": 0.0,
+  "vera_rank": 256,
+  "vera_projection_prng_key": 0,
+  "vera_dropout": 0.0,
+  "vera_d_initial": 0.1,
+  "adapter_act": "gelu",
+  "adapter_length": 128,
+  "use_galore": false,
+  "galore_target_modules": null,
+  "galore_rank": 128,
+  "galore_update_proj_gap": 50,
+  "galore_scale": 1.0,
+  "galore_proj_type": "std",
+  "galore_optim_per_parameter": false,
+  "galore_with_embedding": false,
+  "galore_quantization": false,
+  "galore_proj_quant": false,
+  "galore_proj_bits": 4,
+  "galore_proj_group_size": 256,
+  "galore_cos_threshold": 0.4,
+  "galore_gamma_proj": 2,
+  "galore_queue_size": 5,
+  "adalora_target_r": 8,
+  "adalora_init_r": 12,
+  "adalora_tinit": 0,
+  "adalora_tfinal": 0,
+  "adalora_deltaT": 1,
+  "adalora_beta1": 0.85,
+  "adalora_beta2": 0.85,
+  "adalora_orth_reg_weight": 0.5,
+  "llamapro_num_new_blocks": 4,
+  "llamapro_num_groups": null,
+  "lisa_activated_layers": 0,
+  "lisa_step_interval": 20,
+  "reft_layer_key": null,
+  "reft_layers": null,
+  "reft_rank": 4,
+  "reft_intervention_type": "LoreftIntervention",
+  "reft_args": null,
+  "swanlab_token": null,
+  "swanlab_project": null,
+  "swanlab_workspace": null,
+  "swanlab_exp_name": "/mnt/data/users/liamding/data/MMMT/lora/selective_mask_plugin/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148",
+  "swanlab_mode": "cloud",
+  "add_version": true,
+  "resume_only_model": false,
+  "create_checkpoint_symlink": false,
+  "lazy_tokenize": true,
+  "loss_type": "selective_translate_weighted_ratio",
+  "metric": null,
+  "zero_hpz_partition_size": null,
+  "rank": 0,
+  "global_world_size": 4,
+  "local_world_size": 4,
+  "model_suffix": "Qwen2.5-VL-7B-Instruct",
+  "model_info": "ModelInfo(model_type='qwen2_5_vl', model_dir='/mnt/data/users/liamding/data/models/Qwen2.5-VL-7B-Instruct', torch_dtype=torch.bfloat16, max_model_len=128000, quant_method=None, quant_bits=None, rope_scaling={'type': 'default', 'mrope_section': [16, 24, 24], 'rope_type': 'default'}, config=None, task_type='causal_lm', num_labels=None)",
+  "model_meta": "ModelMeta(model_type='qwen2_5_vl', model_groups=[ModelGroup(models=[Model(ms_model_id='Qwen/Qwen2.5-VL-3B-Instruct', hf_model_id='Qwen/Qwen2.5-VL-3B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-7B-Instruct', hf_model_id='Qwen/Qwen2.5-VL-7B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-32B-Instruct', hf_model_id='Qwen/Qwen2.5-VL-32B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-72B-Instruct', hf_model_id='Qwen/Qwen2.5-VL-72B-Instruct', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[]), ModelGroup(models=[Model(ms_model_id='Qwen/Qwen2.5-VL-3B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-VL-3B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-7B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-VL-7B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-32B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-VL-32B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-72B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-VL-72B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[])], template='qwen2_5_vl', get_function=<function get_model_tokenizer_qwen2_5_vl at 0x7f11c7e81360>, model_arch='qwen2_vl', architectures=['Qwen2_5_VLForConditionalGeneration'], additional_saved_files=[], torch_dtype=None, is_multimodal=True, is_reward=False, task_type=None, ignore_patterns=None, requires=['transformers>=4.49', 'qwen_vl_utils>=0.0.6', 'decord'], tags=[])",
+  "model_dir": "/mnt/data/users/liamding/data/models/Qwen2.5-VL-7B-Instruct",
+  "hub": "<class 'swift.hub.hub.MSHub'>",
+  "evaluation_strategy": "epoch",
+  "training_args": "Seq2SeqTrainingArguments(output_dir='/mnt/data/users/liamding/data/MMMT/lora/selective_mask_plugin/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148', overwrite_output_dir=False, do_train=False, do_eval=True, do_predict=False, eval_strategy=<IntervalStrategy.EPOCH: 'epoch'>, prediction_loss_only=False, per_device_train_batch_size=2, per_device_eval_batch_size=2, per_gpu_train_batch_size=None, per_gpu_eval_batch_size=None, gradient_accumulation_steps=2, eval_accumulation_steps=None, eval_delay=0, torch_empty_cache_steps=None, learning_rate=2e-06, weight_decay=0.0001, adam_beta1=0.9, adam_beta2=0.95, adam_epsilon=1e-08, max_grad_norm=1.0, num_train_epochs=5.0, max_steps=-1, lr_scheduler_type=<SchedulerType.COSINE: 'cosine'>, lr_scheduler_kwargs=None, warmup_ratio=0.1, warmup_steps=0, log_level='passive', log_level_replica='warning', log_on_each_node=True, logging_dir='/mnt/data/users/liamding/data/MMMT/lora/selective_mask_plugin/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/runs', logging_strategy=<IntervalStrategy.STEPS: 'steps'>, logging_first_step=True, logging_steps=5, logging_nan_inf_filter=True, save_strategy=<SaveStrategy.EPOCH: 'epoch'>, save_steps=500, save_total_limit=5, save_safetensors=True, save_on_each_node=False, save_only_model=False, restore_callback_states_from_checkpoint=False, no_cuda=False, use_cpu=False, use_mps_device=False, seed=42, data_seed=42, jit_mode_eval=False, use_ipex=False, bf16=True, fp16=False, fp16_opt_level='O1', half_precision_backend='auto', bf16_full_eval=False, fp16_full_eval=False, tf32=None, local_rank=0, ddp_backend=None, tpu_num_cores=None, tpu_metrics_debug=False, debug=[], dataloader_drop_last=False, eval_steps=None, dataloader_num_workers=4, dataloader_prefetch_factor=10, past_index=-1, run_name='/mnt/data/users/liamding/data/MMMT/lora/selective_mask_plugin/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148', disable_tqdm=False, remove_unused_columns=False, label_names=None, load_best_model_at_end=False, metric_for_best_model='loss', greater_is_better=False, ignore_data_skip=False, fsdp=[], fsdp_min_num_params=0, fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, tp_size=0, fsdp_transformer_layer_cls_to_wrap=None, accelerator_config=AcceleratorConfig(split_batches=False, dispatch_batches=False, even_batches=True, use_seedable_sampler=True, non_blocking=False, gradient_accumulation_kwargs=None, use_configured_state=False), deepspeed={'fp16': {'enabled': 'auto', 'loss_scale': 0, 'loss_scale_window': 1000, 'initial_scale_power': 16, 'hysteresis': 2, 'min_loss_scale': 1}, 'bf16': {'enabled': 'auto'}, 'zero_optimization': {'stage': 3, 'offload_optimizer': {'device': 'none', 'pin_memory': True}, 'offload_param': {'device': 'none', 'pin_memory': True}, 'overlap_comm': False, 'contiguous_gradients': True, 'sub_group_size': 1000000000.0, 'reduce_bucket_size': 'auto', 'zero_quantized_weights': False, 'zero_quantized_gradients': False, 'stage3_prefetch_bucket_size': 'auto', 'stage3_param_persistence_threshold': 'auto', 'stage3_max_live_parameters': 1000000000.0, 'stage3_max_reuse_distance': 1000000000.0, 'stage3_gather_16bit_weights_on_model_save': True}, 'gradient_accumulation_steps': 'auto', 'gradient_clipping': 'auto', 'steps_per_print': 2000, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'wall_clock_breakdown': False}, label_smoothing_factor=0.0, optim=<OptimizerNames.ADAMW_TORCH: 'adamw_torch'>, optim_args=None, adafactor=False, group_by_length=False, length_column_name='length', report_to=['swanlab'], ddp_find_unused_parameters=None, ddp_bucket_cap_mb=None, ddp_broadcast_buffers=None, dataloader_pin_memory=True, dataloader_persistent_workers=False, skip_memory_metrics=True, use_legacy_prediction_loop=False, push_to_hub=False, resume_from_checkpoint=None, hub_model_id=None, hub_strategy=<HubStrategy.EVERY_SAVE: 'every_save'>, hub_token=None, hub_private_repo=None, hub_always_push=False, gradient_checkpointing=True, gradient_checkpointing_kwargs=None, include_inputs_for_metrics=False, include_for_metrics=[], eval_do_concat_batches=True, fp16_backend='auto', push_to_hub_model_id=None, push_to_hub_organization=None, push_to_hub_token=None, mp_parameters='', auto_find_batch_size=False, full_determinism=False, torchdynamo=None, ray_scope='last', ddp_timeout=18000000, torch_compile=False, torch_compile_backend=None, torch_compile_mode=None, include_tokens_per_second=None, include_num_input_tokens_seen=None, neftune_noise_alpha=None, optim_target_modules=None, batch_eval_metrics=False, eval_on_start=False, use_liger_kernel=False, eval_use_gather_object=False, average_tokens_across_devices=None, sortish_sampler=False, predict_with_generate=False, generation_max_length=None, generation_num_beams=None, generation_config=None, vit_gradient_checkpointing=True, check_model=True, acc_strategy='token', train_dataloader_shuffle=True, max_epochs=None, aligner_lr=None, vit_lr=None, optimizer=None, use_logits_to_keep=None, channels=None, metric_warmup_step=0, fsdp_num=1, acc_steps=1, eval_use_evalscope=False, eval_datasets=[], eval_limit=None, eval_datasets_args=None, eval_generation_config=None, train_type='full', local_repo_path=None, galore_config=None)"
+}
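A few of these knobs interact: with per_device_train_batch_size 2, gradient_accumulation_steps 2 and global_world_size 4, every optimizer step consumes 16 samples, and warmup_ratio 0.1 becomes a concrete step count only once the dataset size is known. A small sketch of that bookkeeping; the 1000-sample figure is an assumption read off the dataset file name suffixes (772 + 126 + 102), not a verified count:

import math

# Values from args.json above.
per_device_bs = 2
grad_accum = 2
world_size = 4      # global_world_size
epochs = 5.0
warmup_ratio = 0.1

# Assumption: the file name suffixes give the sample counts, and
# split_dataset_ratio=0.1 holds out 10% of them for validation.
train_samples = int((772 + 126 + 102) * 0.9)            # 900

effective_bs = per_device_bs * grad_accum * world_size  # 16 samples per optimizer step
steps_per_epoch = train_samples // effective_bs         # 56
total_steps = int(steps_per_epoch * epochs)             # 280 -- consistent with checkpoint-280
warmup_steps = math.ceil(total_steps * warmup_ratio)    # 28

print(effective_bs, steps_per_epoch, total_steps, warmup_steps)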
selective_loss/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/checkpoint-280/added_tokens.json
ADDED
@@ -0,0 +1,24 @@
+{
+  "</tool_call>": 151658,
+  "<tool_call>": 151657,
+  "<|box_end|>": 151649,
+  "<|box_start|>": 151648,
+  "<|endoftext|>": 151643,
+  "<|file_sep|>": 151664,
+  "<|fim_middle|>": 151660,
+  "<|fim_pad|>": 151662,
+  "<|fim_prefix|>": 151659,
+  "<|fim_suffix|>": 151661,
+  "<|im_end|>": 151645,
+  "<|im_start|>": 151644,
+  "<|image_pad|>": 151655,
+  "<|object_ref_end|>": 151647,
+  "<|object_ref_start|>": 151646,
+  "<|quad_end|>": 151651,
+  "<|quad_start|>": 151650,
+  "<|repo_name|>": 151663,
+  "<|video_pad|>": 151656,
+  "<|vision_end|>": 151653,
+  "<|vision_pad|>": 151654,
+  "<|vision_start|>": 151652
+}
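added_tokens.json records the IDs appended after the base BPE vocabulary (151643 onward), so the file can be cross-checked against a loaded tokenizer. A short sketch, assuming the checkpoint directory from this commit is available locally under the path shown:

import json

from transformers import AutoTokenizer

ckpt = "selective_loss/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/checkpoint-280"
tok = AutoTokenizer.from_pretrained(ckpt)

with open(f"{ckpt}/added_tokens.json") as f:
    added = json.load(f)  # token string -> id

# Every recorded ID should round-trip through the tokenizer.
for token, idx in added.items():
    assert tok.convert_tokens_to_ids(token) == idx
print(f"{len(added)} added tokens verified")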
selective_loss/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/checkpoint-280/args.json
ADDED
@@ -0,0 +1,375 @@
+{
+  "output_dir": "/mnt/data/users/liamding/data/MMMT/lora/selective_mask_plugin/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148",
+  "overwrite_output_dir": false,
+  "do_train": false,
+  "do_eval": false,
+  "do_predict": false,
+  "eval_strategy": "epoch",
+  "prediction_loss_only": false,
+  "per_device_train_batch_size": 2,
+  "per_device_eval_batch_size": 2,
+  "per_gpu_train_batch_size": null,
+  "per_gpu_eval_batch_size": null,
+  "gradient_accumulation_steps": 2,
+  "eval_accumulation_steps": null,
+  "eval_delay": 0,
+  "torch_empty_cache_steps": null,
+  "learning_rate": 2e-06,
+  "weight_decay": 0.0001,
+  "adam_beta1": 0.9,
+  "adam_beta2": 0.95,
+  "adam_epsilon": 1e-08,
+  "max_grad_norm": 1.0,
+  "num_train_epochs": 5.0,
+  "max_steps": -1,
+  "lr_scheduler_type": "cosine",
+  "lr_scheduler_kwargs": null,
+  "warmup_ratio": 0.1,
+  "warmup_steps": 0,
+  "log_level": "passive",
+  "log_level_replica": "warning",
+  "log_on_each_node": true,
+  "logging_dir": "/mnt/data/users/liamding/data/MMMT/lora/selective_mask_plugin/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/runs",
+  "logging_strategy": "steps",
+  "logging_first_step": true,
+  "logging_steps": 5,
+  "logging_nan_inf_filter": true,
+  "save_strategy": "epoch",
+  "save_steps": 500,
+  "save_total_limit": 5,
+  "save_safetensors": true,
+  "save_on_each_node": false,
+  "save_only_model": false,
+  "restore_callback_states_from_checkpoint": false,
+  "no_cuda": false,
+  "use_cpu": false,
+  "use_mps_device": false,
+  "seed": 42,
+  "data_seed": 42,
+  "jit_mode_eval": false,
+  "use_ipex": false,
+  "bf16": true,
+  "fp16": false,
+  "fp16_opt_level": "O1",
+  "half_precision_backend": "auto",
+  "bf16_full_eval": false,
+  "fp16_full_eval": false,
+  "tf32": null,
+  "local_rank": 0,
+  "ddp_backend": null,
+  "tpu_num_cores": null,
+  "tpu_metrics_debug": false,
+  "debug": null,
+  "dataloader_drop_last": false,
+  "eval_steps": null,
+  "dataloader_num_workers": 4,
+  "dataloader_prefetch_factor": null,
+  "past_index": -1,
+  "run_name": "/mnt/data/users/liamding/data/MMMT/lora/selective_mask_plugin/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148",
+  "disable_tqdm": null,
+  "remove_unused_columns": true,
+  "label_names": null,
+  "load_best_model_at_end": false,
+  "metric_for_best_model": "loss",
+  "greater_is_better": false,
+  "ignore_data_skip": false,
+  "fsdp": "",
+  "fsdp_min_num_params": 0,
+  "fsdp_config": null,
+  "tp_size": 0,
+  "fsdp_transformer_layer_cls_to_wrap": null,
+  "accelerator_config": {
+    "dispatch_batches": false
+  },
+  "deepspeed": {
+    "fp16": {
+      "enabled": "auto",
+      "loss_scale": 0,
+      "loss_scale_window": 1000,
+      "initial_scale_power": 16,
+      "hysteresis": 2,
+      "min_loss_scale": 1
+    },
+    "bf16": {
+      "enabled": "auto"
+    },
+    "zero_optimization": {
+      "stage": 3,
+      "offload_optimizer": {
+        "device": "none",
+        "pin_memory": true
+      },
+      "offload_param": {
+        "device": "none",
+        "pin_memory": true
+      },
+      "overlap_comm": false,
+      "contiguous_gradients": true,
+      "sub_group_size": 1000000000.0,
+      "reduce_bucket_size": "auto",
+      "zero_quantized_weights": false,
+      "zero_quantized_gradients": false,
+      "stage3_prefetch_bucket_size": "auto",
+      "stage3_param_persistence_threshold": "auto",
+      "stage3_max_live_parameters": 1000000000.0,
+      "stage3_max_reuse_distance": 1000000000.0,
+      "stage3_gather_16bit_weights_on_model_save": true
+    },
+    "gradient_accumulation_steps": "auto",
+    "gradient_clipping": "auto",
+    "steps_per_print": 2000,
+    "train_batch_size": "auto",
+    "train_micro_batch_size_per_gpu": "auto",
+    "wall_clock_breakdown": false
+  },
+  "label_smoothing_factor": 0.0,
+  "optim": "adamw_torch",
+  "optim_args": null,
+  "adafactor": false,
+  "group_by_length": false,
+  "length_column_name": "length",
+  "report_to": [
+    "swanlab"
+  ],
+  "ddp_find_unused_parameters": null,
+  "ddp_bucket_cap_mb": null,
+  "ddp_broadcast_buffers": null,
+  "dataloader_pin_memory": true,
+  "dataloader_persistent_workers": false,
+  "skip_memory_metrics": true,
+  "use_legacy_prediction_loop": false,
+  "push_to_hub": false,
+  "resume_from_checkpoint": null,
+  "hub_model_id": null,
+  "hub_strategy": "every_save",
+  "hub_token": null,
+  "hub_private_repo": null,
+  "hub_always_push": false,
+  "gradient_checkpointing": true,
+  "gradient_checkpointing_kwargs": null,
+  "include_inputs_for_metrics": false,
+  "include_for_metrics": [],
+  "eval_do_concat_batches": true,
+  "fp16_backend": "auto",
+  "push_to_hub_model_id": null,
+  "push_to_hub_organization": null,
+  "push_to_hub_token": null,
+  "mp_parameters": "",
+  "auto_find_batch_size": false,
+  "full_determinism": false,
+  "torchdynamo": null,
+  "ray_scope": "last",
+  "ddp_timeout": 18000000,
+  "torch_compile": false,
+  "torch_compile_backend": null,
+  "torch_compile_mode": null,
+  "include_tokens_per_second": false,
+  "include_num_input_tokens_seen": false,
+  "neftune_noise_alpha": null,
+  "optim_target_modules": null,
+  "batch_eval_metrics": false,
+  "eval_on_start": false,
+  "use_liger_kernel": false,
+  "eval_use_gather_object": false,
+  "average_tokens_across_devices": false,
+  "sortish_sampler": false,
+  "predict_with_generate": false,
+  "generation_max_length": null,
+  "generation_num_beams": null,
+  "generation_config": null,
+  "vit_gradient_checkpointing": null,
+  "check_model": true,
+  "acc_strategy": "token",
+  "train_dataloader_shuffle": true,
+  "max_epochs": null,
+  "aligner_lr": null,
+  "vit_lr": null,
+  "optimizer": null,
+  "use_logits_to_keep": null,
+  "channels": null,
+  "metric_warmup_step": 0,
+  "fsdp_num": 1,
+  "acc_steps": 1,
+  "eval_use_evalscope": false,
+  "eval_datasets": [],
+  "eval_limit": null,
+  "eval_datasets_args": null,
+  "eval_generation_config": null,
+  "model": "/mnt/data/users/liamding/data/models/Qwen2.5-VL-7B-Instruct",
+  "model_type": "qwen2_5_vl",
+  "model_revision": null,
+  "task_type": "causal_lm",
+  "torch_dtype": "bfloat16",
+  "attn_impl": null,
+  "num_labels": null,
+  "problem_type": null,
+  "rope_scaling": null,
+  "device_map": null,
+  "max_memory": {},
+  "local_repo_path": null,
+  "init_strategy": null,
+  "template": "qwen2_5_vl",
+  "system": null,
+  "max_length": 32768,
+  "truncation_strategy": "delete",
+  "max_pixels": null,
+  "agent_template": null,
+  "norm_bbox": null,
+  "use_chat_template": true,
+  "padding_free": false,
+  "padding_side": "right",
+  "loss_scale": "default",
+  "sequence_parallel_size": 1,
+  "response_prefix": null,
+  "template_backend": "swift",
+  "dataset": [
+    "/mnt/data/users/liamding/data/3AM_Plus/final/training/thinking_v2/ambi_normal_train_thinking_772.json",
+    "/mnt/data/users/liamding/data/3AM_Plus/final/training/thinking_v2/mma_train_thinking_126.json",
+    "/mnt/data/users/liamding/data/3AM_Plus/final/training/thinking_v2/sp_train_thinking_102.json"
+  ],
+  "val_dataset": [],
+  "split_dataset_ratio": 0.1,
+  "dataset_num_proc": 1,
+  "load_from_cache_file": true,
+  "dataset_shuffle": true,
+  "val_dataset_shuffle": false,
+  "streaming": false,
+  "interleave_prob": null,
+  "stopping_strategy": "first_exhausted",
+  "shuffle_buffer_size": 1000,
+  "download_mode": "reuse_dataset_if_exists",
+  "columns": {},
+  "strict": false,
+  "model_name": null,
+  "model_author": null,
+  "custom_dataset_info": [],
+  "quant_method": null,
+  "quant_bits": null,
+  "hqq_axis": null,
+  "bnb_4bit_compute_dtype": "bfloat16",
+  "bnb_4bit_quant_type": "nf4",
+  "bnb_4bit_use_double_quant": true,
+  "bnb_4bit_quant_storage": null,
+  "max_new_tokens": 64,
+  "temperature": 0.0,
+  "top_k": null,
+  "top_p": null,
+  "repetition_penalty": null,
+  "num_beams": 1,
+  "stream": false,
+  "stop_words": [],
+  "logprobs": false,
+  "top_logprobs": null,
+  "ckpt_dir": null,
+  "lora_modules": [],
+  "tuner_backend": "peft",
+  "train_type": "full",
+  "adapters": [],
+  "external_plugins": [],
+  "model_kwargs": {},
+  "load_args": false,
+  "load_data_args": false,
+  "packing": false,
+  "packing_cache": null,
+  "custom_register_path": [],
+  "use_hf": false,
+  "ignore_args_error": false,
+  "use_swift_lora": false,
+  "freeze_parameters": [
+    "visual",
+    "visual.merger"
+  ],
+  "freeze_parameters_regex": null,
+  "freeze_parameters_ratio": 0.0,
+  "trainable_parameters": [],
+  "trainable_parameters_regex": null,
+  "freeze_llm": false,
+  "freeze_vit": true,
+  "freeze_aligner": true,
+  "target_modules": [
+    "all-linear"
+  ],
+  "target_regex": null,
+  "modules_to_save": [],
+  "lora_rank": 8,
+  "lora_alpha": 32,
+  "lora_dropout": 0.05,
+  "lora_bias": "none",
+  "lora_dtype": null,
+  "lorap_lr_ratio": null,
+  "use_rslora": false,
+  "use_dora": false,
+  "lora_ga_batch_size": 2,
+  "lora_ga_iters": 2,
+  "lora_ga_max_length": 1024,
+  "lora_ga_direction": "ArB2r",
+  "lora_ga_scale": "stable",
+  "lora_ga_stable_gamma": 16,
+  "init_weights": true,
+  "fourier_n_frequency": 2000,
+  "fourier_scaling": 300.0,
+  "boft_block_size": 4,
+  "boft_block_num": 0,
+  "boft_n_butterfly_factor": 1,
+  "boft_dropout": 0.0,
+  "vera_rank": 256,
+  "vera_projection_prng_key": 0,
+  "vera_dropout": 0.0,
+  "vera_d_initial": 0.1,
+  "adapter_act": "gelu",
+  "adapter_length": 128,
+  "use_galore": false,
+  "galore_target_modules": null,
+  "galore_rank": 128,
+  "galore_update_proj_gap": 50,
+  "galore_scale": 1.0,
+  "galore_proj_type": "std",
+  "galore_optim_per_parameter": false,
+  "galore_with_embedding": false,
+  "galore_quantization": false,
+  "galore_proj_quant": false,
+  "galore_proj_bits": 4,
+  "galore_proj_group_size": 256,
+  "galore_cos_threshold": 0.4,
+  "galore_gamma_proj": 2,
+  "galore_queue_size": 5,
+  "adalora_target_r": 8,
+  "adalora_init_r": 12,
+  "adalora_tinit": 0,
+  "adalora_tfinal": 0,
+  "adalora_deltaT": 1,
+  "adalora_beta1": 0.85,
+  "adalora_beta2": 0.85,
+  "adalora_orth_reg_weight": 0.5,
+  "llamapro_num_new_blocks": 4,
+  "llamapro_num_groups": null,
+  "lisa_activated_layers": 0,
+  "lisa_step_interval": 20,
+  "reft_layer_key": null,
+  "reft_layers": null,
+  "reft_rank": 4,
+  "reft_intervention_type": "LoreftIntervention",
+  "reft_args": null,
+  "swanlab_token": null,
+  "swanlab_project": null,
+  "swanlab_workspace": null,
+  "swanlab_exp_name": "/mnt/data/users/liamding/data/MMMT/lora/selective_mask_plugin/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148",
+  "swanlab_mode": "cloud",
+  "add_version": true,
+  "resume_only_model": false,
+  "create_checkpoint_symlink": false,
+  "lazy_tokenize": true,
+  "loss_type": "selective_translate_weighted_ratio",
+  "metric": null,
+  "zero_hpz_partition_size": null,
+  "rank": 0,
+  "global_world_size": 4,
+  "local_world_size": 4,
+  "model_suffix": "Qwen2.5-VL-7B-Instruct",
+  "model_info": "ModelInfo(model_type='qwen2_5_vl', model_dir='/mnt/data/users/liamding/data/models/Qwen2.5-VL-7B-Instruct', torch_dtype=torch.bfloat16, max_model_len=128000, quant_method=None, quant_bits=None, rope_scaling={'type': 'default', 'mrope_section': [16, 24, 24], 'rope_type': 'default'}, config=None, task_type='causal_lm', num_labels=None)",
+  "model_meta": "ModelMeta(model_type='qwen2_5_vl', model_groups=[ModelGroup(models=[Model(ms_model_id='Qwen/Qwen2.5-VL-3B-Instruct', hf_model_id='Qwen/Qwen2.5-VL-3B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-7B-Instruct', hf_model_id='Qwen/Qwen2.5-VL-7B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-32B-Instruct', hf_model_id='Qwen/Qwen2.5-VL-32B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-72B-Instruct', hf_model_id='Qwen/Qwen2.5-VL-72B-Instruct', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[]), ModelGroup(models=[Model(ms_model_id='Qwen/Qwen2.5-VL-3B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-VL-3B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-7B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-VL-7B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-32B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-VL-32B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-72B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-VL-72B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[])], template='qwen2_5_vl', get_function=<function get_model_tokenizer_qwen2_5_vl at 0x7f11c7e81360>, model_arch='qwen2_vl', architectures=['Qwen2_5_VLForConditionalGeneration'], additional_saved_files=[], torch_dtype=None, is_multimodal=True, is_reward=False, task_type=None, ignore_patterns=None, requires=['transformers>=4.49', 'qwen_vl_utils>=0.0.6', 'decord'], tags=[])",
+  "model_dir": "/mnt/data/users/liamding/data/models/Qwen2.5-VL-7B-Instruct",
+  "hub": "<class 'swift.hub.hub.MSHub'>",
+  "evaluation_strategy": "epoch",
+  "training_args": "Seq2SeqTrainingArguments(output_dir='/mnt/data/users/liamding/data/MMMT/lora/selective_mask_plugin/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148', overwrite_output_dir=False, do_train=False, do_eval=True, do_predict=False, eval_strategy=<IntervalStrategy.EPOCH: 'epoch'>, prediction_loss_only=False, per_device_train_batch_size=2, per_device_eval_batch_size=2, per_gpu_train_batch_size=None, per_gpu_eval_batch_size=None, gradient_accumulation_steps=2, eval_accumulation_steps=None, eval_delay=0, torch_empty_cache_steps=None, learning_rate=2e-06, weight_decay=0.0001, adam_beta1=0.9, adam_beta2=0.95, adam_epsilon=1e-08, max_grad_norm=1.0, num_train_epochs=5.0, max_steps=-1, lr_scheduler_type=<SchedulerType.COSINE: 'cosine'>, lr_scheduler_kwargs=None, warmup_ratio=0.1, warmup_steps=0, log_level='passive', log_level_replica='warning', log_on_each_node=True, logging_dir='/mnt/data/users/liamding/data/MMMT/lora/selective_mask_plugin/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/runs', logging_strategy=<IntervalStrategy.STEPS: 'steps'>, logging_first_step=True, logging_steps=5, logging_nan_inf_filter=True, save_strategy=<SaveStrategy.EPOCH: 'epoch'>, save_steps=500, save_total_limit=5, save_safetensors=True, save_on_each_node=False, save_only_model=False, restore_callback_states_from_checkpoint=False, no_cuda=False, use_cpu=False, use_mps_device=False, seed=42, data_seed=42, jit_mode_eval=False, use_ipex=False, bf16=True, fp16=False, fp16_opt_level='O1', half_precision_backend='auto', bf16_full_eval=False, fp16_full_eval=False, tf32=None, local_rank=0, ddp_backend=None, tpu_num_cores=None, tpu_metrics_debug=False, debug=[], dataloader_drop_last=False, eval_steps=None, dataloader_num_workers=4, dataloader_prefetch_factor=10, past_index=-1, run_name='/mnt/data/users/liamding/data/MMMT/lora/selective_mask_plugin/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148', disable_tqdm=False, remove_unused_columns=False, label_names=None, load_best_model_at_end=False, metric_for_best_model='loss', greater_is_better=False, ignore_data_skip=False, fsdp=[], fsdp_min_num_params=0, fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, tp_size=0, fsdp_transformer_layer_cls_to_wrap=None, accelerator_config=AcceleratorConfig(split_batches=False, dispatch_batches=False, even_batches=True, use_seedable_sampler=True, non_blocking=False, gradient_accumulation_kwargs=None, use_configured_state=False), deepspeed={'fp16': {'enabled': 'auto', 'loss_scale': 0, 'loss_scale_window': 1000, 'initial_scale_power': 16, 'hysteresis': 2, 'min_loss_scale': 1}, 'bf16': {'enabled': 'auto'}, 'zero_optimization': {'stage': 3, 'offload_optimizer': {'device': 'none', 'pin_memory': True}, 'offload_param': {'device': 'none', 'pin_memory': True}, 'overlap_comm': False, 'contiguous_gradients': True, 'sub_group_size': 1000000000.0, 'reduce_bucket_size': 'auto', 'zero_quantized_weights': False, 'zero_quantized_gradients': False, 'stage3_prefetch_bucket_size': 'auto', 'stage3_param_persistence_threshold': 'auto', 'stage3_max_live_parameters': 1000000000.0, 'stage3_max_reuse_distance': 1000000000.0, 'stage3_gather_16bit_weights_on_model_save': True}, 'gradient_accumulation_steps': 'auto', 'gradient_clipping': 'auto', 'steps_per_print': 2000, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'wall_clock_breakdown': False}, label_smoothing_factor=0.0, optim=<OptimizerNames.ADAMW_TORCH: 'adamw_torch'>, optim_args=None, 
adafactor=False, group_by_length=False, length_column_name='length', report_to=['swanlab'], ddp_find_unused_parameters=None, ddp_bucket_cap_mb=None, ddp_broadcast_buffers=None, dataloader_pin_memory=True, dataloader_persistent_workers=False, skip_memory_metrics=True, use_legacy_prediction_loop=False, push_to_hub=False, resume_from_checkpoint=None, hub_model_id=None, hub_strategy=<HubStrategy.EVERY_SAVE: 'every_save'>, hub_token=None, hub_private_repo=None, hub_always_push=False, gradient_checkpointing=True, gradient_checkpointing_kwargs=None, include_inputs_for_metrics=False, include_for_metrics=[], eval_do_concat_batches=True, fp16_backend='auto', push_to_hub_model_id=None, push_to_hub_organization=None, push_to_hub_token=None, mp_parameters='', auto_find_batch_size=False, full_determinism=False, torchdynamo=None, ray_scope='last', ddp_timeout=18000000, torch_compile=False, torch_compile_backend=None, torch_compile_mode=None, include_tokens_per_second=None, include_num_input_tokens_seen=None, neftune_noise_alpha=None, optim_target_modules=None, batch_eval_metrics=False, eval_on_start=False, use_liger_kernel=False, eval_use_gather_object=False, average_tokens_across_devices=None, sortish_sampler=False, predict_with_generate=False, generation_max_length=None, generation_num_beams=None, generation_config=None, vit_gradient_checkpointing=True, check_model=True, acc_strategy='token', train_dataloader_shuffle=True, max_epochs=None, aligner_lr=None, vit_lr=None, optimizer=None, use_logits_to_keep=None, channels=None, metric_warmup_step=0, fsdp_num=1, acc_steps=1, eval_use_evalscope=False, eval_datasets=[], eval_limit=None, eval_datasets_args=None, eval_generation_config=None, train_type='full', local_repo_path=None, galore_config=None)"
|
| 375 |
+
}
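For orientation, the Seq2SeqTrainingArguments above imply an effective global batch size of 16 under the standard Hugging Face Trainer semantics (per-device batch size × gradient-accumulation steps × number of ranks). A quick check, with the values copied from args.json and the rank count taken from the "global_world_size" field:

```python
# Effective global batch size implied by the training arguments above
# (standard Hugging Face Trainer semantics; values copied from args.json).
per_device_train_batch_size = 2
gradient_accumulation_steps = 2
global_world_size = 4  # four ranks, per "global_world_size"

effective_batch = (per_device_train_batch_size
                   * gradient_accumulation_steps
                   * global_world_size)
print(effective_batch)  # 16 sequences per optimizer step
```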
selective_loss/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/checkpoint-280/chat_template.json
ADDED
@@ -0,0 +1,3 @@
{
  "chat_template": "{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}"
}
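The Jinja template above is the standard Qwen2.5-VL chat format: it injects a default system prompt, wraps turns in <|im_start|>/<|im_end|>, and expands image/video content items into <|vision_start|>…<|vision_end|> pad tokens. A minimal sketch of rendering a prompt with it through the stock transformers API; the checkpoint directory path is abbreviated here for readability:

```python
from transformers import AutoProcessor

# Load the processor from the checkpoint directory containing chat_template.json
# (full path abbreviated; substitute the checkpoint-280 directory above).
processor = AutoProcessor.from_pretrained("checkpoint-280")

messages = [
    {"role": "user", "content": [
        {"type": "image"},  # expands to <|vision_start|><|image_pad|><|vision_end|>
        {"type": "text", "text": "Describe the picture."},
    ]},
]

# Render the template; add_generation_prompt appends "<|im_start|>assistant\n".
prompt = processor.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)
```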
selective_loss/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/checkpoint-280/config.json
ADDED
@@ -0,0 +1,66 @@
{
  "architectures": [
    "Qwen2_5_VLForConditionalGeneration"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 151643,
  "eos_token_id": 151645,
  "hidden_act": "silu",
  "hidden_size": 3584,
  "image_token_id": 151655,
  "initializer_range": 0.02,
  "intermediate_size": 18944,
  "max_position_embeddings": 128000,
  "max_window_layers": 28,
  "model_type": "qwen2_5_vl",
  "num_attention_heads": 28,
  "num_hidden_layers": 28,
  "num_key_value_heads": 4,
  "pad_token_id": 151643,
  "rms_norm_eps": 1e-06,
  "rope_scaling": {
    "mrope_section": [
      16,
      24,
      24
    ],
    "rope_type": "default",
    "type": "default"
  },
  "rope_theta": 1000000.0,
  "sliding_window": 32768,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.51.3",
  "use_cache": false,
  "use_sliding_window": false,
  "video_token_id": 151656,
  "vision_config": {
    "depth": 32,
    "fullatt_block_indexes": [
      7,
      15,
      23,
      31
    ],
    "hidden_act": "silu",
    "hidden_size": 1280,
    "in_channels": 3,
    "in_chans": 3,
    "intermediate_size": 3420,
    "model_type": "qwen2_5_vl",
    "num_heads": 16,
    "out_hidden_size": 3584,
    "patch_size": 14,
    "spatial_merge_size": 2,
    "spatial_patch_size": 14,
    "temporal_patch_size": 2,
    "tokens_per_second": 2,
    "torch_dtype": "bfloat16",
    "window_size": 112
  },
  "vision_end_token_id": 151653,
  "vision_start_token_id": 151652,
  "vision_token_id": 151654,
  "vocab_size": 152064
}
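Because the config declares the stock Qwen2_5_VLForConditionalGeneration architecture (saved with transformers 4.51.3, consistent with the transformers>=4.49 requirement in model_meta above), the checkpoint loads with the standard class. A minimal sketch, again with the local path abbreviated:

```python
import torch
from transformers import Qwen2_5_VLForConditionalGeneration

# bfloat16 matches "torch_dtype" in config.json; device_map="auto" is illustrative.
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    "checkpoint-280",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
```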
selective_loss/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/checkpoint-280/generation_config.json
ADDED
@@ -0,0 +1,12 @@
{
  "bos_token_id": 151643,
  "do_sample": true,
  "eos_token_id": [
    151645,
    151643
  ],
  "pad_token_id": 151643,
  "repetition_penalty": 1.05,
  "temperature": 1e-06,
  "transformers_version": "4.51.3"
}
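Note that do_sample=true combined with temperature=1e-06 makes sampling collapse to effectively greedy decoding: dividing the logits by 1e-6 before the softmax puts essentially all probability mass on the argmax token. The file loads like any other generation config; a sketch:

```python
from transformers import GenerationConfig

# Path abbreviated; point at the checkpoint-280 directory above.
gen_cfg = GenerationConfig.from_pretrained("checkpoint-280")
print(gen_cfg.temperature)   # 1e-06 -> sampling is effectively greedy
print(gen_cfg.eos_token_id)  # [151645, 151643]
```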
selective_loss/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/checkpoint-280/latest
ADDED
@@ -0,0 +1 @@
global_step278
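The `latest` file is DeepSpeed's checkpoint pointer: it names the tag directory (here global_step278) that holds the ZeRO-partitioned parameter and optimizer states, and utilities such as DeepSpeed's zero_to_fp32.py read it to find that directory. A sketch of resolving it by hand:

```python
import os

ckpt_dir = "checkpoint-280"  # path abbreviated

# DeepSpeed writes the most recent checkpoint tag into "latest".
with open(os.path.join(ckpt_dir, "latest")) as f:
    tag = f.read().strip()

step_dir = os.path.join(ckpt_dir, tag)  # .../checkpoint-280/global_step278
print(step_dir)
```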
selective_loss/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/checkpoint-280/merges.txt
ADDED
The diff for this file is too large to render. See raw diff.
selective_loss/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/checkpoint-280/model.safetensors.index.json
ADDED
@@ -0,0 +1,736 @@
{
  "metadata": {
    "total_size": 16584333312
  },
  "weight_map": {
    "lm_head.weight": "model-00004-of-00004.safetensors",
    "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.16.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.16.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.16.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.16.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.16.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.16.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.16.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.16.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.17.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.17.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.17.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.17.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.17.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.17.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.17.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.17.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.17.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.17.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.17.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.17.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.18.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.18.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.18.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.18.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.18.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.18.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.18.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.18.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.18.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.18.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.18.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.18.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.19.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.19.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.19.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.19.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.19.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.19.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.19.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.20.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.20.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.20.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.20.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.20.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.21.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.21.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.21.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.22.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.22.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.22.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.23.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.23.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.23.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.24.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.24.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.24.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.25.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.25.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.25.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.26.input_layernorm.weight": "model-00004-of-00004.safetensors",
    "model.layers.26.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
    "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.26.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
    "model.layers.26.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.26.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.26.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.27.input_layernorm.weight": "model-00004-of-00004.safetensors",
    "model.layers.27.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
    "model.layers.27.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
    "model.layers.27.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
    "model.layers.27.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
    "model.layers.27.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
    "model.layers.27.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
    "model.layers.27.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
    "model.layers.27.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
    "model.layers.27.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
    "model.layers.27.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
    "model.layers.27.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
    "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.5.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.5.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.5.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.5.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.6.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.6.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.6.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.6.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.6.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.6.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.6.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.6.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.6.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.6.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.6.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.6.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.7.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.7.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.7.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.7.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.7.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.7.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.7.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.7.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.7.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.7.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.7.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.7.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.8.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.8.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.8.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.8.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.8.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.8.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.8.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.9.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.9.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.9.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.norm.weight": "model-00004-of-00004.safetensors",
    "visual.blocks.0.attn.proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.0.attn.proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.0.attn.qkv.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.0.attn.qkv.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.0.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.0.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.0.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.0.norm1.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.0.norm2.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.1.attn.proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.1.attn.proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.1.attn.qkv.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.1.attn.qkv.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.1.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.1.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.1.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.1.norm1.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.1.norm2.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.10.attn.proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.10.attn.proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.10.attn.qkv.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.10.attn.qkv.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.10.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.10.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.10.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.10.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.10.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.10.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.10.norm1.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.10.norm2.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.11.attn.proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.11.attn.proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.11.attn.qkv.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.11.attn.qkv.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.11.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.11.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.11.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.11.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.11.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.11.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.11.norm1.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.11.norm2.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.12.attn.proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.12.attn.proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.12.attn.qkv.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.12.attn.qkv.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.12.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.12.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.12.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.12.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.12.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.12.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.12.norm1.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.12.norm2.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.13.attn.proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.13.attn.proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.13.attn.qkv.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.13.attn.qkv.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.13.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.13.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.13.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.13.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.13.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.13.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.13.norm1.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.13.norm2.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.14.attn.proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.14.attn.proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.14.attn.qkv.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.14.attn.qkv.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.14.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.14.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.14.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.14.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.14.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.14.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.14.norm1.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.14.norm2.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.15.attn.proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.15.attn.proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.15.attn.qkv.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.15.attn.qkv.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.15.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.15.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.15.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.15.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.15.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.15.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.15.norm1.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.15.norm2.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.16.attn.proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.16.attn.proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.16.attn.qkv.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.16.attn.qkv.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.16.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.16.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.16.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.16.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.16.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.16.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.16.norm1.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.16.norm2.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.17.attn.proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.17.attn.proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.17.attn.qkv.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.17.attn.qkv.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.17.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.17.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.17.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.17.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.17.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.17.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.17.norm1.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.17.norm2.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.18.attn.proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.18.attn.proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.18.attn.qkv.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.18.attn.qkv.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.18.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.18.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.18.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.18.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.18.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.18.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.18.norm1.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.18.norm2.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.19.attn.proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.19.attn.proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.19.attn.qkv.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.19.attn.qkv.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.19.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.19.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.19.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.19.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.19.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.19.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.19.norm1.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.19.norm2.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.2.attn.proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.2.attn.proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.2.attn.qkv.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.2.attn.qkv.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.2.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.2.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.2.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.2.norm1.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.2.norm2.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.20.attn.proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.20.attn.proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.20.attn.qkv.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.20.attn.qkv.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.20.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.20.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.20.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.20.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.20.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.20.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.20.norm1.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.20.norm2.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.21.attn.proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.21.attn.proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.21.attn.qkv.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.21.attn.qkv.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.21.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.21.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.21.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.21.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.21.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
    "visual.blocks.21.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.21.norm1.weight": "model-00001-of-00004.safetensors",
    "visual.blocks.21.norm2.weight": "model-00001-of-00004.safetensors",
+
"visual.blocks.22.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 526 |
+
"visual.blocks.22.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 527 |
+
"visual.blocks.22.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 528 |
+
"visual.blocks.22.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 529 |
+
"visual.blocks.22.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 530 |
+
"visual.blocks.22.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 531 |
+
"visual.blocks.22.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 532 |
+
"visual.blocks.22.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 533 |
+
"visual.blocks.22.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 534 |
+
"visual.blocks.22.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 535 |
+
"visual.blocks.22.norm1.weight": "model-00001-of-00004.safetensors",
|
| 536 |
+
"visual.blocks.22.norm2.weight": "model-00001-of-00004.safetensors",
|
| 537 |
+
"visual.blocks.23.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 538 |
+
"visual.blocks.23.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 539 |
+
"visual.blocks.23.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 540 |
+
"visual.blocks.23.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 541 |
+
"visual.blocks.23.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 542 |
+
"visual.blocks.23.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 543 |
+
"visual.blocks.23.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 544 |
+
"visual.blocks.23.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 545 |
+
"visual.blocks.23.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 546 |
+
"visual.blocks.23.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 547 |
+
"visual.blocks.23.norm1.weight": "model-00001-of-00004.safetensors",
|
| 548 |
+
"visual.blocks.23.norm2.weight": "model-00001-of-00004.safetensors",
|
| 549 |
+
"visual.blocks.24.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 550 |
+
"visual.blocks.24.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 551 |
+
"visual.blocks.24.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 552 |
+
"visual.blocks.24.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 553 |
+
"visual.blocks.24.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 554 |
+
"visual.blocks.24.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 555 |
+
"visual.blocks.24.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 556 |
+
"visual.blocks.24.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 557 |
+
"visual.blocks.24.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 558 |
+
"visual.blocks.24.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 559 |
+
"visual.blocks.24.norm1.weight": "model-00001-of-00004.safetensors",
|
| 560 |
+
"visual.blocks.24.norm2.weight": "model-00001-of-00004.safetensors",
|
| 561 |
+
"visual.blocks.25.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 562 |
+
"visual.blocks.25.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 563 |
+
"visual.blocks.25.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 564 |
+
"visual.blocks.25.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 565 |
+
"visual.blocks.25.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 566 |
+
"visual.blocks.25.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 567 |
+
"visual.blocks.25.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 568 |
+
"visual.blocks.25.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 569 |
+
"visual.blocks.25.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 570 |
+
"visual.blocks.25.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 571 |
+
"visual.blocks.25.norm1.weight": "model-00001-of-00004.safetensors",
|
| 572 |
+
"visual.blocks.25.norm2.weight": "model-00001-of-00004.safetensors",
|
| 573 |
+
"visual.blocks.26.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 574 |
+
"visual.blocks.26.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 575 |
+
"visual.blocks.26.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 576 |
+
"visual.blocks.26.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 577 |
+
"visual.blocks.26.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 578 |
+
"visual.blocks.26.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 579 |
+
"visual.blocks.26.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 580 |
+
"visual.blocks.26.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 581 |
+
"visual.blocks.26.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 582 |
+
"visual.blocks.26.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 583 |
+
"visual.blocks.26.norm1.weight": "model-00001-of-00004.safetensors",
|
| 584 |
+
"visual.blocks.26.norm2.weight": "model-00001-of-00004.safetensors",
|
| 585 |
+
"visual.blocks.27.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 586 |
+
"visual.blocks.27.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 587 |
+
"visual.blocks.27.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 588 |
+
"visual.blocks.27.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 589 |
+
"visual.blocks.27.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 590 |
+
"visual.blocks.27.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 591 |
+
"visual.blocks.27.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 592 |
+
"visual.blocks.27.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 593 |
+
"visual.blocks.27.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 594 |
+
"visual.blocks.27.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 595 |
+
"visual.blocks.27.norm1.weight": "model-00001-of-00004.safetensors",
|
| 596 |
+
"visual.blocks.27.norm2.weight": "model-00001-of-00004.safetensors",
|
| 597 |
+
"visual.blocks.28.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 598 |
+
"visual.blocks.28.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 599 |
+
"visual.blocks.28.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 600 |
+
"visual.blocks.28.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 601 |
+
"visual.blocks.28.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 602 |
+
"visual.blocks.28.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 603 |
+
"visual.blocks.28.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 604 |
+
"visual.blocks.28.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 605 |
+
"visual.blocks.28.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 606 |
+
"visual.blocks.28.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 607 |
+
"visual.blocks.28.norm1.weight": "model-00001-of-00004.safetensors",
|
| 608 |
+
"visual.blocks.28.norm2.weight": "model-00001-of-00004.safetensors",
|
| 609 |
+
"visual.blocks.29.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 610 |
+
"visual.blocks.29.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 611 |
+
"visual.blocks.29.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 612 |
+
"visual.blocks.29.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 613 |
+
"visual.blocks.29.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 614 |
+
"visual.blocks.29.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 615 |
+
"visual.blocks.29.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 616 |
+
"visual.blocks.29.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 617 |
+
"visual.blocks.29.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 618 |
+
"visual.blocks.29.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 619 |
+
"visual.blocks.29.norm1.weight": "model-00001-of-00004.safetensors",
|
| 620 |
+
"visual.blocks.29.norm2.weight": "model-00001-of-00004.safetensors",
|
| 621 |
+
"visual.blocks.3.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 622 |
+
"visual.blocks.3.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 623 |
+
"visual.blocks.3.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 624 |
+
"visual.blocks.3.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 625 |
+
"visual.blocks.3.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 626 |
+
"visual.blocks.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 627 |
+
"visual.blocks.3.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 628 |
+
"visual.blocks.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 629 |
+
"visual.blocks.3.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 630 |
+
"visual.blocks.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 631 |
+
"visual.blocks.3.norm1.weight": "model-00001-of-00004.safetensors",
|
| 632 |
+
"visual.blocks.3.norm2.weight": "model-00001-of-00004.safetensors",
|
| 633 |
+
"visual.blocks.30.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 634 |
+
"visual.blocks.30.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 635 |
+
"visual.blocks.30.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 636 |
+
"visual.blocks.30.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 637 |
+
"visual.blocks.30.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 638 |
+
"visual.blocks.30.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 639 |
+
"visual.blocks.30.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 640 |
+
"visual.blocks.30.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 641 |
+
"visual.blocks.30.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 642 |
+
"visual.blocks.30.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 643 |
+
"visual.blocks.30.norm1.weight": "model-00001-of-00004.safetensors",
|
| 644 |
+
"visual.blocks.30.norm2.weight": "model-00001-of-00004.safetensors",
|
| 645 |
+
"visual.blocks.31.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 646 |
+
"visual.blocks.31.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 647 |
+
"visual.blocks.31.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 648 |
+
"visual.blocks.31.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 649 |
+
"visual.blocks.31.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 650 |
+
"visual.blocks.31.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 651 |
+
"visual.blocks.31.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 652 |
+
"visual.blocks.31.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 653 |
+
"visual.blocks.31.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 654 |
+
"visual.blocks.31.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 655 |
+
"visual.blocks.31.norm1.weight": "model-00001-of-00004.safetensors",
|
| 656 |
+
"visual.blocks.31.norm2.weight": "model-00001-of-00004.safetensors",
|
| 657 |
+
"visual.blocks.4.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 658 |
+
"visual.blocks.4.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 659 |
+
"visual.blocks.4.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 660 |
+
"visual.blocks.4.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 661 |
+
"visual.blocks.4.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 662 |
+
"visual.blocks.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 663 |
+
"visual.blocks.4.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 664 |
+
"visual.blocks.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 665 |
+
"visual.blocks.4.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 666 |
+
"visual.blocks.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 667 |
+
"visual.blocks.4.norm1.weight": "model-00001-of-00004.safetensors",
|
| 668 |
+
"visual.blocks.4.norm2.weight": "model-00001-of-00004.safetensors",
|
| 669 |
+
"visual.blocks.5.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 670 |
+
"visual.blocks.5.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 671 |
+
"visual.blocks.5.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 672 |
+
"visual.blocks.5.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 673 |
+
"visual.blocks.5.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 674 |
+
"visual.blocks.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 675 |
+
"visual.blocks.5.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 676 |
+
"visual.blocks.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 677 |
+
"visual.blocks.5.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 678 |
+
"visual.blocks.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 679 |
+
"visual.blocks.5.norm1.weight": "model-00001-of-00004.safetensors",
|
| 680 |
+
"visual.blocks.5.norm2.weight": "model-00001-of-00004.safetensors",
|
| 681 |
+
"visual.blocks.6.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 682 |
+
"visual.blocks.6.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 683 |
+
"visual.blocks.6.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 684 |
+
"visual.blocks.6.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 685 |
+
"visual.blocks.6.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 686 |
+
"visual.blocks.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 687 |
+
"visual.blocks.6.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 688 |
+
"visual.blocks.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 689 |
+
"visual.blocks.6.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 690 |
+
"visual.blocks.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 691 |
+
"visual.blocks.6.norm1.weight": "model-00001-of-00004.safetensors",
|
| 692 |
+
"visual.blocks.6.norm2.weight": "model-00001-of-00004.safetensors",
|
| 693 |
+
"visual.blocks.7.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 694 |
+
"visual.blocks.7.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 695 |
+
"visual.blocks.7.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 696 |
+
"visual.blocks.7.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 697 |
+
"visual.blocks.7.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 698 |
+
"visual.blocks.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 699 |
+
"visual.blocks.7.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 700 |
+
"visual.blocks.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 701 |
+
"visual.blocks.7.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 702 |
+
"visual.blocks.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 703 |
+
"visual.blocks.7.norm1.weight": "model-00001-of-00004.safetensors",
|
| 704 |
+
"visual.blocks.7.norm2.weight": "model-00001-of-00004.safetensors",
|
| 705 |
+
"visual.blocks.8.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 706 |
+
"visual.blocks.8.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 707 |
+
"visual.blocks.8.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 708 |
+
"visual.blocks.8.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 709 |
+
"visual.blocks.8.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 710 |
+
"visual.blocks.8.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 711 |
+
"visual.blocks.8.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 712 |
+
"visual.blocks.8.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 713 |
+
"visual.blocks.8.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 714 |
+
"visual.blocks.8.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 715 |
+
"visual.blocks.8.norm1.weight": "model-00001-of-00004.safetensors",
|
| 716 |
+
"visual.blocks.8.norm2.weight": "model-00001-of-00004.safetensors",
|
| 717 |
+
"visual.blocks.9.attn.proj.bias": "model-00001-of-00004.safetensors",
|
| 718 |
+
"visual.blocks.9.attn.proj.weight": "model-00001-of-00004.safetensors",
|
| 719 |
+
"visual.blocks.9.attn.qkv.bias": "model-00001-of-00004.safetensors",
|
| 720 |
+
"visual.blocks.9.attn.qkv.weight": "model-00001-of-00004.safetensors",
|
| 721 |
+
"visual.blocks.9.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
|
| 722 |
+
"visual.blocks.9.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 723 |
+
"visual.blocks.9.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
|
| 724 |
+
"visual.blocks.9.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 725 |
+
"visual.blocks.9.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
|
| 726 |
+
"visual.blocks.9.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 727 |
+
"visual.blocks.9.norm1.weight": "model-00001-of-00004.safetensors",
|
| 728 |
+
"visual.blocks.9.norm2.weight": "model-00001-of-00004.safetensors",
|
| 729 |
+
"visual.merger.ln_q.weight": "model-00001-of-00004.safetensors",
|
| 730 |
+
"visual.merger.mlp.0.bias": "model-00001-of-00004.safetensors",
|
| 731 |
+
"visual.merger.mlp.0.weight": "model-00001-of-00004.safetensors",
|
| 732 |
+
"visual.merger.mlp.2.bias": "model-00001-of-00004.safetensors",
|
| 733 |
+
"visual.merger.mlp.2.weight": "model-00001-of-00004.safetensors",
|
| 734 |
+
"visual.patch_embed.proj.weight": "model-00001-of-00004.safetensors"
|
| 735 |
+
}
|
| 736 |
+
}
|
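The model.safetensors.index.json above is the shard manifest for the checkpoint: its weight_map assigns every parameter name to the shard file that stores it, so a loader can fetch a single tensor without reading all four shards. Below is a minimal sketch of that lookup using the safetensors library; the local path is illustrative and assumes the checkpoint directory has been downloaded next to the script.

import json

from safetensors import safe_open

# Illustrative local copy of the checkpoint directory shown in this diff.
CKPT = "selective_loss/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/checkpoint-280"

with open(f"{CKPT}/model.safetensors.index.json") as f:
    index = json.load(f)

# weight_map: parameter name -> shard file name.
name = "visual.blocks.16.attn.qkv.weight"
shard = index["weight_map"][name]  # "model-00001-of-00004.safetensors"

# Open only the shard that holds this tensor and read it lazily.
with safe_open(f"{CKPT}/{shard}", framework="pt", device="cpu") as shard_file:
    tensor = shard_file.get_tensor(name)
print(name, tuple(tensor.shape))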
selective_loss/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/checkpoint-280/preprocessor_config.json
ADDED
@@ -0,0 +1,19 @@
{
  "min_pixels": 3136,
  "max_pixels": 12845056,
  "patch_size": 14,
  "temporal_patch_size": 2,
  "merge_size": 2,
  "image_mean": [
    0.48145466,
    0.4578275,
    0.40821073
  ],
  "image_std": [
    0.26862954,
    0.26130258,
    0.27577711
  ],
  "image_processor_type": "Qwen2VLImageProcessor",
  "processor_class": "Qwen2_5_VLProcessor"
}
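This preprocessor config pins down how images become vision tokens: 14x14-pixel patches, merged 2x2 after the vision encoder, with the total pixel count clamped to [3136, 12845056]. The sketch below re-implements the Qwen2-VL-style "smart resize" rule these fields imply, for illustration only (it is not the library code itself): each side is rounded to a multiple of patch_size * merge_size = 28, rescaling when the area leaves the budget.

import math

# Values from the preprocessor_config.json above.
MIN_PIXELS, MAX_PIXELS = 3136, 12845056
FACTOR = 14 * 2  # patch_size * merge_size

def smart_resize(h: int, w: int) -> tuple[int, int]:
    """Round (h, w) to multiples of FACTOR while keeping h*w within the pixel budget."""
    h_bar = round(h / FACTOR) * FACTOR
    w_bar = round(w / FACTOR) * FACTOR
    if h_bar * w_bar > MAX_PIXELS:
        # Too many pixels: shrink both sides by the same ratio, then floor.
        beta = math.sqrt((h * w) / MAX_PIXELS)
        h_bar = math.floor(h / beta / FACTOR) * FACTOR
        w_bar = math.floor(w / beta / FACTOR) * FACTOR
    elif h_bar * w_bar < MIN_PIXELS:
        # Too few pixels: enlarge both sides by the same ratio, then ceil.
        beta = math.sqrt(MIN_PIXELS / (h * w))
        h_bar = math.ceil(h * beta / FACTOR) * FACTOR
        w_bar = math.ceil(w * beta / FACTOR) * FACTOR
    return h_bar, w_bar

print(smart_resize(1080, 1920))  # both sides divisible by 28, area within budget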
selective_loss/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/checkpoint-280/special_tokens_map.json
ADDED
@@ -0,0 +1,31 @@
{
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>",
    "<|object_ref_start|>",
    "<|object_ref_end|>",
    "<|box_start|>",
    "<|box_end|>",
    "<|quad_start|>",
    "<|quad_end|>",
    "<|vision_start|>",
    "<|vision_end|>",
    "<|vision_pad|>",
    "<|image_pad|>",
    "<|video_pad|>"
  ],
  "eos_token": {
    "content": "<|im_end|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
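Note that this map makes <|im_end|> the EOS token, so generation stops at the end of an assistant turn, while <|endoftext|> is reused for padding. A quick check, assuming the checkpoint directory is available locally (token IDs taken from the added_tokens_decoder in the tokenizer_config.json below):

from transformers import AutoTokenizer

CKPT = "selective_loss/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/checkpoint-280"
tok = AutoTokenizer.from_pretrained(CKPT)

print(tok.eos_token, tok.eos_token_id)  # <|im_end|> 151645
print(tok.pad_token, tok.pad_token_id)  # <|endoftext|> 151643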
selective_loss/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/checkpoint-280/tokenizer_config.json
ADDED
@@ -0,0 +1,209 @@
{
  "add_bos_token": false,
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "151643": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151644": {
      "content": "<|im_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151645": {
      "content": "<|im_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151646": {
      "content": "<|object_ref_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151647": {
      "content": "<|object_ref_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151648": {
      "content": "<|box_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151649": {
      "content": "<|box_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151650": {
      "content": "<|quad_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151651": {
      "content": "<|quad_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151652": {
      "content": "<|vision_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151653": {
      "content": "<|vision_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151654": {
      "content": "<|vision_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151655": {
      "content": "<|image_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151656": {
      "content": "<|video_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151657": {
      "content": "<tool_call>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151658": {
      "content": "</tool_call>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151659": {
      "content": "<|fim_prefix|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151660": {
      "content": "<|fim_middle|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151661": {
      "content": "<|fim_suffix|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151662": {
      "content": "<|fim_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151663": {
      "content": "<|repo_name|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151664": {
      "content": "<|file_sep|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    }
  },
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>",
    "<|object_ref_start|>",
    "<|object_ref_end|>",
    "<|box_start|>",
    "<|box_end|>",
    "<|quad_start|>",
    "<|quad_end|>",
    "<|vision_start|>",
    "<|vision_end|>",
    "<|vision_pad|>",
    "<|image_pad|>",
    "<|video_pad|>"
  ],
  "bos_token": null,
  "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|im_end|>",
  "errors": "replace",
  "extra_special_tokens": {},
  "model_max_length": 131072,
  "pad_token": "<|endoftext|>",
  "processor_class": "Qwen2_5_VLProcessor",
  "split_special_tokens": false,
  "tokenizer_class": "Qwen2Tokenizer",
  "unk_token": null
}
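This tokenizer config carries the full ChatML-style chat_template used for both training and inference. A minimal sketch of rendering a prompt through it with transformers, assuming the checkpoint directory above has been downloaded locally:

from transformers import AutoTokenizer

CKPT = "selective_loss/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/checkpoint-280"
tok = AutoTokenizer.from_pretrained(CKPT)

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Describe the image."},
]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)

With add_generation_prompt=True, the rendered string ends in "<|im_start|>assistant\n", which matches the template's final branch and cues the model to produce the assistant turn.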
selective_loss/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/checkpoint-280/trainer_state.json
ADDED
@@ -0,0 +1,658 @@
| 1 |
+
{
|
| 2 |
+
"best_global_step": 114,
|
| 3 |
+
"best_metric": 0.54378814,
|
| 4 |
+
"best_model_checkpoint": "/mnt/data/users/liamding/data/MMMT/lora/selective_mask_plugin/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/checkpoint-114",
|
| 5 |
+
"epoch": 4.920353982300885,
|
| 6 |
+
"eval_steps": 500,
|
| 7 |
+
"global_step": 280,
|
| 8 |
+
"is_hyper_param_search": false,
|
| 9 |
+
"is_local_process_zero": true,
|
| 10 |
+
"is_world_process_zero": true,
|
| 11 |
+
"log_history": [
|
| 12 |
+
{
|
| 13 |
+
"epoch": 0.017699115044247787,
|
| 14 |
+
"grad_norm": 71.34660319849452,
|
| 15 |
+
"learning_rate": 7.142857142857142e-08,
|
| 16 |
+
"loss": 2.3933868408203125,
|
| 17 |
+
"memory(GiB)": 45.01,
|
| 18 |
+
"step": 1,
|
| 19 |
+
"token_acc": 0.6500470366886171,
|
| 20 |
+
"train_speed(iter/s)": 0.042756
|
| 21 |
+
},
|
| 22 |
+
{
|
| 23 |
+
"epoch": 0.08849557522123894,
|
| 24 |
+
"grad_norm": 83.56776459164254,
|
| 25 |
+
"learning_rate": 3.5714285714285716e-07,
|
| 26 |
+
"loss": 2.159541606903076,
|
| 27 |
+
"memory(GiB)": 48.77,
|
| 28 |
+
"step": 5,
|
| 29 |
+
"token_acc": 0.6550046772684752,
|
| 30 |
+
"train_speed(iter/s)": 0.123972
|
| 31 |
+
},
|
| 32 |
+
{
|
| 33 |
+
"epoch": 0.17699115044247787,
|
| 34 |
+
"grad_norm": 34.05608807852992,
|
| 35 |
+
"learning_rate": 7.142857142857143e-07,
|
| 36 |
+
"loss": 2.339263343811035,
|
| 37 |
+
"memory(GiB)": 48.77,
|
| 38 |
+
"step": 10,
|
| 39 |
+
"token_acc": 0.6404985432178698,
|
| 40 |
+
"train_speed(iter/s)": 0.166375
|
| 41 |
+
},
|
| 42 |
+
{
|
| 43 |
+
"epoch": 0.26548672566371684,
|
| 44 |
+
"grad_norm": 25.500565551593816,
|
| 45 |
+
"learning_rate": 1.0714285714285714e-06,
|
| 46 |
+
"loss": 1.798775863647461,
|
| 47 |
+
"memory(GiB)": 48.77,
|
| 48 |
+
"step": 15,
|
| 49 |
+
"token_acc": 0.6422512234910277,
|
| 50 |
+
"train_speed(iter/s)": 0.187062
|
| 51 |
+
},
|
| 52 |
+
{
|
| 53 |
+
"epoch": 0.35398230088495575,
|
| 54 |
+
"grad_norm": 16.712932338074264,
|
| 55 |
+
"learning_rate": 1.4285714285714286e-06,
|
| 56 |
+
"loss": 1.530697727203369,
|
| 57 |
+
"memory(GiB)": 48.77,
|
| 58 |
+
"step": 20,
|
| 59 |
+
"token_acc": 0.6638304703804784,
|
| 60 |
+
"train_speed(iter/s)": 0.200803
|
| 61 |
+
},
|
| 62 |
+
{
|
| 63 |
+
"epoch": 0.4424778761061947,
|
| 64 |
+
"grad_norm": 17.641674630168712,
|
| 65 |
+
"learning_rate": 1.7857142857142857e-06,
|
| 66 |
+
"loss": 1.410023307800293,
|
| 67 |
+
"memory(GiB)": 48.77,
|
| 68 |
+
"step": 25,
|
| 69 |
+
"token_acc": 0.6750814332247557,
|
| 70 |
+
"train_speed(iter/s)": 0.207089
|
| 71 |
+
},
|
| 72 |
+
{
|
| 73 |
+
"epoch": 0.5309734513274337,
|
| 74 |
+
"grad_norm": 13.685381032193781,
|
| 75 |
+
"learning_rate": 1.999689182000816e-06,
|
| 76 |
+
"loss": 1.2505983352661132,
|
| 77 |
+
"memory(GiB)": 48.77,
|
| 78 |
+
"step": 30,
|
| 79 |
+
"token_acc": 0.7019507515190279,
|
| 80 |
+
"train_speed(iter/s)": 0.21304
|
| 81 |
+
},
|
| 82 |
+
{
|
| 83 |
+
"epoch": 0.6194690265486725,
|
| 84 |
+
"grad_norm": 11.221955923013804,
|
| 85 |
+
"learning_rate": 1.9961946980917456e-06,
|
| 86 |
+
"loss": 1.1053658485412599,
|
| 87 |
+
"memory(GiB)": 48.77,
|
| 88 |
+
"step": 35,
|
| 89 |
+
"token_acc": 0.7295155771654961,
|
| 90 |
+
"train_speed(iter/s)": 0.216941
|
| 91 |
+
},
|
| 92 |
+
{
|
| 93 |
+
"epoch": 0.7079646017699115,
|
| 94 |
+
"grad_norm": 13.61471786516788,
|
| 95 |
+
"learning_rate": 1.9888308262251284e-06,
|
| 96 |
+
"loss": 1.138925552368164,
|
| 97 |
+
"memory(GiB)": 48.77,
|
| 98 |
+
"step": 40,
|
| 99 |
+
"token_acc": 0.7432905484247374,
|
| 100 |
+
"train_speed(iter/s)": 0.220463
|
| 101 |
+
},
|
| 102 |
+
{
|
| 103 |
+
"epoch": 0.7964601769911505,
|
| 104 |
+
"grad_norm": 11.62532074887379,
|
| 105 |
+
"learning_rate": 1.9776261689193047e-06,
|
| 106 |
+
"loss": 1.1350323677062988,
|
| 107 |
+
"memory(GiB)": 48.77,
|
| 108 |
+
"step": 45,
|
| 109 |
+
"token_acc": 0.7520905066404329,
|
| 110 |
+
"train_speed(iter/s)": 0.222472
|
| 111 |
+
},
|
| 112 |
+
{
|
| 113 |
+
"epoch": 0.8849557522123894,
|
| 114 |
+
"grad_norm": 10.615826546314963,
|
| 115 |
+
"learning_rate": 1.962624246950012e-06,
|
| 116 |
+
"loss": 0.9772750854492187,
|
| 117 |
+
"memory(GiB)": 48.77,
|
| 118 |
+
"step": 50,
|
| 119 |
+
"token_acc": 0.7405189620758483,
|
| 120 |
+
"train_speed(iter/s)": 0.224224
|
| 121 |
+
},
|
| 122 |
+
{
|
| 123 |
+
"epoch": 0.9734513274336283,
|
| 124 |
+
"grad_norm": 12.595376651407483,
|
| 125 |
+
"learning_rate": 1.9438833303083674e-06,
|
| 126 |
+
"loss": 1.1083248138427735,
|
| 127 |
+
"memory(GiB)": 48.77,
|
| 128 |
+
"step": 55,
|
| 129 |
+
"token_acc": 0.7464830888955403,
|
| 130 |
+
"train_speed(iter/s)": 0.22532
|
| 131 |
+
},
|
| 132 |
+
{
|
| 133 |
+
"epoch": 1.0,
|
| 134 |
+
"eval_loss": 0.5650736093521118,
|
| 135 |
+
"eval_runtime": 8.5395,
|
| 136 |
+
"eval_samples_per_second": 11.593,
|
| 137 |
+
"eval_steps_per_second": 1.522,
|
| 138 |
+
"eval_token_acc": 0.7579033630661581,
|
| 139 |
+
"step": 57
|
| 140 |
+
},
|
| 141 |
+
{
|
| 142 |
+
"epoch": 1.0530973451327434,
|
| 143 |
+
"grad_norm": 9.69581460794372,
|
| 144 |
+
"learning_rate": 1.9214762118704076e-06,
|
| 145 |
+
"loss": 0.8373285293579101,
|
| 146 |
+
"memory(GiB)": 48.78,
|
| 147 |
+
"step": 60,
|
| 148 |
+
"token_acc": 0.7705505279034691,
|
| 149 |
+
"train_speed(iter/s)": 0.171599
|
| 150 |
+
},
|
| 151 |
+
{
|
| 152 |
+
"epoch": 1.1415929203539823,
|
| 153 |
+
"grad_norm": 11.083390187113388,
|
| 154 |
+
"learning_rate": 1.895489924657301e-06,
|
| 155 |
+
"loss": 0.8807508468627929,
|
| 156 |
+
"memory(GiB)": 48.78,
|
| 157 |
+
"step": 65,
|
| 158 |
+
"token_acc": 0.7706816677696889,
|
| 159 |
+
"train_speed(iter/s)": 0.175669
|
| 160 |
+
},
|
| 161 |
+
{
|
| 162 |
+
"epoch": 1.2300884955752212,
|
| 163 |
+
"grad_norm": 12.415922478666385,
|
| 164 |
+
"learning_rate": 1.8660254037844386e-06,
|
| 165 |
+
"loss": 0.8378904342651368,
|
| 166 |
+
"memory(GiB)": 48.78,
|
| 167 |
+
"step": 70,
|
| 168 |
+
"token_acc": 0.7803855825649623,
|
| 169 |
+
"train_speed(iter/s)": 0.178838
|
| 170 |
+
},
|
| 171 |
+
{
|
| 172 |
+
"epoch": 1.3185840707964602,
|
| 173 |
+
"grad_norm": 8.450562427876633,
|
| 174 |
+
"learning_rate": 1.8331970944124488e-06,
|
| 175 |
+
"loss": 0.8272968292236328,
|
| 176 |
+
"memory(GiB)": 48.78,
|
| 177 |
+
"step": 75,
|
| 178 |
+
"token_acc": 0.7696491228070176,
|
| 179 |
+
"train_speed(iter/s)": 0.182395
|
| 180 |
+
},
|
| 181 |
+
{
|
| 182 |
+
"epoch": 1.407079646017699,
|
| 183 |
+
"grad_norm": 14.004351195839384,
|
| 184 |
+
"learning_rate": 1.7971325072229223e-06,
|
| 185 |
+
"loss": 0.9129334449768066,
|
| 186 |
+
"memory(GiB)": 48.78,
|
| 187 |
+
"step": 80,
|
| 188 |
+
"token_acc": 0.760928549894483,
|
| 189 |
+
"train_speed(iter/s)": 0.185332
|
| 190 |
+
},
|
| 191 |
+
{
|
| 192 |
+
"epoch": 1.495575221238938,
|
| 193 |
+
"grad_norm": 8.570012885646701,
|
| 194 |
+
"learning_rate": 1.7579717231454529e-06,
|
| 195 |
+
"loss": 0.8181610107421875,
|
| 196 |
+
"memory(GiB)": 48.78,
|
| 197 |
+
"step": 85,
|
| 198 |
+
"token_acc": 0.7538960037031323,
|
| 199 |
+
"train_speed(iter/s)": 0.18795
|
| 200 |
+
},
|
| 201 |
+
{
|
| 202 |
+
"epoch": 1.584070796460177,
|
| 203 |
+
"grad_norm": 9.613361045579158,
|
| 204 |
+
"learning_rate": 1.7158668492597184e-06,
|
| 205 |
+
"loss": 0.7870856285095215,
|
| 206 |
+
"memory(GiB)": 48.78,
|
| 207 |
+
"step": 90,
|
| 208 |
+
"token_acc": 0.7618816682832201,
|
| 209 |
+
"train_speed(iter/s)": 0.190426
|
| 210 |
+
},
|
| 211 |
+
{
|
| 212 |
+
"epoch": 1.672566371681416,
|
| 213 |
+
"grad_norm": 10.122082437554802,
|
| 214 |
+
"learning_rate": 1.67098142798597e-06,
|
| 215 |
+
"loss": 0.8310245513916016,
|
| 216 |
+
"memory(GiB)": 48.78,
|
| 217 |
+
"step": 95,
|
| 218 |
+
"token_acc": 0.7752255947497949,
|
| 219 |
+
"train_speed(iter/s)": 0.192905
|
| 220 |
+
},
|
| 221 |
+
{
|
| 222 |
+
"epoch": 1.7610619469026547,
|
| 223 |
+
"grad_norm": 10.602875101442406,
|
| 224 |
+
"learning_rate": 1.6234898018587336e-06,
|
| 225 |
+
"loss": 0.8105389595031738,
|
| 226 |
+
"memory(GiB)": 48.78,
|
| 227 |
+
"step": 100,
|
| 228 |
+
"token_acc": 0.7644825878857897,
|
| 229 |
+
"train_speed(iter/s)": 0.195119
|
| 230 |
+
},
|
| 231 |
+
{
|
| 232 |
+
"epoch": 1.8495575221238938,
|
| 233 |
+
"grad_norm": 11.76556584469257,
|
| 234 |
+
"learning_rate": 1.573576436351046e-06,
|
| 235 |
+
"loss": 0.8393661499023437,
|
| 236 |
+
"memory(GiB)": 48.78,
|
| 237 |
+
"step": 105,
|
| 238 |
+
"token_acc": 0.7800134138162307,
|
| 239 |
+
"train_speed(iter/s)": 0.196801
|
| 240 |
+
},
|
| 241 |
+
{
|
| 242 |
+
"epoch": 1.9380530973451329,
|
| 243 |
+
"grad_norm": 7.960442929539371,
|
| 244 |
+
"learning_rate": 1.521435203379498e-06,
|
| 245 |
+
"loss": 0.8687034606933594,
|
| 246 |
+
"memory(GiB)": 48.78,
|
| 247 |
+
"step": 110,
|
| 248 |
+
"token_acc": 0.771374829001368,
|
| 249 |
+
"train_speed(iter/s)": 0.198748
|
| 250 |
+
},
|
| 251 |
+
{
|
| 252 |
+
"epoch": 2.0,
|
| 253 |
+
"eval_loss": 0.5437881350517273,
|
| 254 |
+
"eval_runtime": 8.627,
|
| 255 |
+
"eval_samples_per_second": 11.476,
|
| 256 |
+
"eval_steps_per_second": 1.507,
|
| 257 |
+
"eval_token_acc": 0.7667601727235478,
|
| 258 |
+
"step": 114
|
| 259 |
+
},
|
| 260 |
+
{
|
| 261 |
+
"epoch": 2.017699115044248,
|
| 262 |
+
"grad_norm": 4.502049505152077,
|
| 263 |
+
"learning_rate": 1.467268628273062e-06,
|
| 264 |
+
"loss": 0.7106491565704346,
|
| 265 |
+
"memory(GiB)": 48.78,
|
| 266 |
+
"step": 115,
|
| 267 |
+
"token_acc": 0.7862446268073466,
|
| 268 |
+
"train_speed(iter/s)": 0.173338
|
| 269 |
+
},
|
| 270 |
+
{
|
| 271 |
+
"epoch": 2.106194690265487,
|
| 272 |
+
"grad_norm": 7.276003996918958,
|
| 273 |
+
"learning_rate": 1.4112871031306117e-06,
|
| 274 |
+
"loss": 0.6555411338806152,
|
| 275 |
+
"memory(GiB)": 48.78,
|
| 276 |
+
"step": 120,
|
| 277 |
+
"token_acc": 0.7813225956326467,
|
| 278 |
+
"train_speed(iter/s)": 0.175242
|
| 279 |
+
},
|
| 280 |
+
{
|
| 281 |
+
"epoch": 2.1946902654867255,
|
| 282 |
+
"grad_norm": 4.8568813692113135,
|
| 283 |
+
"learning_rate": 1.3537080696225813e-06,
|
| 284 |
+
"loss": 0.6249444961547852,
|
| 285 |
+
"memory(GiB)": 48.78,
|
| 286 |
+
"step": 125,
|
| 287 |
+
"token_acc": 0.783449211463456,
|
| 288 |
+
"train_speed(iter/s)": 0.177449
|
| 289 |
+
},
|
| 290 |
+
{
|
| 291 |
+
"epoch": 2.2831858407079646,
|
| 292 |
+
"grad_norm": 5.890485254316051,
|
| 293 |
+
"learning_rate": 1.2947551744109043e-06,
|
| 294 |
+
"loss": 0.6664288520812989,
|
| 295 |
+
"memory(GiB)": 48.78,
|
| 296 |
+
"step": 130,
|
| 297 |
+
"token_acc": 0.7687228217104204,
|
| 298 |
+
"train_speed(iter/s)": 0.179237
|
| 299 |
+
},
|
| 300 |
+
{
|
| 301 |
+
"epoch": 2.3716814159292037,
|
| 302 |
+
"grad_norm": 8.771449820473132,
|
| 303 |
+
"learning_rate": 1.2346574004677154e-06,
|
| 304 |
+
"loss": 0.6991546630859375,
|
| 305 |
+
"memory(GiB)": 48.78,
|
| 306 |
+
"step": 135,
|
| 307 |
+
"token_acc": 0.7700296735905044,
|
| 308 |
+
"train_speed(iter/s)": 0.180871
|
| 309 |
+
},
|
| 310 |
+
{
|
| 311 |
+
"epoch": 2.4601769911504423,
|
| 312 |
+
"grad_norm": 9.067730854578071,
|
| 313 |
+
"learning_rate": 1.1736481776669305e-06,
|
| 314 |
+
"loss": 0.6536848545074463,
|
| 315 |
+
"memory(GiB)": 48.78,
|
| 316 |
+
"step": 140,
|
| 317 |
+
"token_acc": 0.766509049404859,
|
| 318 |
+
"train_speed(iter/s)": 0.182581
|
| 319 |
+
},
|
| 320 |
+
{
|
| 321 |
+
"epoch": 2.5486725663716814,
|
| 322 |
+
"grad_norm": 4.50790352837357,
|
| 323 |
+
"learning_rate": 1.1119644761033077e-06,
|
| 324 |
+
"loss": 0.6313935279846191,
|
| 325 |
+
"memory(GiB)": 48.78,
|
| 326 |
+
"step": 145,
|
| 327 |
+
"token_acc": 0.7825399613560513,
|
| 328 |
+
"train_speed(iter/s)": 0.184363
|
| 329 |
+
},
|
| 330 |
+
{
|
| 331 |
+
"epoch": 2.6371681415929205,
|
| 332 |
+
"grad_norm": 4.312670055490422,
|
| 333 |
+
"learning_rate": 1.0498458856606971e-06,
|
| 334 |
+
"loss": 0.6255232334136963,
|
| 335 |
+
"memory(GiB)": 48.78,
|
| 336 |
+
"step": 150,
|
| 337 |
+
"token_acc": 0.7767244197450147,
|
| 338 |
+
"train_speed(iter/s)": 0.185598
|
| 339 |
+
},
|
| 340 |
+
{
|
| 341 |
+
"epoch": 2.725663716814159,
|
| 342 |
+
"grad_norm": 7.755258717453065,
|
| 343 |
+
"learning_rate": 9.875336854045848e-07,
|
| 344 |
+
"loss": 0.6171076774597168,
|
| 345 |
+
"memory(GiB)": 48.78,
|
| 346 |
+
"step": 155,
|
| 347 |
+
"token_acc": 0.7896267571497819,
|
| 348 |
+
"train_speed(iter/s)": 0.187098
|
| 349 |
+
},
|
| 350 |
+
{
|
| 351 |
+
"epoch": 2.814159292035398,
|
| 352 |
+
"grad_norm": 7.845634190557466,
|
| 353 |
+
"learning_rate": 9.252699064135758e-07,
|
| 354 |
+
"loss": 0.6185690879821777,
|
| 355 |
+
"memory(GiB)": 48.78,
|
| 356 |
+
"step": 160,
|
| 357 |
+
"token_acc": 0.7802016195670137,
|
| 358 |
+
"train_speed(iter/s)": 0.188585
|
| 359 |
+
},
|
| 360 |
+
{
|
| 361 |
+
"epoch": 2.9026548672566372,
|
| 362 |
+
"grad_norm": 6.338292715281437,
|
| 363 |
+
"learning_rate": 8.632963916899268e-07,
|
| 364 |
+
"loss": 0.656156349182129,
|
| 365 |
+
"memory(GiB)": 48.78,
|
| 366 |
+
"step": 165,
|
| 367 |
+
"token_acc": 0.7780242279474492,
|
| 368 |
+
"train_speed(iter/s)": 0.190226
|
| 369 |
+
},
|
| 370 |
+
{
|
| 371 |
+
"epoch": 2.991150442477876,
|
| 372 |
+
"grad_norm": 7.843306869070192,
|
| 373 |
+
"learning_rate": 8.018538568006025e-07,
|
| 374 |
+
"loss": 0.6247214794158935,
|
| 375 |
+
"memory(GiB)": 48.78,
|
| 376 |
+
"step": 170,
|
| 377 |
+
"token_acc": 0.7823321554770318,
|
| 378 |
+
"train_speed(iter/s)": 0.191501
|
| 379 |
+
},
|
| 380 |
+
{
|
| 381 |
+
"epoch": 3.0,
|
| 382 |
+
"eval_loss": 0.5920259952545166,
|
| 383 |
+
"eval_runtime": 9.0693,
|
| 384 |
+
"eval_samples_per_second": 10.916,
|
| 385 |
+
"eval_steps_per_second": 1.433,
|
| 386 |
+
"eval_token_acc": 0.7687458631449554,
|
| 387 |
+
"step": 171
|
| 388 |
+
},
|
| 389 |
+
{
|
| 390 |
+
"epoch": 3.0707964601769913,
|
| 391 |
+
"grad_norm": 4.95362684527685,
|
| 392 |
+
"learning_rate": 7.411809548974791e-07,
|
| 393 |
+
"loss": 0.5203310966491699,
|
| 394 |
+
"memory(GiB)": 48.78,
|
| 395 |
+
"step": 175,
|
| 396 |
+
"token_acc": 0.7873366834170854,
|
| 397 |
+
"train_speed(iter/s)": 0.175347
|
| 398 |
+
},
|
| 399 |
+
{
|
| 400 |
+
"epoch": 3.15929203539823,
|
| 401 |
+
"grad_norm": 3.7681038604987718,
|
| 402 |
+
"learning_rate": 6.815133497483157e-07,
|
| 403 |
+
"loss": 0.5779544830322265,
|
| 404 |
+
"memory(GiB)": 48.78,
|
| 405 |
+
"step": 180,
|
| 406 |
+
"token_acc": 0.7699359972323128,
|
| 407 |
+
"train_speed(iter/s)": 0.176628
|
| 408 |
+
},
|
| 409 |
+
{
|
| 410 |
+
"epoch": 3.247787610619469,
|
| 411 |
+
"grad_norm": 3.423633138763175,
|
| 412 |
+
"learning_rate": 6.230828003789947e-07,
|
| 413 |
+
"loss": 0.5451234340667724,
|
| 414 |
+
"memory(GiB)": 48.78,
|
| 415 |
+
"step": 185,
|
| 416 |
+
"token_acc": 0.7917084587094617,
|
| 417 |
+
"train_speed(iter/s)": 0.177868
|
| 418 |
+
},
|
| 419 |
+
{
|
| 420 |
+
"epoch": 3.336283185840708,
|
| 421 |
+
"grad_norm": 3.30866813240123,
|
| 422 |
+
"learning_rate": 5.661162608824419e-07,
|
| 423 |
+
"loss": 0.5657532691955567,
|
| 424 |
+
"memory(GiB)": 48.78,
|
| 425 |
+
"step": 190,
|
| 426 |
+
"token_acc": 0.7935142289874255,
|
| 427 |
+
"train_speed(iter/s)": 0.179042
|
| 428 |
+
},
|
| 429 |
+
{
|
| 430 |
+
"epoch": 3.4247787610619467,
|
| 431 |
+
"grad_norm": 4.15737971213242,
|
| 432 |
+
"learning_rate": 5.10834998890711e-07,
|
| 433 |
+
"loss": 0.5481144905090332,
|
| 434 |
+
"memory(GiB)": 48.78,
|
| 435 |
+
"step": 195,
|
| 436 |
+
"token_acc": 0.7881839543470963,
|
| 437 |
+
"train_speed(iter/s)": 0.180324
|
| 438 |
+
},
|
| 439 |
+
{
|
| 440 |
+
"epoch": 3.5132743362831858,
|
| 441 |
+
"grad_norm": 3.6687005982187846,
|
| 442 |
+
"learning_rate": 4.5745373613424065e-07,
|
| 443 |
+
"loss": 0.5490777969360352,
|
| 444 |
+
"memory(GiB)": 48.78,
|
| 445 |
+
"step": 200,
|
| 446 |
+
"token_acc": 0.7685361216730038,
|
| 447 |
+
"train_speed(iter/s)": 0.181654
|
| 448 |
+
},
|
| 449 |
+
{
|
| 450 |
+
"epoch": 3.601769911504425,
|
| 451 |
+
"grad_norm": 2.619022737106196,
|
| 452 |
+
"learning_rate": 4.061798144264985e-07,
|
| 453 |
+
"loss": 0.5527618408203125,
|
| 454 |
+
"memory(GiB)": 48.78,
|
| 455 |
+
"step": 205,
|
| 456 |
+
"token_acc": 0.797918473547268,
|
| 457 |
+
"train_speed(iter/s)": 0.182886
|
| 458 |
+
},
|
| 459 |
+
{
|
| 460 |
+
"epoch": 3.6902654867256635,
|
| 461 |
+
"grad_norm": 8.889913575790986,
|
| 462 |
+
"learning_rate": 3.5721239031346063e-07,
|
| 463 |
+
"loss": 0.569515323638916,
|
| 464 |
+
"memory(GiB)": 48.78,
|
| 465 |
+
"step": 210,
|
| 466 |
+
"token_acc": 0.7887000459347726,
|
| 467 |
+
"train_speed(iter/s)": 0.183743
|
| 468 |
+
},
|
| 469 |
+
{
|
| 470 |
+
"epoch": 3.7787610619469025,
|
| 471 |
+
"grad_norm": 4.806585418130697,
|
| 472 |
+
"learning_rate": 3.1074166151605295e-07,
|
| 473 |
+
"loss": 0.5423957347869873,
|
| 474 |
+
"memory(GiB)": 48.78,
|
| 475 |
+
"step": 215,
|
| 476 |
+
"token_acc": 0.7839798170923998,
|
| 477 |
+
"train_speed(iter/s)": 0.184758
|
| 478 |
+
},
|
| 479 |
+
{
|
| 480 |
+
"epoch": 3.8672566371681416,
|
| 481 |
+
"grad_norm": 8.958938051180708,
|
| 482 |
+
"learning_rate": 2.6694812817017387e-07,
|
| 483 |
+
"loss": 0.5565788269042968,
|
| 484 |
+
"memory(GiB)": 48.78,
|
| 485 |
+
"step": 220,
|
| 486 |
+
"token_acc": 0.7851294903926482,
|
| 487 |
+
"train_speed(iter/s)": 0.185767
|
| 488 |
+
},
|
| 489 |
+
{
|
| 490 |
+
"epoch": 3.9557522123893807,
|
| 491 |
+
"grad_norm": 2.510888682354834,
|
| 492 |
+
"learning_rate": 2.260018917337726e-07,
|
| 493 |
+
"loss": 0.5587899208068847,
|
| 494 |
+
"memory(GiB)": 48.78,
|
| 495 |
+
"step": 225,
|
| 496 |
+
"token_acc": 0.7872554660529344,
|
| 497 |
+
"train_speed(iter/s)": 0.186581
|
| 498 |
+
},
|
| 499 |
+
{
|
| 500 |
+
"epoch": 4.0,
|
| 501 |
+
"eval_loss": 0.6418129801750183,
|
| 502 |
+
"eval_runtime": 8.9927,
|
| 503 |
+
"eval_samples_per_second": 11.009,
|
| 504 |
+
"eval_steps_per_second": 1.446,
|
| 505 |
+
"eval_token_acc": 0.7703218079238503,
|
| 506 |
+
"step": 228
|
| 507 |
+
},
|
| 508 |
+
{
|
| 509 |
+
"epoch": 4.035398230088496,
|
| 510 |
+
"grad_norm": 2.3659313835670357,
|
| 511 |
+
"learning_rate": 1.880619942841435e-07,
|
| 512 |
+
"loss": 0.4850145816802979,
|
| 513 |
+
"memory(GiB)": 48.78,
|
| 514 |
+
"step": 230,
|
| 515 |
+
"token_acc": 0.7843368752459662,
|
| 516 |
+
"train_speed(iter/s)": 0.174518
|
| 517 |
+
},
|
| 518 |
+
{
|
| 519 |
+
"epoch": 4.123893805309734,
|
| 520 |
+
"grad_norm": 3.029452323677481,
|
| 521 |
+
"learning_rate": 1.5327580077171588e-07,
|
| 522 |
+
"loss": 0.5167119026184082,
|
| 523 |
+
"memory(GiB)": 48.78,
|
| 524 |
+
"step": 235,
|
| 525 |
+
"token_acc": 0.7835400225479143,
|
| 526 |
+
"train_speed(iter/s)": 0.17559
|
| 527 |
+
},
|
| 528 |
+
{
|
| 529 |
+
"epoch": 4.212389380530974,
|
| 530 |
+
"grad_norm": 2.6111688557578265,
|
| 531 |
+
"learning_rate": 1.2177842662977133e-07,
|
| 532 |
+
"loss": 0.5277352333068848,
|
| 533 |
+
"memory(GiB)": 48.78,
|
| 534 |
+
"step": 240,
|
| 535 |
+
"token_acc": 0.7970020730346037,
|
| 536 |
+
"train_speed(iter/s)": 0.17656
|
| 537 |
+
},
|
| 538 |
+
{
|
| 539 |
+
"epoch": 4.300884955752212,
|
| 540 |
+
"grad_norm": 2.343147313652203,
|
| 541 |
+
"learning_rate": 9.369221296335006e-08,
|
| 542 |
+
"loss": 0.519285011291504,
|
| 543 |
+
"memory(GiB)": 48.78,
|
| 544 |
+
"step": 245,
|
| 545 |
+
"token_acc": 0.7873526600828289,
|
| 546 |
+
"train_speed(iter/s)": 0.177675
|
| 547 |
+
},
|
| 548 |
+
{
|
| 549 |
+
"epoch": 4.389380530973451,
|
| 550 |
+
"grad_norm": 2.857900500008329,
|
| 551 |
+
"learning_rate": 6.912625135579586e-08,
|
| 552 |
+
"loss": 0.5282041549682617,
|
| 553 |
+
"memory(GiB)": 48.78,
|
| 554 |
+
"step": 250,
|
| 555 |
+
"token_acc": 0.8017311775273559,
|
| 556 |
+
"train_speed(iter/s)": 0.178638
|
| 557 |
+
},
|
| 558 |
+
{
|
| 559 |
+
"epoch": 4.477876106194691,
|
| 560 |
+
"grad_norm": 3.2862304977309584,
|
| 561 |
+
"learning_rate": 4.817596013867764e-08,
|
| 562 |
+
"loss": 0.5366128921508789,
|
| 563 |
+
"memory(GiB)": 48.78,
|
| 564 |
+
"step": 255,
|
| 565 |
+
"token_acc": 0.7901314721636098,
|
| 566 |
+
"train_speed(iter/s)": 0.179505
|
| 567 |
+
},
|
| 568 |
+
{
|
| 569 |
+
"epoch": 4.566371681415929,
|
| 570 |
+
"grad_norm": 2.5989413835699304,
|
| 571 |
+
"learning_rate": 3.092271377092215e-08,
|
| 572 |
+
"loss": 0.5257000923156738,
|
| 573 |
+
"memory(GiB)": 48.78,
|
| 574 |
+
"step": 260,
|
| 575 |
+
"token_acc": 0.7834569493711706,
|
| 576 |
+
"train_speed(iter/s)": 0.180409
|
| 577 |
+
},
|
| 578 |
+
{
|
| 579 |
+
"epoch": 4.654867256637168,
|
| 580 |
+
"grad_norm": 2.7056193294351574,
|
| 581 |
+
"learning_rate": 1.7433526766711725e-08,
|
| 582 |
+
"loss": 0.5092650413513183,
|
| 583 |
+
"memory(GiB)": 48.78,
|
| 584 |
+
"step": 265,
|
| 585 |
+
"token_acc": 0.7875399361022364,
|
| 586 |
+
"train_speed(iter/s)": 0.181212
|
| 587 |
+
},
|
| 588 |
+
{
|
| 589 |
+
"epoch": 4.743362831858407,
|
| 590 |
+
"grad_norm": 3.278617091813132,
|
| 591 |
+
"learning_rate": 7.760793399827936e-09,
|
| 592 |
+
"loss": 0.5484159469604493,
|
| 593 |
+
"memory(GiB)": 48.78,
|
| 594 |
+
"step": 270,
|
| 595 |
+
"token_acc": 0.7744212772943687,
|
| 596 |
+
"train_speed(iter/s)": 0.182044
|
| 597 |
+
},
|
| 598 |
+
{
|
| 599 |
+
"epoch": 4.831858407079646,
|
| 600 |
+
"grad_norm": 2.432252205143753,
|
| 601 |
+
"learning_rate": 1.942084195468152e-09,
|
| 602 |
+
"loss": 0.5685730934143066,
|
| 603 |
+
"memory(GiB)": 48.78,
|
| 604 |
+
"step": 275,
|
| 605 |
+
"token_acc": 0.7785588752196837,
|
| 606 |
+
"train_speed(iter/s)": 0.182863
|
| 607 |
+
},
|
| 608 |
+
{
|
| 609 |
+
"epoch": 4.920353982300885,
|
| 610 |
+
"grad_norm": 5.405387490184348,
|
| 611 |
+
"learning_rate": 0.0,
|
| 612 |
+
"loss": 0.5433285713195801,
|
| 613 |
+
"memory(GiB)": 48.78,
|
| 614 |
+
"step": 280,
|
| 615 |
+
"token_acc": 0.788130081300813,
|
| 616 |
+
"train_speed(iter/s)": 0.1837
|
| 617 |
+
},
|
| 618 |
+
{
|
| 619 |
+
"epoch": 4.920353982300885,
|
| 620 |
+
"eval_loss": 0.6589558124542236,
|
| 621 |
+
"eval_runtime": 9.3193,
|
| 622 |
+
"eval_samples_per_second": 10.623,
|
| 623 |
+
"eval_steps_per_second": 1.395,
|
| 624 |
+
"eval_token_acc": 0.7703218079238503,
|
| 625 |
+
"step": 280
|
| 626 |
+
},
|
| 627 |
+
{
|
| 628 |
+
"epoch": 4.920353982300885,
|
| 629 |
+
"eval_loss": 0.6589558124542236,
|
| 630 |
+
"eval_runtime": 9.6886,
|
| 631 |
+
"eval_samples_per_second": 10.218,
|
| 632 |
+
"eval_steps_per_second": 1.342,
|
| 633 |
+
"eval_token_acc": 0.7703218079238503,
|
| 634 |
+
"step": 280
|
| 635 |
+
}
|
| 636 |
+
],
|
| 637 |
+
"logging_steps": 5,
|
| 638 |
+
"max_steps": 280,
|
| 639 |
+
"num_input_tokens_seen": 0,
|
| 640 |
+
"num_train_epochs": 5,
|
| 641 |
+
"save_steps": 500,
|
| 642 |
+
"stateful_callbacks": {
|
| 643 |
+
"TrainerControl": {
|
| 644 |
+
"args": {
|
| 645 |
+
"should_epoch_stop": false,
|
| 646 |
+
"should_evaluate": false,
|
| 647 |
+
"should_log": false,
|
| 648 |
+
"should_save": true,
|
| 649 |
+
"should_training_stop": true
|
| 650 |
+
},
|
| 651 |
+
"attributes": {}
|
| 652 |
+
}
|
| 653 |
+
},
|
| 654 |
+
"total_flos": 43238058737664.0,
|
| 655 |
+
"train_batch_size": 2,
|
| 656 |
+
"trial_name": null,
|
| 657 |
+
"trial_params": null
|
| 658 |
+
}
|
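For reference, the trainer_state.json above is consumed most easily through its "log_history" list, which mixes train records (keyed by "loss"/"token_acc") with eval records (keyed by "eval_loss"). A minimal sketch, not part of the upload; the relative path below is a hypothetical local checkout of this checkpoint:

    import json

    # Hypothetical path: point this at the checkpoint-280 directory from this commit.
    with open("checkpoint-280/trainer_state.json") as f:
        state = json.load(f)

    # Split the flat list: train records carry "loss", eval records carry "eval_loss".
    train_log = [rec for rec in state["log_history"] if "loss" in rec]
    eval_log = [rec for rec in state["log_history"] if "eval_loss" in rec]

    for rec in eval_log:
        print(rec["step"], rec["eval_loss"], rec["eval_token_acc"])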
selective_loss/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/checkpoint-280/vocab.json
ADDED
The diff for this file is too large to render. See raw diff
selective_loss/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/checkpoint-280/zero_to_fp32.py
ADDED
@@ -0,0 +1,760 @@
#!/usr/bin/env python

# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

# This script extracts fp32 consolidated weights from ZeRO 1, 2 and 3 DeepSpeed checkpoints. It gets
# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
# application.
#
# example:
# python zero_to_fp32.py . output_dir/
# or
# python zero_to_fp32.py . output_dir/ --safe_serialization

import argparse
import torch
import glob
import math
import os
import re
import gc
import json
import numpy as np
from tqdm import tqdm
from collections import OrderedDict
from dataclasses import dataclass

# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
# DeepSpeed data structures it has to be available in the current python environment.
from deepspeed.utils import logger
from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
                                            FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
                                            FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)


@dataclass
class zero_model_state:
    buffers: dict()
    param_shapes: dict()
    shared_params: list
    ds_version: int
    frozen_param_shapes: dict()
    frozen_param_fragments: dict()


debug = 0

# load to cpu
device = torch.device('cpu')


def atoi(text):
    return int(text) if text.isdigit() else text


def natural_keys(text):
    '''
    alist.sort(key=natural_keys) sorts in human order
    http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    '''
    return [atoi(c) for c in re.split(r'(\d+)', text)]


def get_model_state_file(checkpoint_dir, zero_stage):
    if not os.path.isdir(checkpoint_dir):
        raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")

    # there should be only one file
    if zero_stage <= 2:
        file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
    elif zero_stage == 3:
        file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")

    if not os.path.exists(file):
        raise FileNotFoundError(f"can't find model states file at '{file}'")

    return file


def get_checkpoint_files(checkpoint_dir, glob_pattern):
    # XXX: need to test that this simple glob rule works for multi-node setup too
    ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)

    if len(ckpt_files) == 0:
        raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")

    return ckpt_files


def get_optim_files(checkpoint_dir):
    return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")


def get_model_state_files(checkpoint_dir):
    return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")


def parse_model_states(files):
    zero_model_states = []
    for file in files:
        state_dict = torch.load(file, map_location=device, weights_only=False)

        if BUFFER_NAMES not in state_dict:
            raise ValueError(f"{file} is not a model state checkpoint")
        buffer_names = state_dict[BUFFER_NAMES]
        if debug:
            print("Found buffers:", buffer_names)

        # recover just the buffers while restoring them to fp32 if they were saved in fp16
        buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
        param_shapes = state_dict[PARAM_SHAPES]

        # collect parameters that are included in param_shapes
        param_names = []
        for s in param_shapes:
            for name in s.keys():
                param_names.append(name)

        # update with frozen parameters
        frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
        if frozen_param_shapes is not None:
            if debug:
                print(f"Found frozen_param_shapes: {frozen_param_shapes}")
            param_names += list(frozen_param_shapes.keys())

        # handle shared params
        shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]

        ds_version = state_dict.get(DS_VERSION, None)

        frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)

        z_model_state = zero_model_state(buffers=buffers,
                                         param_shapes=param_shapes,
                                         shared_params=shared_params,
                                         ds_version=ds_version,
                                         frozen_param_shapes=frozen_param_shapes,
                                         frozen_param_fragments=frozen_param_fragments)
        zero_model_states.append(z_model_state)

    return zero_model_states


def parse_optim_states(files, ds_checkpoint_dir):
    total_files = len(files)
    state_dicts = []
    for f in tqdm(files, desc='Loading checkpoint shards'):
        state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False)
        # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
        # and also handle the case where it was already removed by another helper script
        state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
        state_dicts.append(state_dict)

    if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
        raise ValueError(f"{files[0]} is not a zero checkpoint")
    zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
    world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]

    # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
    # parameters can be different from data parallelism for non-expert parameters. So we can just
    # use the max of the partition_count to get the dp world_size.

    if type(world_size) is list:
        world_size = max(world_size)

    if world_size != total_files:
        raise ValueError(
            f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
            "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
        )

    # the groups are named differently in each stage
    if zero_stage <= 2:
        fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
    elif zero_stage == 3:
        fp32_groups_key = FP32_FLAT_GROUPS
    else:
        raise ValueError(f"unknown zero stage {zero_stage}")

    fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
    return zero_stage, world_size, fp32_flat_groups


def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
    """
    Returns fp32 state_dict reconstructed from ds checkpoint

    Args:
        - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)

    """
    print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")

    optim_files = get_optim_files(ds_checkpoint_dir)
    zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
    print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")

    model_files = get_model_state_files(ds_checkpoint_dir)

    zero_model_states = parse_model_states(model_files)
    print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')

    if zero_stage <= 2:
        return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)
    elif zero_stage == 3:
        return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)


def _zero2_merge_frozen_params(state_dict, zero_model_states):
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    frozen_param_fragments = zero_model_states[0].frozen_param_fragments

    if debug:
        num_elem = sum(s.numel() for s in frozen_param_shapes.values())
        print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        state_dict[name] = frozen_param_fragments[name]

        if debug:
            print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")


def _has_callable(obj, fn):
    attr = getattr(obj, fn, None)
    return callable(attr)


def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    param_shapes = zero_model_states[0].param_shapes

    # Reconstruction protocol:
    #
    # XXX: document this

    if debug:
        for i in range(world_size):
            for j in range(len(fp32_flat_groups[0])):
                print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")

    # XXX: memory usage doubles here (zero2)
    num_param_groups = len(fp32_flat_groups[0])
    merged_single_partition_of_fp32_groups = []
    for i in range(num_param_groups):
        merged_partitions = [sd[i] for sd in fp32_flat_groups]
        full_single_fp32_vector = torch.cat(merged_partitions, 0)
        merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
    avail_numel = sum(
        [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])

    if debug:
        wanted_params = sum([len(shapes) for shapes in param_shapes])
        wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
        # not asserting if there is a mismatch due to possible padding
        print(f"Have {avail_numel} numels to process.")
        print(f"Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    total_numel = 0
    total_params = 0
    for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
        offset = 0
        avail_numel = full_single_fp32_vector.numel()
        for name, shape in shapes.items():

            unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
            total_numel += unpartitioned_numel
            total_params += 1

            if debug:
                print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
            state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
            offset += unpartitioned_numel

        # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
        # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
        # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
        # live optimizer object, so we are checking that the numbers are within the right range
        align_to = 2 * world_size

        def zero2_align(x):
            return align_to * math.ceil(x / align_to)

        if debug:
            print(f"original offset={offset}, avail_numel={avail_numel}")

        offset = zero2_align(offset)
        avail_numel = zero2_align(avail_numel)

        if debug:
            print(f"aligned offset={offset}, avail_numel={avail_numel}")

        # Sanity check
        if offset != avail_numel:
            raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")


def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero2_merge_frozen_params(state_dict, zero_model_states)

    _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters
    for pair in zero_model_states[0].shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict


def zero3_partitioned_param_info(unpartitioned_numel, world_size):
    remainder = unpartitioned_numel % world_size
    padding_numel = (world_size - remainder) if remainder else 0
    partitioned_numel = math.ceil(unpartitioned_numel / world_size)
    return partitioned_numel, padding_numel


def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    if debug:
        for i in range(world_size):
            num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
            print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

        frozen_param_shapes = zero_model_states[0].frozen_param_shapes
        wanted_params = len(frozen_param_shapes)
        wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
        avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
        print(f'Frozen params: Have {avail_numel} numels to process.')
        print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in zero_model_states[0].frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
        state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")


class GatheredTensor:
    """
    A pseudo tensor that collects partitioned weights.
    It is more memory efficient when there are multiple groups.
    """

    def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape):
        self.flat_groups = flat_groups
        self.flat_groups_offset = flat_groups_offset
        self.offset = offset
        self.partitioned_numel = partitioned_numel
        self.shape = shape
        self.dtype = self.flat_groups[0][0].dtype

    def contiguous(self):
        """
        Merge partitioned weights from flat_groups into a single tensor.
        """
        end_idx = self.offset + self.partitioned_numel
        world_size = len(self.flat_groups)
        pad_flat_param_chunks = []

        for rank_i in range(world_size):
            # for each rank, we need to collect weights from related group/groups
            flat_groups_at_rank_i = self.flat_groups[rank_i]
            start_group_id = None
            end_group_id = None
            for group_id in range(len(self.flat_groups_offset)):
                if self.flat_groups_offset[group_id] <= self.offset < self.flat_groups_offset[group_id + 1]:
                    start_group_id = group_id
                if self.flat_groups_offset[group_id] < end_idx <= self.flat_groups_offset[group_id + 1]:
                    end_group_id = group_id
                    break
            # collect weights from related group/groups
            for group_id in range(start_group_id, end_group_id + 1):
                flat_tensor = flat_groups_at_rank_i[group_id]
                start_offset = self.offset - self.flat_groups_offset[group_id]
                end_offset = min(end_idx, self.flat_groups_offset[group_id + 1]) - self.flat_groups_offset[group_id]
                pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset])

        # collect weights from all ranks
        pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0)
        param = pad_flat_param[:self.shape.numel()].view(self.shape).contiguous()
        return param


def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    param_shapes = zero_model_states[0].param_shapes
    avail_numel = sum([flat_group.numel() for flat_group in fp32_flat_groups[0]]) * world_size

    # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
    # param, re-consolidating each param, while dealing with padding if any

    # merge list of dicts, preserving order
    param_shapes = {k: v for d in param_shapes for k, v in d.items()}

    if debug:
        for i in range(world_size):
            print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")

        wanted_params = len(param_shapes)
        wanted_numel = sum(shape.numel() for shape in param_shapes.values())
        # not asserting if there is a mismatch due to possible padding
        avail_numel = fp32_flat_groups[0].numel() * world_size
        print(f"Trainable params: Have {avail_numel} numels to process.")
        print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    offset = 0
    total_numel = 0
    total_params = 0
    flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]]))
    for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'):
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel
        total_params += 1
        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

        # memory efficient tensor
        tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape)
        state_dict[name] = tensor
        offset += partitioned_numel

    offset *= world_size

    # Sanity check
    if offset != avail_numel:
        raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")


def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)

    _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters
    for pair in zero_model_states[0].shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict


def to_torch_tensor(state_dict, return_empty_tensor=False):
    """
    Convert state_dict of GatheredTensor to torch tensor
    """
    torch_state_dict = {}
    converted_tensors = {}
    for name, tensor in state_dict.items():
        tensor_id = id(tensor)
        if tensor_id in converted_tensors:  # shared tensors
            shared_tensor = torch_state_dict[converted_tensors[tensor_id]]
            torch_state_dict[name] = shared_tensor
        else:
            converted_tensors[tensor_id] = name
            if return_empty_tensor:
                torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype)
            else:
                torch_state_dict[name] = tensor.contiguous()
    return torch_state_dict


def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
                                             tag=None,
                                             exclude_frozen_parameters=False,
                                             lazy_mode=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
    ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
    via a model hub.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters
        - ``lazy_mode``: get state_dict in lazy mode. It returns a dict of pseudo tensor instead of torch tensor, which is more memory efficient.
          Convert the pseudo tensor to torch tensor by ``.contiguous()``

    Returns:
        - pytorch ``state_dict``

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
        # do the training and checkpoint saving
        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
        model = model.cpu() # move to cpu
        model.load_state_dict(state_dict)
        # submit to model hub or save the model to share with others

    In this example the ``model`` will no longer be usable in the deepspeed context of the same
    application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.

    Note: the above usage may not work if your application doesn't have sufficient free CPU memory.
    You may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
    the checkpoint. Or you can load state_dict in lazy mode ::

        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True) # not on cpu
        for name, lazy_tensor in state_dict.items():
            tensor = lazy_tensor.contiguous()  # to cpu
            print(name, tensor)
            # del tensor to release memory if it no longer in use
    """
    if tag is None:
        latest_path = os.path.join(checkpoint_dir, 'latest')
        if os.path.isfile(latest_path):
            with open(latest_path, 'r') as fd:
                tag = fd.read().strip()
        else:
            raise ValueError(f"Unable to find 'latest' file at {latest_path}")

    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)

    if not os.path.isdir(ds_checkpoint_dir):
        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")

    state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
    if lazy_mode:
        return state_dict
    else:
        return to_torch_tensor(state_dict)


def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
                                               output_dir,
                                               max_shard_size="5GB",
                                               safe_serialization=False,
                                               tag=None,
                                               exclude_frozen_parameters=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
    loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``output_dir``: directory to the pytorch fp32 state_dict output files
        - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
        - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters
    """

    # Dependency pre-check
    if safe_serialization:
        try:
            from safetensors.torch import save_file
        except ImportError:
            print('If you want to use `safe_serialization`, please `pip install safetensors`')
            raise
    if max_shard_size is not None:
        try:
            from huggingface_hub import split_torch_state_dict_into_shards
        except ImportError:
            print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
            raise

    # Convert zero checkpoint to state_dict
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
                                                          tag,
                                                          exclude_frozen_parameters,
                                                          lazy_mode=True)

    # Shard the model if it is too big.
    weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
    if max_shard_size is not None:
        filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
        # a memory-efficient approach for sharding
        empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True)
        state_dict_split = split_torch_state_dict_into_shards(empty_state_dict,
                                                              filename_pattern=filename_pattern,
                                                              max_shard_size=max_shard_size)
    else:
        from collections import namedtuple
        StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
        state_dict_split = StateDictSplit(is_sharded=False,
                                          filename_to_tensors={weights_name: list(state_dict.keys())})

    # Save the model by shard
    os.makedirs(output_dir, exist_ok=True)
    filename_to_tensors = state_dict_split.filename_to_tensors.items()
    for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
        shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors}
        shard_state_dict = to_torch_tensor(shard_state_dict)
        output_path = os.path.join(output_dir, shard_file)
        if safe_serialization:
            save_file(shard_state_dict, output_path, metadata={"format": "pt"})
        else:
            torch.save(shard_state_dict, output_path)
        # release the memory of current shard
        for tensor_name in list(shard_state_dict.keys()):
            del state_dict[tensor_name]
            del shard_state_dict[tensor_name]
        del shard_state_dict
        gc.collect()

    # Save index if sharded
    if state_dict_split.is_sharded:
        index = {
            "metadata": state_dict_split.metadata,
            "weight_map": state_dict_split.tensor_to_filename,
        }
        save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
        save_index_file = os.path.join(output_dir, save_index_file)
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)


def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
    """
    1. Put the provided model to cpu
    2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
    3. Load it into the provided model

    Args:
        - ``model``: the model object to update
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``

    Returns:
        - ``model``: modified model

    Make sure you have plenty of CPU memory available before you call this function. If you don't
    have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
    conveniently placed for you in the checkpoint folder.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
        model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
        # submit to model hub or save the model to share with others

    Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
    of the same application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    """
    logger.info(f"Extracting fp32 weights")
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)

    logger.info(f"Overwriting model with fp32 weights")
    model = model.cpu()
    model.load_state_dict(state_dict, strict=False)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("checkpoint_dir",
                        type=str,
                        help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
    parser.add_argument("output_dir",
                        type=str,
                        help="directory to the pytorch fp32 state_dict output files "
                        "(e.g. path/checkpoint-12-output/)")
    parser.add_argument(
        "--max_shard_size",
        type=str,
        default="5GB",
        help="The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size "
        "lower than this size. If expressed as a string, needs to be digits followed by a unit (like `5MB`). "
        "We default it to 5GB in order for models to be able to run easily on free-tier google colab instances "
        "without CPU OOM issues.")
    parser.add_argument(
        "--safe_serialization",
        default=False,
        action='store_true',
        help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
    parser.add_argument("-t",
                        "--tag",
                        type=str,
                        default=None,
                        help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
    parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
    parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
    args = parser.parse_args()

    debug = args.debug

    convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
                                               args.output_dir,
                                               max_shard_size=args.max_shard_size,
                                               safe_serialization=args.safe_serialization,
                                               tag=args.tag,
                                               exclude_frozen_parameters=args.exclude_frozen_parameters)
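For reference, the script above is the stock DeepSpeed helper that gets copied into each checkpoint directory. A plausible offline invocation against this checkpoint, sketched under the assumption that the ZeRO optimizer shards and the `latest` tag file ship with checkpoint-280 (the output directory name is hypothetical):

    cd selective_loss/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/checkpoint-280
    python zero_to_fp32.py . fp32_output/ --safe_serialization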
selective_loss/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/logging.jsonl
ADDED
@@ -0,0 +1,65 @@
| 1 |
+
{"loss": 2.39338684, "token_acc": 0.65004704, "grad_norm": 71.3466032, "learning_rate": 7e-08, "memory(GiB)": 45.01, "train_speed(iter/s)": 0.042756, "epoch": 0.01769912, "global_step/max_steps": "1/280", "percentage": "0.36%", "elapsed_time": "11s", "remaining_time": "55m 40s"}
|
| 2 |
+
{"loss": 2.15954161, "token_acc": 0.65500468, "grad_norm": 83.56776459, "learning_rate": 3.6e-07, "memory(GiB)": 48.77, "train_speed(iter/s)": 0.123972, "epoch": 0.08849558, "global_step/max_steps": "5/280", "percentage": "1.79%", "elapsed_time": "28s", "remaining_time": "26m 30s"}
|
| 3 |
+
{"loss": 2.33926334, "token_acc": 0.64049854, "grad_norm": 34.05608808, "learning_rate": 7.1e-07, "memory(GiB)": 48.77, "train_speed(iter/s)": 0.166375, "epoch": 0.17699115, "global_step/max_steps": "10/280", "percentage": "3.57%", "elapsed_time": "48s", "remaining_time": "21m 55s"}
|
| 4 |
+
{"loss": 1.79877586, "token_acc": 0.64225122, "grad_norm": 25.50056555, "learning_rate": 1.07e-06, "memory(GiB)": 48.77, "train_speed(iter/s)": 0.187062, "epoch": 0.26548673, "global_step/max_steps": "15/280", "percentage": "5.36%", "elapsed_time": "1m 8s", "remaining_time": "20m 14s"}
|
| 5 |
+
{"loss": 1.53069773, "token_acc": 0.66383047, "grad_norm": 16.71293234, "learning_rate": 1.43e-06, "memory(GiB)": 48.77, "train_speed(iter/s)": 0.200803, "epoch": 0.3539823, "global_step/max_steps": "20/280", "percentage": "7.14%", "elapsed_time": "1m 28s", "remaining_time": "19m 6s"}
|
| 6 |
+
{"loss": 1.41002331, "token_acc": 0.67508143, "grad_norm": 17.64167463, "learning_rate": 1.79e-06, "memory(GiB)": 48.77, "train_speed(iter/s)": 0.207089, "epoch": 0.44247788, "global_step/max_steps": "25/280", "percentage": "8.93%", "elapsed_time": "1m 49s", "remaining_time": "18m 34s"}
|
| 7 |
+
{"loss": 1.25059834, "token_acc": 0.70195075, "grad_norm": 13.68538103, "learning_rate": 2e-06, "memory(GiB)": 48.77, "train_speed(iter/s)": 0.21304, "epoch": 0.53097345, "global_step/max_steps": "30/280", "percentage": "10.71%", "elapsed_time": "2m 9s", "remaining_time": "17m 58s"}
|
| 8 |
+
{"loss": 1.10536585, "token_acc": 0.72951558, "grad_norm": 11.22195592, "learning_rate": 2e-06, "memory(GiB)": 48.77, "train_speed(iter/s)": 0.216941, "epoch": 0.61946903, "global_step/max_steps": "35/280", "percentage": "12.50%", "elapsed_time": "2m 29s", "remaining_time": "17m 29s"}
|
| 9 |
+
{"loss": 1.13892555, "token_acc": 0.74329055, "grad_norm": 13.61471787, "learning_rate": 1.99e-06, "memory(GiB)": 48.77, "train_speed(iter/s)": 0.220463, "epoch": 0.7079646, "global_step/max_steps": "40/280", "percentage": "14.29%", "elapsed_time": "2m 50s", "remaining_time": "17m 0s"}
|
| 10 |
+
{"loss": 1.13503237, "token_acc": 0.75209051, "grad_norm": 11.62532075, "learning_rate": 1.98e-06, "memory(GiB)": 48.77, "train_speed(iter/s)": 0.222472, "epoch": 0.79646018, "global_step/max_steps": "45/280", "percentage": "16.07%", "elapsed_time": "3m 10s", "remaining_time": "16m 36s"}
|
| 11 |
+
{"loss": 0.97727509, "token_acc": 0.74051896, "grad_norm": 10.61582655, "learning_rate": 1.96e-06, "memory(GiB)": 48.77, "train_speed(iter/s)": 0.224224, "epoch": 0.88495575, "global_step/max_steps": "50/280", "percentage": "17.86%", "elapsed_time": "3m 31s", "remaining_time": "16m 13s"}
|
| 12 |
+
{"loss": 1.10832481, "token_acc": 0.74648309, "grad_norm": 12.59537665, "learning_rate": 1.94e-06, "memory(GiB)": 48.77, "train_speed(iter/s)": 0.22532, "epoch": 0.97345133, "global_step/max_steps": "55/280", "percentage": "19.64%", "elapsed_time": "3m 52s", "remaining_time": "15m 51s"}
|
| 13 |
+
{"eval_loss": 0.56507361, "eval_token_acc": 0.75790336, "eval_runtime": 8.5395, "eval_samples_per_second": 11.593, "eval_steps_per_second": 1.522, "epoch": 1.0, "global_step/max_steps": "57/280", "percentage": "20.36%", "elapsed_time": "4m 6s", "remaining_time": "16m 4s"}
|
| 14 |
+
{"loss": 0.83732853, "token_acc": 0.77055053, "grad_norm": 9.69581461, "learning_rate": 1.92e-06, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.171599, "epoch": 1.05309735, "global_step/max_steps": "60/280", "percentage": "21.43%", "elapsed_time": "5m 38s", "remaining_time": "20m 40s"}
|
| 15 |
+
{"loss": 0.88075085, "token_acc": 0.77068167, "grad_norm": 11.08339019, "learning_rate": 1.9e-06, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.175669, "epoch": 1.14159292, "global_step/max_steps": "65/280", "percentage": "23.21%", "elapsed_time": "5m 58s", "remaining_time": "19m 46s"}
|
| 16 |
+
{"loss": 0.83789043, "token_acc": 0.78038558, "grad_norm": 12.41592248, "learning_rate": 1.87e-06, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.178838, "epoch": 1.2300885, "global_step/max_steps": "70/280", "percentage": "25.00%", "elapsed_time": "6m 20s", "remaining_time": "19m 0s"}
|
| 17 |
+
{"loss": 0.82729683, "token_acc": 0.76964912, "grad_norm": 8.45056243, "learning_rate": 1.83e-06, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.182395, "epoch": 1.31858407, "global_step/max_steps": "75/280", "percentage": "26.79%", "elapsed_time": "6m 39s", "remaining_time": "18m 12s"}
|
| 18 |
+
{"loss": 0.91293344, "token_acc": 0.76092855, "grad_norm": 14.0043512, "learning_rate": 1.8e-06, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.185332, "epoch": 1.40707965, "global_step/max_steps": "80/280", "percentage": "28.57%", "elapsed_time": "7m 0s", "remaining_time": "17m 30s"}
|
| 19 |
+
{"loss": 0.81816101, "token_acc": 0.753896, "grad_norm": 8.57001289, "learning_rate": 1.76e-06, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.18795, "epoch": 1.49557522, "global_step/max_steps": "85/280", "percentage": "30.36%", "elapsed_time": "7m 20s", "remaining_time": "16m 51s"}
|
| 20 |
+
{"loss": 0.78708563, "token_acc": 0.76188167, "grad_norm": 9.61336105, "learning_rate": 1.72e-06, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.190426, "epoch": 1.5840708, "global_step/max_steps": "90/280", "percentage": "32.14%", "elapsed_time": "7m 41s", "remaining_time": "16m 13s"}
|
| 21 |
+
{"loss": 0.83102455, "token_acc": 0.77522559, "grad_norm": 10.12208244, "learning_rate": 1.67e-06, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.192905, "epoch": 1.67256637, "global_step/max_steps": "95/280", "percentage": "33.93%", "elapsed_time": "8m 1s", "remaining_time": "15m 36s"}
|
| 22 |
+
{"loss": 0.81053896, "token_acc": 0.76448259, "grad_norm": 10.6028751, "learning_rate": 1.62e-06, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.195119, "epoch": 1.76106195, "global_step/max_steps": "100/280", "percentage": "35.71%", "elapsed_time": "8m 21s", "remaining_time": "15m 1s"}
|
| 23 |
+
{"loss": 0.83936615, "token_acc": 0.78001341, "grad_norm": 11.76556584, "learning_rate": 1.57e-06, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.196801, "epoch": 1.84955752, "global_step/max_steps": "105/280", "percentage": "37.50%", "elapsed_time": "8m 42s", "remaining_time": "14m 30s"}
|
| 24 |
+
{"loss": 0.86870346, "token_acc": 0.77137483, "grad_norm": 7.96044293, "learning_rate": 1.52e-06, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.198748, "epoch": 1.9380531, "global_step/max_steps": "110/280", "percentage": "39.29%", "elapsed_time": "9m 2s", "remaining_time": "13m 57s"}
|
| 25 |
+
{"eval_loss": 0.54378814, "eval_token_acc": 0.76676017, "eval_runtime": 8.627, "eval_samples_per_second": 11.476, "eval_steps_per_second": 1.507, "epoch": 2.0, "global_step/max_steps": "114/280", "percentage": "40.71%", "elapsed_time": "9m 24s", "remaining_time": "13m 42s"}
|
| 26 |
+
{"loss": 0.71064916, "token_acc": 0.78624463, "grad_norm": 4.50204951, "learning_rate": 1.47e-06, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.173338, "epoch": 2.01769912, "global_step/max_steps": "115/280", "percentage": "41.07%", "elapsed_time": "10m 52s", "remaining_time": "15m 35s"}
|
| 27 |
+
{"loss": 0.65554113, "token_acc": 0.7813226, "grad_norm": 7.276004, "learning_rate": 1.41e-06, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.175242, "epoch": 2.10619469, "global_step/max_steps": "120/280", "percentage": "42.86%", "elapsed_time": "11m 13s", "remaining_time": "14m 57s"}
|
| 28 |
+
{"loss": 0.6249445, "token_acc": 0.78344921, "grad_norm": 4.85688137, "learning_rate": 1.35e-06, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.177449, "epoch": 2.19469027, "global_step/max_steps": "125/280", "percentage": "44.64%", "elapsed_time": "11m 33s", "remaining_time": "14m 19s"}
|
| 29 |
+
{"loss": 0.66642885, "token_acc": 0.76872282, "grad_norm": 5.89048525, "learning_rate": 1.29e-06, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.179237, "epoch": 2.28318584, "global_step/max_steps": "130/280", "percentage": "46.43%", "elapsed_time": "11m 53s", "remaining_time": "13m 43s"}
|
| 30 |
+
{"loss": 0.69915466, "token_acc": 0.77002967, "grad_norm": 8.77144982, "learning_rate": 1.23e-06, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.180871, "epoch": 2.37168142, "global_step/max_steps": "135/280", "percentage": "48.21%", "elapsed_time": "12m 14s", "remaining_time": "13m 9s"}
|
| 31 |
+
{"loss": 0.65368485, "token_acc": 0.76650905, "grad_norm": 9.06773085, "learning_rate": 1.17e-06, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.182581, "epoch": 2.46017699, "global_step/max_steps": "140/280", "percentage": "50.00%", "elapsed_time": "12m 35s", "remaining_time": "12m 35s"}
|
| 32 |
+
{"loss": 0.63139353, "token_acc": 0.78253996, "grad_norm": 4.50790353, "learning_rate": 1.11e-06, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.184363, "epoch": 2.54867257, "global_step/max_steps": "145/280", "percentage": "51.79%", "elapsed_time": "12m 55s", "remaining_time": "12m 1s"}
|
| 33 |
+
{"loss": 0.62552323, "token_acc": 0.77672442, "grad_norm": 4.31267006, "learning_rate": 1.05e-06, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.185598, "epoch": 2.63716814, "global_step/max_steps": "150/280", "percentage": "53.57%", "elapsed_time": "13m 16s", "remaining_time": "11m 30s"}
|
| 34 |
+
{"loss": 0.61710768, "token_acc": 0.78962676, "grad_norm": 7.75525872, "learning_rate": 9.9e-07, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.187098, "epoch": 2.72566372, "global_step/max_steps": "155/280", "percentage": "55.36%", "elapsed_time": "13m 37s", "remaining_time": "10m 58s"}
|
| 35 |
+
{"loss": 0.61856909, "token_acc": 0.78020162, "grad_norm": 7.84563419, "learning_rate": 9.3e-07, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.188585, "epoch": 2.81415929, "global_step/max_steps": "160/280", "percentage": "57.14%", "elapsed_time": "13m 57s", "remaining_time": "10m 27s"}
|
| 36 |
+
{"loss": 0.65615635, "token_acc": 0.77802423, "grad_norm": 6.33829272, "learning_rate": 8.6e-07, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.190226, "epoch": 2.90265487, "global_step/max_steps": "165/280", "percentage": "58.93%", "elapsed_time": "14m 15s", "remaining_time": "9m 56s"}
|
| 37 |
+
{"loss": 0.62472148, "token_acc": 0.78233216, "grad_norm": 7.84330687, "learning_rate": 8e-07, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.191501, "epoch": 2.99115044, "global_step/max_steps": "170/280", "percentage": "60.71%", "elapsed_time": "14m 36s", "remaining_time": "9m 27s"}
|
| 38 |
+
{"eval_loss": 0.592026, "eval_token_acc": 0.76874586, "eval_runtime": 9.0693, "eval_samples_per_second": 10.916, "eval_steps_per_second": 1.433, "epoch": 3.0, "global_step/max_steps": "171/280", "percentage": "61.07%", "elapsed_time": "14m 46s", "remaining_time": "9m 25s"}
|
| 39 |
+
{"loss": 0.5203311, "token_acc": 0.78733668, "grad_norm": 4.95362685, "learning_rate": 7.4e-07, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.175347, "epoch": 3.07079646, "global_step/max_steps": "175/280", "percentage": "62.50%", "elapsed_time": "16m 26s", "remaining_time": "9m 51s"}
{"loss": 0.57795448, "token_acc": 0.769936, "grad_norm": 3.76810386, "learning_rate": 6.8e-07, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.176628, "epoch": 3.15929204, "global_step/max_steps": "180/280", "percentage": "64.29%", "elapsed_time": "16m 47s", "remaining_time": "9m 19s"}
{"loss": 0.54512343, "token_acc": 0.79170846, "grad_norm": 3.42363314, "learning_rate": 6.2e-07, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.177868, "epoch": 3.24778761, "global_step/max_steps": "185/280", "percentage": "66.07%", "elapsed_time": "17m 8s", "remaining_time": "8m 48s"}
{"loss": 0.56575327, "token_acc": 0.79351423, "grad_norm": 3.30866813, "learning_rate": 5.7e-07, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.179042, "epoch": 3.33628319, "global_step/max_steps": "190/280", "percentage": "67.86%", "elapsed_time": "17m 29s", "remaining_time": "8m 17s"}
{"loss": 0.54811449, "token_acc": 0.78818395, "grad_norm": 4.15737971, "learning_rate": 5.1e-07, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.180324, "epoch": 3.42477876, "global_step/max_steps": "195/280", "percentage": "69.64%", "elapsed_time": "17m 49s", "remaining_time": "7m 46s"}
{"loss": 0.5490778, "token_acc": 0.76853612, "grad_norm": 3.6687006, "learning_rate": 4.6e-07, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.181654, "epoch": 3.51327434, "global_step/max_steps": "200/280", "percentage": "71.43%", "elapsed_time": "18m 9s", "remaining_time": "7m 15s"}
{"loss": 0.55276184, "token_acc": 0.79791847, "grad_norm": 2.61902274, "learning_rate": 4.1e-07, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.182886, "epoch": 3.60176991, "global_step/max_steps": "205/280", "percentage": "73.21%", "elapsed_time": "18m 29s", "remaining_time": "6m 45s"}
{"loss": 0.56951532, "token_acc": 0.78870005, "grad_norm": 8.88991358, "learning_rate": 3.6e-07, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.183743, "epoch": 3.69026549, "global_step/max_steps": "210/280", "percentage": "75.00%", "elapsed_time": "18m 51s", "remaining_time": "6m 17s"}
{"loss": 0.54239573, "token_acc": 0.78397982, "grad_norm": 4.80658542, "learning_rate": 3.1e-07, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.184758, "epoch": 3.77876106, "global_step/max_steps": "215/280", "percentage": "76.79%", "elapsed_time": "19m 12s", "remaining_time": "5m 48s"}
{"loss": 0.55657883, "token_acc": 0.78512949, "grad_norm": 8.95893805, "learning_rate": 2.7e-07, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.185767, "epoch": 3.86725664, "global_step/max_steps": "220/280", "percentage": "78.57%", "elapsed_time": "19m 32s", "remaining_time": "5m 19s"}
{"loss": 0.55878992, "token_acc": 0.78725547, "grad_norm": 2.51088868, "learning_rate": 2.3e-07, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.186581, "epoch": 3.95575221, "global_step/max_steps": "225/280", "percentage": "80.36%", "elapsed_time": "19m 54s", "remaining_time": "4m 51s"}
{"eval_loss": 0.64181298, "eval_token_acc": 0.77032181, "eval_runtime": 8.9927, "eval_samples_per_second": 11.009, "eval_steps_per_second": 1.446, "epoch": 4.0, "global_step/max_steps": "228/280", "percentage": "81.43%", "elapsed_time": "20m 12s", "remaining_time": "4m 36s"}
{"loss": 0.48501458, "token_acc": 0.78433688, "grad_norm": 2.36593138, "learning_rate": 1.9e-07, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.174518, "epoch": 4.03539823, "global_step/max_steps": "230/280", "percentage": "82.14%", "elapsed_time": "21m 46s", "remaining_time": "4m 44s"}
{"loss": 0.5167119, "token_acc": 0.78354002, "grad_norm": 3.02945232, "learning_rate": 1.5e-07, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.17559, "epoch": 4.12389381, "global_step/max_steps": "235/280", "percentage": "83.93%", "elapsed_time": "22m 6s", "remaining_time": "4m 14s"}
{"loss": 0.52773523, "token_acc": 0.79700207, "grad_norm": 2.61116886, "learning_rate": 1.2e-07, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.17656, "epoch": 4.21238938, "global_step/max_steps": "240/280", "percentage": "85.71%", "elapsed_time": "22m 27s", "remaining_time": "3m 44s"}
{"loss": 0.51928501, "token_acc": 0.78735266, "grad_norm": 2.34314731, "learning_rate": 9e-08, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.177675, "epoch": 4.30088496, "global_step/max_steps": "245/280", "percentage": "87.50%", "elapsed_time": "22m 47s", "remaining_time": "3m 15s"}
{"loss": 0.52820415, "token_acc": 0.80173118, "grad_norm": 2.8579005, "learning_rate": 7e-08, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.178638, "epoch": 4.38938053, "global_step/max_steps": "250/280", "percentage": "89.29%", "elapsed_time": "23m 8s", "remaining_time": "2m 46s"}
{"loss": 0.53661289, "token_acc": 0.79013147, "grad_norm": 3.2862305, "learning_rate": 5e-08, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.179505, "epoch": 4.47787611, "global_step/max_steps": "255/280", "percentage": "91.07%", "elapsed_time": "23m 29s", "remaining_time": "2m 18s"}
{"loss": 0.52570009, "token_acc": 0.78345695, "grad_norm": 2.59894138, "learning_rate": 3e-08, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.180409, "epoch": 4.56637168, "global_step/max_steps": "260/280", "percentage": "92.86%", "elapsed_time": "23m 49s", "remaining_time": "1m 49s"}
{"loss": 0.50926504, "token_acc": 0.78753994, "grad_norm": 2.70561933, "learning_rate": 2e-08, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.181212, "epoch": 4.65486726, "global_step/max_steps": "265/280", "percentage": "94.64%", "elapsed_time": "24m 10s", "remaining_time": "1m 22s"}
{"loss": 0.54841595, "token_acc": 0.77442128, "grad_norm": 3.27861709, "learning_rate": 1e-08, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.182044, "epoch": 4.74336283, "global_step/max_steps": "270/280", "percentage": "96.43%", "elapsed_time": "24m 31s", "remaining_time": "54s"}
{"loss": 0.56857309, "token_acc": 0.77855888, "grad_norm": 2.43225221, "learning_rate": 0.0, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.182863, "epoch": 4.83185841, "global_step/max_steps": "275/280", "percentage": "98.21%", "elapsed_time": "24m 52s", "remaining_time": "27s"}
{"loss": 0.54332857, "token_acc": 0.78813008, "grad_norm": 5.40538749, "learning_rate": 0.0, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.1837, "epoch": 4.92035398, "global_step/max_steps": "280/280", "percentage": "100.00%", "elapsed_time": "25m 12s", "remaining_time": "0s"}
{"eval_loss": 0.65895581, "eval_token_acc": 0.77032181, "eval_runtime": 9.3193, "eval_samples_per_second": 10.623, "eval_steps_per_second": 1.395, "epoch": 4.92035398, "global_step/max_steps": "280/280", "percentage": "100.00%", "elapsed_time": "25m 22s", "remaining_time": "0s"}
{"eval_loss": 0.65895581, "eval_token_acc": 0.77032181, "eval_runtime": 9.6886, "eval_samples_per_second": 10.218, "eval_steps_per_second": 1.342, "epoch": 4.92035398, "global_step/max_steps": "280/280", "percentage": "100.00%", "elapsed_time": "26m 56s", "remaining_time": "0s"}
{"train_runtime": 1706.7476, "train_samples_per_second": 2.64, "train_steps_per_second": 0.164, "total_flos": 43238058737664.0, "train_loss": 0.80233554, "epoch": 4.92035398, "global_step/max_steps": "280/280", "percentage": "100.00%", "elapsed_time": "28m 20s", "remaining_time": "0s"}
{"model_parameter_info": "Qwen2_5_VLForConditionalGeneration: 8292.1667M Params (7615.6165M Trainable [91.8411%]), 0.0019M Buffers.", "last_model_checkpoint": "/mnt/data/users/liamding/data/MMMT/lora/selective_mask_plugin/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/checkpoint-280", "best_model_checkpoint": "/mnt/data/users/liamding/data/MMMT/lora/selective_mask_plugin/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/checkpoint-114", "best_metric": 0.54378814, "global_step": 280, "log_history": [{"loss": 2.3933868408203125, "token_acc": 0.6500470366886171, "grad_norm": 71.34660319849452, "learning_rate": 7.142857142857142e-08, "memory(GiB)": 45.01, "train_speed(iter/s)": 0.042756, "epoch": 0.017699115044247787, "step": 1}, {"loss": 2.159541606903076, "token_acc": 0.6550046772684752, "grad_norm": 83.56776459164254, "learning_rate": 3.5714285714285716e-07, "memory(GiB)": 48.77, "train_speed(iter/s)": 0.123972, "epoch": 0.08849557522123894, "step": 5}, {"loss": 2.339263343811035, "token_acc": 0.6404985432178698, "grad_norm": 34.05608807852992, "learning_rate": 7.142857142857143e-07, "memory(GiB)": 48.77, "train_speed(iter/s)": 0.166375, "epoch": 0.17699115044247787, "step": 10}, {"loss": 1.798775863647461, "token_acc": 0.6422512234910277, "grad_norm": 25.500565551593816, "learning_rate": 1.0714285714285714e-06, "memory(GiB)": 48.77, "train_speed(iter/s)": 0.187062, "epoch": 0.26548672566371684, "step": 15}, {"loss": 1.530697727203369, "token_acc": 0.6638304703804784, "grad_norm": 16.712932338074264, "learning_rate": 1.4285714285714286e-06, "memory(GiB)": 48.77, "train_speed(iter/s)": 0.200803, "epoch": 0.35398230088495575, "step": 20}, {"loss": 1.410023307800293, "token_acc": 0.6750814332247557, "grad_norm": 17.641674630168712, "learning_rate": 1.7857142857142857e-06, "memory(GiB)": 48.77, "train_speed(iter/s)": 0.207089, "epoch": 0.4424778761061947, "step": 25}, {"loss": 1.2505983352661132, "token_acc": 0.7019507515190279, "grad_norm": 13.685381032193781, "learning_rate": 1.999689182000816e-06, "memory(GiB)": 48.77, "train_speed(iter/s)": 0.21304, "epoch": 0.5309734513274337, "step": 30}, {"loss": 1.1053658485412599, "token_acc": 0.7295155771654961, "grad_norm": 11.221955923013804, "learning_rate": 1.9961946980917456e-06, "memory(GiB)": 48.77, "train_speed(iter/s)": 0.216941, "epoch": 0.6194690265486725, "step": 35}, {"loss": 1.138925552368164, "token_acc": 0.7432905484247374, "grad_norm": 13.61471786516788, "learning_rate": 1.9888308262251284e-06, "memory(GiB)": 48.77, "train_speed(iter/s)": 0.220463, "epoch": 0.7079646017699115, "step": 40}, {"loss": 1.1350323677062988, "token_acc": 0.7520905066404329, "grad_norm": 11.62532074887379, "learning_rate": 1.9776261689193047e-06, "memory(GiB)": 48.77, "train_speed(iter/s)": 0.222472, "epoch": 0.7964601769911505, "step": 45}, {"loss": 0.9772750854492187, "token_acc": 0.7405189620758483, "grad_norm": 10.615826546314963, "learning_rate": 1.962624246950012e-06, "memory(GiB)": 48.77, "train_speed(iter/s)": 0.224224, "epoch": 0.8849557522123894, "step": 50}, {"loss": 1.1083248138427735, "token_acc": 0.7464830888955403, "grad_norm": 12.595376651407483, "learning_rate": 1.9438833303083674e-06, "memory(GiB)": 48.77, "train_speed(iter/s)": 0.22532, "epoch": 0.9734513274336283, "step": 55}, {"eval_loss": 0.5650736093521118, "eval_token_acc": 0.7579033630661581, "eval_runtime": 8.5395, "eval_samples_per_second": 11.593, "eval_steps_per_second": 1.522, "epoch": 1.0, "step": 57}, {"loss": 0.8373285293579101, "token_acc": 
0.7705505279034691, "grad_norm": 9.69581460794372, "learning_rate": 1.9214762118704076e-06, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.171599, "epoch": 1.0530973451327434, "step": 60}, {"loss": 0.8807508468627929, "token_acc": 0.7706816677696889, "grad_norm": 11.083390187113388, "learning_rate": 1.895489924657301e-06, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.175669, "epoch": 1.1415929203539823, "step": 65}, {"loss": 0.8378904342651368, "token_acc": 0.7803855825649623, "grad_norm": 12.415922478666385, "learning_rate": 1.8660254037844386e-06, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.178838, "epoch": 1.2300884955752212, "step": 70}, {"loss": 0.8272968292236328, "token_acc": 0.7696491228070176, "grad_norm": 8.450562427876633, "learning_rate": 1.8331970944124488e-06, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.182395, "epoch": 1.3185840707964602, "step": 75}, {"loss": 0.9129334449768066, "token_acc": 0.760928549894483, "grad_norm": 14.004351195839384, "learning_rate": 1.7971325072229223e-06, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.185332, "epoch": 1.407079646017699, "step": 80}, {"loss": 0.8181610107421875, "token_acc": 0.7538960037031323, "grad_norm": 8.570012885646701, "learning_rate": 1.7579717231454529e-06, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.18795, "epoch": 1.495575221238938, "step": 85}, {"loss": 0.7870856285095215, "token_acc": 0.7618816682832201, "grad_norm": 9.613361045579158, "learning_rate": 1.7158668492597184e-06, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.190426, "epoch": 1.584070796460177, "step": 90}, {"loss": 0.8310245513916016, "token_acc": 0.7752255947497949, "grad_norm": 10.122082437554802, "learning_rate": 1.67098142798597e-06, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.192905, "epoch": 1.672566371681416, "step": 95}, {"loss": 0.8105389595031738, "token_acc": 0.7644825878857897, "grad_norm": 10.602875101442406, "learning_rate": 1.6234898018587336e-06, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.195119, "epoch": 1.7610619469026547, "step": 100}, {"loss": 0.8393661499023437, "token_acc": 0.7800134138162307, "grad_norm": 11.76556584469257, "learning_rate": 1.573576436351046e-06, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.196801, "epoch": 1.8495575221238938, "step": 105}, {"loss": 0.8687034606933594, "token_acc": 0.771374829001368, "grad_norm": 7.960442929539371, "learning_rate": 1.521435203379498e-06, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.198748, "epoch": 1.9380530973451329, "step": 110}, {"eval_loss": 0.5437881350517273, "eval_token_acc": 0.7667601727235478, "eval_runtime": 8.627, "eval_samples_per_second": 11.476, "eval_steps_per_second": 1.507, "epoch": 2.0, "step": 114}, {"loss": 0.7106491565704346, "token_acc": 0.7862446268073466, "grad_norm": 4.502049505152077, "learning_rate": 1.467268628273062e-06, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.173338, "epoch": 2.017699115044248, "step": 115}, {"loss": 0.6555411338806152, "token_acc": 0.7813225956326467, "grad_norm": 7.276003996918958, "learning_rate": 1.4112871031306117e-06, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.175242, "epoch": 2.106194690265487, "step": 120}, {"loss": 0.6249444961547852, "token_acc": 0.783449211463456, "grad_norm": 4.8568813692113135, "learning_rate": 1.3537080696225813e-06, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.177449, "epoch": 2.1946902654867255, "step": 125}, {"loss": 0.6664288520812989, "token_acc": 0.7687228217104204, "grad_norm": 5.890485254316051, "learning_rate": 1.2947551744109043e-06, "memory(GiB)": 48.78, 
"train_speed(iter/s)": 0.179237, "epoch": 2.2831858407079646, "step": 130}, {"loss": 0.6991546630859375, "token_acc": 0.7700296735905044, "grad_norm": 8.771449820473132, "learning_rate": 1.2346574004677154e-06, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.180871, "epoch": 2.3716814159292037, "step": 135}, {"loss": 0.6536848545074463, "token_acc": 0.766509049404859, "grad_norm": 9.067730854578071, "learning_rate": 1.1736481776669305e-06, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.182581, "epoch": 2.4601769911504423, "step": 140}, {"loss": 0.6313935279846191, "token_acc": 0.7825399613560513, "grad_norm": 4.50790352837357, "learning_rate": 1.1119644761033077e-06, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.184363, "epoch": 2.5486725663716814, "step": 145}, {"loss": 0.6255232334136963, "token_acc": 0.7767244197450147, "grad_norm": 4.312670055490422, "learning_rate": 1.0498458856606971e-06, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.185598, "epoch": 2.6371681415929205, "step": 150}, {"loss": 0.6171076774597168, "token_acc": 0.7896267571497819, "grad_norm": 7.755258717453065, "learning_rate": 9.875336854045848e-07, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.187098, "epoch": 2.725663716814159, "step": 155}, {"loss": 0.6185690879821777, "token_acc": 0.7802016195670137, "grad_norm": 7.845634190557466, "learning_rate": 9.252699064135758e-07, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.188585, "epoch": 2.814159292035398, "step": 160}, {"loss": 0.656156349182129, "token_acc": 0.7780242279474492, "grad_norm": 6.338292715281437, "learning_rate": 8.632963916899268e-07, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.190226, "epoch": 2.9026548672566372, "step": 165}, {"loss": 0.6247214794158935, "token_acc": 0.7823321554770318, "grad_norm": 7.843306869070192, "learning_rate": 8.018538568006025e-07, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.191501, "epoch": 2.991150442477876, "step": 170}, {"eval_loss": 0.5920259952545166, "eval_token_acc": 0.7687458631449554, "eval_runtime": 9.0693, "eval_samples_per_second": 10.916, "eval_steps_per_second": 1.433, "epoch": 3.0, "step": 171}, {"loss": 0.5203310966491699, "token_acc": 0.7873366834170854, "grad_norm": 4.95362684527685, "learning_rate": 7.411809548974791e-07, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.175347, "epoch": 3.0707964601769913, "step": 175}, {"loss": 0.5779544830322265, "token_acc": 0.7699359972323128, "grad_norm": 3.7681038604987718, "learning_rate": 6.815133497483157e-07, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.176628, "epoch": 3.15929203539823, "step": 180}, {"loss": 0.5451234340667724, "token_acc": 0.7917084587094617, "grad_norm": 3.423633138763175, "learning_rate": 6.230828003789947e-07, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.177868, "epoch": 3.247787610619469, "step": 185}, {"loss": 0.5657532691955567, "token_acc": 0.7935142289874255, "grad_norm": 3.30866813240123, "learning_rate": 5.661162608824419e-07, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.179042, "epoch": 3.336283185840708, "step": 190}, {"loss": 0.5481144905090332, "token_acc": 0.7881839543470963, "grad_norm": 4.15737971213242, "learning_rate": 5.10834998890711e-07, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.180324, "epoch": 3.4247787610619467, "step": 195}, {"loss": 0.5490777969360352, "token_acc": 0.7685361216730038, "grad_norm": 3.6687005982187846, "learning_rate": 4.5745373613424065e-07, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.181654, "epoch": 3.5132743362831858, "step": 200}, {"loss": 0.5527618408203125, "token_acc": 
0.797918473547268, "grad_norm": 2.619022737106196, "learning_rate": 4.061798144264985e-07, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.182886, "epoch": 3.601769911504425, "step": 205}, {"loss": 0.569515323638916, "token_acc": 0.7887000459347726, "grad_norm": 8.889913575790986, "learning_rate": 3.5721239031346063e-07, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.183743, "epoch": 3.6902654867256635, "step": 210}, {"loss": 0.5423957347869873, "token_acc": 0.7839798170923998, "grad_norm": 4.806585418130697, "learning_rate": 3.1074166151605295e-07, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.184758, "epoch": 3.7787610619469025, "step": 215}, {"loss": 0.5565788269042968, "token_acc": 0.7851294903926482, "grad_norm": 8.958938051180708, "learning_rate": 2.6694812817017387e-07, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.185767, "epoch": 3.8672566371681416, "step": 220}, {"loss": 0.5587899208068847, "token_acc": 0.7872554660529344, "grad_norm": 2.510888682354834, "learning_rate": 2.260018917337726e-07, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.186581, "epoch": 3.9557522123893807, "step": 225}, {"eval_loss": 0.6418129801750183, "eval_token_acc": 0.7703218079238503, "eval_runtime": 8.9927, "eval_samples_per_second": 11.009, "eval_steps_per_second": 1.446, "epoch": 4.0, "step": 228}, {"loss": 0.4850145816802979, "token_acc": 0.7843368752459662, "grad_norm": 2.3659313835670357, "learning_rate": 1.880619942841435e-07, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.174518, "epoch": 4.035398230088496, "step": 230}, {"loss": 0.5167119026184082, "token_acc": 0.7835400225479143, "grad_norm": 3.029452323677481, "learning_rate": 1.5327580077171588e-07, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.17559, "epoch": 4.123893805309734, "step": 235}, {"loss": 0.5277352333068848, "token_acc": 0.7970020730346037, "grad_norm": 2.6111688557578265, "learning_rate": 1.2177842662977133e-07, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.17656, "epoch": 4.212389380530974, "step": 240}, {"loss": 0.519285011291504, "token_acc": 0.7873526600828289, "grad_norm": 2.343147313652203, "learning_rate": 9.369221296335006e-08, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.177675, "epoch": 4.300884955752212, "step": 245}, {"loss": 0.5282041549682617, "token_acc": 0.8017311775273559, "grad_norm": 2.857900500008329, "learning_rate": 6.912625135579586e-08, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.178638, "epoch": 4.389380530973451, "step": 250}, {"loss": 0.5366128921508789, "token_acc": 0.7901314721636098, "grad_norm": 3.2862304977309584, "learning_rate": 4.817596013867764e-08, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.179505, "epoch": 4.477876106194691, "step": 255}, {"loss": 0.5257000923156738, "token_acc": 0.7834569493711706, "grad_norm": 2.5989413835699304, "learning_rate": 3.092271377092215e-08, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.180409, "epoch": 4.566371681415929, "step": 260}, {"loss": 0.5092650413513183, "token_acc": 0.7875399361022364, "grad_norm": 2.7056193294351574, "learning_rate": 1.7433526766711725e-08, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.181212, "epoch": 4.654867256637168, "step": 265}, {"loss": 0.5484159469604493, "token_acc": 0.7744212772943687, "grad_norm": 3.278617091813132, "learning_rate": 7.760793399827936e-09, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.182044, "epoch": 4.743362831858407, "step": 270}, {"loss": 0.5685730934143066, "token_acc": 0.7785588752196837, "grad_norm": 2.432252205143753, "learning_rate": 1.942084195468152e-09, "memory(GiB)": 48.78, 
"train_speed(iter/s)": 0.182863, "epoch": 4.831858407079646, "step": 275}, {"loss": 0.5433285713195801, "token_acc": 0.788130081300813, "grad_norm": 5.405387490184348, "learning_rate": 0.0, "memory(GiB)": 48.78, "train_speed(iter/s)": 0.1837, "epoch": 4.920353982300885, "step": 280}, {"eval_loss": 0.6589558124542236, "eval_token_acc": 0.7703218079238503, "eval_runtime": 9.3193, "eval_samples_per_second": 10.623, "eval_steps_per_second": 1.395, "epoch": 4.920353982300885, "step": 280}, {"eval_loss": 0.6589558124542236, "eval_token_acc": 0.7703218079238503, "eval_runtime": 9.6886, "eval_samples_per_second": 10.218, "eval_steps_per_second": 1.342, "epoch": 4.920353982300885, "step": 280}, {"train_runtime": 1706.7476, "train_samples_per_second": 2.64, "train_steps_per_second": 0.164, "total_flos": 43238058737664.0, "train_loss": 0.802335535628455, "epoch": 4.920353982300885, "step": 280}], "memory": 48.775390625}
selective_loss/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148/val_dataset.jsonl
ADDED
The diff for this file is too large to render.
See raw diff
selective_loss/qwen2.5vl-7b-thinking_full_v2-spwr_lr1e-6_wd1e-3/v0-20250818-120403/args.json
ADDED
@@ -0,0 +1,375 @@
{
"output_dir": "/mnt/data/users/liamding/data/MMMT/lora/selective_mask_plugin/qwen2.5vl-7b-thinking_full_v2-selective-plugin-weighted_ratio_2/v0-20250818-120403",
"overwrite_output_dir": false,
"do_train": false,
"do_eval": false,
"do_predict": false,
"eval_strategy": "epoch",
"prediction_loss_only": false,
"per_device_train_batch_size": 2,
"per_device_eval_batch_size": 2,
"per_gpu_train_batch_size": null,
"per_gpu_eval_batch_size": null,
"gradient_accumulation_steps": 2,
"eval_accumulation_steps": null,
"eval_delay": 0,
"torch_empty_cache_steps": null,
"learning_rate": 1e-06,
"weight_decay": 0.001,
"adam_beta1": 0.9,
"adam_beta2": 0.95,
"adam_epsilon": 1e-08,
"max_grad_norm": 1.0,
"num_train_epochs": 5.0,
"max_steps": -1,
"lr_scheduler_type": "cosine",
"lr_scheduler_kwargs": null,
"warmup_ratio": 0.1,
"warmup_steps": 0,
"log_level": "passive",
"log_level_replica": "warning",
"log_on_each_node": true,
"logging_dir": "/mnt/data/users/liamding/data/MMMT/lora/selective_mask_plugin/qwen2.5vl-7b-thinking_full_v2-selective-plugin-weighted_ratio_2/v0-20250818-120403/runs",
"logging_strategy": "steps",
"logging_first_step": true,
"logging_steps": 5,
"logging_nan_inf_filter": true,
"save_strategy": "epoch",
"save_steps": 500,
"save_total_limit": 5,
"save_safetensors": true,
"save_on_each_node": false,
"save_only_model": false,
"restore_callback_states_from_checkpoint": false,
"no_cuda": false,
"use_cpu": false,
"use_mps_device": false,
"seed": 42,
"data_seed": 42,
"jit_mode_eval": false,
"use_ipex": false,
"bf16": true,
"fp16": false,
"fp16_opt_level": "O1",
"half_precision_backend": "auto",
"bf16_full_eval": false,
"fp16_full_eval": false,
"tf32": null,
"local_rank": 0,
"ddp_backend": null,
"tpu_num_cores": null,
"tpu_metrics_debug": false,
"debug": null,
"dataloader_drop_last": false,
"eval_steps": null,
"dataloader_num_workers": 4,
"dataloader_prefetch_factor": null,
"past_index": -1,
"run_name": "/mnt/data/users/liamding/data/MMMT/lora/selective_mask_plugin/qwen2.5vl-7b-thinking_full_v2-selective-plugin-weighted_ratio_2/v0-20250818-120403",
"disable_tqdm": null,
"remove_unused_columns": true,
"label_names": null,
"load_best_model_at_end": false,
"metric_for_best_model": "loss",
"greater_is_better": false,
"ignore_data_skip": false,
"fsdp": "",
"fsdp_min_num_params": 0,
"fsdp_config": null,
"tp_size": 0,
"fsdp_transformer_layer_cls_to_wrap": null,
"accelerator_config": {
"dispatch_batches": false
},
"deepspeed": {
"fp16": {
"enabled": "auto",
"loss_scale": 0,
"loss_scale_window": 1000,
"initial_scale_power": 16,
"hysteresis": 2,
"min_loss_scale": 1
},
"bf16": {
"enabled": "auto"
},
"zero_optimization": {
"stage": 3,
"offload_optimizer": {
"device": "none",
"pin_memory": true
},
"offload_param": {
"device": "none",
"pin_memory": true
},
"overlap_comm": false,
"contiguous_gradients": true,
"sub_group_size": 1000000000.0,
"reduce_bucket_size": "auto",
"zero_quantized_weights": false,
"zero_quantized_gradients": false,
"stage3_prefetch_bucket_size": "auto",
"stage3_param_persistence_threshold": "auto",
"stage3_max_live_parameters": 1000000000.0,
"stage3_max_reuse_distance": 1000000000.0,
"stage3_gather_16bit_weights_on_model_save": true
},
"gradient_accumulation_steps": "auto",
"gradient_clipping": "auto",
"steps_per_print": 2000,
"train_batch_size": "auto",
"train_micro_batch_size_per_gpu": "auto",
"wall_clock_breakdown": false
},
"label_smoothing_factor": 0.0,
"optim": "adamw_torch",
"optim_args": null,
"adafactor": false,
"group_by_length": false,
"length_column_name": "length",
"report_to": [
"swanlab"
],
"ddp_find_unused_parameters": null,
"ddp_bucket_cap_mb": null,
"ddp_broadcast_buffers": null,
"dataloader_pin_memory": true,
"dataloader_persistent_workers": false,
"skip_memory_metrics": true,
"use_legacy_prediction_loop": false,
"push_to_hub": false,
"resume_from_checkpoint": null,
"hub_model_id": null,
"hub_strategy": "every_save",
"hub_token": null,
"hub_private_repo": null,
"hub_always_push": false,
"gradient_checkpointing": true,
"gradient_checkpointing_kwargs": null,
"include_inputs_for_metrics": false,
"include_for_metrics": [],
"eval_do_concat_batches": true,
"fp16_backend": "auto",
"push_to_hub_model_id": null,
"push_to_hub_organization": null,
"push_to_hub_token": null,
"mp_parameters": "",
"auto_find_batch_size": false,
"full_determinism": false,
"torchdynamo": null,
"ray_scope": "last",
"ddp_timeout": 18000000,
"torch_compile": false,
"torch_compile_backend": null,
"torch_compile_mode": null,
"include_tokens_per_second": false,
"include_num_input_tokens_seen": false,
"neftune_noise_alpha": null,
"optim_target_modules": null,
"batch_eval_metrics": false,
"eval_on_start": false,
"use_liger_kernel": false,
"eval_use_gather_object": false,
"average_tokens_across_devices": false,
"sortish_sampler": false,
"predict_with_generate": false,
"generation_max_length": null,
"generation_num_beams": null,
"generation_config": null,
"vit_gradient_checkpointing": null,
"check_model": true,
"acc_strategy": "token",
"train_dataloader_shuffle": true,
"max_epochs": null,
"aligner_lr": null,
"vit_lr": null,
"optimizer": null,
"use_logits_to_keep": null,
"channels": null,
"metric_warmup_step": 0,
"fsdp_num": 1,
"acc_steps": 1,
"eval_use_evalscope": false,
"eval_datasets": [],
"eval_limit": null,
"eval_datasets_args": null,
"eval_generation_config": null,
"model": "/mnt/data/users/liamding/data/models/Qwen2.5-VL-7B-Instruct",
"model_type": "qwen2_5_vl",
"model_revision": null,
"task_type": "causal_lm",
"torch_dtype": "bfloat16",
"attn_impl": null,
"num_labels": null,
"problem_type": null,
"rope_scaling": null,
"device_map": null,
"max_memory": {},
"local_repo_path": null,
"init_strategy": null,
"template": "qwen2_5_vl",
"system": null,
"max_length": 32768,
"truncation_strategy": "delete",
"max_pixels": null,
"agent_template": null,
"norm_bbox": null,
"use_chat_template": true,
"padding_free": false,
"padding_side": "right",
"loss_scale": "default",
"sequence_parallel_size": 1,
"response_prefix": null,
"template_backend": "swift",
"dataset": [
"/mnt/data/users/liamding/data/3AM_Plus/final/training/thinking_v2/ambi_normal_train_thinking_772.json",
"/mnt/data/users/liamding/data/3AM_Plus/final/training/thinking_v2/mma_train_thinking_126.json",
"/mnt/data/users/liamding/data/3AM_Plus/final/training/thinking_v2/sp_train_thinking_102.json"
],
"val_dataset": [],
"split_dataset_ratio": 0.1,
"dataset_num_proc": 1,
"load_from_cache_file": true,
"dataset_shuffle": true,
"val_dataset_shuffle": false,
"streaming": false,
"interleave_prob": null,
"stopping_strategy": "first_exhausted",
"shuffle_buffer_size": 1000,
"download_mode": "reuse_dataset_if_exists",
"columns": {},
"strict": false,
"model_name": null,
"model_author": null,
"custom_dataset_info": [],
"quant_method": null,
"quant_bits": null,
"hqq_axis": null,
"bnb_4bit_compute_dtype": "bfloat16",
"bnb_4bit_quant_type": "nf4",
"bnb_4bit_use_double_quant": true,
"bnb_4bit_quant_storage": null,
"max_new_tokens": 64,
"temperature": 0.0,
"top_k": null,
"top_p": null,
"repetition_penalty": null,
"num_beams": 1,
"stream": false,
"stop_words": [],
"logprobs": false,
"top_logprobs": null,
"ckpt_dir": null,
"lora_modules": [],
"tuner_backend": "peft",
"train_type": "full",
"adapters": [],
"external_plugins": [],
"model_kwargs": {},
"load_args": false,
"load_data_args": false,
"packing": false,
"packing_cache": null,
"custom_register_path": [],
"use_hf": false,
"ignore_args_error": false,
"use_swift_lora": false,
"freeze_parameters": [
"visual",
"visual.merger"
],
"freeze_parameters_regex": null,
"freeze_parameters_ratio": 0.0,
"trainable_parameters": [],
"trainable_parameters_regex": null,
"freeze_llm": false,
"freeze_vit": true,
"freeze_aligner": true,
"target_modules": [
"all-linear"
],
"target_regex": null,
"modules_to_save": [],
"lora_rank": 8,
"lora_alpha": 32,
"lora_dropout": 0.05,
"lora_bias": "none",
"lora_dtype": null,
"lorap_lr_ratio": null,
"use_rslora": false,
"use_dora": false,
"lora_ga_batch_size": 2,
"lora_ga_iters": 2,
"lora_ga_max_length": 1024,
"lora_ga_direction": "ArB2r",
"lora_ga_scale": "stable",
"lora_ga_stable_gamma": 16,
"init_weights": true,
"fourier_n_frequency": 2000,
"fourier_scaling": 300.0,
"boft_block_size": 4,
"boft_block_num": 0,
"boft_n_butterfly_factor": 1,
"boft_dropout": 0.0,
"vera_rank": 256,
"vera_projection_prng_key": 0,
"vera_dropout": 0.0,
"vera_d_initial": 0.1,
"adapter_act": "gelu",
"adapter_length": 128,
"use_galore": false,
"galore_target_modules": null,
"galore_rank": 128,
"galore_update_proj_gap": 50,
"galore_scale": 1.0,
"galore_proj_type": "std",
"galore_optim_per_parameter": false,
"galore_with_embedding": false,
"galore_quantization": false,
"galore_proj_quant": false,
"galore_proj_bits": 4,
"galore_proj_group_size": 256,
"galore_cos_threshold": 0.4,
"galore_gamma_proj": 2,
"galore_queue_size": 5,
"adalora_target_r": 8,
"adalora_init_r": 12,
"adalora_tinit": 0,
"adalora_tfinal": 0,
"adalora_deltaT": 1,
"adalora_beta1": 0.85,
"adalora_beta2": 0.85,
"adalora_orth_reg_weight": 0.5,
"llamapro_num_new_blocks": 4,
"llamapro_num_groups": null,
"lisa_activated_layers": 0,
"lisa_step_interval": 20,
"reft_layer_key": null,
"reft_layers": null,
"reft_rank": 4,
"reft_intervention_type": "LoreftIntervention",
"reft_args": null,
"swanlab_token": null,
"swanlab_project": null,
"swanlab_workspace": null,
"swanlab_exp_name": "/mnt/data/users/liamding/data/MMMT/lora/selective_mask_plugin/qwen2.5vl-7b-thinking_full_v2-selective-plugin-weighted_ratio_2/v0-20250818-120403",
"swanlab_mode": "cloud",
"add_version": true,
"resume_only_model": false,
"create_checkpoint_symlink": false,
"lazy_tokenize": true,
"loss_type": "selective_translate_weighted_ratio",
"metric": null,
"zero_hpz_partition_size": null,
"rank": 0,
"global_world_size": 4,
"local_world_size": 4,
"model_suffix": "Qwen2.5-VL-7B-Instruct",
"model_info": "ModelInfo(model_type='qwen2_5_vl', model_dir='/mnt/data/users/liamding/data/models/Qwen2.5-VL-7B-Instruct', torch_dtype=torch.bfloat16, max_model_len=128000, quant_method=None, quant_bits=None, rope_scaling={'type': 'default', 'mrope_section': [16, 24, 24], 'rope_type': 'default'}, config=None, task_type='causal_lm', num_labels=None)",
"model_meta": "ModelMeta(model_type='qwen2_5_vl', model_groups=[ModelGroup(models=[Model(ms_model_id='Qwen/Qwen2.5-VL-3B-Instruct', hf_model_id='Qwen/Qwen2.5-VL-3B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-7B-Instruct', hf_model_id='Qwen/Qwen2.5-VL-7B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-32B-Instruct', hf_model_id='Qwen/Qwen2.5-VL-32B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-72B-Instruct', hf_model_id='Qwen/Qwen2.5-VL-72B-Instruct', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[]), ModelGroup(models=[Model(ms_model_id='Qwen/Qwen2.5-VL-3B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-VL-3B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-7B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-VL-7B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-32B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-VL-32B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-72B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-VL-72B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[])], template='qwen2_5_vl', get_function=<function get_model_tokenizer_qwen2_5_vl at 0x7f719b4055a0>, model_arch='qwen2_vl', architectures=['Qwen2_5_VLForConditionalGeneration'], additional_saved_files=[], torch_dtype=None, is_multimodal=True, is_reward=False, task_type=None, ignore_patterns=None, requires=['transformers>=4.49', 'qwen_vl_utils>=0.0.6', 'decord'], tags=[])",
"model_dir": "/mnt/data/users/liamding/data/models/Qwen2.5-VL-7B-Instruct",
"hub": "<class 'swift.hub.hub.MSHub'>",
"evaluation_strategy": "epoch",
"training_args": "Seq2SeqTrainingArguments(output_dir='/mnt/data/users/liamding/data/MMMT/lora/selective_mask_plugin/qwen2.5vl-7b-thinking_full_v2-selective-plugin-weighted_ratio_2/v0-20250818-120403', overwrite_output_dir=False, do_train=False, do_eval=True, do_predict=False, eval_strategy=<IntervalStrategy.EPOCH: 'epoch'>, prediction_loss_only=False, per_device_train_batch_size=2, per_device_eval_batch_size=2, per_gpu_train_batch_size=None, per_gpu_eval_batch_size=None, gradient_accumulation_steps=2, eval_accumulation_steps=None, eval_delay=0, torch_empty_cache_steps=None, learning_rate=1e-06, weight_decay=0.001, adam_beta1=0.9, adam_beta2=0.95, adam_epsilon=1e-08, max_grad_norm=1.0, num_train_epochs=5.0, max_steps=-1, lr_scheduler_type=<SchedulerType.COSINE: 'cosine'>, lr_scheduler_kwargs=None, warmup_ratio=0.1, warmup_steps=0, log_level='passive', log_level_replica='warning', log_on_each_node=True, logging_dir='/mnt/data/users/liamding/data/MMMT/lora/selective_mask_plugin/qwen2.5vl-7b-thinking_full_v2-selective-plugin-weighted_ratio_2/v0-20250818-120403/runs', logging_strategy=<IntervalStrategy.STEPS: 'steps'>, logging_first_step=True, logging_steps=5, logging_nan_inf_filter=True, save_strategy=<SaveStrategy.EPOCH: 'epoch'>, save_steps=500, save_total_limit=5, save_safetensors=True, save_on_each_node=False, save_only_model=False, restore_callback_states_from_checkpoint=False, no_cuda=False, use_cpu=False, use_mps_device=False, seed=42, data_seed=42, jit_mode_eval=False, use_ipex=False, bf16=True, fp16=False, fp16_opt_level='O1', half_precision_backend='auto', bf16_full_eval=False, fp16_full_eval=False, tf32=None, local_rank=0, ddp_backend=None, tpu_num_cores=None, tpu_metrics_debug=False, debug=[], dataloader_drop_last=False, eval_steps=None, dataloader_num_workers=4, dataloader_prefetch_factor=10, past_index=-1, run_name='/mnt/data/users/liamding/data/MMMT/lora/selective_mask_plugin/qwen2.5vl-7b-thinking_full_v2-selective-plugin-weighted_ratio_2/v0-20250818-120403', disable_tqdm=False, remove_unused_columns=False, label_names=None, load_best_model_at_end=False, metric_for_best_model='loss', greater_is_better=False, ignore_data_skip=False, fsdp=[], fsdp_min_num_params=0, fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, tp_size=0, fsdp_transformer_layer_cls_to_wrap=None, accelerator_config=AcceleratorConfig(split_batches=False, dispatch_batches=False, even_batches=True, use_seedable_sampler=True, non_blocking=False, gradient_accumulation_kwargs=None, use_configured_state=False), deepspeed={'fp16': {'enabled': 'auto', 'loss_scale': 0, 'loss_scale_window': 1000, 'initial_scale_power': 16, 'hysteresis': 2, 'min_loss_scale': 1}, 'bf16': {'enabled': 'auto'}, 'zero_optimization': {'stage': 3, 'offload_optimizer': {'device': 'none', 'pin_memory': True}, 'offload_param': {'device': 'none', 'pin_memory': True}, 'overlap_comm': False, 'contiguous_gradients': True, 'sub_group_size': 1000000000.0, 'reduce_bucket_size': 'auto', 'zero_quantized_weights': False, 'zero_quantized_gradients': False, 'stage3_prefetch_bucket_size': 'auto', 'stage3_param_persistence_threshold': 'auto', 'stage3_max_live_parameters': 1000000000.0, 'stage3_max_reuse_distance': 1000000000.0, 'stage3_gather_16bit_weights_on_model_save': True}, 'gradient_accumulation_steps': 'auto', 'gradient_clipping': 'auto', 'steps_per_print': 2000, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'wall_clock_breakdown': False}, label_smoothing_factor=0.0, 
optim=<OptimizerNames.ADAMW_TORCH: 'adamw_torch'>, optim_args=None, adafactor=False, group_by_length=False, length_column_name='length', report_to=['swanlab'], ddp_find_unused_parameters=None, ddp_bucket_cap_mb=None, ddp_broadcast_buffers=None, dataloader_pin_memory=True, dataloader_persistent_workers=False, skip_memory_metrics=True, use_legacy_prediction_loop=False, push_to_hub=False, resume_from_checkpoint=None, hub_model_id=None, hub_strategy=<HubStrategy.EVERY_SAVE: 'every_save'>, hub_token=None, hub_private_repo=None, hub_always_push=False, gradient_checkpointing=True, gradient_checkpointing_kwargs=None, include_inputs_for_metrics=False, include_for_metrics=[], eval_do_concat_batches=True, fp16_backend='auto', push_to_hub_model_id=None, push_to_hub_organization=None, push_to_hub_token=None, mp_parameters='', auto_find_batch_size=False, full_determinism=False, torchdynamo=None, ray_scope='last', ddp_timeout=18000000, torch_compile=False, torch_compile_backend=None, torch_compile_mode=None, include_tokens_per_second=None, include_num_input_tokens_seen=None, neftune_noise_alpha=None, optim_target_modules=None, batch_eval_metrics=False, eval_on_start=False, use_liger_kernel=False, eval_use_gather_object=False, average_tokens_across_devices=None, sortish_sampler=False, predict_with_generate=False, generation_max_length=None, generation_num_beams=None, generation_config=None, vit_gradient_checkpointing=True, check_model=True, acc_strategy='token', train_dataloader_shuffle=True, max_epochs=None, aligner_lr=None, vit_lr=None, optimizer=None, use_logits_to_keep=None, channels=None, metric_warmup_step=0, fsdp_num=1, acc_steps=1, eval_use_evalscope=False, eval_datasets=[], eval_limit=None, eval_datasets_args=None, eval_generation_config=None, train_type='full', local_repo_path=None, galore_config=None)"
}
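Every versioned run directory in this upload ships the same style of args.json, so comparing two training configurations reduces to diffing these files key by key. A small illustrative sketch follows; the two run paths are taken from this repo, and it assumes the repo has been checked out locally.

import json
from pathlib import Path

def load_args(run_dir: str) -> dict:
    # args.json sits at the top level of each versioned run directory.
    return json.loads((Path(run_dir) / "args.json").read_text())

a = load_args("selective_loss/q7b-thinking_full_v2-spwr_lr2e-6_wd1e-4_a05/v0-20250823-204148")
b = load_args("selective_loss/qwen2.5vl-7b-thinking_full_v2-spwr_lr1e-6_wd1e-3/v0-20250818-120403")

# Print only the hyperparameters that actually differ between the two runs.
for key in sorted(set(a) | set(b)):
    if a.get(key) != b.get(key):
        print(f"{key}: {a.get(key)!r} -> {b.get(key)!r}")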
selective_loss/qwen2.5vl-7b-thinking_full_v2-spwr_lr1e-6_wd1e-3/v0-20250818-120403/checkpoint-280/added_tokens.json
ADDED
@@ -0,0 +1,24 @@
{
"</tool_call>": 151658,
"<tool_call>": 151657,
"<|box_end|>": 151649,
"<|box_start|>": 151648,
"<|endoftext|>": 151643,
"<|file_sep|>": 151664,
"<|fim_middle|>": 151660,
"<|fim_pad|>": 151662,
"<|fim_prefix|>": 151659,
"<|fim_suffix|>": 151661,
"<|im_end|>": 151645,
"<|im_start|>": 151644,
"<|image_pad|>": 151655,
"<|object_ref_end|>": 151647,
"<|object_ref_start|>": 151646,
"<|quad_end|>": 151651,
"<|quad_start|>": 151650,
"<|repo_name|>": 151663,
"<|video_pad|>": 151656,
"<|vision_end|>": 151653,
"<|vision_pad|>": 151654,
"<|vision_start|>": 151652
}
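The token-to-id map above should agree with the tokenizer saved in the same checkpoint. A quick consistency check, assuming transformers is installed and the checkpoint directory is available locally:

import json
from transformers import AutoTokenizer

ckpt = "selective_loss/qwen2.5vl-7b-thinking_full_v2-spwr_lr1e-6_wd1e-3/v0-20250818-120403/checkpoint-280"
tokenizer = AutoTokenizer.from_pretrained(ckpt)

with open(f"{ckpt}/added_tokens.json") as f:
    added = json.load(f)

# Each entry in added_tokens.json must resolve to the same id via the tokenizer.
for token, token_id in added.items():
    assert tokenizer.convert_tokens_to_ids(token) == token_id, token
print(f"verified {len(added)} added tokens")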
selective_loss/qwen2.5vl-7b-thinking_full_v2-spwr_lr1e-6_wd1e-3/v0-20250818-120403/checkpoint-280/args.json
ADDED
@@ -0,0 +1,375 @@
(content identical to selective_loss/qwen2.5vl-7b-thinking_full_v2-spwr_lr1e-6_wd1e-3/v0-20250818-120403/args.json above)
|
| 258 |
+
"num_beams": 1,
|
| 259 |
+
"stream": false,
|
| 260 |
+
"stop_words": [],
|
| 261 |
+
"logprobs": false,
|
| 262 |
+
"top_logprobs": null,
|
| 263 |
+
"ckpt_dir": null,
|
| 264 |
+
"lora_modules": [],
|
| 265 |
+
"tuner_backend": "peft",
|
| 266 |
+
"train_type": "full",
|
| 267 |
+
"adapters": [],
|
| 268 |
+
"external_plugins": [],
|
| 269 |
+
"model_kwargs": {},
|
| 270 |
+
"load_args": false,
|
| 271 |
+
"load_data_args": false,
|
| 272 |
+
"packing": false,
|
| 273 |
+
"packing_cache": null,
|
| 274 |
+
"custom_register_path": [],
|
| 275 |
+
"use_hf": false,
|
| 276 |
+
"ignore_args_error": false,
|
| 277 |
+
"use_swift_lora": false,
|
| 278 |
+
"freeze_parameters": [
|
| 279 |
+
"visual",
|
| 280 |
+
"visual.merger"
|
| 281 |
+
],
|
| 282 |
+
"freeze_parameters_regex": null,
|
| 283 |
+
"freeze_parameters_ratio": 0.0,
|
| 284 |
+
"trainable_parameters": [],
|
| 285 |
+
"trainable_parameters_regex": null,
|
| 286 |
+
"freeze_llm": false,
|
| 287 |
+
"freeze_vit": true,
|
| 288 |
+
"freeze_aligner": true,
|
| 289 |
+
"target_modules": [
|
| 290 |
+
"all-linear"
|
| 291 |
+
],
|
| 292 |
+
"target_regex": null,
|
| 293 |
+
"modules_to_save": [],
|
| 294 |
+
"lora_rank": 8,
|
| 295 |
+
"lora_alpha": 32,
|
| 296 |
+
"lora_dropout": 0.05,
|
| 297 |
+
"lora_bias": "none",
|
| 298 |
+
"lora_dtype": null,
|
| 299 |
+
"lorap_lr_ratio": null,
|
| 300 |
+
"use_rslora": false,
|
| 301 |
+
"use_dora": false,
|
| 302 |
+
"lora_ga_batch_size": 2,
|
| 303 |
+
"lora_ga_iters": 2,
|
| 304 |
+
"lora_ga_max_length": 1024,
|
| 305 |
+
"lora_ga_direction": "ArB2r",
|
| 306 |
+
"lora_ga_scale": "stable",
|
| 307 |
+
"lora_ga_stable_gamma": 16,
|
| 308 |
+
"init_weights": true,
|
| 309 |
+
"fourier_n_frequency": 2000,
|
| 310 |
+
"fourier_scaling": 300.0,
|
| 311 |
+
"boft_block_size": 4,
|
| 312 |
+
"boft_block_num": 0,
|
| 313 |
+
"boft_n_butterfly_factor": 1,
|
| 314 |
+
"boft_dropout": 0.0,
|
| 315 |
+
"vera_rank": 256,
|
| 316 |
+
"vera_projection_prng_key": 0,
|
| 317 |
+
"vera_dropout": 0.0,
|
| 318 |
+
"vera_d_initial": 0.1,
|
| 319 |
+
"adapter_act": "gelu",
|
| 320 |
+
"adapter_length": 128,
|
| 321 |
+
"use_galore": false,
|
| 322 |
+
"galore_target_modules": null,
|
| 323 |
+
"galore_rank": 128,
|
| 324 |
+
"galore_update_proj_gap": 50,
|
| 325 |
+
"galore_scale": 1.0,
|
| 326 |
+
"galore_proj_type": "std",
|
| 327 |
+
"galore_optim_per_parameter": false,
|
| 328 |
+
"galore_with_embedding": false,
|
| 329 |
+
"galore_quantization": false,
|
| 330 |
+
"galore_proj_quant": false,
|
| 331 |
+
"galore_proj_bits": 4,
|
| 332 |
+
"galore_proj_group_size": 256,
|
| 333 |
+
"galore_cos_threshold": 0.4,
|
| 334 |
+
"galore_gamma_proj": 2,
|
| 335 |
+
"galore_queue_size": 5,
|
| 336 |
+
"adalora_target_r": 8,
|
| 337 |
+
"adalora_init_r": 12,
|
| 338 |
+
"adalora_tinit": 0,
|
| 339 |
+
"adalora_tfinal": 0,
|
| 340 |
+
"adalora_deltaT": 1,
|
| 341 |
+
"adalora_beta1": 0.85,
|
| 342 |
+
"adalora_beta2": 0.85,
|
| 343 |
+
"adalora_orth_reg_weight": 0.5,
|
| 344 |
+
"llamapro_num_new_blocks": 4,
|
| 345 |
+
"llamapro_num_groups": null,
|
| 346 |
+
"lisa_activated_layers": 0,
|
| 347 |
+
"lisa_step_interval": 20,
|
| 348 |
+
"reft_layer_key": null,
|
| 349 |
+
"reft_layers": null,
|
| 350 |
+
"reft_rank": 4,
|
| 351 |
+
"reft_intervention_type": "LoreftIntervention",
|
| 352 |
+
"reft_args": null,
|
| 353 |
+
"swanlab_token": null,
|
| 354 |
+
"swanlab_project": null,
|
| 355 |
+
"swanlab_workspace": null,
|
| 356 |
+
"swanlab_exp_name": "/mnt/data/users/liamding/data/MMMT/lora/selective_mask_plugin/qwen2.5vl-7b-thinking_full_v2-selective-plugin-weighted_ratio_2/v0-20250818-120403",
|
| 357 |
+
"swanlab_mode": "cloud",
|
| 358 |
+
"add_version": true,
|
| 359 |
+
"resume_only_model": false,
|
| 360 |
+
"create_checkpoint_symlink": false,
|
| 361 |
+
"lazy_tokenize": true,
|
| 362 |
+
"loss_type": "selective_translate_weighted_ratio",
|
| 363 |
+
"metric": null,
|
| 364 |
+
"zero_hpz_partition_size": null,
|
| 365 |
+
"rank": 0,
|
| 366 |
+
"global_world_size": 4,
|
| 367 |
+
"local_world_size": 4,
|
| 368 |
+
"model_suffix": "Qwen2.5-VL-7B-Instruct",
|
| 369 |
+
"model_info": "ModelInfo(model_type='qwen2_5_vl', model_dir='/mnt/data/users/liamding/data/models/Qwen2.5-VL-7B-Instruct', torch_dtype=torch.bfloat16, max_model_len=128000, quant_method=None, quant_bits=None, rope_scaling={'type': 'default', 'mrope_section': [16, 24, 24], 'rope_type': 'default'}, config=None, task_type='causal_lm', num_labels=None)",
|
| 370 |
+
"model_meta": "ModelMeta(model_type='qwen2_5_vl', model_groups=[ModelGroup(models=[Model(ms_model_id='Qwen/Qwen2.5-VL-3B-Instruct', hf_model_id='Qwen/Qwen2.5-VL-3B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-7B-Instruct', hf_model_id='Qwen/Qwen2.5-VL-7B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-32B-Instruct', hf_model_id='Qwen/Qwen2.5-VL-32B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-72B-Instruct', hf_model_id='Qwen/Qwen2.5-VL-72B-Instruct', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[]), ModelGroup(models=[Model(ms_model_id='Qwen/Qwen2.5-VL-3B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-VL-3B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-7B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-VL-7B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-32B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-VL-32B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-72B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-VL-72B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[])], template='qwen2_5_vl', get_function=<function get_model_tokenizer_qwen2_5_vl at 0x7f719b4055a0>, model_arch='qwen2_vl', architectures=['Qwen2_5_VLForConditionalGeneration'], additional_saved_files=[], torch_dtype=None, is_multimodal=True, is_reward=False, task_type=None, ignore_patterns=None, requires=['transformers>=4.49', 'qwen_vl_utils>=0.0.6', 'decord'], tags=[])",
|
| 371 |
+
"model_dir": "/mnt/data/users/liamding/data/models/Qwen2.5-VL-7B-Instruct",
|
| 372 |
+
"hub": "<class 'swift.hub.hub.MSHub'>",
|
| 373 |
+
"evaluation_strategy": "epoch",
|
| 374 |
+
"training_args": "Seq2SeqTrainingArguments(output_dir='/mnt/data/users/liamding/data/MMMT/lora/selective_mask_plugin/qwen2.5vl-7b-thinking_full_v2-selective-plugin-weighted_ratio_2/v0-20250818-120403', overwrite_output_dir=False, do_train=False, do_eval=True, do_predict=False, eval_strategy=<IntervalStrategy.EPOCH: 'epoch'>, prediction_loss_only=False, per_device_train_batch_size=2, per_device_eval_batch_size=2, per_gpu_train_batch_size=None, per_gpu_eval_batch_size=None, gradient_accumulation_steps=2, eval_accumulation_steps=None, eval_delay=0, torch_empty_cache_steps=None, learning_rate=1e-06, weight_decay=0.001, adam_beta1=0.9, adam_beta2=0.95, adam_epsilon=1e-08, max_grad_norm=1.0, num_train_epochs=5.0, max_steps=-1, lr_scheduler_type=<SchedulerType.COSINE: 'cosine'>, lr_scheduler_kwargs=None, warmup_ratio=0.1, warmup_steps=0, log_level='passive', log_level_replica='warning', log_on_each_node=True, logging_dir='/mnt/data/users/liamding/data/MMMT/lora/selective_mask_plugin/qwen2.5vl-7b-thinking_full_v2-selective-plugin-weighted_ratio_2/v0-20250818-120403/runs', logging_strategy=<IntervalStrategy.STEPS: 'steps'>, logging_first_step=True, logging_steps=5, logging_nan_inf_filter=True, save_strategy=<SaveStrategy.EPOCH: 'epoch'>, save_steps=500, save_total_limit=5, save_safetensors=True, save_on_each_node=False, save_only_model=False, restore_callback_states_from_checkpoint=False, no_cuda=False, use_cpu=False, use_mps_device=False, seed=42, data_seed=42, jit_mode_eval=False, use_ipex=False, bf16=True, fp16=False, fp16_opt_level='O1', half_precision_backend='auto', bf16_full_eval=False, fp16_full_eval=False, tf32=None, local_rank=0, ddp_backend=None, tpu_num_cores=None, tpu_metrics_debug=False, debug=[], dataloader_drop_last=False, eval_steps=None, dataloader_num_workers=4, dataloader_prefetch_factor=10, past_index=-1, run_name='/mnt/data/users/liamding/data/MMMT/lora/selective_mask_plugin/qwen2.5vl-7b-thinking_full_v2-selective-plugin-weighted_ratio_2/v0-20250818-120403', disable_tqdm=False, remove_unused_columns=False, label_names=None, load_best_model_at_end=False, metric_for_best_model='loss', greater_is_better=False, ignore_data_skip=False, fsdp=[], fsdp_min_num_params=0, fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, tp_size=0, fsdp_transformer_layer_cls_to_wrap=None, accelerator_config=AcceleratorConfig(split_batches=False, dispatch_batches=False, even_batches=True, use_seedable_sampler=True, non_blocking=False, gradient_accumulation_kwargs=None, use_configured_state=False), deepspeed={'fp16': {'enabled': 'auto', 'loss_scale': 0, 'loss_scale_window': 1000, 'initial_scale_power': 16, 'hysteresis': 2, 'min_loss_scale': 1}, 'bf16': {'enabled': 'auto'}, 'zero_optimization': {'stage': 3, 'offload_optimizer': {'device': 'none', 'pin_memory': True}, 'offload_param': {'device': 'none', 'pin_memory': True}, 'overlap_comm': False, 'contiguous_gradients': True, 'sub_group_size': 1000000000.0, 'reduce_bucket_size': 'auto', 'zero_quantized_weights': False, 'zero_quantized_gradients': False, 'stage3_prefetch_bucket_size': 'auto', 'stage3_param_persistence_threshold': 'auto', 'stage3_max_live_parameters': 1000000000.0, 'stage3_max_reuse_distance': 1000000000.0, 'stage3_gather_16bit_weights_on_model_save': True}, 'gradient_accumulation_steps': 'auto', 'gradient_clipping': 'auto', 'steps_per_print': 2000, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'wall_clock_breakdown': False}, label_smoothing_factor=0.0, 
optim=<OptimizerNames.ADAMW_TORCH: 'adamw_torch'>, optim_args=None, adafactor=False, group_by_length=False, length_column_name='length', report_to=['swanlab'], ddp_find_unused_parameters=None, ddp_bucket_cap_mb=None, ddp_broadcast_buffers=None, dataloader_pin_memory=True, dataloader_persistent_workers=False, skip_memory_metrics=True, use_legacy_prediction_loop=False, push_to_hub=False, resume_from_checkpoint=None, hub_model_id=None, hub_strategy=<HubStrategy.EVERY_SAVE: 'every_save'>, hub_token=None, hub_private_repo=None, hub_always_push=False, gradient_checkpointing=True, gradient_checkpointing_kwargs=None, include_inputs_for_metrics=False, include_for_metrics=[], eval_do_concat_batches=True, fp16_backend='auto', push_to_hub_model_id=None, push_to_hub_organization=None, push_to_hub_token=None, mp_parameters='', auto_find_batch_size=False, full_determinism=False, torchdynamo=None, ray_scope='last', ddp_timeout=18000000, torch_compile=False, torch_compile_backend=None, torch_compile_mode=None, include_tokens_per_second=None, include_num_input_tokens_seen=None, neftune_noise_alpha=None, optim_target_modules=None, batch_eval_metrics=False, eval_on_start=False, use_liger_kernel=False, eval_use_gather_object=False, average_tokens_across_devices=None, sortish_sampler=False, predict_with_generate=False, generation_max_length=None, generation_num_beams=None, generation_config=None, vit_gradient_checkpointing=True, check_model=True, acc_strategy='token', train_dataloader_shuffle=True, max_epochs=None, aligner_lr=None, vit_lr=None, optimizer=None, use_logits_to_keep=None, channels=None, metric_warmup_step=0, fsdp_num=1, acc_steps=1, eval_use_evalscope=False, eval_datasets=[], eval_limit=None, eval_datasets_args=None, eval_generation_config=None, train_type='full', local_repo_path=None, galore_config=None)"
|
| 375 |
+
}
|
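A note on the args.json that closes above: although the file carries the full set of LoRA/GaLore/AdaLoRA tuner defaults, `train_type` is `"full"` with `freeze_vit` and `freeze_aligner` set, so those adapter sections are inert and only the LLM weights train. The `"auto"` entries in the embedded DeepSpeed config are resolved by the HF Trainer at launch from the surrounding arguments. A minimal sketch (stdlib-only Python; the file path is hypothetical, and the batch-size numbers are read off the `training_args` string above) of how `train_batch_size="auto"` resolves for this run:

```python
import json

# Resolve DeepSpeed's train_batch_size="auto" for this run.
# per-device batch size and accumulation steps come from the
# training_args string in args.json; world size is a top-level key.
args = json.load(open("args.json"))  # hypothetical local path

per_device_train_batch_size = 2            # from training_args above
gradient_accumulation_steps = 2            # from training_args above
world_size = args["global_world_size"]     # 4 GPUs

train_batch_size = per_device_train_batch_size * gradient_accumulation_steps * world_size
print(train_batch_size)  # 16 -> the value "train_batch_size": "auto" becomes under ZeRO-3
```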
selective_loss/qwen2.5vl-7b-thinking_full_v2-spwr_lr1e-6_wd1e-3/v0-20250818-120403/checkpoint-280/chat_template.json
ADDED
@@ -0,0 +1,3 @@
+{
+    "chat_template": "{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}"
+}
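The template above is ordinary Jinja2: it injects a default system prompt when none is given, wraps each image or video in `<|vision_start|>…<|vision_end|>` pad tokens, and numbers them ("Picture 1:", "Video 1:") only when `add_vision_id` is set. A standalone rendering sketch (assumes the `jinja2` package; HF's `apply_chat_template` uses a sandboxed variant of the same engine):

```python
import json
from jinja2 import Template  # plain Jinja2; namespace() needs jinja2 >= 2.10

template = Template(json.load(open("chat_template.json"))["chat_template"])
messages = [{
    "role": "user",
    "content": [
        {"type": "image"},
        {"type": "text", "text": "What is unusual about this image?"},
    ],
}]
print(template.render(messages=messages, add_generation_prompt=True))
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# <|vision_start|><|image_pad|><|vision_end|>What is unusual about this image?<|im_end|>
# <|im_start|>assistant
```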
selective_loss/qwen2.5vl-7b-thinking_full_v2-spwr_lr1e-6_wd1e-3/v0-20250818-120403/checkpoint-280/config.json
ADDED
@@ -0,0 +1,66 @@
+{
+    "architectures": [
+        "Qwen2_5_VLForConditionalGeneration"
+    ],
+    "attention_dropout": 0.0,
+    "bos_token_id": 151643,
+    "eos_token_id": 151645,
+    "hidden_act": "silu",
+    "hidden_size": 3584,
+    "image_token_id": 151655,
+    "initializer_range": 0.02,
+    "intermediate_size": 18944,
+    "max_position_embeddings": 128000,
+    "max_window_layers": 28,
+    "model_type": "qwen2_5_vl",
+    "num_attention_heads": 28,
+    "num_hidden_layers": 28,
+    "num_key_value_heads": 4,
+    "pad_token_id": 151643,
+    "rms_norm_eps": 1e-06,
+    "rope_scaling": {
+        "mrope_section": [
+            16,
+            24,
+            24
+        ],
+        "rope_type": "default",
+        "type": "default"
+    },
+    "rope_theta": 1000000.0,
+    "sliding_window": 32768,
+    "tie_word_embeddings": false,
+    "torch_dtype": "bfloat16",
+    "transformers_version": "4.51.3",
+    "use_cache": false,
+    "use_sliding_window": false,
+    "video_token_id": 151656,
+    "vision_config": {
+        "depth": 32,
+        "fullatt_block_indexes": [
+            7,
+            15,
+            23,
+            31
+        ],
+        "hidden_act": "silu",
+        "hidden_size": 1280,
+        "in_channels": 3,
+        "in_chans": 3,
+        "intermediate_size": 3420,
+        "model_type": "qwen2_5_vl",
+        "num_heads": 16,
+        "out_hidden_size": 3584,
+        "patch_size": 14,
+        "spatial_merge_size": 2,
+        "spatial_patch_size": 14,
+        "temporal_patch_size": 2,
+        "tokens_per_second": 2,
+        "torch_dtype": "bfloat16",
+        "window_size": 112
+    },
+    "vision_end_token_id": 151653,
+    "vision_start_token_id": 151652,
+    "vision_token_id": 151654,
+    "vocab_size": 152064
+}
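A few numbers worth sanity-checking in this config: head_dim = 3584 / 28 = 128, with 4 KV heads (grouped-query attention, a 7:1 query-to-KV ratio); on the vision side, 14-pixel patches are merged 2×2 by the aligner, so each visual token the LLM sees covers a 28×28-pixel area. A small sketch (plain Python; the image size is a hypothetical example) of the resulting token count:

```python
# Visual-token arithmetic implied by vision_config above:
# patch_size=14, spatial_merge_size=2 -> one LLM-side token per 28x28 px.
patch_size = 14
spatial_merge_size = 2

def num_visual_tokens(height_px: int, width_px: int) -> int:
    # Assumes the preprocessor has already rounded both sides to
    # multiples of patch_size * spatial_merge_size (28).
    grid_h, grid_w = height_px // patch_size, width_px // patch_size
    return (grid_h * grid_w) // (spatial_merge_size ** 2)

print(num_visual_tokens(448, 448))  # (32 * 32) / 4 = 256 tokens
```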
selective_loss/qwen2.5vl-7b-thinking_full_v2-spwr_lr1e-6_wd1e-3/v0-20250818-120403/checkpoint-280/generation_config.json
ADDED
@@ -0,0 +1,12 @@
+{
+    "bos_token_id": 151643,
+    "do_sample": true,
+    "eos_token_id": [
+        151645,
+        151643
+    ],
+    "pad_token_id": 151643,
+    "repetition_penalty": 1.05,
+    "temperature": 1e-06,
+    "transformers_version": "4.51.3"
+}
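One quirk worth flagging: `do_sample` is true but `temperature` is 1e-06. Dividing the logits by a near-zero temperature concentrates essentially all probability mass on the argmax token, so decoding is effectively greedy while `repetition_penalty=1.05` still applies. A quick check (assumes `transformers` is installed; the local checkpoint path is hypothetical):

```python
from transformers import GenerationConfig

# from_pretrained on a local directory reads its generation_config.json
gen_cfg = GenerationConfig.from_pretrained("./checkpoint-280")
assert gen_cfg.do_sample is True
assert gen_cfg.temperature == 1e-06   # logits / 1e-6 -> sampling collapses to argmax
assert gen_cfg.repetition_penalty == 1.05
```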
selective_loss/qwen2.5vl-7b-thinking_full_v2-spwr_lr1e-6_wd1e-3/v0-20250818-120403/checkpoint-280/latest
ADDED
@@ -0,0 +1 @@
+global_step278
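`latest` is DeepSpeed's checkpoint tag file: it names the subdirectory (here `global_step278`) holding the ZeRO shards from the most recent save, and utilities such as `zero_to_fp32.py` read it to decide which step to consolidate into fp32 weights. A sketch of that lookup (stdlib only; the checkpoint path is hypothetical, and the shard glob is deliberately loose since exact filenames vary by DeepSpeed version):

```python
from pathlib import Path

ckpt_dir = Path("./checkpoint-280")               # hypothetical local path
tag = (ckpt_dir / "latest").read_text().strip()
print(tag)                                        # "global_step278"

# ZeRO saves per-rank shard files under the tag directory.
shards = sorted((ckpt_dir / tag).glob("*model_states.pt"))
```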