leiwx52 committed on
Commit 1b6b78a · verified · 1 Parent(s): 0e894ba

Delete qwen3vl_8b_agentnetv1_fullsft_gb200
qwen3vl_8b_agentnetv1_fullsft_gb200/added_tokens.json DELETED
@@ -1,28 +0,0 @@
- {
-   "</think>": 151668,
-   "</tool_call>": 151658,
-   "</tool_response>": 151666,
-   "<think>": 151667,
-   "<tool_call>": 151657,
-   "<tool_response>": 151665,
-   "<|box_end|>": 151649,
-   "<|box_start|>": 151648,
-   "<|endoftext|>": 151643,
-   "<|file_sep|>": 151664,
-   "<|fim_middle|>": 151660,
-   "<|fim_pad|>": 151662,
-   "<|fim_prefix|>": 151659,
-   "<|fim_suffix|>": 151661,
-   "<|im_end|>": 151645,
-   "<|im_start|>": 151644,
-   "<|image_pad|>": 151655,
-   "<|object_ref_end|>": 151647,
-   "<|object_ref_start|>": 151646,
-   "<|quad_end|>": 151651,
-   "<|quad_start|>": 151650,
-   "<|repo_name|>": 151663,
-   "<|video_pad|>": 151656,
-   "<|vision_end|>": 151653,
-   "<|vision_pad|>": 151654,
-   "<|vision_start|>": 151652
- }
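
Since the file itself is gone from the repo after this commit, here is a minimal sketch (not part of the commit) that cross-checks a saved copy of added_tokens.json against the base tokenizer's vocabulary; the base model id is taken from args.json below, and the local filename is an assumption:

import json

from transformers import AutoTokenizer

# Assumes added_tokens.json was saved locally before the deletion.
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-VL-8B-Instruct")

with open("added_tokens.json") as f:
    added_tokens = json.load(f)

for token, expected_id in sorted(added_tokens.items(), key=lambda kv: kv[1]):
    actual_id = tokenizer.convert_tokens_to_ids(token)
    status = "ok" if actual_id == expected_id else f"MISMATCH (got {actual_id})"
    print(f"{expected_id:>6}  {token:<24} {status}")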
qwen3vl_8b_agentnetv1_fullsft_gb200/args.json DELETED
@@ -1,388 +0,0 @@
- {
-   "output_dir": "/workspace/weixian/workplace/ms_swift_project/ms-swift/output/qwen3vl_8b_sft_full_agentnet_dist/v3-20260118-132920",
-   "overwrite_output_dir": false,
-   "do_train": false,
-   "do_eval": false,
-   "do_predict": false,
-   "eval_strategy": "no",
-   "prediction_loss_only": false,
-   "per_device_train_batch_size": 8,
-   "per_device_eval_batch_size": 1,
-   "per_gpu_train_batch_size": null,
-   "per_gpu_eval_batch_size": null,
-   "gradient_accumulation_steps": 3,
-   "eval_accumulation_steps": null,
-   "eval_delay": 0,
-   "torch_empty_cache_steps": null,
-   "learning_rate": 2e-05,
-   "weight_decay": 0.1,
-   "adam_beta1": 0.9,
-   "adam_beta2": 0.95,
-   "adam_epsilon": 1e-08,
-   "max_grad_norm": 1.0,
-   "num_train_epochs": 2.0,
-   "max_steps": -1,
-   "lr_scheduler_type": "cosine",
-   "lr_scheduler_kwargs": null,
-   "warmup_ratio": 0.05,
-   "warmup_steps": 0,
-   "log_level": "passive",
-   "log_level_replica": "warning",
-   "log_on_each_node": true,
-   "logging_dir": "/workspace/weixian/workplace/ms_swift_project/ms-swift/output/qwen3vl_8b_sft_full_agentnet_dist/v3-20260118-132920/runs",
-   "logging_strategy": "steps",
-   "logging_first_step": true,
-   "logging_steps": 5,
-   "logging_nan_inf_filter": true,
-   "save_strategy": "steps",
-   "save_steps": 200.0,
-   "save_total_limit": 2,
-   "save_safetensors": true,
-   "save_on_each_node": false,
-   "save_only_model": false,
-   "restore_callback_states_from_checkpoint": false,
-   "no_cuda": false,
-   "use_cpu": false,
-   "use_mps_device": false,
-   "seed": 42,
-   "data_seed": 42,
-   "jit_mode_eval": false,
-   "bf16": true,
-   "fp16": false,
-   "fp16_opt_level": "O1",
-   "half_precision_backend": "auto",
-   "bf16_full_eval": false,
-   "fp16_full_eval": false,
-   "tf32": null,
-   "local_rank": 0,
-   "ddp_backend": null,
-   "tpu_num_cores": null,
-   "tpu_metrics_debug": false,
-   "debug": null,
-   "dataloader_drop_last": false,
-   "eval_steps": 200.0,
-   "dataloader_num_workers": 8,
-   "dataloader_prefetch_factor": null,
-   "past_index": -1,
-   "run_name": "/workspace/weixian/workplace/ms_swift_project/ms-swift/output/qwen3vl_8b_sft_full_agentnet_dist/v3-20260118-132920",
-   "disable_tqdm": null,
-   "remove_unused_columns": true,
-   "label_names": null,
-   "load_best_model_at_end": false,
-   "metric_for_best_model": "loss",
-   "greater_is_better": false,
-   "ignore_data_skip": false,
-   "fsdp": [],
-   "fsdp_min_num_params": 0,
-   "fsdp_config": null,
-   "fsdp_transformer_layer_cls_to_wrap": null,
-   "accelerator_config": {
-     "dispatch_batches": false
-   },
-   "parallelism_config": null,
-   "deepspeed": {
-     "fp16": {
-       "enabled": "auto",
-       "loss_scale": 0,
-       "loss_scale_window": 1000,
-       "initial_scale_power": 16,
-       "hysteresis": 2,
-       "min_loss_scale": 1
-     },
-     "bf16": {
-       "enabled": "auto"
-     },
-     "zero_optimization": {
-       "stage": 1,
-       "offload_optimizer": {
-         "device": "none",
-         "pin_memory": true
-       },
-       "allgather_partitions": true,
-       "allgather_bucket_size": 200000000.0,
-       "overlap_comm": false,
-       "reduce_scatter": true,
-       "reduce_bucket_size": 200000000.0,
-       "contiguous_gradients": true
-     },
-     "gradient_accumulation_steps": "auto",
-     "gradient_clipping": "auto",
-     "steps_per_print": 2000,
-     "train_batch_size": "auto",
-     "train_micro_batch_size_per_gpu": "auto",
-     "wall_clock_breakdown": false
-   },
-   "label_smoothing_factor": 0.0,
-   "optim": "adamw_torch_fused",
-   "optim_args": null,
-   "adafactor": false,
-   "group_by_length": false,
-   "length_column_name": "length",
-   "report_to": [
-     "tensorboard"
-   ],
-   "project": "huggingface",
-   "trackio_space_id": "trackio",
-   "ddp_find_unused_parameters": null,
-   "ddp_bucket_cap_mb": null,
-   "ddp_broadcast_buffers": null,
-   "dataloader_pin_memory": true,
-   "dataloader_persistent_workers": false,
-   "skip_memory_metrics": true,
-   "use_legacy_prediction_loop": false,
-   "push_to_hub": false,
-   "resume_from_checkpoint": null,
-   "hub_model_id": null,
-   "hub_strategy": "every_save",
-   "hub_token": null,
-   "hub_private_repo": null,
-   "hub_always_push": false,
-   "hub_revision": null,
-   "gradient_checkpointing": true,
-   "gradient_checkpointing_kwargs": null,
-   "include_inputs_for_metrics": false,
-   "include_for_metrics": [],
-   "eval_do_concat_batches": true,
-   "fp16_backend": "auto",
-   "push_to_hub_model_id": null,
-   "push_to_hub_organization": null,
-   "push_to_hub_token": null,
-   "mp_parameters": "",
-   "auto_find_batch_size": false,
-   "full_determinism": false,
-   "torchdynamo": null,
-   "ray_scope": "last",
-   "ddp_timeout": 18000000,
-   "torch_compile": false,
-   "torch_compile_backend": null,
-   "torch_compile_mode": null,
-   "include_tokens_per_second": false,
-   "include_num_input_tokens_seen": false,
-   "neftune_noise_alpha": null,
-   "optim_target_modules": null,
-   "batch_eval_metrics": false,
-   "eval_on_start": false,
-   "use_liger_kernel": true,
-   "liger_kernel_config": null,
-   "eval_use_gather_object": false,
-   "average_tokens_across_devices": true,
-   "sortish_sampler": false,
-   "predict_with_generate": false,
-   "generation_max_length": null,
-   "generation_num_beams": null,
-   "generation_config": null,
-   "tuner_backend": "peft",
-   "vit_gradient_checkpointing": null,
-   "router_aux_loss_coef": 0.0,
-   "enable_dft_loss": false,
-   "enable_channel_loss": false,
-   "check_model": true,
-   "acc_strategy": "token",
-   "train_dataloader_shuffle": true,
-   "max_epochs": null,
-   "aligner_lr": 1e-05,
-   "vit_lr": 1e-05,
-   "use_logits_to_keep": null,
-   "ds3_gather_for_generation": true,
-   "resume_only_model": false,
-   "optimizer": null,
-   "loss_type": null,
-   "metric": null,
-   "eval_use_evalscope": false,
-   "eval_dataset": [],
-   "eval_dataset_args": null,
-   "eval_limit": null,
-   "eval_generation_config": null,
-   "extra_eval_args": null,
-   "use_flash_ckpt": false,
-   "use_ray": false,
-   "ray_exp_name": null,
-   "device_groups": null,
-   "model": "/workspace/weixian/ckpt/Qwen3-VL-8B-Instruct",
-   "model_type": "qwen3_vl",
-   "model_revision": null,
-   "task_type": "causal_lm",
-   "torch_dtype": "bfloat16",
-   "attn_impl": "flash_attn",
-   "new_special_tokens": [],
-   "num_labels": null,
-   "problem_type": null,
-   "rope_scaling": null,
-   "device_map": null,
-   "max_memory": {},
-   "max_model_len": null,
-   "local_repo_path": null,
-   "init_strategy": null,
-   "template": "qwen3_vl",
-   "system": null,
-   "max_length": 32768,
-   "truncation_strategy": "delete",
-   "max_pixels": null,
-   "agent_template": null,
-   "norm_bbox": null,
-   "use_chat_template": true,
-   "padding_side": "right",
-   "padding_free": true,
-   "loss_scale": "default",
-   "sequence_parallel_size": 1,
-   "template_backend": "swift",
-   "response_prefix": null,
-   "enable_thinking": null,
-   "add_non_thinking_prefix": true,
-   "dataset": [
-     "/workspace/data/processed/AgentNet/agentnet_win_mac_18k.swift.v1.jsonl",
-     "/workspace/data/processed/AgentNet/agentnet_ubuntu_5k.swift.v1.jsonl"
-   ],
-   "val_dataset": [],
-   "cached_dataset": [],
-   "cached_val_dataset": [],
-   "split_dataset_ratio": 0.0,
-   "dataset_num_proc": 8,
-   "load_from_cache_file": false,
-   "dataset_shuffle": true,
-   "val_dataset_shuffle": false,
-   "streaming": false,
-   "interleave_prob": null,
-   "stopping_strategy": "first_exhausted",
-   "shuffle_buffer_size": 1000,
-   "download_mode": "reuse_dataset_if_exists",
-   "columns": {},
-   "strict": false,
-   "model_name": null,
-   "model_author": null,
-   "custom_dataset_info": [],
-   "quant_method": null,
-   "quant_bits": null,
-   "hqq_axis": null,
-   "bnb_4bit_compute_dtype": "bfloat16",
-   "bnb_4bit_quant_type": "nf4",
-   "bnb_4bit_use_double_quant": true,
-   "bnb_4bit_quant_storage": null,
-   "max_new_tokens": 64,
-   "temperature": 0.0,
-   "top_k": null,
-   "top_p": null,
-   "repetition_penalty": null,
-   "num_beams": 1,
-   "stream": false,
-   "stop_words": [],
-   "logprobs": false,
-   "top_logprobs": null,
-   "structured_outputs_regex": null,
-   "ckpt_dir": null,
-   "lora_modules": [],
-   "train_type": "full",
-   "adapters": [],
-   "external_plugins": [],
-   "model_kwargs": {},
-   "load_args": false,
-   "load_data_args": false,
-   "packing": false,
-   "packing_length": null,
-   "packing_num_proc": 1,
-   "lazy_tokenize": true,
-   "custom_register_path": [],
-   "use_hf": false,
-   "ignore_args_error": false,
-   "use_swift_lora": false,
-   "freeze_parameters": [],
-   "freeze_parameters_regex": null,
-   "freeze_parameters_ratio": 0.0,
-   "trainable_parameters": [
-     "model.visual.merger",
-     "model.visual.deepstack_merger_list"
-   ],
-   "trainable_parameters_regex": null,
-   "freeze_llm": false,
-   "freeze_vit": false,
-   "freeze_aligner": false,
-   "target_modules": [
-     "all-linear"
-   ],
-   "target_regex": null,
-   "target_parameters": null,
-   "modules_to_save": [],
-   "lora_rank": 8,
-   "lora_alpha": 32,
-   "lora_dropout": 0.05,
-   "lora_bias": "none",
-   "lora_dtype": null,
-   "lorap_lr_ratio": null,
-   "use_rslora": false,
-   "use_dora": false,
-   "lora_ga_batch_size": 2,
-   "lora_ga_iters": 2,
-   "lora_ga_max_length": 1024,
-   "lora_ga_direction": "ArB2r",
-   "lora_ga_scale": "stable",
-   "lora_ga_stable_gamma": 16,
-   "init_weights": true,
-   "fourier_n_frequency": 2000,
-   "fourier_scaling": 300.0,
-   "boft_block_size": 4,
-   "boft_block_num": 0,
-   "boft_n_butterfly_factor": 1,
-   "boft_dropout": 0.0,
-   "vera_rank": 256,
-   "vera_projection_prng_key": 0,
-   "vera_dropout": 0.0,
-   "vera_d_initial": 0.1,
-   "adapter_act": "gelu",
-   "adapter_length": 128,
-   "use_galore": false,
-   "galore_target_modules": null,
-   "galore_rank": 128,
-   "galore_update_proj_gap": 50,
-   "galore_scale": 1.0,
-   "galore_proj_type": "std",
-   "galore_optim_per_parameter": false,
-   "galore_with_embedding": false,
-   "galore_quantization": false,
-   "galore_proj_quant": false,
-   "galore_proj_bits": 4,
-   "galore_proj_group_size": 256,
-   "galore_cos_threshold": 0.4,
-   "galore_gamma_proj": 2,
-   "galore_queue_size": 5,
-   "adalora_target_r": 8,
-   "adalora_init_r": 12,
-   "adalora_tinit": 0,
-   "adalora_tfinal": 0,
-   "adalora_deltaT": 1,
-   "adalora_beta1": 0.85,
-   "adalora_beta2": 0.85,
-   "adalora_orth_reg_weight": 0.5,
-   "llamapro_num_new_blocks": 4,
-   "llamapro_num_groups": null,
-   "lisa_activated_layers": 0,
-   "lisa_step_interval": 20,
-   "reft_layer_key": null,
-   "reft_layers": null,
-   "reft_rank": 4,
-   "reft_intervention_type": "LoreftIntervention",
-   "reft_args": null,
-   "swanlab_token": null,
-   "swanlab_project": "ms-swift",
-   "swanlab_workspace": null,
-   "swanlab_exp_name": null,
-   "swanlab_notification_method": null,
-   "swanlab_webhook_url": null,
-   "swanlab_secret": null,
-   "swanlab_mode": "cloud",
-   "add_version": true,
-   "create_checkpoint_symlink": false,
-   "zero_hpz_partition_size": null,
-   "deepspeed_autotp_size": null,
-   "early_stop_interval": null,
-   "rank": 0,
-   "global_world_size": 16,
-   "local_world_size": 4,
-   "model_suffix": "Qwen3-VL-8B-Instruct",
-   "model_info": "ModelInfo(model_type='qwen3_vl', model_dir='/workspace/weixian/ckpt/Qwen3-VL-8B-Instruct', torch_dtype=torch.bfloat16, max_model_len=262144, quant_method=None, quant_bits=None, rope_scaling={'mrope_interleaved': True, 'mrope_section': [24, 20, 20], 'rope_type': 'default'}, is_moe_model=False, is_multimodal=True, config=None, task_type='causal_lm', num_labels=None)",
-   "model_meta": "ModelMeta(model_type='qwen3_vl', model_groups=[ModelGroup(models=[Model(ms_model_id='Qwen/Qwen3-VL-2B-Instruct', hf_model_id='Qwen/Qwen3-VL-2B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-VL-2B-Thinking', hf_model_id='Qwen/Qwen3-VL-2B-Thinking', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-VL-2B-Instruct-FP8', hf_model_id='Qwen/Qwen3-VL-2B-Instruct-FP8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-VL-2B-Thinking-FP8', hf_model_id='Qwen/Qwen3-VL-2B-Thinking-FP8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-VL-4B-Instruct', hf_model_id='Qwen/Qwen3-VL-4B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-VL-4B-Thinking', hf_model_id='Qwen/Qwen3-VL-4B-Thinking', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-VL-4B-Instruct-FP8', hf_model_id='Qwen/Qwen3-VL-4B-Instruct-FP8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-VL-4B-Thinking-FP8', hf_model_id='Qwen/Qwen3-VL-4B-Thinking-FP8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-VL-8B-Instruct', hf_model_id='Qwen/Qwen3-VL-8B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-VL-8B-Thinking', hf_model_id='Qwen/Qwen3-VL-8B-Thinking', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-VL-8B-Instruct-FP8', hf_model_id='Qwen/Qwen3-VL-8B-Instruct-FP8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-VL-8B-Thinking-FP8', hf_model_id='Qwen/Qwen3-VL-8B-Thinking-FP8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-VL-32B-Instruct', hf_model_id='Qwen/Qwen3-VL-32B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-VL-32B-Thinking', hf_model_id='Qwen/Qwen3-VL-32B-Thinking', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-VL-32B-Instruct-FP8', hf_model_id='Qwen/Qwen3-VL-32B-Instruct-FP8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-VL-32B-Thinking-FP8', hf_model_id='Qwen/Qwen3-VL-32B-Thinking-FP8', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[])], template='qwen3_vl', get_function=<function get_model_tokenizer_qwen3_vl at 0xfffc20b36160>, model_arch=MultiModelKeys(arch_name='qwen3_vl', embedding=None, module_list=None, lm_head=None, q_proj=None, k_proj=None, v_proj=None, o_proj=None, attention=None, mlp=None, down_proj=None, qkv_proj=None, qk_proj=None, qa_proj=None, qb_proj=None, kv_proj=None, kva_proj=None, kvb_proj=None, language_model=['model.language_model', 'lm_head'], aligner=['model.visual.merger', 'model.visual.deepstack_merger_list'], vision_tower=['model.visual'], generator=[]), architectures=['Qwen3VLForConditionalGeneration'], additional_saved_files=[], torch_dtype=None, is_multimodal=True, is_reward=False, is_reranker=False, task_type=None, ignore_patterns=None, requires=['transformers>=4.57', 'qwen_vl_utils>=0.0.14', 'decord'], tags=['vision', 'video'])",
-   "model_dir": "/workspace/weixian/ckpt/Qwen3-VL-8B-Instruct",
-   "_val_dataset_exists": [],
-   "hub": "<class 'swift.hub.hub.MSHub'>",
-   "evaluation_strategy": "steps",
-   "training_args": "Seq2SeqTrainingArguments(output_dir='/workspace/weixian/workplace/ms_swift_project/ms-swift/output/qwen3vl_8b_sft_full_agentnet_dist/v3-20260118-132920', overwrite_output_dir=False, do_train=False, do_eval=False, do_predict=False, eval_strategy=<IntervalStrategy.NO: 'no'>, prediction_loss_only=False, per_device_train_batch_size=8, per_device_eval_batch_size=1, per_gpu_train_batch_size=None, per_gpu_eval_batch_size=None, gradient_accumulation_steps=3, eval_accumulation_steps=None, eval_delay=0, torch_empty_cache_steps=None, learning_rate=2e-05, weight_decay=0.1, adam_beta1=0.9, adam_beta2=0.95, adam_epsilon=1e-08, max_grad_norm=1.0, num_train_epochs=2.0, max_steps=-1, lr_scheduler_type=<SchedulerType.COSINE: 'cosine'>, lr_scheduler_kwargs=None, warmup_ratio=0.05, warmup_steps=0, log_level='passive', log_level_replica='warning', log_on_each_node=True, logging_dir='/workspace/weixian/workplace/ms_swift_project/ms-swift/output/qwen3vl_8b_sft_full_agentnet_dist/v3-20260118-132920/runs', logging_strategy=<IntervalStrategy.STEPS: 'steps'>, logging_first_step=True, logging_steps=5, logging_nan_inf_filter=True, save_strategy=<SaveStrategy.STEPS: 'steps'>, save_steps=200, save_total_limit=2, save_safetensors=True, save_on_each_node=False, save_only_model=False, restore_callback_states_from_checkpoint=False, no_cuda=False, use_cpu=False, use_mps_device=False, seed=42, data_seed=42, jit_mode_eval=False, bf16=True, fp16=False, fp16_opt_level='O1', half_precision_backend='auto', bf16_full_eval=False, fp16_full_eval=False, tf32=None, local_rank=0, ddp_backend=None, tpu_num_cores=None, tpu_metrics_debug=False, debug=[], dataloader_drop_last=False, eval_steps=200.0, dataloader_num_workers=8, dataloader_prefetch_factor=2, past_index=-1, run_name='/workspace/weixian/workplace/ms_swift_project/ms-swift/output/qwen3vl_8b_sft_full_agentnet_dist/v3-20260118-132920', disable_tqdm=False, remove_unused_columns=False, label_names=None, load_best_model_at_end=False, metric_for_best_model='loss', greater_is_better=False, ignore_data_skip=False, fsdp=[], fsdp_min_num_params=0, fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, fsdp_transformer_layer_cls_to_wrap=None, accelerator_config=AcceleratorConfig(split_batches=False, dispatch_batches=False, even_batches=True, use_seedable_sampler=True, non_blocking=False, gradient_accumulation_kwargs=None, use_configured_state=False), parallelism_config=None, deepspeed={'fp16': {'enabled': 'auto', 'loss_scale': 0, 'loss_scale_window': 1000, 'initial_scale_power': 16, 'hysteresis': 2, 'min_loss_scale': 1}, 'bf16': {'enabled': 'auto'}, 'zero_optimization': {'stage': 1, 'offload_optimizer': {'device': 'none', 'pin_memory': True}, 'allgather_partitions': True, 'allgather_bucket_size': 200000000.0, 'overlap_comm': False, 'reduce_scatter': True, 'reduce_bucket_size': 200000000.0, 'contiguous_gradients': True}, 'gradient_accumulation_steps': 'auto', 'gradient_clipping': 'auto', 'steps_per_print': 2000, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'wall_clock_breakdown': False}, label_smoothing_factor=0.0, optim=<OptimizerNames.ADAMW_TORCH_FUSED: 'adamw_torch_fused'>, optim_args=None, adafactor=False, group_by_length=False, length_column_name='length', report_to=['tensorboard'], project='huggingface', trackio_space_id='trackio', ddp_find_unused_parameters=None, ddp_bucket_cap_mb=None, ddp_broadcast_buffers=None, dataloader_pin_memory=True, dataloader_persistent_workers=False, skip_memory_metrics=True, use_legacy_prediction_loop=False, push_to_hub=False, resume_from_checkpoint=None, hub_model_id=None, hub_strategy=<HubStrategy.EVERY_SAVE: 'every_save'>, hub_token=None, hub_private_repo=None, hub_always_push=False, hub_revision=None, gradient_checkpointing=True, gradient_checkpointing_kwargs=None, include_inputs_for_metrics=False, include_for_metrics=[], eval_do_concat_batches=True, fp16_backend='auto', push_to_hub_model_id=None, push_to_hub_organization=None, push_to_hub_token=None, mp_parameters='', auto_find_batch_size=False, full_determinism=False, torchdynamo=None, ray_scope='last', ddp_timeout=18000000, torch_compile=False, torch_compile_backend=None, torch_compile_mode=None, include_tokens_per_second=None, include_num_input_tokens_seen=None, neftune_noise_alpha=None, optim_target_modules=None, batch_eval_metrics=False, eval_on_start=False, use_liger_kernel=True, liger_kernel_config=None, eval_use_gather_object=False, average_tokens_across_devices=None, sortish_sampler=False, predict_with_generate=False, generation_max_length=None, generation_num_beams=None, generation_config=None, tuner_backend='peft', vit_gradient_checkpointing=True, router_aux_loss_coef=0.0, enable_dft_loss=False, enable_channel_loss=False, check_model=True, acc_strategy='token', train_dataloader_shuffle=True, max_epochs=None, aligner_lr=1e-05, vit_lr=1e-05, use_logits_to_keep=None, ds3_gather_for_generation=True, resume_only_model=False, optimizer='multimodal', loss_type=None, metric=None, eval_use_evalscope=False, eval_dataset=[], eval_dataset_args=None, eval_limit=None, eval_generation_config=None, extra_eval_args=None, use_flash_ckpt=False, sft_alpha=0, chord_sft_dataset=[], chord_sft_per_device_train_batch_size=None, chord_enable_phi_function=False, chord_mu_warmup_steps=None, chord_mu_decay_steps=None, chord_mu_peak=None, chord_mu_valley=None, train_type='full', local_repo_path=None, galore_config=None, task_type='causal_lm', problem_type=None)"
- }
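
A quick sketch (assuming a local copy of the args.json shown above) that derives the effective global batch size implied by this config: per-device batch 8 × gradient accumulation 3 × 16 ranks = 384 samples per optimizer step:

import json

with open("args.json") as f:
    args = json.load(f)

per_device = args["per_device_train_batch_size"]  # 8
grad_accum = args["gradient_accumulation_steps"]  # 3
world_size = args["global_world_size"]            # 16

# 8 * 3 * 16 = 384 samples per optimizer step
print("effective global batch size:", per_device * grad_accum * world_size)
print("lr:", args["learning_rate"], "| epochs:", args["num_train_epochs"])
print("train_type:", args["train_type"], "| max_length:", args["max_length"])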
qwen3vl_8b_agentnetv1_fullsft_gb200/chat_template.jinja DELETED
@@ -1,120 +0,0 @@
- {%- if tools %}
-     {{- '<|im_start|>system\n' }}
-     {%- if messages[0].role == 'system' %}
-         {%- if messages[0].content is string %}
-             {{- messages[0].content }}
-         {%- else %}
-             {%- for content in messages[0].content %}
-                 {%- if 'text' in content %}
-                     {{- content.text }}
-                 {%- endif %}
-             {%- endfor %}
-         {%- endif %}
-         {{- '\n\n' }}
-     {%- endif %}
-     {{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
-     {%- for tool in tools %}
-         {{- "\n" }}
-         {{- tool | tojson }}
-     {%- endfor %}
-     {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
- {%- else %}
-     {%- if messages[0].role == 'system' %}
-         {{- '<|im_start|>system\n' }}
-         {%- if messages[0].content is string %}
-             {{- messages[0].content }}
-         {%- else %}
-             {%- for content in messages[0].content %}
-                 {%- if 'text' in content %}
-                     {{- content.text }}
-                 {%- endif %}
-             {%- endfor %}
-         {%- endif %}
-         {{- '<|im_end|>\n' }}
-     {%- endif %}
- {%- endif %}
- {%- set image_count = namespace(value=0) %}
- {%- set video_count = namespace(value=0) %}
- {%- for message in messages %}
-     {%- if message.role == "user" %}
-         {{- '<|im_start|>' + message.role + '\n' }}
-         {%- if message.content is string %}
-             {{- message.content }}
-         {%- else %}
-             {%- for content in message.content %}
-                 {%- if content.type == 'image' or 'image' in content or 'image_url' in content %}
-                     {%- set image_count.value = image_count.value + 1 %}
-                     {%- if add_vision_id %}Picture {{ image_count.value }}: {% endif -%}
-                     <|vision_start|><|image_pad|><|vision_end|>
-                 {%- elif content.type == 'video' or 'video' in content %}
-                     {%- set video_count.value = video_count.value + 1 %}
-                     {%- if add_vision_id %}Video {{ video_count.value }}: {% endif -%}
-                     <|vision_start|><|video_pad|><|vision_end|>
-                 {%- elif 'text' in content %}
-                     {{- content.text }}
-                 {%- endif %}
-             {%- endfor %}
-         {%- endif %}
-         {{- '<|im_end|>\n' }}
-     {%- elif message.role == "assistant" %}
-         {{- '<|im_start|>' + message.role + '\n' }}
-         {%- if message.content is string %}
-             {{- message.content }}
-         {%- else %}
-             {%- for content_item in message.content %}
-                 {%- if 'text' in content_item %}
-                     {{- content_item.text }}
-                 {%- endif %}
-             {%- endfor %}
-         {%- endif %}
-         {%- if message.tool_calls %}
-             {%- for tool_call in message.tool_calls %}
-                 {%- if (loop.first and message.content) or (not loop.first) %}
-                     {{- '\n' }}
-                 {%- endif %}
-                 {%- if tool_call.function %}
-                     {%- set tool_call = tool_call.function %}
-                 {%- endif %}
-                 {{- '<tool_call>\n{"name": "' }}
-                 {{- tool_call.name }}
-                 {{- '", "arguments": ' }}
-                 {%- if tool_call.arguments is string %}
-                     {{- tool_call.arguments }}
-                 {%- else %}
-                     {{- tool_call.arguments | tojson }}
-                 {%- endif %}
-                 {{- '}\n</tool_call>' }}
-             {%- endfor %}
-         {%- endif %}
-         {{- '<|im_end|>\n' }}
-     {%- elif message.role == "tool" %}
-         {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
-             {{- '<|im_start|>user' }}
-         {%- endif %}
-         {{- '\n<tool_response>\n' }}
-         {%- if message.content is string %}
-             {{- message.content }}
-         {%- else %}
-             {%- for content in message.content %}
-                 {%- if content.type == 'image' or 'image' in content or 'image_url' in content %}
-                     {%- set image_count.value = image_count.value + 1 %}
-                     {%- if add_vision_id %}Picture {{ image_count.value }}: {% endif -%}
-                     <|vision_start|><|image_pad|><|vision_end|>
-                 {%- elif content.type == 'video' or 'video' in content %}
-                     {%- set video_count.value = video_count.value + 1 %}
-                     {%- if add_vision_id %}Video {{ video_count.value }}: {% endif -%}
-                     <|vision_start|><|video_pad|><|vision_end|>
-                 {%- elif 'text' in content %}
-                     {{- content.text }}
-                 {%- endif %}
-             {%- endfor %}
-         {%- endif %}
-         {{- '\n</tool_response>' }}
-         {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
-             {{- '<|im_end|>\n' }}
-         {%- endif %}
-     {%- endif %}
- {%- endfor %}
- {%- if add_generation_prompt %}
-     {{- '<|im_start|>assistant\n' }}
- {%- endif %}
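
A minimal sketch of how this template renders a tool-calling turn. The base model id and local filename are assumptions, and the "click" tool plus the messages are made up purely for illustration:

from transformers import AutoTokenizer

# Load the base tokenizer and override its template with the deleted file.
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-VL-8B-Instruct")
with open("chat_template.jinja") as f:
    tokenizer.chat_template = f.read()

# Hypothetical GUI-action tool schema; any dict works, it is rendered via tojson.
tools = [{
    "type": "function",
    "function": {
        "name": "click",
        "description": "Click at screen coordinates.",
        "parameters": {
            "type": "object",
            "properties": {"x": {"type": "integer"}, "y": {"type": "integer"}},
        },
    },
}]
messages = [
    {"role": "system", "content": "You are a GUI agent."},
    {"role": "user", "content": [
        {"type": "image"},  # expands to <|vision_start|><|image_pad|><|vision_end|>
        {"type": "text", "text": "Open the settings menu."},
    ]},
]

print(tokenizer.apply_chat_template(
    messages, tools=tools, add_generation_prompt=True, tokenize=False))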
qwen3vl_8b_agentnetv1_fullsft_gb200/config.json DELETED
@@ -1,69 +0,0 @@
- {
-   "architectures": [
-     "Qwen3VLForConditionalGeneration"
-   ],
-   "dtype": "bfloat16",
-   "eos_token_id": 151645,
-   "hidden_size": 4096,
-   "image_token_id": 151655,
-   "model_type": "qwen3_vl",
-   "pad_token_id": 151643,
-   "text_config": {
-     "attention_bias": false,
-     "attention_dropout": 0.0,
-     "bos_token_id": 151643,
-     "dtype": "bfloat16",
-     "eos_token_id": 151645,
-     "head_dim": 128,
-     "hidden_act": "silu",
-     "hidden_size": 4096,
-     "initializer_range": 0.02,
-     "intermediate_size": 12288,
-     "max_position_embeddings": 262144,
-     "model_type": "qwen3_vl_text",
-     "num_attention_heads": 32,
-     "num_hidden_layers": 36,
-     "num_key_value_heads": 8,
-     "pad_token_id": 151643,
-     "rms_norm_eps": 1e-06,
-     "rope_scaling": {
-       "mrope_interleaved": true,
-       "mrope_section": [
-         24,
-         20,
-         20
-       ],
-       "rope_type": "default"
-     },
-     "rope_theta": 5000000,
-     "use_cache": false,
-     "vocab_size": 151936
-   },
-   "tie_word_embeddings": false,
-   "transformers_version": "4.57.3",
-   "video_token_id": 151656,
-   "vision_config": {
-     "deepstack_visual_indexes": [
-       8,
-       16,
-       24
-     ],
-     "depth": 27,
-     "dtype": "bfloat16",
-     "hidden_act": "gelu_pytorch_tanh",
-     "hidden_size": 1152,
-     "in_channels": 3,
-     "initializer_range": 0.02,
-     "intermediate_size": 4304,
-     "model_type": "qwen3_vl",
-     "num_heads": 16,
-     "num_position_embeddings": 2304,
-     "out_hidden_size": 4096,
-     "pad_token_id": 151643,
-     "patch_size": 16,
-     "spatial_merge_size": 2,
-     "temporal_patch_size": 2
-   },
-   "vision_end_token_id": 151653,
-   "vision_start_token_id": 151652
- }
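
A small sketch (assuming a local copy of the config.json above) that reads the deleted config and spells out a few architecture facts it states implicitly:

import json

with open("config.json") as f:
    cfg = json.load(f)

text = cfg["text_config"]
print("decoder layers:", text["num_hidden_layers"])        # 36
print("attention heads:", text["num_attention_heads"],
      "| KV heads:", text["num_key_value_heads"])          # 32 / 8 -> GQA factor 4
print("context length:", text["max_position_embeddings"])  # 262144

vision = cfg["vision_config"]
# After the 2x2 spatial merge, one visual token covers a square of
# patch_size * spatial_merge_size = 32 pixels per side.
side = vision["patch_size"] * vision["spatial_merge_size"]
print("input pixels per visual token:", side * side)       # 1024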
qwen3vl_8b_agentnetv1_fullsft_gb200/generation_config.json DELETED
@@ -1,13 +0,0 @@
- {
-   "bos_token_id": 151643,
-   "do_sample": true,
-   "eos_token_id": [
-     151645,
-     151643
-   ],
-   "pad_token_id": 151643,
-   "temperature": 0.7,
-   "top_k": 20,
-   "top_p": 0.8,
-   "transformers_version": "4.57.3"
- }
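
A hedged sketch of reapplying these sampling settings at inference time. The field values are copied from the deleted JSON; wiring it into generate assumes a loaded Qwen3-VL model and prepared inputs:

from transformers import GenerationConfig

gen_config = GenerationConfig(
    do_sample=True,
    temperature=0.7,
    top_k=20,
    top_p=0.8,
    bos_token_id=151643,
    eos_token_id=[151645, 151643],
    pad_token_id=151643,
)
# output_ids = model.generate(**inputs, generation_config=gen_config)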
qwen3vl_8b_agentnetv1_fullsft_gb200/latest DELETED
@@ -1 +0,0 @@
- global_step2196
qwen3vl_8b_agentnetv1_fullsft_gb200/merges.txt DELETED
The diff for this file is too large to render.
 
qwen3vl_8b_agentnetv1_fullsft_gb200/model-00001-of-00004.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:6c5126fa717c00c356e35a32c1db42a079af3805557fa1a8e4bb5686863d9d64
- size 4998056552
qwen3vl_8b_agentnetv1_fullsft_gb200/model-00002-of-00004.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:d32d38b2efa518aa561523e8cd3e272817b9c86ee66020e1061e1d15a940af3f
- size 4915962464
qwen3vl_8b_agentnetv1_fullsft_gb200/model-00003-of-00004.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:0f7f584474093d5ea8003ccbfed8d1ad7bc7ecc4f51083a4c0139f9114bfbf3f
- size 4915962496
qwen3vl_8b_agentnetv1_fullsft_gb200/model-00004-of-00004.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:a01f61fc3265c162770716c611d27873fe8719527c47956b15480209b1c15a49
- size 2704357976
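
These four files are Git LFS pointers, so the repo stores only a sha256 and a byte count per shard. A minimal sketch for verifying a downloaded shard against the pointers recorded above; the local paths are assumptions for illustration:

import hashlib

# oid/size pairs copied verbatim from the four deleted pointer files.
EXPECTED = {
    "model-00001-of-00004.safetensors": ("6c5126fa717c00c356e35a32c1db42a079af3805557fa1a8e4bb5686863d9d64", 4998056552),
    "model-00002-of-00004.safetensors": ("d32d38b2efa518aa561523e8cd3e272817b9c86ee66020e1061e1d15a940af3f", 4915962464),
    "model-00003-of-00004.safetensors": ("0f7f584474093d5ea8003ccbfed8d1ad7bc7ecc4f51083a4c0139f9114bfbf3f", 4915962496),
    "model-00004-of-00004.safetensors": ("a01f61fc3265c162770716c611d27873fe8719527c47956b15480209b1c15a49", 2704357976),
}

def verify(path, oid, size):
    # Stream the file in 1 MiB chunks so large shards don't fill memory.
    h, n = hashlib.sha256(), 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
            n += len(chunk)
    return h.hexdigest() == oid and n == size

for name, (oid, size) in EXPECTED.items():
    print(name, "ok" if verify(name, oid, size) else "MISMATCH")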
qwen3vl_8b_agentnetv1_fullsft_gb200/model.safetensors.index.json DELETED
@@ -1,758 +0,0 @@
- {
-   "metadata": {
-     "total_parameters": 8767123696,
-     "total_size": 17534247392
-   },
-   "weight_map": {
-     "lm_head.weight": "model-00004-of-00004.safetensors",
-     "model.language_model.embed_tokens.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.0.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.0.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.1.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.1.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.10.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.10.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.11.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.11.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.12.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.12.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.13.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.13.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.14.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.14.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.15.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.15.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.16.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.16.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.17.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.17.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.18.input_layernorm.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.18.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.18.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.18.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.18.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.18.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.19.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.19.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.19.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.19.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.19.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.19.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.19.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.19.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.19.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
-     "model.language_model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.2.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.2.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.20.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.20.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.20.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.20.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.21.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.21.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.22.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.22.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.23.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.23.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.24.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.24.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.25.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.25.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.26.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.26.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.27.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.27.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.28.input_layernorm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.28.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.28.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.28.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.28.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.28.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.28.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.28.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.28.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.28.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.28.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.29.input_layernorm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.29.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.29.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.29.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.29.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.29.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.29.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.29.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.29.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.29.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.29.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.3.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.3.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
-     "model.language_model.layers.30.input_layernorm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.30.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.30.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.30.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.30.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.30.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.30.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.30.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.30.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.30.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.30.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.31.input_layernorm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.31.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.31.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.31.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.31.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.31.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.31.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.31.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.31.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.31.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.31.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
-     "model.language_model.layers.32.input_layernorm.weight": "model-00004-of-00004.safetensors",
296
- "model.language_model.layers.32.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
297
- "model.language_model.layers.32.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
298
- "model.language_model.layers.32.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
299
- "model.language_model.layers.32.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
300
- "model.language_model.layers.32.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
301
- "model.language_model.layers.32.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
302
- "model.language_model.layers.32.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
303
- "model.language_model.layers.32.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
304
- "model.language_model.layers.32.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
305
- "model.language_model.layers.32.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
306
- "model.language_model.layers.33.input_layernorm.weight": "model-00004-of-00004.safetensors",
307
- "model.language_model.layers.33.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
308
- "model.language_model.layers.33.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
309
- "model.language_model.layers.33.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
310
- "model.language_model.layers.33.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
311
- "model.language_model.layers.33.self_attn.k_norm.weight": "model-00004-of-00004.safetensors",
312
- "model.language_model.layers.33.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
313
- "model.language_model.layers.33.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
314
- "model.language_model.layers.33.self_attn.q_norm.weight": "model-00004-of-00004.safetensors",
315
- "model.language_model.layers.33.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
316
- "model.language_model.layers.33.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
317
- "model.language_model.layers.34.input_layernorm.weight": "model-00004-of-00004.safetensors",
318
- "model.language_model.layers.34.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
319
- "model.language_model.layers.34.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
320
- "model.language_model.layers.34.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
321
- "model.language_model.layers.34.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
322
- "model.language_model.layers.34.self_attn.k_norm.weight": "model-00004-of-00004.safetensors",
323
- "model.language_model.layers.34.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
324
- "model.language_model.layers.34.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
325
- "model.language_model.layers.34.self_attn.q_norm.weight": "model-00004-of-00004.safetensors",
326
- "model.language_model.layers.34.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
327
- "model.language_model.layers.34.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
328
- "model.language_model.layers.35.input_layernorm.weight": "model-00004-of-00004.safetensors",
329
- "model.language_model.layers.35.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
330
- "model.language_model.layers.35.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
331
- "model.language_model.layers.35.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
332
- "model.language_model.layers.35.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
333
- "model.language_model.layers.35.self_attn.k_norm.weight": "model-00004-of-00004.safetensors",
334
- "model.language_model.layers.35.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
335
- "model.language_model.layers.35.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
336
- "model.language_model.layers.35.self_attn.q_norm.weight": "model-00004-of-00004.safetensors",
337
- "model.language_model.layers.35.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
338
- "model.language_model.layers.35.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
339
- "model.language_model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
340
- "model.language_model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
341
- "model.language_model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
342
- "model.language_model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
343
- "model.language_model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
344
- "model.language_model.layers.4.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
345
- "model.language_model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
346
- "model.language_model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
347
- "model.language_model.layers.4.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
348
- "model.language_model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
349
- "model.language_model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
350
- "model.language_model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
351
- "model.language_model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
352
- "model.language_model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
353
- "model.language_model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
354
- "model.language_model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
355
- "model.language_model.layers.5.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
356
- "model.language_model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
357
- "model.language_model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
358
- "model.language_model.layers.5.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
359
- "model.language_model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
360
- "model.language_model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
361
- "model.language_model.layers.6.input_layernorm.weight": "model-00002-of-00004.safetensors",
362
- "model.language_model.layers.6.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
363
- "model.language_model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
364
- "model.language_model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
365
- "model.language_model.layers.6.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
366
- "model.language_model.layers.6.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
367
- "model.language_model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
368
- "model.language_model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
369
- "model.language_model.layers.6.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
370
- "model.language_model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
371
- "model.language_model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
372
- "model.language_model.layers.7.input_layernorm.weight": "model-00002-of-00004.safetensors",
373
- "model.language_model.layers.7.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
374
- "model.language_model.layers.7.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
375
- "model.language_model.layers.7.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
376
- "model.language_model.layers.7.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
377
- "model.language_model.layers.7.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
378
- "model.language_model.layers.7.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
379
- "model.language_model.layers.7.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
380
- "model.language_model.layers.7.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
381
- "model.language_model.layers.7.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
382
- "model.language_model.layers.7.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
383
- "model.language_model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors",
384
- "model.language_model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
385
- "model.language_model.layers.8.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
386
- "model.language_model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
387
- "model.language_model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
388
- "model.language_model.layers.8.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
389
- "model.language_model.layers.8.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
390
- "model.language_model.layers.8.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
391
- "model.language_model.layers.8.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
392
- "model.language_model.layers.8.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
393
- "model.language_model.layers.8.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
394
- "model.language_model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
395
- "model.language_model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
396
- "model.language_model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
397
- "model.language_model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
398
- "model.language_model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
399
- "model.language_model.layers.9.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
400
- "model.language_model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
401
- "model.language_model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
402
- "model.language_model.layers.9.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
403
- "model.language_model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
404
- "model.language_model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
405
- "model.language_model.norm.weight": "model-00004-of-00004.safetensors",
406
- "model.visual.blocks.0.attn.proj.bias": "model-00001-of-00004.safetensors",
407
- "model.visual.blocks.0.attn.proj.weight": "model-00001-of-00004.safetensors",
408
- "model.visual.blocks.0.attn.qkv.bias": "model-00001-of-00004.safetensors",
409
- "model.visual.blocks.0.attn.qkv.weight": "model-00001-of-00004.safetensors",
410
- "model.visual.blocks.0.mlp.linear_fc1.bias": "model-00001-of-00004.safetensors",
411
- "model.visual.blocks.0.mlp.linear_fc1.weight": "model-00001-of-00004.safetensors",
412
- "model.visual.blocks.0.mlp.linear_fc2.bias": "model-00001-of-00004.safetensors",
413
- "model.visual.blocks.0.mlp.linear_fc2.weight": "model-00001-of-00004.safetensors",
414
- "model.visual.blocks.0.norm1.bias": "model-00001-of-00004.safetensors",
415
- "model.visual.blocks.0.norm1.weight": "model-00001-of-00004.safetensors",
416
- "model.visual.blocks.0.norm2.bias": "model-00001-of-00004.safetensors",
417
- "model.visual.blocks.0.norm2.weight": "model-00001-of-00004.safetensors",
418
- "model.visual.blocks.1.attn.proj.bias": "model-00001-of-00004.safetensors",
419
- "model.visual.blocks.1.attn.proj.weight": "model-00001-of-00004.safetensors",
420
- "model.visual.blocks.1.attn.qkv.bias": "model-00001-of-00004.safetensors",
421
- "model.visual.blocks.1.attn.qkv.weight": "model-00001-of-00004.safetensors",
422
- "model.visual.blocks.1.mlp.linear_fc1.bias": "model-00001-of-00004.safetensors",
423
- "model.visual.blocks.1.mlp.linear_fc1.weight": "model-00001-of-00004.safetensors",
424
- "model.visual.blocks.1.mlp.linear_fc2.bias": "model-00001-of-00004.safetensors",
425
- "model.visual.blocks.1.mlp.linear_fc2.weight": "model-00001-of-00004.safetensors",
426
- "model.visual.blocks.1.norm1.bias": "model-00001-of-00004.safetensors",
427
- "model.visual.blocks.1.norm1.weight": "model-00001-of-00004.safetensors",
428
- "model.visual.blocks.1.norm2.bias": "model-00001-of-00004.safetensors",
429
- "model.visual.blocks.1.norm2.weight": "model-00001-of-00004.safetensors",
430
- "model.visual.blocks.10.attn.proj.bias": "model-00001-of-00004.safetensors",
431
- "model.visual.blocks.10.attn.proj.weight": "model-00001-of-00004.safetensors",
432
- "model.visual.blocks.10.attn.qkv.bias": "model-00001-of-00004.safetensors",
433
- "model.visual.blocks.10.attn.qkv.weight": "model-00001-of-00004.safetensors",
434
- "model.visual.blocks.10.mlp.linear_fc1.bias": "model-00001-of-00004.safetensors",
435
- "model.visual.blocks.10.mlp.linear_fc1.weight": "model-00001-of-00004.safetensors",
436
- "model.visual.blocks.10.mlp.linear_fc2.bias": "model-00001-of-00004.safetensors",
437
- "model.visual.blocks.10.mlp.linear_fc2.weight": "model-00001-of-00004.safetensors",
438
- "model.visual.blocks.10.norm1.bias": "model-00001-of-00004.safetensors",
439
- "model.visual.blocks.10.norm1.weight": "model-00001-of-00004.safetensors",
440
- "model.visual.blocks.10.norm2.bias": "model-00001-of-00004.safetensors",
441
- "model.visual.blocks.10.norm2.weight": "model-00001-of-00004.safetensors",
442
- "model.visual.blocks.11.attn.proj.bias": "model-00001-of-00004.safetensors",
443
- "model.visual.blocks.11.attn.proj.weight": "model-00001-of-00004.safetensors",
444
- "model.visual.blocks.11.attn.qkv.bias": "model-00001-of-00004.safetensors",
445
- "model.visual.blocks.11.attn.qkv.weight": "model-00001-of-00004.safetensors",
446
- "model.visual.blocks.11.mlp.linear_fc1.bias": "model-00001-of-00004.safetensors",
447
- "model.visual.blocks.11.mlp.linear_fc1.weight": "model-00001-of-00004.safetensors",
448
- "model.visual.blocks.11.mlp.linear_fc2.bias": "model-00001-of-00004.safetensors",
449
- "model.visual.blocks.11.mlp.linear_fc2.weight": "model-00001-of-00004.safetensors",
450
- "model.visual.blocks.11.norm1.bias": "model-00001-of-00004.safetensors",
451
- "model.visual.blocks.11.norm1.weight": "model-00001-of-00004.safetensors",
452
- "model.visual.blocks.11.norm2.bias": "model-00001-of-00004.safetensors",
453
- "model.visual.blocks.11.norm2.weight": "model-00001-of-00004.safetensors",
454
- "model.visual.blocks.12.attn.proj.bias": "model-00001-of-00004.safetensors",
455
- "model.visual.blocks.12.attn.proj.weight": "model-00001-of-00004.safetensors",
456
- "model.visual.blocks.12.attn.qkv.bias": "model-00001-of-00004.safetensors",
457
- "model.visual.blocks.12.attn.qkv.weight": "model-00001-of-00004.safetensors",
458
- "model.visual.blocks.12.mlp.linear_fc1.bias": "model-00001-of-00004.safetensors",
459
- "model.visual.blocks.12.mlp.linear_fc1.weight": "model-00001-of-00004.safetensors",
460
- "model.visual.blocks.12.mlp.linear_fc2.bias": "model-00001-of-00004.safetensors",
461
- "model.visual.blocks.12.mlp.linear_fc2.weight": "model-00001-of-00004.safetensors",
462
- "model.visual.blocks.12.norm1.bias": "model-00001-of-00004.safetensors",
463
- "model.visual.blocks.12.norm1.weight": "model-00001-of-00004.safetensors",
464
- "model.visual.blocks.12.norm2.bias": "model-00001-of-00004.safetensors",
465
- "model.visual.blocks.12.norm2.weight": "model-00001-of-00004.safetensors",
466
- "model.visual.blocks.13.attn.proj.bias": "model-00001-of-00004.safetensors",
467
- "model.visual.blocks.13.attn.proj.weight": "model-00001-of-00004.safetensors",
468
- "model.visual.blocks.13.attn.qkv.bias": "model-00001-of-00004.safetensors",
469
- "model.visual.blocks.13.attn.qkv.weight": "model-00001-of-00004.safetensors",
470
- "model.visual.blocks.13.mlp.linear_fc1.bias": "model-00001-of-00004.safetensors",
471
- "model.visual.blocks.13.mlp.linear_fc1.weight": "model-00001-of-00004.safetensors",
472
- "model.visual.blocks.13.mlp.linear_fc2.bias": "model-00001-of-00004.safetensors",
473
- "model.visual.blocks.13.mlp.linear_fc2.weight": "model-00001-of-00004.safetensors",
474
- "model.visual.blocks.13.norm1.bias": "model-00001-of-00004.safetensors",
475
- "model.visual.blocks.13.norm1.weight": "model-00001-of-00004.safetensors",
476
- "model.visual.blocks.13.norm2.bias": "model-00001-of-00004.safetensors",
477
- "model.visual.blocks.13.norm2.weight": "model-00001-of-00004.safetensors",
478
- "model.visual.blocks.14.attn.proj.bias": "model-00001-of-00004.safetensors",
479
- "model.visual.blocks.14.attn.proj.weight": "model-00001-of-00004.safetensors",
480
- "model.visual.blocks.14.attn.qkv.bias": "model-00001-of-00004.safetensors",
481
- "model.visual.blocks.14.attn.qkv.weight": "model-00001-of-00004.safetensors",
482
- "model.visual.blocks.14.mlp.linear_fc1.bias": "model-00001-of-00004.safetensors",
483
- "model.visual.blocks.14.mlp.linear_fc1.weight": "model-00001-of-00004.safetensors",
484
- "model.visual.blocks.14.mlp.linear_fc2.bias": "model-00001-of-00004.safetensors",
485
- "model.visual.blocks.14.mlp.linear_fc2.weight": "model-00001-of-00004.safetensors",
486
- "model.visual.blocks.14.norm1.bias": "model-00001-of-00004.safetensors",
487
- "model.visual.blocks.14.norm1.weight": "model-00001-of-00004.safetensors",
488
- "model.visual.blocks.14.norm2.bias": "model-00001-of-00004.safetensors",
489
- "model.visual.blocks.14.norm2.weight": "model-00001-of-00004.safetensors",
490
- "model.visual.blocks.15.attn.proj.bias": "model-00001-of-00004.safetensors",
491
- "model.visual.blocks.15.attn.proj.weight": "model-00001-of-00004.safetensors",
492
- "model.visual.blocks.15.attn.qkv.bias": "model-00001-of-00004.safetensors",
493
- "model.visual.blocks.15.attn.qkv.weight": "model-00001-of-00004.safetensors",
494
- "model.visual.blocks.15.mlp.linear_fc1.bias": "model-00001-of-00004.safetensors",
495
- "model.visual.blocks.15.mlp.linear_fc1.weight": "model-00001-of-00004.safetensors",
496
- "model.visual.blocks.15.mlp.linear_fc2.bias": "model-00001-of-00004.safetensors",
497
- "model.visual.blocks.15.mlp.linear_fc2.weight": "model-00001-of-00004.safetensors",
498
- "model.visual.blocks.15.norm1.bias": "model-00001-of-00004.safetensors",
499
- "model.visual.blocks.15.norm1.weight": "model-00001-of-00004.safetensors",
500
- "model.visual.blocks.15.norm2.bias": "model-00001-of-00004.safetensors",
501
- "model.visual.blocks.15.norm2.weight": "model-00001-of-00004.safetensors",
502
- "model.visual.blocks.16.attn.proj.bias": "model-00001-of-00004.safetensors",
503
- "model.visual.blocks.16.attn.proj.weight": "model-00001-of-00004.safetensors",
504
- "model.visual.blocks.16.attn.qkv.bias": "model-00001-of-00004.safetensors",
505
- "model.visual.blocks.16.attn.qkv.weight": "model-00001-of-00004.safetensors",
506
- "model.visual.blocks.16.mlp.linear_fc1.bias": "model-00001-of-00004.safetensors",
507
- "model.visual.blocks.16.mlp.linear_fc1.weight": "model-00001-of-00004.safetensors",
508
- "model.visual.blocks.16.mlp.linear_fc2.bias": "model-00001-of-00004.safetensors",
509
- "model.visual.blocks.16.mlp.linear_fc2.weight": "model-00001-of-00004.safetensors",
510
- "model.visual.blocks.16.norm1.bias": "model-00001-of-00004.safetensors",
511
- "model.visual.blocks.16.norm1.weight": "model-00001-of-00004.safetensors",
512
- "model.visual.blocks.16.norm2.bias": "model-00001-of-00004.safetensors",
513
- "model.visual.blocks.16.norm2.weight": "model-00001-of-00004.safetensors",
514
- "model.visual.blocks.17.attn.proj.bias": "model-00001-of-00004.safetensors",
515
- "model.visual.blocks.17.attn.proj.weight": "model-00001-of-00004.safetensors",
516
- "model.visual.blocks.17.attn.qkv.bias": "model-00001-of-00004.safetensors",
517
- "model.visual.blocks.17.attn.qkv.weight": "model-00001-of-00004.safetensors",
518
- "model.visual.blocks.17.mlp.linear_fc1.bias": "model-00001-of-00004.safetensors",
519
- "model.visual.blocks.17.mlp.linear_fc1.weight": "model-00001-of-00004.safetensors",
520
- "model.visual.blocks.17.mlp.linear_fc2.bias": "model-00001-of-00004.safetensors",
521
- "model.visual.blocks.17.mlp.linear_fc2.weight": "model-00001-of-00004.safetensors",
522
- "model.visual.blocks.17.norm1.bias": "model-00001-of-00004.safetensors",
523
- "model.visual.blocks.17.norm1.weight": "model-00001-of-00004.safetensors",
524
- "model.visual.blocks.17.norm2.bias": "model-00001-of-00004.safetensors",
525
- "model.visual.blocks.17.norm2.weight": "model-00001-of-00004.safetensors",
526
- "model.visual.blocks.18.attn.proj.bias": "model-00001-of-00004.safetensors",
527
- "model.visual.blocks.18.attn.proj.weight": "model-00001-of-00004.safetensors",
528
- "model.visual.blocks.18.attn.qkv.bias": "model-00001-of-00004.safetensors",
529
- "model.visual.blocks.18.attn.qkv.weight": "model-00001-of-00004.safetensors",
530
- "model.visual.blocks.18.mlp.linear_fc1.bias": "model-00001-of-00004.safetensors",
531
- "model.visual.blocks.18.mlp.linear_fc1.weight": "model-00001-of-00004.safetensors",
532
- "model.visual.blocks.18.mlp.linear_fc2.bias": "model-00001-of-00004.safetensors",
533
- "model.visual.blocks.18.mlp.linear_fc2.weight": "model-00001-of-00004.safetensors",
534
- "model.visual.blocks.18.norm1.bias": "model-00001-of-00004.safetensors",
535
- "model.visual.blocks.18.norm1.weight": "model-00001-of-00004.safetensors",
536
- "model.visual.blocks.18.norm2.bias": "model-00001-of-00004.safetensors",
537
- "model.visual.blocks.18.norm2.weight": "model-00001-of-00004.safetensors",
538
- "model.visual.blocks.19.attn.proj.bias": "model-00001-of-00004.safetensors",
539
- "model.visual.blocks.19.attn.proj.weight": "model-00001-of-00004.safetensors",
540
- "model.visual.blocks.19.attn.qkv.bias": "model-00001-of-00004.safetensors",
541
- "model.visual.blocks.19.attn.qkv.weight": "model-00001-of-00004.safetensors",
542
- "model.visual.blocks.19.mlp.linear_fc1.bias": "model-00001-of-00004.safetensors",
543
- "model.visual.blocks.19.mlp.linear_fc1.weight": "model-00001-of-00004.safetensors",
544
- "model.visual.blocks.19.mlp.linear_fc2.bias": "model-00001-of-00004.safetensors",
545
- "model.visual.blocks.19.mlp.linear_fc2.weight": "model-00001-of-00004.safetensors",
546
- "model.visual.blocks.19.norm1.bias": "model-00001-of-00004.safetensors",
547
- "model.visual.blocks.19.norm1.weight": "model-00001-of-00004.safetensors",
548
- "model.visual.blocks.19.norm2.bias": "model-00001-of-00004.safetensors",
549
- "model.visual.blocks.19.norm2.weight": "model-00001-of-00004.safetensors",
550
- "model.visual.blocks.2.attn.proj.bias": "model-00001-of-00004.safetensors",
551
- "model.visual.blocks.2.attn.proj.weight": "model-00001-of-00004.safetensors",
552
- "model.visual.blocks.2.attn.qkv.bias": "model-00001-of-00004.safetensors",
553
- "model.visual.blocks.2.attn.qkv.weight": "model-00001-of-00004.safetensors",
554
- "model.visual.blocks.2.mlp.linear_fc1.bias": "model-00001-of-00004.safetensors",
555
- "model.visual.blocks.2.mlp.linear_fc1.weight": "model-00001-of-00004.safetensors",
556
- "model.visual.blocks.2.mlp.linear_fc2.bias": "model-00001-of-00004.safetensors",
557
- "model.visual.blocks.2.mlp.linear_fc2.weight": "model-00001-of-00004.safetensors",
558
- "model.visual.blocks.2.norm1.bias": "model-00001-of-00004.safetensors",
559
- "model.visual.blocks.2.norm1.weight": "model-00001-of-00004.safetensors",
560
- "model.visual.blocks.2.norm2.bias": "model-00001-of-00004.safetensors",
561
- "model.visual.blocks.2.norm2.weight": "model-00001-of-00004.safetensors",
562
- "model.visual.blocks.20.attn.proj.bias": "model-00001-of-00004.safetensors",
563
- "model.visual.blocks.20.attn.proj.weight": "model-00001-of-00004.safetensors",
564
- "model.visual.blocks.20.attn.qkv.bias": "model-00001-of-00004.safetensors",
565
- "model.visual.blocks.20.attn.qkv.weight": "model-00001-of-00004.safetensors",
566
- "model.visual.blocks.20.mlp.linear_fc1.bias": "model-00001-of-00004.safetensors",
567
- "model.visual.blocks.20.mlp.linear_fc1.weight": "model-00001-of-00004.safetensors",
568
- "model.visual.blocks.20.mlp.linear_fc2.bias": "model-00001-of-00004.safetensors",
569
- "model.visual.blocks.20.mlp.linear_fc2.weight": "model-00001-of-00004.safetensors",
570
- "model.visual.blocks.20.norm1.bias": "model-00001-of-00004.safetensors",
571
- "model.visual.blocks.20.norm1.weight": "model-00001-of-00004.safetensors",
572
- "model.visual.blocks.20.norm2.bias": "model-00001-of-00004.safetensors",
573
- "model.visual.blocks.20.norm2.weight": "model-00001-of-00004.safetensors",
574
- "model.visual.blocks.21.attn.proj.bias": "model-00001-of-00004.safetensors",
575
- "model.visual.blocks.21.attn.proj.weight": "model-00001-of-00004.safetensors",
576
- "model.visual.blocks.21.attn.qkv.bias": "model-00001-of-00004.safetensors",
577
- "model.visual.blocks.21.attn.qkv.weight": "model-00001-of-00004.safetensors",
578
- "model.visual.blocks.21.mlp.linear_fc1.bias": "model-00001-of-00004.safetensors",
579
- "model.visual.blocks.21.mlp.linear_fc1.weight": "model-00001-of-00004.safetensors",
580
- "model.visual.blocks.21.mlp.linear_fc2.bias": "model-00001-of-00004.safetensors",
581
- "model.visual.blocks.21.mlp.linear_fc2.weight": "model-00001-of-00004.safetensors",
582
- "model.visual.blocks.21.norm1.bias": "model-00001-of-00004.safetensors",
583
- "model.visual.blocks.21.norm1.weight": "model-00001-of-00004.safetensors",
584
- "model.visual.blocks.21.norm2.bias": "model-00001-of-00004.safetensors",
585
- "model.visual.blocks.21.norm2.weight": "model-00001-of-00004.safetensors",
586
- "model.visual.blocks.22.attn.proj.bias": "model-00001-of-00004.safetensors",
587
- "model.visual.blocks.22.attn.proj.weight": "model-00001-of-00004.safetensors",
588
- "model.visual.blocks.22.attn.qkv.bias": "model-00001-of-00004.safetensors",
589
- "model.visual.blocks.22.attn.qkv.weight": "model-00001-of-00004.safetensors",
590
- "model.visual.blocks.22.mlp.linear_fc1.bias": "model-00001-of-00004.safetensors",
591
- "model.visual.blocks.22.mlp.linear_fc1.weight": "model-00001-of-00004.safetensors",
592
- "model.visual.blocks.22.mlp.linear_fc2.bias": "model-00001-of-00004.safetensors",
593
- "model.visual.blocks.22.mlp.linear_fc2.weight": "model-00001-of-00004.safetensors",
594
- "model.visual.blocks.22.norm1.bias": "model-00001-of-00004.safetensors",
595
- "model.visual.blocks.22.norm1.weight": "model-00001-of-00004.safetensors",
596
- "model.visual.blocks.22.norm2.bias": "model-00001-of-00004.safetensors",
597
- "model.visual.blocks.22.norm2.weight": "model-00001-of-00004.safetensors",
598
- "model.visual.blocks.23.attn.proj.bias": "model-00001-of-00004.safetensors",
599
- "model.visual.blocks.23.attn.proj.weight": "model-00001-of-00004.safetensors",
600
- "model.visual.blocks.23.attn.qkv.bias": "model-00001-of-00004.safetensors",
601
- "model.visual.blocks.23.attn.qkv.weight": "model-00001-of-00004.safetensors",
602
- "model.visual.blocks.23.mlp.linear_fc1.bias": "model-00001-of-00004.safetensors",
603
- "model.visual.blocks.23.mlp.linear_fc1.weight": "model-00001-of-00004.safetensors",
604
- "model.visual.blocks.23.mlp.linear_fc2.bias": "model-00001-of-00004.safetensors",
605
- "model.visual.blocks.23.mlp.linear_fc2.weight": "model-00001-of-00004.safetensors",
606
- "model.visual.blocks.23.norm1.bias": "model-00001-of-00004.safetensors",
607
- "model.visual.blocks.23.norm1.weight": "model-00001-of-00004.safetensors",
608
- "model.visual.blocks.23.norm2.bias": "model-00001-of-00004.safetensors",
609
- "model.visual.blocks.23.norm2.weight": "model-00001-of-00004.safetensors",
610
- "model.visual.blocks.24.attn.proj.bias": "model-00001-of-00004.safetensors",
611
- "model.visual.blocks.24.attn.proj.weight": "model-00001-of-00004.safetensors",
612
- "model.visual.blocks.24.attn.qkv.bias": "model-00001-of-00004.safetensors",
613
- "model.visual.blocks.24.attn.qkv.weight": "model-00001-of-00004.safetensors",
614
- "model.visual.blocks.24.mlp.linear_fc1.bias": "model-00001-of-00004.safetensors",
615
- "model.visual.blocks.24.mlp.linear_fc1.weight": "model-00001-of-00004.safetensors",
616
- "model.visual.blocks.24.mlp.linear_fc2.bias": "model-00001-of-00004.safetensors",
617
- "model.visual.blocks.24.mlp.linear_fc2.weight": "model-00001-of-00004.safetensors",
618
- "model.visual.blocks.24.norm1.bias": "model-00001-of-00004.safetensors",
619
- "model.visual.blocks.24.norm1.weight": "model-00001-of-00004.safetensors",
620
- "model.visual.blocks.24.norm2.bias": "model-00001-of-00004.safetensors",
621
- "model.visual.blocks.24.norm2.weight": "model-00001-of-00004.safetensors",
622
- "model.visual.blocks.25.attn.proj.bias": "model-00001-of-00004.safetensors",
623
- "model.visual.blocks.25.attn.proj.weight": "model-00001-of-00004.safetensors",
624
- "model.visual.blocks.25.attn.qkv.bias": "model-00001-of-00004.safetensors",
625
- "model.visual.blocks.25.attn.qkv.weight": "model-00001-of-00004.safetensors",
626
- "model.visual.blocks.25.mlp.linear_fc1.bias": "model-00001-of-00004.safetensors",
627
- "model.visual.blocks.25.mlp.linear_fc1.weight": "model-00001-of-00004.safetensors",
628
- "model.visual.blocks.25.mlp.linear_fc2.bias": "model-00001-of-00004.safetensors",
629
- "model.visual.blocks.25.mlp.linear_fc2.weight": "model-00001-of-00004.safetensors",
630
- "model.visual.blocks.25.norm1.bias": "model-00001-of-00004.safetensors",
631
- "model.visual.blocks.25.norm1.weight": "model-00001-of-00004.safetensors",
632
- "model.visual.blocks.25.norm2.bias": "model-00001-of-00004.safetensors",
633
- "model.visual.blocks.25.norm2.weight": "model-00001-of-00004.safetensors",
634
- "model.visual.blocks.26.attn.proj.bias": "model-00001-of-00004.safetensors",
635
- "model.visual.blocks.26.attn.proj.weight": "model-00001-of-00004.safetensors",
636
- "model.visual.blocks.26.attn.qkv.bias": "model-00001-of-00004.safetensors",
637
- "model.visual.blocks.26.attn.qkv.weight": "model-00001-of-00004.safetensors",
638
- "model.visual.blocks.26.mlp.linear_fc1.bias": "model-00001-of-00004.safetensors",
639
- "model.visual.blocks.26.mlp.linear_fc1.weight": "model-00001-of-00004.safetensors",
640
- "model.visual.blocks.26.mlp.linear_fc2.bias": "model-00001-of-00004.safetensors",
641
- "model.visual.blocks.26.mlp.linear_fc2.weight": "model-00001-of-00004.safetensors",
642
- "model.visual.blocks.26.norm1.bias": "model-00001-of-00004.safetensors",
643
- "model.visual.blocks.26.norm1.weight": "model-00001-of-00004.safetensors",
644
- "model.visual.blocks.26.norm2.bias": "model-00001-of-00004.safetensors",
645
- "model.visual.blocks.26.norm2.weight": "model-00001-of-00004.safetensors",
646
- "model.visual.blocks.3.attn.proj.bias": "model-00001-of-00004.safetensors",
647
- "model.visual.blocks.3.attn.proj.weight": "model-00001-of-00004.safetensors",
648
- "model.visual.blocks.3.attn.qkv.bias": "model-00001-of-00004.safetensors",
649
- "model.visual.blocks.3.attn.qkv.weight": "model-00001-of-00004.safetensors",
650
- "model.visual.blocks.3.mlp.linear_fc1.bias": "model-00001-of-00004.safetensors",
651
- "model.visual.blocks.3.mlp.linear_fc1.weight": "model-00001-of-00004.safetensors",
652
- "model.visual.blocks.3.mlp.linear_fc2.bias": "model-00001-of-00004.safetensors",
653
- "model.visual.blocks.3.mlp.linear_fc2.weight": "model-00001-of-00004.safetensors",
654
- "model.visual.blocks.3.norm1.bias": "model-00001-of-00004.safetensors",
655
- "model.visual.blocks.3.norm1.weight": "model-00001-of-00004.safetensors",
656
- "model.visual.blocks.3.norm2.bias": "model-00001-of-00004.safetensors",
657
- "model.visual.blocks.3.norm2.weight": "model-00001-of-00004.safetensors",
658
- "model.visual.blocks.4.attn.proj.bias": "model-00001-of-00004.safetensors",
659
- "model.visual.blocks.4.attn.proj.weight": "model-00001-of-00004.safetensors",
660
- "model.visual.blocks.4.attn.qkv.bias": "model-00001-of-00004.safetensors",
661
- "model.visual.blocks.4.attn.qkv.weight": "model-00001-of-00004.safetensors",
662
- "model.visual.blocks.4.mlp.linear_fc1.bias": "model-00001-of-00004.safetensors",
663
- "model.visual.blocks.4.mlp.linear_fc1.weight": "model-00001-of-00004.safetensors",
664
- "model.visual.blocks.4.mlp.linear_fc2.bias": "model-00001-of-00004.safetensors",
665
- "model.visual.blocks.4.mlp.linear_fc2.weight": "model-00001-of-00004.safetensors",
666
- "model.visual.blocks.4.norm1.bias": "model-00001-of-00004.safetensors",
667
- "model.visual.blocks.4.norm1.weight": "model-00001-of-00004.safetensors",
668
- "model.visual.blocks.4.norm2.bias": "model-00001-of-00004.safetensors",
669
- "model.visual.blocks.4.norm2.weight": "model-00001-of-00004.safetensors",
670
- "model.visual.blocks.5.attn.proj.bias": "model-00001-of-00004.safetensors",
671
- "model.visual.blocks.5.attn.proj.weight": "model-00001-of-00004.safetensors",
672
- "model.visual.blocks.5.attn.qkv.bias": "model-00001-of-00004.safetensors",
673
- "model.visual.blocks.5.attn.qkv.weight": "model-00001-of-00004.safetensors",
674
- "model.visual.blocks.5.mlp.linear_fc1.bias": "model-00001-of-00004.safetensors",
675
- "model.visual.blocks.5.mlp.linear_fc1.weight": "model-00001-of-00004.safetensors",
676
- "model.visual.blocks.5.mlp.linear_fc2.bias": "model-00001-of-00004.safetensors",
677
- "model.visual.blocks.5.mlp.linear_fc2.weight": "model-00001-of-00004.safetensors",
678
- "model.visual.blocks.5.norm1.bias": "model-00001-of-00004.safetensors",
679
- "model.visual.blocks.5.norm1.weight": "model-00001-of-00004.safetensors",
680
- "model.visual.blocks.5.norm2.bias": "model-00001-of-00004.safetensors",
681
- "model.visual.blocks.5.norm2.weight": "model-00001-of-00004.safetensors",
682
- "model.visual.blocks.6.attn.proj.bias": "model-00001-of-00004.safetensors",
683
- "model.visual.blocks.6.attn.proj.weight": "model-00001-of-00004.safetensors",
684
- "model.visual.blocks.6.attn.qkv.bias": "model-00001-of-00004.safetensors",
685
- "model.visual.blocks.6.attn.qkv.weight": "model-00001-of-00004.safetensors",
686
- "model.visual.blocks.6.mlp.linear_fc1.bias": "model-00001-of-00004.safetensors",
687
- "model.visual.blocks.6.mlp.linear_fc1.weight": "model-00001-of-00004.safetensors",
688
- "model.visual.blocks.6.mlp.linear_fc2.bias": "model-00001-of-00004.safetensors",
689
- "model.visual.blocks.6.mlp.linear_fc2.weight": "model-00001-of-00004.safetensors",
690
- "model.visual.blocks.6.norm1.bias": "model-00001-of-00004.safetensors",
691
- "model.visual.blocks.6.norm1.weight": "model-00001-of-00004.safetensors",
692
- "model.visual.blocks.6.norm2.bias": "model-00001-of-00004.safetensors",
693
- "model.visual.blocks.6.norm2.weight": "model-00001-of-00004.safetensors",
694
- "model.visual.blocks.7.attn.proj.bias": "model-00001-of-00004.safetensors",
695
- "model.visual.blocks.7.attn.proj.weight": "model-00001-of-00004.safetensors",
696
- "model.visual.blocks.7.attn.qkv.bias": "model-00001-of-00004.safetensors",
697
- "model.visual.blocks.7.attn.qkv.weight": "model-00001-of-00004.safetensors",
698
- "model.visual.blocks.7.mlp.linear_fc1.bias": "model-00001-of-00004.safetensors",
699
- "model.visual.blocks.7.mlp.linear_fc1.weight": "model-00001-of-00004.safetensors",
700
- "model.visual.blocks.7.mlp.linear_fc2.bias": "model-00001-of-00004.safetensors",
701
- "model.visual.blocks.7.mlp.linear_fc2.weight": "model-00001-of-00004.safetensors",
702
- "model.visual.blocks.7.norm1.bias": "model-00001-of-00004.safetensors",
703
- "model.visual.blocks.7.norm1.weight": "model-00001-of-00004.safetensors",
704
- "model.visual.blocks.7.norm2.bias": "model-00001-of-00004.safetensors",
705
- "model.visual.blocks.7.norm2.weight": "model-00001-of-00004.safetensors",
706
- "model.visual.blocks.8.attn.proj.bias": "model-00001-of-00004.safetensors",
707
- "model.visual.blocks.8.attn.proj.weight": "model-00001-of-00004.safetensors",
708
- "model.visual.blocks.8.attn.qkv.bias": "model-00001-of-00004.safetensors",
709
- "model.visual.blocks.8.attn.qkv.weight": "model-00001-of-00004.safetensors",
710
- "model.visual.blocks.8.mlp.linear_fc1.bias": "model-00001-of-00004.safetensors",
711
- "model.visual.blocks.8.mlp.linear_fc1.weight": "model-00001-of-00004.safetensors",
712
- "model.visual.blocks.8.mlp.linear_fc2.bias": "model-00001-of-00004.safetensors",
713
- "model.visual.blocks.8.mlp.linear_fc2.weight": "model-00001-of-00004.safetensors",
714
- "model.visual.blocks.8.norm1.bias": "model-00001-of-00004.safetensors",
715
- "model.visual.blocks.8.norm1.weight": "model-00001-of-00004.safetensors",
716
- "model.visual.blocks.8.norm2.bias": "model-00001-of-00004.safetensors",
717
- "model.visual.blocks.8.norm2.weight": "model-00001-of-00004.safetensors",
718
- "model.visual.blocks.9.attn.proj.bias": "model-00001-of-00004.safetensors",
719
- "model.visual.blocks.9.attn.proj.weight": "model-00001-of-00004.safetensors",
720
- "model.visual.blocks.9.attn.qkv.bias": "model-00001-of-00004.safetensors",
721
- "model.visual.blocks.9.attn.qkv.weight": "model-00001-of-00004.safetensors",
722
- "model.visual.blocks.9.mlp.linear_fc1.bias": "model-00001-of-00004.safetensors",
723
- "model.visual.blocks.9.mlp.linear_fc1.weight": "model-00001-of-00004.safetensors",
724
- "model.visual.blocks.9.mlp.linear_fc2.bias": "model-00001-of-00004.safetensors",
725
- "model.visual.blocks.9.mlp.linear_fc2.weight": "model-00001-of-00004.safetensors",
726
- "model.visual.blocks.9.norm1.bias": "model-00001-of-00004.safetensors",
727
- "model.visual.blocks.9.norm1.weight": "model-00001-of-00004.safetensors",
728
- "model.visual.blocks.9.norm2.bias": "model-00001-of-00004.safetensors",
729
- "model.visual.blocks.9.norm2.weight": "model-00001-of-00004.safetensors",
730
- "model.visual.deepstack_merger_list.0.linear_fc1.bias": "model-00001-of-00004.safetensors",
731
- "model.visual.deepstack_merger_list.0.linear_fc1.weight": "model-00001-of-00004.safetensors",
732
- "model.visual.deepstack_merger_list.0.linear_fc2.bias": "model-00001-of-00004.safetensors",
733
- "model.visual.deepstack_merger_list.0.linear_fc2.weight": "model-00001-of-00004.safetensors",
734
- "model.visual.deepstack_merger_list.0.norm.bias": "model-00001-of-00004.safetensors",
735
- "model.visual.deepstack_merger_list.0.norm.weight": "model-00001-of-00004.safetensors",
736
- "model.visual.deepstack_merger_list.1.linear_fc1.bias": "model-00001-of-00004.safetensors",
737
- "model.visual.deepstack_merger_list.1.linear_fc1.weight": "model-00001-of-00004.safetensors",
738
- "model.visual.deepstack_merger_list.1.linear_fc2.bias": "model-00001-of-00004.safetensors",
739
- "model.visual.deepstack_merger_list.1.linear_fc2.weight": "model-00001-of-00004.safetensors",
740
- "model.visual.deepstack_merger_list.1.norm.bias": "model-00001-of-00004.safetensors",
741
- "model.visual.deepstack_merger_list.1.norm.weight": "model-00001-of-00004.safetensors",
742
- "model.visual.deepstack_merger_list.2.linear_fc1.bias": "model-00001-of-00004.safetensors",
743
- "model.visual.deepstack_merger_list.2.linear_fc1.weight": "model-00001-of-00004.safetensors",
744
- "model.visual.deepstack_merger_list.2.linear_fc2.bias": "model-00001-of-00004.safetensors",
745
- "model.visual.deepstack_merger_list.2.linear_fc2.weight": "model-00001-of-00004.safetensors",
746
- "model.visual.deepstack_merger_list.2.norm.bias": "model-00001-of-00004.safetensors",
747
- "model.visual.deepstack_merger_list.2.norm.weight": "model-00001-of-00004.safetensors",
748
- "model.visual.merger.linear_fc1.bias": "model-00001-of-00004.safetensors",
749
- "model.visual.merger.linear_fc1.weight": "model-00001-of-00004.safetensors",
750
- "model.visual.merger.linear_fc2.bias": "model-00001-of-00004.safetensors",
751
- "model.visual.merger.linear_fc2.weight": "model-00001-of-00004.safetensors",
752
- "model.visual.merger.norm.bias": "model-00001-of-00004.safetensors",
753
- "model.visual.merger.norm.weight": "model-00001-of-00004.safetensors",
754
- "model.visual.patch_embed.proj.bias": "model-00001-of-00004.safetensors",
755
- "model.visual.patch_embed.proj.weight": "model-00001-of-00004.safetensors",
756
- "model.visual.pos_embed.weight": "model-00001-of-00004.safetensors"
757
- }
758
- }
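Note: the weight_map above is the standard safetensors sharding index — every tensor name points at the shard file that stores it (the language-model layers span all four shards, while the vision tower lives entirely in shard 1). A minimal sketch of resolving one tensor by hand, assuming the four `model-0000x-of-00004.safetensors` files and this index sit in a local directory (the path below is hypothetical) and the usual index layout with a top-level `weight_map` key:

```python
# Minimal sketch: resolve a tensor to its shard via model.safetensors.index.json.
# Assumes the checkpoint files were pulled locally; "ckpt" is a hypothetical path.
import json

from safetensors import safe_open

ckpt = "qwen3vl_8b_agentnetv1_fullsft_gb200"
with open(f"{ckpt}/model.safetensors.index.json") as f:
    weight_map = json.load(f)["weight_map"]

name = "model.language_model.layers.27.mlp.down_proj.weight"
shard = weight_map[name]  # -> "model-00003-of-00004.safetensors"

# safe_open memory-maps the shard, so only the requested tensor is read.
with safe_open(f"{ckpt}/{shard}", framework="pt") as f:
    tensor = f.get_tensor(name)
print(name, tuple(tensor.shape), "<-", shard)
```

In practice `from_pretrained` consumes this index automatically; the manual route is mainly useful for inspecting or patching individual tensors.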
qwen3vl_8b_agentnetv1_fullsft_gb200/preprocessor_config.json DELETED
@@ -1,21 +0,0 @@
- {
- "size": {
- "longest_edge": 16777216,
- "shortest_edge": 65536
- },
- "patch_size": 16,
- "temporal_patch_size": 2,
- "merge_size": 2,
- "image_mean": [
- 0.5,
- 0.5,
- 0.5
- ],
- "image_std": [
- 0.5,
- 0.5,
- 0.5
- ],
- "processor_class": "Qwen3VLProcessor",
- "image_processor_type": "Qwen2VLImageProcessorFast"
- }
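Note: in the fast Qwen2-VL image processor, `shortest_edge`/`longest_edge` appear to act as total-pixel bounds for smart resizing (65536 = 256×256, 16777216 = 4096×4096), and `patch_size=16` with `merge_size=2` means each visual token corresponds to a 32×32 pixel region (`temporal_patch_size=2` applies to video frames). A rough back-of-envelope sketch of that geometry — plain arithmetic under those assumptions, not an API call:

```python
# Rough sketch of this preprocessor's geometry, assuming the size dict is a
# total-pixel budget and that 2x2 patch merging yields one token per 32x32 area.
PATCH, MERGE = 16, 2
MIN_PIXELS, MAX_PIXELS = 65536, 16777216

def visual_tokens(height: int, width: int) -> int:
    """Approximate token count for an image already inside the pixel budget."""
    unit = PATCH * MERGE  # 32 px per merged token, per axis
    return (height // unit) * (width // unit)

assert MIN_PIXELS <= 1024 * 1024 <= MAX_PIXELS
print(visual_tokens(1024, 1024))  # -> 1024 visual tokens
```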
qwen3vl_8b_agentnetv1_fullsft_gb200/special_tokens_map.json DELETED
@@ -1,31 +0,0 @@
- {
- "additional_special_tokens": [
- "<|im_start|>",
- "<|im_end|>",
- "<|object_ref_start|>",
- "<|object_ref_end|>",
- "<|box_start|>",
- "<|box_end|>",
- "<|quad_start|>",
- "<|quad_end|>",
- "<|vision_start|>",
- "<|vision_end|>",
- "<|vision_pad|>",
- "<|image_pad|>",
- "<|video_pad|>"
- ],
- "eos_token": {
- "content": "<|im_end|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false
- },
- "pad_token": {
- "content": "<|endoftext|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false
- }
- }
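Note: this file declares the token roles the tokenizer exposes after loading — `<|im_end|>` terminates each ChatML turn (eos), and `<|endoftext|>` is reused for batch padding. A quick sketch of those roles through transformers, assuming the deleted files are restored to a local directory (hypothetical path):

```python
# Sketch: the special-token roles declared above, as seen through transformers.
# Assumes the checkpoint directory is available locally (hypothetical path).
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("qwen3vl_8b_agentnetv1_fullsft_gb200")
assert tok.eos_token == "<|im_end|>"     # ends each ChatML turn
assert tok.pad_token == "<|endoftext|>"  # used for batch padding
print(tok.convert_tokens_to_ids([tok.eos_token, tok.pad_token]))  # [151645, 151643]
```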
qwen3vl_8b_agentnetv1_fullsft_gb200/tokenizer.json DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:aeb13307a71acd8fe81861d94ad54ab689df773318809eed3cbe794b4492dae4
- size 11422654
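Note: tokenizer.json was tracked through Git LFS, so the repository only held this three-line pointer; the ~11 MB payload lives in LFS storage. A sketch of verifying a fetched copy against the pointer's oid and size (the local path is hypothetical):

```python
# Sketch: check a locally fetched tokenizer.json against the LFS pointer above.
import hashlib
import os

path = "qwen3vl_8b_agentnetv1_fullsft_gb200/tokenizer.json"  # hypothetical local copy
EXPECTED_SHA256 = "aeb13307a71acd8fe81861d94ad54ab689df773318809eed3cbe794b4492dae4"
EXPECTED_SIZE = 11422654

assert os.path.getsize(path) == EXPECTED_SIZE
with open(path, "rb") as f:
    assert hashlib.sha256(f.read()).hexdigest() == EXPECTED_SHA256
print("tokenizer.json matches the LFS pointer")
```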
qwen3vl_8b_agentnetv1_fullsft_gb200/tokenizer_config.json DELETED
@@ -1,240 +0,0 @@
- {
- "add_bos_token": false,
- "add_prefix_space": false,
- "added_tokens_decoder": {
- "151643": {
- "content": "<|endoftext|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": true
- },
- "151644": {
- "content": "<|im_start|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": true
- },
- "151645": {
- "content": "<|im_end|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": true
- },
- "151646": {
- "content": "<|object_ref_start|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": true
- },
- "151647": {
- "content": "<|object_ref_end|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": true
- },
- "151648": {
- "content": "<|box_start|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": true
- },
- "151649": {
- "content": "<|box_end|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": true
- },
- "151650": {
- "content": "<|quad_start|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": true
- },
- "151651": {
- "content": "<|quad_end|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": true
- },
- "151652": {
- "content": "<|vision_start|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": true
- },
- "151653": {
- "content": "<|vision_end|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": true
- },
- "151654": {
- "content": "<|vision_pad|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": true
- },
- "151655": {
- "content": "<|image_pad|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": true
- },
- "151656": {
- "content": "<|video_pad|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": true
- },
- "151657": {
- "content": "<tool_call>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": false
- },
- "151658": {
- "content": "</tool_call>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": false
- },
- "151659": {
- "content": "<|fim_prefix|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": false
- },
- "151660": {
- "content": "<|fim_middle|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": false
- },
- "151661": {
- "content": "<|fim_suffix|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": false
- },
- "151662": {
- "content": "<|fim_pad|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": false
- },
- "151663": {
- "content": "<|repo_name|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": false
- },
- "151664": {
- "content": "<|file_sep|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": false
- },
- "151665": {
- "content": "<tool_response>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": false
- },
- "151666": {
- "content": "</tool_response>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": false
- },
- "151667": {
- "content": "<think>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": false
- },
- "151668": {
- "content": "</think>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": false
- }
- },
- "additional_special_tokens": [
- "<|im_start|>",
- "<|im_end|>",
- "<|object_ref_start|>",
- "<|object_ref_end|>",
- "<|box_start|>",
- "<|box_end|>",
- "<|quad_start|>",
- "<|quad_end|>",
- "<|vision_start|>",
- "<|vision_end|>",
- "<|vision_pad|>",
- "<|image_pad|>",
- "<|video_pad|>"
- ],
- "bos_token": null,
- "clean_up_tokenization_spaces": false,
- "eos_token": "<|im_end|>",
- "errors": "replace",
- "extra_special_tokens": {},
- "model_max_length": 262144,
- "pad_token": "<|endoftext|>",
- "processor_class": "Qwen3VLProcessor",
- "split_special_tokens": false,
- "tokenizer_class": "Qwen2Tokenizer",
- "unk_token": null
- }
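Note: one practical consequence of the `added_tokens_decoder` above is that tokens flagged `"special": true` (the `<|...|>` control tokens) are removed by `skip_special_tokens=True` at decode time, while `<think>`/`</think>` and the tool tags are registered with `"special": false` and therefore survive decoding. A sketch, again assuming a local copy of the deleted files (hypothetical path):

```python
# Sketch: "special": true vs false in added_tokens_decoder, at decode time.
# Assumes the checkpoint directory is available locally (hypothetical path).
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("qwen3vl_8b_agentnetv1_fullsft_gb200")
ids = tok.encode("<think>plan</think><|im_end|>", add_special_tokens=False)

# <|im_end|> (special: true) is dropped; <think> tags (special: false) remain.
print(tok.decode(ids, skip_special_tokens=True))   # -> "<think>plan</think>"
print(tok.decode(ids, skip_special_tokens=False))  # -> "<think>plan</think><|im_end|>"
```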
qwen3vl_8b_agentnetv1_fullsft_gb200/trainer_state.json DELETED
@@ -1,3114 +0,0 @@
- {
- "best_global_step": null,
- "best_metric": null,
- "best_model_checkpoint": null,
- "epoch": 2.0,
- "eval_steps": 200.0,
- "global_step": 2196,
- "is_hyper_param_search": false,
- "is_local_process_zero": true,
- "is_world_process_zero": true,
- "log_history": [
- {
- "epoch": 0.0009110233829334953,
- "grad_norm": 17.559492111206055,
- "learning_rate": 9.090909090909091e-08,
- "loss": 1.2423300743103027,
- "step": 1
- },
- {
- "epoch": 0.004555116914667477,
- "grad_norm": 25.693878173828125,
- "learning_rate": 4.5454545454545457e-07,
- "loss": 1.2533537149429321,
- "step": 5
- },
- {
- "epoch": 0.009110233829334954,
- "grad_norm": 13.45212459564209,
- "learning_rate": 9.090909090909091e-07,
- "loss": 1.1839993476867676,
- "step": 10
- },
- {
- "epoch": 0.013665350744002429,
- "grad_norm": 4.8773298263549805,
- "learning_rate": 1.3636363636363636e-06,
- "loss": 0.942754077911377,
- "step": 15
- },
- {
- "epoch": 0.018220467658669907,
- "grad_norm": 6.213735103607178,
- "learning_rate": 1.8181818181818183e-06,
- "loss": 0.8017440795898437,
- "step": 20
- },
- {
- "epoch": 0.022775584573337383,
- "grad_norm": 4.074641704559326,
- "learning_rate": 2.2727272727272728e-06,
- "loss": 0.7369897842407227,
- "step": 25
- },
- {
- "epoch": 0.027330701488004858,
- "grad_norm": 6.384721755981445,
- "learning_rate": 2.7272727272727272e-06,
- "loss": 0.6929409027099609,
- "step": 30
- },
- {
- "epoch": 0.031885818402672336,
- "grad_norm": 4.765110492706299,
- "learning_rate": 3.181818181818182e-06,
- "loss": 0.6680615425109864,
- "step": 35
- },
- {
- "epoch": 0.036440935317339815,
- "grad_norm": 5.50184440612793,
- "learning_rate": 3.6363636363636366e-06,
- "loss": 0.6583371639251709,
- "step": 40
- },
- {
- "epoch": 0.040996052232007286,
- "grad_norm": 7.0187482833862305,
- "learning_rate": 4.0909090909090915e-06,
- "loss": 0.6485747337341309,
- "step": 45
- },
- {
- "epoch": 0.045551169146674765,
- "grad_norm": 4.4937214851379395,
- "learning_rate": 4.5454545454545455e-06,
- "loss": 0.6382402420043946,
- "step": 50
- },
- {
- "epoch": 0.050106286061342244,
- "grad_norm": 6.880841255187988,
- "learning_rate": 5e-06,
- "loss": 0.6306743144989013,
- "step": 55
- },
- {
- "epoch": 0.054661402976009715,
- "grad_norm": 4.950007915496826,
- "learning_rate": 5.4545454545454545e-06,
- "loss": 0.6264668464660644,
- "step": 60
- },
- {
- "epoch": 0.059216519890677194,
- "grad_norm": 13.052614212036133,
- "learning_rate": 5.90909090909091e-06,
- "loss": 0.6193904876708984,
- "step": 65
- },
- {
- "epoch": 0.06377163680534467,
- "grad_norm": 16.394208908081055,
- "learning_rate": 6.363636363636364e-06,
- "loss": 0.6106473922729492,
- "step": 70
- },
- {
- "epoch": 0.06832675372001215,
- "grad_norm": 5.312663555145264,
- "learning_rate": 6.818181818181818e-06,
- "loss": 0.6170936584472656,
- "step": 75
- },
- {
- "epoch": 0.07288187063467963,
- "grad_norm": 10.03520679473877,
- "learning_rate": 7.272727272727273e-06,
- "loss": 0.5990739345550538,
- "step": 80
- },
- {
- "epoch": 0.0774369875493471,
- "grad_norm": 9.031923294067383,
- "learning_rate": 7.727272727272727e-06,
- "loss": 0.6070697784423829,
- "step": 85
- },
- {
- "epoch": 0.08199210446401457,
- "grad_norm": 7.924659729003906,
- "learning_rate": 8.181818181818183e-06,
- "loss": 0.5944653511047363,
- "step": 90
- },
- {
- "epoch": 0.08654722137868205,
- "grad_norm": 9.117231369018555,
- "learning_rate": 8.636363636363637e-06,
- "loss": 0.5960843086242675,
- "step": 95
- },
- {
- "epoch": 0.09110233829334953,
- "grad_norm": 34.10580825805664,
- "learning_rate": 9.090909090909091e-06,
- "loss": 0.5919606208801269,
- "step": 100
- },
- {
- "epoch": 0.09565745520801701,
- "grad_norm": 9.232901573181152,
- "learning_rate": 9.545454545454547e-06,
- "loss": 0.5938567638397216,
- "step": 105
- },
- {
- "epoch": 0.10021257212268449,
- "grad_norm": 14.370579719543457,
- "learning_rate": 1e-05,
- "loss": 0.5873185157775879,
- "step": 110
- },
- {
- "epoch": 0.10476768903735195,
- "grad_norm": 7.017094612121582,
- "learning_rate": 9.999858241502208e-06,
- "loss": 0.5942224025726318,
- "step": 115
- },
- {
- "epoch": 0.10932280595201943,
- "grad_norm": 5.101506233215332,
- "learning_rate": 9.999432974047014e-06,
- "loss": 0.5906107902526856,
- "step": 120
- },
- {
- "epoch": 0.11387792286668691,
- "grad_norm": 7.324491024017334,
- "learning_rate": 9.99872422174853e-06,
- "loss": 0.591792631149292,
- "step": 125
- },
- {
- "epoch": 0.11843303978135439,
- "grad_norm": 8.754467964172363,
- "learning_rate": 9.99773202479542e-06,
- "loss": 0.5856371879577636,
- "step": 130
- },
- {
- "epoch": 0.12298815669602187,
- "grad_norm": 5.618054389953613,
- "learning_rate": 9.996456439448628e-06,
- "loss": 0.5825910091400146,
- "step": 135
- },
- {
- "epoch": 0.12754327361068934,
- "grad_norm": 5.029508590698242,
- "learning_rate": 9.994897538038176e-06,
- "loss": 0.580083179473877,
- "step": 140
- },
- {
- "epoch": 0.13209839052535682,
- "grad_norm": 5.695907115936279,
- "learning_rate": 9.99305540895907e-06,
- "loss": 0.5816529750823974,
- "step": 145
- },
- {
- "epoch": 0.1366535074400243,
- "grad_norm": 4.026976585388184,
- "learning_rate": 9.990930156666294e-06,
- "loss": 0.5851906299591064,
- "step": 150
- },
- {
- "epoch": 0.14120862435469178,
- "grad_norm": 3.444056987762451,
- "learning_rate": 9.988521901668878e-06,
- "loss": 0.5782979011535645,
- "step": 155
- },
- {
- "epoch": 0.14576374126935926,
- "grad_norm": 6.146154880523682,
- "learning_rate": 9.985830780523062e-06,
- "loss": 0.5769307613372803,
- "step": 160
- },
- {
- "epoch": 0.1503188581840267,
- "grad_norm": 4.238720893859863,
- "learning_rate": 9.982856945824567e-06,
- "loss": 0.5825564861297607,
- "step": 165
- },
- {
- "epoch": 0.1548739750986942,
- "grad_norm": 3.784989595413208,
- "learning_rate": 9.979600566199926e-06,
- "loss": 0.5774458885192871,
- "step": 170
- },
- {
- "epoch": 0.15942909201336167,
- "grad_norm": 5.971307277679443,
- "learning_rate": 9.976061826296933e-06,
- "loss": 0.5697900295257569,
- "step": 175
- },
- {
- "epoch": 0.16398420892802915,
- "grad_norm": 2.7221076488494873,
- "learning_rate": 9.972240926774167e-06,
- "loss": 0.574206018447876,
269
- "step": 180
270
- },
271
- {
272
- "epoch": 0.16853932584269662,
273
- "grad_norm": 3.3567593097686768,
274
- "learning_rate": 9.968138084289625e-06,
275
- "loss": 0.5786737918853759,
276
- "step": 185
277
- },
278
- {
279
- "epoch": 0.1730944427573641,
280
- "grad_norm": 7.935139179229736,
281
- "learning_rate": 9.963753531488416e-06,
282
- "loss": 0.5722726345062256,
283
- "step": 190
284
- },
285
- {
286
- "epoch": 0.17764955967203158,
287
- "grad_norm": 3.7399966716766357,
288
- "learning_rate": 9.959087516989589e-06,
289
- "loss": 0.5803302764892578,
290
- "step": 195
291
- },
292
- {
- "epoch": 0.18220467658669906,
- "grad_norm": 3.1717324256896973,
- "learning_rate": 9.954140305372026e-06,
- "loss": 0.5747191429138183,
- "step": 200
- },
- {
- "epoch": 0.18675979350136654,
- "grad_norm": 4.224859714508057,
- "learning_rate": 9.948912177159445e-06,
- "loss": 0.5714075088500976,
- "step": 205
- },
- {
- "epoch": 0.19131491041603402,
- "grad_norm": 2.968369722366333,
- "learning_rate": 9.943403428804482e-06,
- "loss": 0.5736642837524414,
- "step": 210
- },
- {
- "epoch": 0.1958700273307015,
- "grad_norm": 4.710346698760986,
- "learning_rate": 9.937614372671896e-06,
- "loss": 0.568629264831543,
- "step": 215
- },
- {
- "epoch": 0.20042514424536897,
- "grad_norm": 2.995421886444092,
- "learning_rate": 9.93154533702085e-06,
- "loss": 0.5707715034484864,
- "step": 220
- },
- {
- "epoch": 0.20498026116003645,
- "grad_norm": 2.9990322589874268,
- "learning_rate": 9.925196665986289e-06,
- "loss": 0.5626624584197998,
- "step": 225
- },
- {
- "epoch": 0.2095353780747039,
- "grad_norm": 3.209749221801758,
- "learning_rate": 9.918568719559443e-06,
- "loss": 0.5639097213745117,
- "step": 230
- },
- {
- "epoch": 0.21409049498937138,
- "grad_norm": 3.255554437637329,
- "learning_rate": 9.911661873567407e-06,
- "loss": 0.5606301307678223,
- "step": 235
- },
- {
- "epoch": 0.21864561190403886,
- "grad_norm": 3.2023773193359375,
- "learning_rate": 9.904476519651822e-06,
- "loss": 0.5634839534759521,
- "step": 240
- },
- {
- "epoch": 0.22320072881870634,
- "grad_norm": 4.02494478225708,
- "learning_rate": 9.89701306524668e-06,
- "loss": 0.5576680183410645,
- "step": 245
- },
- {
- "epoch": 0.22775584573337382,
- "grad_norm": 2.883610725402832,
- "learning_rate": 9.889271933555214e-06,
- "loss": 0.5645258903503418,
- "step": 250
- },
- {
- "epoch": 0.2323109626480413,
- "grad_norm": 4.670317649841309,
- "learning_rate": 9.881253563525905e-06,
- "loss": 0.56104736328125,
- "step": 255
- },
- {
- "epoch": 0.23686607956270878,
- "grad_norm": 4.898850917816162,
- "learning_rate": 9.872958409827588e-06,
- "loss": 0.5592937469482422,
- "step": 260
- },
- {
- "epoch": 0.24142119647737625,
- "grad_norm": 3.393101692199707,
- "learning_rate": 9.864386942823678e-06,
- "loss": 0.5690124034881592,
- "step": 265
- },
- {
- "epoch": 0.24597631339204373,
- "grad_norm": 3.7247464656829834,
- "learning_rate": 9.855539648545485e-06,
- "loss": 0.560127067565918,
- "step": 270
- },
- {
- "epoch": 0.2505314303067112,
- "grad_norm": 3.4709503650665283,
- "learning_rate": 9.846417028664668e-06,
- "loss": 0.5557423114776612,
- "step": 275
- },
- {
- "epoch": 0.2550865472213787,
- "grad_norm": 2.8535945415496826,
- "learning_rate": 9.837019600464783e-06,
- "loss": 0.5636860847473144,
- "step": 280
- },
- {
- "epoch": 0.25964166413604617,
- "grad_norm": 3.1316518783569336,
- "learning_rate": 9.827347896811954e-06,
- "loss": 0.5631931304931641,
- "step": 285
- },
- {
- "epoch": 0.26419678105071365,
- "grad_norm": 2.6706693172454834,
- "learning_rate": 9.817402466124652e-06,
- "loss": 0.5706257820129395,
- "step": 290
- },
- {
- "epoch": 0.2687518979653811,
- "grad_norm": 2.1610724925994873,
- "learning_rate": 9.807183872342603e-06,
- "loss": 0.5647665023803711,
- "step": 295
- },
- {
- "epoch": 0.2733070148800486,
- "grad_norm": 3.649000644683838,
- "learning_rate": 9.796692694894809e-06,
- "loss": 0.56468505859375,
- "step": 300
- },
- {
- "epoch": 0.2778621317947161,
- "grad_norm": 4.748224258422852,
- "learning_rate": 9.785929528666691e-06,
- "loss": 0.561229133605957,
- "step": 305
- },
- {
- "epoch": 0.28241724870938356,
- "grad_norm": 3.2732272148132324,
- "learning_rate": 9.77489498396636e-06,
- "loss": 0.5538298606872558,
- "step": 310
- },
- {
- "epoch": 0.28697236562405104,
- "grad_norm": 2.783526659011841,
- "learning_rate": 9.76358968649001e-06,
- "loss": 0.5541868686676026,
- "step": 315
- },
- {
- "epoch": 0.2915274825387185,
- "grad_norm": 3.165250062942505,
- "learning_rate": 9.752014277286433e-06,
- "loss": 0.5500054359436035,
- "step": 320
- },
- {
- "epoch": 0.296082599453386,
- "grad_norm": 3.244180679321289,
- "learning_rate": 9.740169412720677e-06,
- "loss": 0.5560133934020997,
- "step": 325
- },
- {
- "epoch": 0.3006377163680534,
- "grad_norm": 3.5502259731292725,
- "learning_rate": 9.728055764436828e-06,
- "loss": 0.5537430763244628,
- "step": 330
- },
- {
- "epoch": 0.3051928332827209,
- "grad_norm": 2.9355859756469727,
- "learning_rate": 9.715674019319917e-06,
- "loss": 0.550521993637085,
- "step": 335
- },
- {
- "epoch": 0.3097479501973884,
- "grad_norm": 3.7240347862243652,
- "learning_rate": 9.703024879456978e-06,
- "loss": 0.556462287902832,
- "step": 340
- },
- {
- "epoch": 0.31430306711205586,
- "grad_norm": 2.31040096282959,
- "learning_rate": 9.690109062097241e-06,
- "loss": 0.5485388278961182,
- "step": 345
- },
- {
- "epoch": 0.31885818402672333,
- "grad_norm": 2.3651797771453857,
- "learning_rate": 9.67692729961145e-06,
- "loss": 0.5521252632141114,
- "step": 350
- },
- {
- "epoch": 0.3234133009413908,
- "grad_norm": 3.3303847312927246,
- "learning_rate": 9.663480339450344e-06,
- "loss": 0.552528190612793,
- "step": 355
- },
- {
- "epoch": 0.3279684178560583,
- "grad_norm": 2.7311720848083496,
- "learning_rate": 9.649768944102272e-06,
- "loss": 0.5484151363372802,
- "step": 360
- },
- {
- "epoch": 0.33252353477072577,
- "grad_norm": 2.9131832122802734,
- "learning_rate": 9.635793891049959e-06,
- "loss": 0.5590497493743897,
- "step": 365
- },
- {
- "epoch": 0.33707865168539325,
- "grad_norm": 5.8473687171936035,
- "learning_rate": 9.621555972726416e-06,
- "loss": 0.5552995204925537,
- "step": 370
- },
- {
- "epoch": 0.3416337686000607,
- "grad_norm": 1.9673869609832764,
- "learning_rate": 9.607055996470003e-06,
- "loss": 0.5548224449157715,
- "step": 375
- },
- {
- "epoch": 0.3461888855147282,
- "grad_norm": 2.8362224102020264,
- "learning_rate": 9.592294784478668e-06,
- "loss": 0.5551294326782227,
- "step": 380
- },
- {
- "epoch": 0.3507440024293957,
- "grad_norm": 5.5295257568359375,
- "learning_rate": 9.577273173763302e-06,
- "loss": 0.5563198089599609,
- "step": 385
- },
- {
- "epoch": 0.35529911934406316,
- "grad_norm": 1.7222570180892944,
- "learning_rate": 9.561992016100293e-06,
- "loss": 0.5539894580841065,
- "step": 390
- },
- {
- "epoch": 0.35985423625873064,
- "grad_norm": 1.6861451864242554,
- "learning_rate": 9.546452177983223e-06,
- "loss": 0.5516669273376464,
- "step": 395
- },
- {
- "epoch": 0.3644093531733981,
- "grad_norm": 2.1530921459198,
- "learning_rate": 9.530654540573736e-06,
- "loss": 0.5513467788696289,
- "step": 400
- },
- {
- "epoch": 0.3689644700880656,
- "grad_norm": 4.1498517990112305,
- "learning_rate": 9.514599999651572e-06,
- "loss": 0.5544273853302002,
- "step": 405
- },
- {
- "epoch": 0.3735195870027331,
- "grad_norm": 3.0634090900421143,
- "learning_rate": 9.498289465563769e-06,
- "loss": 0.5443705558776856,
- "step": 410
- },
- {
- "epoch": 0.37807470391740056,
- "grad_norm": 2.7513556480407715,
- "learning_rate": 9.481723863173054e-06,
- "loss": 0.5389851093292236,
- "step": 415
- },
- {
- "epoch": 0.38262982083206803,
- "grad_norm": 32.76044845581055,
- "learning_rate": 9.46490413180539e-06,
- "loss": 0.5442268371582031,
- "step": 420
- },
- {
- "epoch": 0.3871849377467355,
- "grad_norm": 3.125256299972534,
- "learning_rate": 9.44783122519672e-06,
- "loss": 0.5439101219177246,
- "step": 425
- },
- {
- "epoch": 0.391740054661403,
- "grad_norm": 2.8991496562957764,
- "learning_rate": 9.430506111438877e-06,
- "loss": 0.5462981224060058,
- "step": 430
- },
- {
- "epoch": 0.39629517157607047,
- "grad_norm": 13.466605186462402,
- "learning_rate": 9.412929772924707e-06,
- "loss": 0.5357834815979003,
- "step": 435
- },
- {
- "epoch": 0.40085028849073795,
- "grad_norm": 15.509262084960938,
- "learning_rate": 9.395103206292342e-06,
- "loss": 0.5422843933105469,
- "step": 440
- },
- {
- "epoch": 0.40540540540540543,
- "grad_norm": 5.389743328094482,
- "learning_rate": 9.377027422368707e-06,
- "loss": 0.5465549468994141,
- "step": 445
- },
- {
- "epoch": 0.4099605223200729,
- "grad_norm": 4.2232842445373535,
- "learning_rate": 9.358703446112194e-06,
- "loss": 0.5402978420257568,
- "step": 450
- },
- {
- "epoch": 0.4145156392347404,
- "grad_norm": 4.203605651855469,
- "learning_rate": 9.34013231655454e-06,
- "loss": 0.5347804069519043,
- "step": 455
- },
- {
- "epoch": 0.4190707561494078,
- "grad_norm": 8.337684631347656,
- "learning_rate": 9.321315086741916e-06,
- "loss": 0.5400158882141113,
- "step": 460
- },
- {
- "epoch": 0.4236258730640753,
- "grad_norm": 3.43294095993042,
- "learning_rate": 9.302252823675217e-06,
- "loss": 0.5414864540100097,
- "step": 465
- },
- {
- "epoch": 0.42818098997874277,
- "grad_norm": 3.3577237129211426,
- "learning_rate": 9.282946608249553e-06,
- "loss": 0.5411586284637451,
- "step": 470
- },
- {
- "epoch": 0.43273610689341024,
- "grad_norm": 2.9969730377197266,
- "learning_rate": 9.263397535192959e-06,
- "loss": 0.543182373046875,
- "step": 475
- },
- {
- "epoch": 0.4372912238080777,
- "grad_norm": 3.317013740539551,
- "learning_rate": 9.24360671300433e-06,
- "loss": 0.54539794921875,
- "step": 480
- },
- {
- "epoch": 0.4418463407227452,
- "grad_norm": 4.378854751586914,
- "learning_rate": 9.223575263890556e-06,
- "loss": 0.5474021434783936,
- "step": 485
- },
- {
- "epoch": 0.4464014576374127,
- "grad_norm": 2.789015769958496,
- "learning_rate": 9.20330432370289e-06,
- "loss": 0.5468668460845947,
- "step": 490
- },
- {
- "epoch": 0.45095657455208016,
- "grad_norm": 3.1256697177886963,
- "learning_rate": 9.182795041872543e-06,
- "loss": 0.5451174736022949,
- "step": 495
- },
- {
- "epoch": 0.45551169146674764,
- "grad_norm": 19.11293601989746,
- "learning_rate": 9.162048581345511e-06,
- "loss": 0.5430747032165527,
- "step": 500
- },
- {
- "epoch": 0.4600668083814151,
- "grad_norm": 2.99991774559021,
- "learning_rate": 9.141066118516625e-06,
- "loss": 0.5492184638977051,
- "step": 505
- },
- {
- "epoch": 0.4646219252960826,
- "grad_norm": 3.0613291263580322,
- "learning_rate": 9.119848843162844e-06,
- "loss": 0.5410381317138672,
- "step": 510
- },
- {
- "epoch": 0.4691770422107501,
- "grad_norm": 3.8927035331726074,
- "learning_rate": 9.098397958375807e-06,
- "loss": 0.5446810722351074,
- "step": 515
- },
- {
- "epoch": 0.47373215912541755,
- "grad_norm": 2.757336139678955,
- "learning_rate": 9.076714680493594e-06,
- "loss": 0.5469783306121826,
- "step": 520
- },
- {
- "epoch": 0.47828727604008503,
- "grad_norm": 2.6528310775756836,
- "learning_rate": 9.054800239031763e-06,
- "loss": 0.5482687473297119,
- "step": 525
- },
- {
- "epoch": 0.4828423929547525,
- "grad_norm": 2.5695135593414307,
- "learning_rate": 9.032655876613636e-06,
- "loss": 0.542782211303711,
- "step": 530
- },
- {
- "epoch": 0.48739750986942,
- "grad_norm": 4.648599624633789,
- "learning_rate": 9.010282848899833e-06,
- "loss": 0.5455782890319825,
- "step": 535
- },
- {
- "epoch": 0.49195262678408747,
- "grad_norm": 3.9627435207366943,
- "learning_rate": 8.987682424517075e-06,
- "loss": 0.5418254375457764,
- "step": 540
- },
- {
- "epoch": 0.49650774369875494,
- "grad_norm": 7.180903434753418,
- "learning_rate": 8.964855884986244e-06,
- "loss": 0.5390671253204345,
- "step": 545
- },
- {
- "epoch": 0.5010628606134224,
- "grad_norm": 2.5528030395507812,
- "learning_rate": 8.941804524649721e-06,
- "loss": 0.546663761138916,
- "step": 550
- },
- {
- "epoch": 0.5056179775280899,
- "grad_norm": 3.286468029022217,
- "learning_rate": 8.918529650597996e-06,
- "loss": 0.5427966117858887,
- "step": 555
- },
- {
- "epoch": 0.5101730944427574,
- "grad_norm": 3.3196663856506348,
- "learning_rate": 8.895032582595537e-06,
- "loss": 0.536742877960205,
- "step": 560
- },
- {
- "epoch": 0.5147282113574249,
- "grad_norm": 2.7130608558654785,
- "learning_rate": 8.871314653005972e-06,
- "loss": 0.5361935138702393,
- "step": 565
- },
- {
- "epoch": 0.5192833282720923,
- "grad_norm": 2.7196788787841797,
- "learning_rate": 8.847377206716527e-06,
- "loss": 0.5388089179992676,
- "step": 570
- },
- {
- "epoch": 0.5238384451867598,
- "grad_norm": 3.419187307357788,
- "learning_rate": 8.823221601061774e-06,
- "loss": 0.5345405578613281,
- "step": 575
- },
- {
- "epoch": 0.5283935621014273,
- "grad_norm": 2.363970994949341,
- "learning_rate": 8.79884920574666e-06,
- "loss": 0.536346435546875,
- "step": 580
- },
- {
- "epoch": 0.5329486790160948,
- "grad_norm": 3.2918245792388916,
- "learning_rate": 8.774261402768844e-06,
- "loss": 0.5398552894592286,
- "step": 585
- },
- {
- "epoch": 0.5375037959307623,
- "grad_norm": 5.764172077178955,
- "learning_rate": 8.749459586340334e-06,
- "loss": 0.5393971920013427,
- "step": 590
- },
- {
- "epoch": 0.5420589128454297,
- "grad_norm": 2.588685989379883,
- "learning_rate": 8.724445162808423e-06,
- "loss": 0.5340259075164795,
- "step": 595
- },
- {
- "epoch": 0.5466140297600972,
- "grad_norm": 3.468891143798828,
- "learning_rate": 8.699219550575954e-06,
- "loss": 0.5304669857025146,
- "step": 600
- },
- {
- "epoch": 0.5511691466747647,
- "grad_norm": 4.80936336517334,
- "learning_rate": 8.673784180020884e-06,
- "loss": 0.529237174987793,
- "step": 605
- },
- {
- "epoch": 0.5557242635894322,
- "grad_norm": 4.984926223754883,
- "learning_rate": 8.648140493415183e-06,
- "loss": 0.5376770496368408,
- "step": 610
- },
- {
- "epoch": 0.5602793805040996,
- "grad_norm": 2.2710659503936768,
- "learning_rate": 8.622289944843047e-06,
- "loss": 0.5376224994659424,
- "step": 615
- },
- {
- "epoch": 0.5648344974187671,
- "grad_norm": 2.312422513961792,
- "learning_rate": 8.596234000118446e-06,
- "loss": 0.532253646850586,
- "step": 620
- },
- {
- "epoch": 0.5693896143334346,
- "grad_norm": 2.549513816833496,
- "learning_rate": 8.569974136702019e-06,
- "loss": 0.5293230533599853,
- "step": 625
- },
- {
- "epoch": 0.5739447312481021,
- "grad_norm": 2.343762159347534,
- "learning_rate": 8.543511843617277e-06,
- "loss": 0.5317315101623535,
- "step": 630
- },
- {
- "epoch": 0.5784998481627696,
- "grad_norm": 2.1737287044525146,
- "learning_rate": 8.516848621366188e-06,
- "loss": 0.5308964729309082,
- "step": 635
- },
- {
- "epoch": 0.583054965077437,
- "grad_norm": 2.0589590072631836,
- "learning_rate": 8.489985981844086e-06,
- "loss": 0.5311801910400391,
- "step": 640
- },
- {
- "epoch": 0.5876100819921045,
- "grad_norm": 4.078348159790039,
- "learning_rate": 8.46292544825394e-06,
- "loss": 0.5306229591369629,
- "step": 645
- },
- {
- "epoch": 0.592165198906772,
- "grad_norm": 2.8249433040618896,
- "learning_rate": 8.435668555019987e-06,
- "loss": 0.5330135345458984,
- "step": 650
- },
- {
- "epoch": 0.5967203158214394,
- "grad_norm": 3.677963972091675,
- "learning_rate": 8.408216847700725e-06,
- "loss": 0.5320111274719238,
- "step": 655
- },
- {
- "epoch": 0.6012754327361068,
- "grad_norm": 2.4316532611846924,
- "learning_rate": 8.380571882901265e-06,
- "loss": 0.5290947914123535,
- "step": 660
- },
- {
- "epoch": 0.6058305496507743,
- "grad_norm": 3.219510555267334,
- "learning_rate": 8.352735228185085e-06,
- "loss": 0.529725456237793,
- "step": 665
- },
- {
- "epoch": 0.6103856665654418,
- "grad_norm": 3.452469825744629,
- "learning_rate": 8.324708461985124e-06,
- "loss": 0.5379016399383545,
- "step": 670
- },
- {
- "epoch": 0.6149407834801093,
- "grad_norm": 2.2365944385528564,
- "learning_rate": 8.296493173514294e-06,
- "loss": 0.5272422790527344,
- "step": 675
- },
- {
- "epoch": 0.6194959003947768,
- "grad_norm": 4.5478339195251465,
- "learning_rate": 8.268090962675358e-06,
- "loss": 0.5276835441589356,
- "step": 680
- },
- {
- "epoch": 0.6240510173094442,
- "grad_norm": 2.9637320041656494,
- "learning_rate": 8.239503439970212e-06,
- "loss": 0.5249893188476562,
- "step": 685
- },
- {
- "epoch": 0.6286061342241117,
- "grad_norm": 3.1997034549713135,
- "learning_rate": 8.210732226408566e-06,
- "loss": 0.5261757850646973,
- "step": 690
- },
- {
- "epoch": 0.6331612511387792,
- "grad_norm": 4.2955756187438965,
- "learning_rate": 8.181778953416025e-06,
- "loss": 0.525751781463623,
- "step": 695
- },
- {
- "epoch": 0.6377163680534467,
- "grad_norm": 2.975689172744751,
- "learning_rate": 8.152645262741586e-06,
- "loss": 0.5300833702087402,
- "step": 700
- },
- {
- "epoch": 0.6422714849681141,
- "grad_norm": 2.491178512573242,
- "learning_rate": 8.123332806364537e-06,
- "loss": 0.5283273696899414,
- "step": 705
- },
- {
- "epoch": 0.6468266018827816,
- "grad_norm": 5.0057549476623535,
- "learning_rate": 8.09384324640079e-06,
- "loss": 0.5246441841125489,
- "step": 710
- },
- {
- "epoch": 0.6513817187974491,
- "grad_norm": 3.7425742149353027,
- "learning_rate": 8.064178255008637e-06,
- "loss": 0.5283780097961426,
- "step": 715
- },
- {
- "epoch": 0.6559368357121166,
- "grad_norm": 5.3434906005859375,
- "learning_rate": 8.03433951429392e-06,
- "loss": 0.5267109394073486,
- "step": 720
- },
- {
- "epoch": 0.6604919526267841,
- "grad_norm": 5.699710369110107,
- "learning_rate": 8.004328716214667e-06,
- "loss": 0.5268173217773438,
- "step": 725
- },
- {
- "epoch": 0.6650470695414515,
- "grad_norm": 3.340369701385498,
- "learning_rate": 7.97414756248514e-06,
- "loss": 0.5249145030975342,
- "step": 730
- },
- {
- "epoch": 0.669602186456119,
- "grad_norm": 2.61981201171875,
- "learning_rate": 7.943797764479338e-06,
- "loss": 0.5252110481262207,
- "step": 735
- },
- {
- "epoch": 0.6741573033707865,
- "grad_norm": 1.742195963859558,
- "learning_rate": 7.913281043133978e-06,
- "loss": 0.5280135154724122,
- "step": 740
- },
- {
- "epoch": 0.678712420285454,
- "grad_norm": 2.602257251739502,
- "learning_rate": 7.882599128850888e-06,
- "loss": 0.525156307220459,
- "step": 745
- },
- {
- "epoch": 0.6832675372001215,
- "grad_norm": 2.2966225147247314,
- "learning_rate": 7.851753761398895e-06,
- "loss": 0.5265485286712647,
- "step": 750
- },
- {
- "epoch": 0.6878226541147889,
- "grad_norm": 2.6013224124908447,
- "learning_rate": 7.820746689815188e-06,
- "loss": 0.5309466361999512,
- "step": 755
- },
- {
- "epoch": 0.6923777710294564,
- "grad_norm": 6.142124176025391,
- "learning_rate": 7.789579672306117e-06,
- "loss": 0.5194272041320801,
- "step": 760
- },
- {
- "epoch": 0.6969328879441239,
- "grad_norm": 2.3743906021118164,
- "learning_rate": 7.758254476147515e-06,
- "loss": 0.528320026397705,
- "step": 765
- },
- {
- "epoch": 0.7014880048587914,
- "grad_norm": 1.3435266017913818,
- "learning_rate": 7.726772877584486e-06,
- "loss": 0.5290194988250733,
- "step": 770
- },
- {
- "epoch": 0.7060431217734588,
- "grad_norm": 1.8624411821365356,
- "learning_rate": 7.695136661730677e-06,
- "loss": 0.5187951564788819,
- "step": 775
- },
- {
- "epoch": 0.7105982386881263,
- "grad_norm": 1.8843101263046265,
- "learning_rate": 7.66334762246706e-06,
- "loss": 0.5258677005767822,
- "step": 780
- },
- {
- "epoch": 0.7151533556027938,
- "grad_norm": 2.0883312225341797,
- "learning_rate": 7.631407562340215e-06,
- "loss": 0.5251790046691894,
- "step": 785
- },
- {
- "epoch": 0.7197084725174613,
- "grad_norm": 2.7113304138183594,
- "learning_rate": 7.599318292460125e-06,
- "loss": 0.525829267501831,
- "step": 790
- },
- {
- "epoch": 0.7242635894321288,
- "grad_norm": 2.360489845275879,
- "learning_rate": 7.567081632397462e-06,
- "loss": 0.517587947845459,
- "step": 795
- },
- {
- "epoch": 0.7288187063467962,
- "grad_norm": 1.8589805364608765,
- "learning_rate": 7.534699410080429e-06,
- "loss": 0.5235529899597168,
- "step": 800
- },
- {
- "epoch": 0.7333738232614637,
- "grad_norm": 2.6437175273895264,
- "learning_rate": 7.502173461691104e-06,
- "loss": 0.5219250202178956,
- "step": 805
- },
- {
- "epoch": 0.7379289401761312,
- "grad_norm": 14.303297996520996,
- "learning_rate": 7.469505631561318e-06,
- "loss": 0.5251981258392334,
- "step": 810
- },
- {
- "epoch": 0.7424840570907987,
- "grad_norm": 3.5786635875701904,
- "learning_rate": 7.436697772068083e-06,
- "loss": 0.522415018081665,
- "step": 815
- },
- {
- "epoch": 0.7470391740054662,
- "grad_norm": 3.2803893089294434,
- "learning_rate": 7.40375174352855e-06,
- "loss": 0.519214677810669,
- "step": 820
- },
- {
- "epoch": 0.7515942909201336,
- "grad_norm": 3.6064188480377197,
- "learning_rate": 7.370669414094522e-06,
- "loss": 0.5226556777954101,
- "step": 825
- },
- {
- "epoch": 0.7561494078348011,
- "grad_norm": 3.3592140674591064,
- "learning_rate": 7.3374526596465334e-06,
- "loss": 0.5165884494781494,
- "step": 830
- },
- {
- "epoch": 0.7607045247494686,
- "grad_norm": 5.043455600738525,
- "learning_rate": 7.3041033636874635e-06,
- "loss": 0.514227819442749,
- "step": 835
- },
- {
- "epoch": 0.7652596416641361,
- "grad_norm": 4.375437259674072,
- "learning_rate": 7.2706234172357556e-06,
- "loss": 0.5168005943298339,
- "step": 840
- },
- {
- "epoch": 0.7698147585788035,
- "grad_norm": 5.3485541343688965,
- "learning_rate": 7.2370147187181736e-06,
- "loss": 0.517771053314209,
- "step": 845
- },
- {
- "epoch": 0.774369875493471,
- "grad_norm": 3.3749423027038574,
- "learning_rate": 7.203279173862164e-06,
- "loss": 0.517666244506836,
- "step": 850
- },
- {
- "epoch": 0.7789249924081385,
- "grad_norm": 5.747758388519287,
- "learning_rate": 7.169418695587791e-06,
- "loss": 0.5237475395202636,
- "step": 855
- },
- {
- "epoch": 0.783480109322806,
- "grad_norm": 2.7103781700134277,
- "learning_rate": 7.135435203899269e-06,
- "loss": 0.5126797676086425,
- "step": 860
- },
- {
- "epoch": 0.7880352262374735,
- "grad_norm": 2.7993369102478027,
- "learning_rate": 7.10133062577609e-06,
- "loss": 0.5152403354644776,
- "step": 865
- },
- {
- "epoch": 0.7925903431521409,
- "grad_norm": 2.4125592708587646,
- "learning_rate": 7.067106895063761e-06,
- "loss": 0.5169252395629883,
- "step": 870
- },
- {
- "epoch": 0.7971454600668084,
- "grad_norm": 2.7944040298461914,
- "learning_rate": 7.032765952364142e-06,
- "loss": 0.5117922782897949,
- "step": 875
- },
- {
- "epoch": 0.8017005769814759,
- "grad_norm": 5.195308685302734,
- "learning_rate": 6.998309744925411e-06,
- "loss": 0.516599178314209,
- "step": 880
- },
- {
- "epoch": 0.8062556938961434,
- "grad_norm": 3.4537782669067383,
- "learning_rate": 6.9637402265316546e-06,
- "loss": 0.5194319725036621,
- "step": 885
- },
- {
- "epoch": 0.8108108108108109,
- "grad_norm": 3.8817193508148193,
- "learning_rate": 6.9290593573920705e-06,
- "loss": 0.5188677787780762,
- "step": 890
- },
- {
- "epoch": 0.8153659277254783,
- "grad_norm": 4.101870059967041,
- "learning_rate": 6.894269104029821e-06,
- "loss": 0.5178657531738281,
- "step": 895
- },
- {
- "epoch": 0.8199210446401458,
- "grad_norm": 2.8397769927978516,
- "learning_rate": 6.85937143917053e-06,
- "loss": 0.5115979194641114,
- "step": 900
- },
- {
- "epoch": 0.8244761615548133,
- "grad_norm": 5.299158573150635,
- "learning_rate": 6.824368341630417e-06,
- "loss": 0.5131528854370118,
- "step": 905
- },
- {
- "epoch": 0.8290312784694808,
- "grad_norm": 5.552743434906006,
- "learning_rate": 6.7892617962040895e-06,
- "loss": 0.5187356472015381,
- "step": 910
- },
- {
- "epoch": 0.8335863953841481,
- "grad_norm": 3.70224666595459,
- "learning_rate": 6.754053793552005e-06,
- "loss": 0.5094799995422363,
- "step": 915
- },
- {
- "epoch": 0.8381415122988156,
- "grad_norm": 6.331211566925049,
- "learning_rate": 6.718746330087592e-06,
- "loss": 0.5205501556396485,
- "step": 920
- },
- {
- "epoch": 0.8426966292134831,
- "grad_norm": 5.271122455596924,
- "learning_rate": 6.683341407864041e-06,
- "loss": 0.513223934173584,
- "step": 925
- },
- {
- "epoch": 0.8472517461281506,
- "grad_norm": 5.599212646484375,
- "learning_rate": 6.64784103446079e-06,
- "loss": 0.5125411987304688,
- "step": 930
- },
- {
- "epoch": 0.851806863042818,
- "grad_norm": 4.971273422241211,
- "learning_rate": 6.61224722286968e-06,
- "loss": 0.512675142288208,
- "step": 935
- },
- {
- "epoch": 0.8563619799574855,
- "grad_norm": 19.01078987121582,
- "learning_rate": 6.576561991380813e-06,
- "loss": 0.5141432762145997,
- "step": 940
- },
- {
- "epoch": 0.860917096872153,
- "grad_norm": 3.6665432453155518,
- "learning_rate": 6.540787363468117e-06,
- "loss": 0.5151987075805664,
- "step": 945
- },
- {
- "epoch": 0.8654722137868205,
- "grad_norm": 11.245736122131348,
- "learning_rate": 6.504925367674595e-06,
- "loss": 0.5133233070373535,
- "step": 950
- },
- {
- "epoch": 0.870027330701488,
- "grad_norm": 11.291261672973633,
- "learning_rate": 6.468978037497306e-06,
- "loss": 0.5141026496887207,
- "step": 955
- },
- {
- "epoch": 0.8745824476161554,
- "grad_norm": 4.820343971252441,
- "learning_rate": 6.432947411272061e-06,
- "loss": 0.5152844429016114,
- "step": 960
- },
- {
- "epoch": 0.8791375645308229,
- "grad_norm": 5.331433296203613,
- "learning_rate": 6.396835532057842e-06,
- "loss": 0.5164792537689209,
- "step": 965
- },
- {
- "epoch": 0.8836926814454904,
- "grad_norm": 4.908500671386719,
- "learning_rate": 6.360644447520946e-06,
- "loss": 0.5097151756286621,
- "step": 970
- },
- {
- "epoch": 0.8882477983601579,
- "grad_norm": 4.368012428283691,
- "learning_rate": 6.3243762098188856e-06,
- "loss": 0.513163948059082,
- "step": 975
- },
- {
- "epoch": 0.8928029152748254,
- "grad_norm": 4.894745826721191,
- "learning_rate": 6.288032875484017e-06,
- "loss": 0.511021900177002,
- "step": 980
- },
- {
- "epoch": 0.8973580321894928,
- "grad_norm": 4.908347129821777,
- "learning_rate": 6.251616505306933e-06,
- "loss": 0.5213098049163818,
- "step": 985
- },
- {
- "epoch": 0.9019131491041603,
- "grad_norm": 7.74894905090332,
- "learning_rate": 6.215129164219609e-06,
- "loss": 0.5086194515228272,
- "step": 990
- },
- {
- "epoch": 0.9064682660188278,
- "grad_norm": 3.935709238052368,
- "learning_rate": 6.178572921178303e-06,
- "loss": 0.5145331859588623,
- "step": 995
- },
- {
- "epoch": 0.9110233829334953,
- "grad_norm": 3.560208320617676,
- "learning_rate": 6.141949849046259e-06,
- "loss": 0.5098010540008545,
- "step": 1000
- },
- {
- "epoch": 0.9155784998481628,
- "grad_norm": 4.882101058959961,
- "learning_rate": 6.1052620244761554e-06,
- "loss": 0.504856538772583,
- "step": 1005
- },
- {
- "epoch": 0.9201336167628302,
- "grad_norm": 3.6763391494750977,
- "learning_rate": 6.068511527792345e-06,
- "loss": 0.507168960571289,
- "step": 1010
- },
- {
- "epoch": 0.9246887336774977,
- "grad_norm": 4.296417713165283,
- "learning_rate": 6.031700442872913e-06,
- "loss": 0.5144553184509277,
- "step": 1015
- },
- {
- "epoch": 0.9292438505921652,
- "grad_norm": 4.9462995529174805,
- "learning_rate": 5.9948308570315e-06,
- "loss": 0.5141644954681397,
- "step": 1020
- },
- {
- "epoch": 0.9337989675068327,
- "grad_norm": 5.650740623474121,
- "learning_rate": 5.957904860898945e-06,
- "loss": 0.5157180309295655,
- "step": 1025
- },
- {
- "epoch": 0.9383540844215001,
- "grad_norm": 2.4969570636749268,
- "learning_rate": 5.920924548304745e-06,
- "loss": 0.5117298126220703,
- "step": 1030
- },
- {
- "epoch": 0.9429092013361676,
- "grad_norm": 4.6982035636901855,
- "learning_rate": 5.883892016158327e-06,
- "loss": 0.5127541542053222,
- "step": 1035
- },
- {
- "epoch": 0.9474643182508351,
- "grad_norm": 2.9895901679992676,
- "learning_rate": 5.84680936433014e-06,
- "loss": 0.5123614311218262,
- "step": 1040
- },
- {
- "epoch": 0.9520194351655026,
- "grad_norm": 6.107245445251465,
- "learning_rate": 5.809678695532589e-06,
- "loss": 0.5081177711486816,
- "step": 1045
- },
- {
- "epoch": 0.9565745520801701,
- "grad_norm": 2.194211006164551,
- "learning_rate": 5.77250211520081e-06,
- "loss": 0.5117453575134278,
- "step": 1050
- },
- {
- "epoch": 0.9611296689948375,
- "grad_norm": 2.4194414615631104,
- "learning_rate": 5.735281731373271e-06,
- "loss": 0.5113804340362549,
- "step": 1055
- },
- {
- "epoch": 0.965684785909505,
- "grad_norm": 3.714282989501953,
- "learning_rate": 5.698019654572257e-06,
- "loss": 0.5118466377258301,
- "step": 1060
- },
- {
- "epoch": 0.9702399028241725,
- "grad_norm": 2.9563801288604736,
- "learning_rate": 5.660717997684176e-06,
- "loss": 0.505659818649292,
- "step": 1065
- },
- {
- "epoch": 0.97479501973884,
- "grad_norm": 4.059390544891357,
- "learning_rate": 5.623378875839769e-06,
- "loss": 0.5077022075653076,
- "step": 1070
- },
- {
- "epoch": 0.9793501366535075,
- "grad_norm": 8.951316833496094,
- "learning_rate": 5.586004406294163e-06,
- "loss": 0.5117686271667481,
- "step": 1075
- },
- {
- "epoch": 0.9839052535681749,
- "grad_norm": 4.351177215576172,
- "learning_rate": 5.5485967083068225e-06,
- "loss": 0.5095709800720215,
- "step": 1080
- },
- {
- "epoch": 0.9884603704828424,
- "grad_norm": 3.330329179763794,
- "learning_rate": 5.511157903021376e-06,
- "loss": 0.5055630207061768,
- "step": 1085
- },
- {
- "epoch": 0.9930154873975099,
- "grad_norm": 163.6642303466797,
- "learning_rate": 5.473690113345343e-06,
- "loss": 0.5033265113830566,
- "step": 1090
- },
- {
- "epoch": 0.9975706043121774,
- "grad_norm": 2.309828996658325,
- "learning_rate": 5.436195463829753e-06,
- "loss": 0.5086031913757324,
- "step": 1095
- },
- {
- "epoch": 1.001822046765867,
- "grad_norm": 2.9510703086853027,
- "learning_rate": 5.398676080548686e-06,
- "loss": 0.4903547286987305,
- "step": 1100
- },
- {
- "epoch": 1.0063771636805345,
- "grad_norm": 3.1362714767456055,
- "learning_rate": 5.361134090978706e-06,
- "loss": 0.4678180694580078,
- "step": 1105
- },
- {
- "epoch": 1.010932280595202,
- "grad_norm": 7.239353656768799,
- "learning_rate": 5.323571623878228e-06,
- "loss": 0.46486196517944334,
- "step": 1110
- },
- {
- "epoch": 1.0154873975098695,
- "grad_norm": 7.031823635101318,
- "learning_rate": 5.285990809166819e-06,
- "loss": 0.4655783653259277,
- "step": 1115
- },
- {
- "epoch": 1.0200425144245369,
- "grad_norm": 2.0763893127441406,
- "learning_rate": 5.248393777804414e-06,
- "loss": 0.4618667125701904,
- "step": 1120
- },
- {
- "epoch": 1.0245976313392045,
- "grad_norm": 2.655989646911621,
- "learning_rate": 5.210782661670486e-06,
- "loss": 0.46256508827209475,
- "step": 1125
- },
- {
- "epoch": 1.0291527482538718,
- "grad_norm": 3.223036527633667,
- "learning_rate": 5.173159593443169e-06,
- "loss": 0.46468095779418944,
- "step": 1130
- },
- {
- "epoch": 1.0337078651685394,
- "grad_norm": 1.3973139524459839,
- "learning_rate": 5.135526706478312e-06,
- "loss": 0.4647266387939453,
- "step": 1135
- },
- {
- "epoch": 1.0382629820832068,
- "grad_norm": 1.9962592124938965,
- "learning_rate": 5.0978861346885275e-06,
- "loss": 0.46361312866210935,
- "step": 1140
- },
- {
- "epoch": 1.0428180989978744,
- "grad_norm": 1.7671151161193848,
- "learning_rate": 5.060240012422179e-06,
- "loss": 0.46665177345275877,
- "step": 1145
- },
- {
- "epoch": 1.0473732159125417,
- "grad_norm": 2.207895517349243,
- "learning_rate": 5.022590474342364e-06,
- "loss": 0.4580831527709961,
- "step": 1150
- },
- {
- "epoch": 1.0519283328272093,
- "grad_norm": 2.9564528465270996,
- "learning_rate": 4.984939655305865e-06,
- "loss": 0.46053476333618165,
- "step": 1155
- },
- {
- "epoch": 1.0564834497418767,
- "grad_norm": 1.5557438135147095,
- "learning_rate": 4.947289690242103e-06,
- "loss": 0.4670742988586426,
- "step": 1160
- },
- {
- "epoch": 1.0610385666565443,
- "grad_norm": 1.9870871305465698,
- "learning_rate": 4.909642714032074e-06,
- "loss": 0.45641288757324217,
- "step": 1165
- },
- {
- "epoch": 1.0655936835712116,
- "grad_norm": 1.6024967432022095,
- "learning_rate": 4.872000861387291e-06,
- "loss": 0.46791677474975585,
- "step": 1170
- },
- {
- "epoch": 1.0701488004858792,
- "grad_norm": 1.6948623657226562,
- "learning_rate": 4.834366266728753e-06,
- "loss": 0.4647111415863037,
- "step": 1175
- },
- {
- "epoch": 1.0747039174005466,
- "grad_norm": 1.7235485315322876,
- "learning_rate": 4.796741064065902e-06,
- "loss": 0.46364355087280273,
- "step": 1180
- },
- {
- "epoch": 1.0792590343152142,
- "grad_norm": 3.3120858669281006,
- "learning_rate": 4.759127386875618e-06,
- "loss": 0.45574679374694826,
- "step": 1185
- },
- {
- "epoch": 1.0838141512298816,
- "grad_norm": 1.5388199090957642,
- "learning_rate": 4.721527367981251e-06,
- "loss": 0.46392068862915037,
- "step": 1190
- },
- {
- "epoch": 1.088369268144549,
- "grad_norm": 1.50483238697052,
- "learning_rate": 4.683943139431683e-06,
- "loss": 0.4626826286315918,
- "step": 1195
- },
- {
- "epoch": 1.0929243850592165,
- "grad_norm": 1.2712243795394897,
- "learning_rate": 4.646376832380423e-06,
- "loss": 0.4646759986877441,
- "step": 1200
- },
- {
- "epoch": 1.097479501973884,
- "grad_norm": 1.4449741840362549,
- "learning_rate": 4.608830576964772e-06,
- "loss": 0.46281089782714846,
- "step": 1205
- },
- {
- "epoch": 1.1020346188885515,
- "grad_norm": 1.2682076692581177,
- "learning_rate": 4.571306502185043e-06,
- "loss": 0.4645559787750244,
- "step": 1210
- },
- {
- "epoch": 1.1065897358032188,
- "grad_norm": 1.3985633850097656,
- "learning_rate": 4.533806735783815e-06,
- "loss": 0.46173458099365233,
- "step": 1215
- },
- {
- "epoch": 1.1111448527178864,
- "grad_norm": 2.7544267177581787,
- "learning_rate": 4.496333404125314e-06,
- "loss": 0.4600971221923828,
- "step": 1220
- },
- {
- "epoch": 1.1156999696325538,
- "grad_norm": 1.4135196208953857,
- "learning_rate": 4.4588886320748235e-06,
- "loss": 0.45669097900390626,
- "step": 1225
- },
- {
- "epoch": 1.1202550865472214,
- "grad_norm": 1.51726233959198,
- "learning_rate": 4.4214745428781946e-06,
- "loss": 0.46077938079833985,
- "step": 1230
- },
- {
- "epoch": 1.1248102034618888,
- "grad_norm": 1.4505730867385864,
- "learning_rate": 4.384093258041459e-06,
- "loss": 0.46395068168640136,
- "step": 1235
- },
- {
- "epoch": 1.1293653203765563,
- "grad_norm": 1.9110316038131714,
- "learning_rate": 4.346746897210534e-06,
- "loss": 0.45387964248657225,
- "step": 1240
- },
- {
- "epoch": 1.1339204372912237,
- "grad_norm": 1.3564186096191406,
- "learning_rate": 4.309437578051021e-06,
- "loss": 0.4566159248352051,
- "step": 1245
- },
- {
- "epoch": 1.1384755542058913,
- "grad_norm": 1.4822899103164673,
- "learning_rate": 4.272167416128134e-06,
- "loss": 0.4583602428436279,
- "step": 1250
- },
- {
- "epoch": 1.1430306711205587,
- "grad_norm": 1.3349090814590454,
- "learning_rate": 4.234938524786744e-06,
- "loss": 0.45874862670898436,
- "step": 1255
- },
- {
- "epoch": 1.1475857880352263,
- "grad_norm": 2.713782787322998,
- "learning_rate": 4.19775301503153e-06,
- "loss": 0.46090211868286135,
- "step": 1260
- },
- {
- "epoch": 1.1521409049498936,
- "grad_norm": 1.5619561672210693,
- "learning_rate": 4.160612995407296e-06,
- "loss": 0.4585545539855957,
- "step": 1265
- },
- {
- "epoch": 1.1566960218645612,
- "grad_norm": 1.8272922039031982,
- "learning_rate": 4.1235205718793995e-06,
- "loss": 0.45654687881469724,
- "step": 1270
- },
- {
- "epoch": 1.1612511387792286,
- "grad_norm": 2.169624090194702,
- "learning_rate": 4.086477847714331e-06,
- "loss": 0.4591087341308594,
- "step": 1275
- },
- {
- "epoch": 1.1658062556938962,
- "grad_norm": 2.21856689453125,
- "learning_rate": 4.0494869233604685e-06,
- "loss": 0.4587860107421875,
- "step": 1280
- },
- {
- "epoch": 1.1703613726085635,
- "grad_norm": 1.5166852474212646,
- "learning_rate": 4.012549896328957e-06,
- "loss": 0.4574915885925293,
- "step": 1285
- },
- {
- "epoch": 1.1749164895232311,
- "grad_norm": 1.3856573104858398,
- "learning_rate": 3.975668861074783e-06,
- "loss": 0.4564501762390137,
- "step": 1290
- },
- {
- "epoch": 1.1794716064378985,
- "grad_norm": 1.2893812656402588,
- "learning_rate": 3.938845908878006e-06,
- "loss": 0.45852975845336913,
- "step": 1295
- },
- {
- "epoch": 1.184026723352566,
- "grad_norm": 1.315671682357788,
- "learning_rate": 3.902083127725186e-06,
- "loss": 0.45477943420410155,
- "step": 1300
- },
- {
- "epoch": 1.1885818402672335,
- "grad_norm": 1.489163875579834,
- "learning_rate": 3.865382602190972e-06,
- "loss": 0.45960540771484376,
- "step": 1305
- },
- {
- "epoch": 1.193136957181901,
- "grad_norm": 1.664601445198059,
- "learning_rate": 3.828746413319911e-06,
- "loss": 0.4604182720184326,
- "step": 1310
- },
- {
- "epoch": 1.1976920740965684,
- "grad_norm": 2.8415708541870117,
- "learning_rate": 3.7921766385084447e-06,
- "loss": 0.4568796634674072,
- "step": 1315
- },
- {
- "epoch": 1.202247191011236,
- "grad_norm": 1.8271973133087158,
- "learning_rate": 3.755675351387107e-06,
- "loss": 0.4560407638549805,
- "step": 1320
- },
- {
- "epoch": 1.2068023079259034,
- "grad_norm": 1.6760120391845703,
- "learning_rate": 3.719244621702952e-06,
- "loss": 0.4565584182739258,
- "step": 1325
- },
- {
- "epoch": 1.211357424840571,
- "grad_norm": 1.4044896364212036,
- "learning_rate": 3.6828865152021857e-06,
- "loss": 0.4525318145751953,
- "step": 1330
- },
- {
- "epoch": 1.2159125417552383,
- "grad_norm": 1.3660306930541992,
- "learning_rate": 3.64660309351303e-06,
- "loss": 0.45788965225219724,
- "step": 1335
- },
- {
- "epoch": 1.220467658669906,
- "grad_norm": 1.7708740234375,
- "learning_rate": 3.610396414028827e-06,
- "loss": 0.45180473327636717,
- "step": 1340
- },
- {
- "epoch": 1.2250227755845733,
- "grad_norm": 1.551252007484436,
- "learning_rate": 3.5742685297913773e-06,
- "loss": 0.4559042930603027,
- "step": 1345
- },
- {
- "epoch": 1.2295778924992409,
- "grad_norm": 1.6479007005691528,
- "learning_rate": 3.538221489374516e-06,
- "loss": 0.4566207408905029,
- "step": 1350
- },
- {
- "epoch": 1.2341330094139082,
- "grad_norm": 1.4534903764724731,
- "learning_rate": 3.5022573367679626e-06,
- "loss": 0.45155649185180663,
- "step": 1355
- },
- {
- "epoch": 1.2386881263285758,
- "grad_norm": 1.7879937887191772,
- "learning_rate": 3.4663781112614192e-06,
- "loss": 0.44574294090270994,
- "step": 1360
- },
- {
- "epoch": 1.2432432432432432,
- "grad_norm": 1.3173577785491943,
- "learning_rate": 3.43058584732893e-06,
- "loss": 0.45929703712463377,
- "step": 1365
- },
- {
- "epoch": 1.2477983601579108,
- "grad_norm": 1.5794878005981445,
- "learning_rate": 3.3948825745135196e-06,
- "loss": 0.456890869140625,
- "step": 1370
- },
- {
- "epoch": 1.2523534770725782,
- "grad_norm": 1.5050439834594727,
- "learning_rate": 3.3592703173121155e-06,
- "loss": 0.45540714263916016,
- "step": 1375
- },
- {
- "epoch": 1.2569085939872457,
- "grad_norm": 2.2978973388671875,
- "learning_rate": 3.323751095060756e-06,
- "loss": 0.45746870040893556,
- "step": 1380
- },
- {
- "epoch": 1.2614637109019131,
- "grad_norm": 1.3030719757080078,
- "learning_rate": 3.288326921820072e-06,
- "loss": 0.4567601203918457,
- "step": 1385
- },
- {
- "epoch": 1.2660188278165807,
- "grad_norm": 1.585959553718567,
- "learning_rate": 3.2529998062611e-06,
- "loss": 0.45557346343994143,
- "step": 1390
- },
- {
- "epoch": 1.270573944731248,
- "grad_norm": 1.2981617450714111,
- "learning_rate": 3.2177717515513723e-06,
- "loss": 0.4497828960418701,
- "step": 1395
- },
- {
- "epoch": 1.2751290616459157,
- "grad_norm": 2.575040102005005,
- "learning_rate": 3.1826447552413337e-06,
- "loss": 0.46262612342834475,
- "step": 1400
- },
- {
- "epoch": 1.279684178560583,
- "grad_norm": 1.5246721506118774,
- "learning_rate": 3.147620809151078e-06,
- "loss": 0.45871772766113283,
- "step": 1405
- },
- {
- "epoch": 1.2842392954752504,
- "grad_norm": 1.6146317720413208,
- "learning_rate": 3.1127018992573994e-06,
- "loss": 0.4530343055725098,
- "step": 1410
- },
- {
- "epoch": 1.288794412389918,
- "grad_norm": 1.3936330080032349,
- "learning_rate": 3.07789000558118e-06,
- "loss": 0.44968690872192385,
- "step": 1415
- },
- {
- "epoch": 1.2933495293045856,
- "grad_norm": 2.6830527782440186,
- "learning_rate": 3.043187102075121e-06,
- "loss": 0.45221376419067383,
- "step": 1420
- },
- {
- "epoch": 1.297904646219253,
- "grad_norm": 1.5950199365615845,
- "learning_rate": 3.0085951565118142e-06,
- "loss": 0.45414228439331056,
- "step": 1425
- },
- {
- "epoch": 1.3024597631339203,
- "grad_norm": 2.076038122177124,
- "learning_rate": 2.974116130372151e-06,
- "loss": 0.4510965347290039,
- "step": 1430
- },
- {
- "epoch": 1.307014880048588,
- "grad_norm": 2.966803550720215,
- "learning_rate": 2.9397519787341144e-06,
- "loss": 0.45758919715881347,
- "step": 1435
- },
- {
- "epoch": 1.3115699969632555,
- "grad_norm": 1.5716099739074707,
- "learning_rate": 2.9055046501619088e-06,
- "loss": 0.45397381782531737,
- "step": 1440
- },
- {
- "epoch": 1.3161251138779229,
- "grad_norm": 1.8856912851333618,
- "learning_rate": 2.8713760865954743e-06,
- "loss": 0.4520057201385498,
- "step": 1445
- },
- {
- "epoch": 1.3206802307925902,
- "grad_norm": 1.5396655797958374,
- "learning_rate": 2.837368223240372e-06,
- "loss": 0.4507561683654785,
- "step": 1450
- },
- {
- "epoch": 1.3252353477072578,
- "grad_norm": 1.9894485473632812,
- "learning_rate": 2.803482988458054e-06,
- "loss": 0.4559630393981934,
- "step": 1455
- },
- {
- "epoch": 1.3297904646219254,
- "grad_norm": 9.26878547668457,
- "learning_rate": 2.7697223036565103e-06,
- "loss": 0.44710407257080076,
- "step": 1460
- },
- {
- "epoch": 1.3343455815365928,
- "grad_norm": 1.7867859601974487,
- "learning_rate": 2.736088083181324e-06,
- "loss": 0.4569823741912842,
- "step": 1465
- },
- {
- "epoch": 1.3389006984512601,
- "grad_norm": 4.057967662811279,
- "learning_rate": 2.702582234207126e-06,
- "loss": 0.4553251266479492,
- "step": 1470
- },
- {
- "epoch": 1.3434558153659277,
- "grad_norm": 1.4120380878448486,
- "learning_rate": 2.6692066566294393e-06,
- "loss": 0.45798654556274415,
- "step": 1475
- },
- {
- "epoch": 1.3480109322805953,
- "grad_norm": 1.817967176437378,
- "learning_rate": 2.6359632429569652e-06,
- "loss": 0.4523132801055908,
- "step": 1480
- },
- {
- "epoch": 1.3525660491952627,
- "grad_norm": 2.0179030895233154,
- "learning_rate": 2.6028538782042547e-06,
- "loss": 0.4514320373535156,
- "step": 1485
- },
- {
- "epoch": 1.35712116610993,
- "grad_norm": 2.939159870147705,
- "learning_rate": 2.5698804397848294e-06,
- "loss": 0.45455131530761717,
- "step": 1490
- },
- {
- "epoch": 1.3616762830245976,
- "grad_norm": 1.9223039150238037,
- "learning_rate": 2.5370447974047308e-06,
- "loss": 0.45690116882324217,
- "step": 1495
- },
- {
- "epoch": 1.3662313999392652,
- "grad_norm": 2.0542290210723877,
- "learning_rate": 2.5043488129564954e-06,
- "loss": 0.4534407138824463,
- "step": 1500
- },
- {
- "epoch": 1.3707865168539326,
- "grad_norm": 1.5204252004623413,
- "learning_rate": 2.471794340413578e-06,
- "loss": 0.453262996673584,
- "step": 1505
- },
- {
- "epoch": 1.3753416337686,
- "grad_norm": 1.4880733489990234,
- "learning_rate": 2.4393832257252253e-06,
- "loss": 0.4536959171295166,
- "step": 1510
- },
- {
- "epoch": 1.3798967506832676,
- "grad_norm": 1.7958362102508545,
- "learning_rate": 2.4071173067118136e-06,
- "loss": 0.4493096351623535,
- "step": 1515
- },
- {
- "epoch": 1.3844518675979351,
- "grad_norm": 1.4270687103271484,
- "learning_rate": 2.374998412960623e-06,
- "loss": 0.4516870975494385,
- "step": 1520
- },
- {
- "epoch": 1.3890069845126025,
- "grad_norm": 1.7491718530654907,
- "learning_rate": 2.343028365722109e-06,
- "loss": 0.45421199798583983,
- "step": 1525
- },
- {
- "epoch": 1.3935621014272699,
- "grad_norm": 1.595752239227295,
- "learning_rate": 2.311208977806617e-06,
- "loss": 0.45371274948120116,
- "step": 1530
- },
- {
- "epoch": 1.3981172183419375,
- "grad_norm": 1.572836995124817,
- "learning_rate": 2.279542053481599e-06,
- "loss": 0.45087580680847167,
- "step": 1535
- },
- {
- "epoch": 1.4026723352566048,
- "grad_norm": 3.447502851486206,
- "learning_rate": 2.2480293883693053e-06,
- "loss": 0.4540461540222168,
- "step": 1540
- },
- {
- "epoch": 1.4072274521712724,
- "grad_norm": 5.559060096740723,
- "learning_rate": 2.216672769344965e-06,
- "loss": 0.4529916763305664,
- "step": 1545
- },
- {
- "epoch": 1.4117825690859398,
- "grad_norm": 2.572004795074463,
- "learning_rate": 2.185473974435459e-06,
- "loss": 0.45006046295166013,
- "step": 1550
- },
- {
- "epoch": 1.4163376860006074,
- "grad_norm": 1.64500892162323,
- "learning_rate": 2.1544347727185067e-06,
- "loss": 0.4464961051940918,
- "step": 1555
- },
- {
- "epoch": 1.4208928029152748,
- "grad_norm": 2.7676432132720947,
- "learning_rate": 2.1235569242223537e-06,
- "loss": 0.44682931900024414,
- "step": 1560
- },
- {
- "epoch": 1.4254479198299423,
- "grad_norm": 1.6749385595321655,
- "learning_rate": 2.0928421798259633e-06,
- "loss": 0.45028390884399416,
- "step": 1565
- },
- {
- "epoch": 1.4300030367446097,
- "grad_norm": 1.5821510553359985,
- "learning_rate": 2.0622922811597512e-06,
- "loss": 0.4520574569702148,
- "step": 1570
- },
- {
- "epoch": 1.4345581536592773,
- "grad_norm": 2.120412826538086,
- "learning_rate": 2.031908960506811e-06,
- "loss": 0.44826512336730956,
- "step": 1575
- },
- {
- "epoch": 1.4391132705739447,
- "grad_norm": 1.8699346780776978,
- "learning_rate": 2.0016939407046987e-06,
- "loss": 0.4471128463745117,
- "step": 1580
- },
- {
- "epoch": 1.4436683874886123,
- "grad_norm": 2.077984571456909,
- "learning_rate": 1.971648935047744e-06,
- "loss": 0.44693751335144044,
- "step": 1585
- },
- {
- "epoch": 1.4482235044032796,
- "grad_norm": 1.5535467863082886,
- "learning_rate": 1.941775647189895e-06,
- "loss": 0.44760980606079104,
- "step": 1590
- },
- {
- "epoch": 1.4527786213179472,
- "grad_norm": 2.7341277599334717,
- "learning_rate": 1.9120757710481155e-06,
- "loss": 0.44966630935668944,
- "step": 1595
- },
- {
2253
- "epoch": 1.4573337382326146,
2254
- "grad_norm": 1.7916532754898071,
2255
- "learning_rate": 1.8825509907063328e-06,
2256
- "loss": 0.44744625091552737,
2257
- "step": 1600
2258
- },
2259
- {
2260
- "epoch": 1.4618888551472822,
2261
- "grad_norm": 2.4260597229003906,
2262
- "learning_rate": 1.8532029803199537e-06,
2263
- "loss": 0.4553190231323242,
2264
- "step": 1605
2265
- },
2266
- {
2267
- "epoch": 1.4664439720619495,
2268
- "grad_norm": 1.6395586729049683,
2269
- "learning_rate": 1.8240334040209252e-06,
2270
- "loss": 0.45157713890075685,
2271
- "step": 1610
2272
- },
2273
- {
2274
- "epoch": 1.4709990889766171,
2275
- "grad_norm": 2.9597060680389404,
2276
- "learning_rate": 1.795043915823373e-06,
2277
- "loss": 0.45125951766967776,
2278
- "step": 1615
2279
- },
2280
- {
2281
- "epoch": 1.4755542058912845,
2282
- "grad_norm": 1.5317883491516113,
2283
- "learning_rate": 1.7662361595298144e-06,
2284
- "loss": 0.44574918746948244,
2285
- "step": 1620
2286
- },
2287
- {
2288
- "epoch": 1.480109322805952,
2289
- "grad_norm": 2.6433119773864746,
2290
- "learning_rate": 1.7376117686379562e-06,
2291
- "loss": 0.4534473896026611,
2292
- "step": 1625
2293
- },
2294
- {
2295
- "epoch": 1.4846644397206195,
2296
- "grad_norm": 1.9677019119262695,
2297
- "learning_rate": 1.7091723662480557e-06,
2298
- "loss": 0.44940710067749023,
2299
- "step": 1630
2300
- },
2301
- {
2302
- "epoch": 1.489219556635287,
2303
- "grad_norm": 1.8388527631759644,
2304
- "learning_rate": 1.6809195649708998e-06,
2305
- "loss": 0.4490818977355957,
2306
- "step": 1635
2307
- },
2308
- {
2309
- "epoch": 1.4937746735499544,
2310
- "grad_norm": 2.5333993434906006,
2311
- "learning_rate": 1.6528549668363586e-06,
2312
- "loss": 0.44977474212646484,
2313
- "step": 1640
2314
- },
2315
- {
2316
- "epoch": 1.498329790464622,
2317
- "grad_norm": 1.6673647165298462,
2318
- "learning_rate": 1.6249801632025337e-06,
2319
- "loss": 0.447948694229126,
2320
- "step": 1645
2321
- },
2322
- {
2323
- "epoch": 1.5028849073792894,
2324
- "grad_norm": 3.8595633506774902,
2325
- "learning_rate": 1.5972967346655449e-06,
2326
- "loss": 0.4469724178314209,
2327
- "step": 1650
2328
- },
2329
- {
2330
- "epoch": 1.507440024293957,
2331
- "grad_norm": 1.906414270401001,
2332
- "learning_rate": 1.5698062509698908e-06,
2333
- "loss": 0.4501968860626221,
2334
- "step": 1655
2335
- },
2336
- {
2337
- "epoch": 1.5119951412086243,
2338
- "grad_norm": 1.8252265453338623,
2339
- "learning_rate": 1.5425102709194374e-06,
2340
- "loss": 0.44973297119140626,
2341
- "step": 1660
2342
- },
2343
- {
2344
- "epoch": 1.5165502581232917,
2345
- "grad_norm": 1.7581197023391724,
2346
- "learning_rate": 1.5154103422890394e-06,
2347
- "loss": 0.44851016998291016,
2348
- "step": 1665
2349
- },
2350
- {
2351
- "epoch": 1.5211053750379593,
2352
- "grad_norm": 2.9670493602752686,
2353
- "learning_rate": 1.4885080017367632e-06,
2354
- "loss": 0.4460925102233887,
2355
- "step": 1670
2356
- },
2357
- {
2358
- "epoch": 1.5256604919526269,
2359
- "grad_norm": 2.03843355178833,
2360
- "learning_rate": 1.4618047747167613e-06,
2361
- "loss": 0.4519230365753174,
2362
- "step": 1675
2363
- },
2364
- {
2365
- "epoch": 1.5302156088672942,
2366
- "grad_norm": 2.041097402572632,
2367
- "learning_rate": 1.4353021753927753e-06,
2368
- "loss": 0.4467913627624512,
2369
- "step": 1680
2370
- },
2371
- {
2372
- "epoch": 1.5347707257819616,
2373
- "grad_norm": 2.044473886489868,
2374
- "learning_rate": 1.4090017065522731e-06,
2375
- "loss": 0.4481149673461914,
2376
- "step": 1685
2377
- },
2378
- {
2379
- "epoch": 1.5393258426966292,
2380
- "grad_norm": 2.0631234645843506,
2381
- "learning_rate": 1.3829048595212358e-06,
2382
- "loss": 0.45535945892333984,
2383
- "step": 1690
2384
- },
2385
- {
2386
- "epoch": 1.5438809596112968,
2387
- "grad_norm": 3.0296971797943115,
2388
- "learning_rate": 1.3570131140795934e-06,
2389
- "loss": 0.4471914291381836,
2390
- "step": 1695
2391
- },
2392
- {
2393
- "epoch": 1.5484360765259642,
2394
- "grad_norm": 2.544480323791504,
2395
- "learning_rate": 1.3313279383773258e-06,
2396
- "loss": 0.44684619903564454,
2397
- "step": 1700
2398
- },
2399
- {
2400
- "epoch": 1.5529911934406315,
2401
- "grad_norm": 1.992871642112732,
2402
- "learning_rate": 1.3058507888511985e-06,
2403
- "loss": 0.44977169036865233,
2404
- "step": 1705
2405
- },
2406
- {
2407
- "epoch": 1.5575463103552991,
2408
- "grad_norm": 3.2738561630249023,
2409
- "learning_rate": 1.2805831101421923e-06,
2410
- "loss": 0.4530197620391846,
2411
- "step": 1710
2412
- },
2413
- {
2414
- "epoch": 1.5621014272699667,
2415
- "grad_norm": 2.0382983684539795,
2416
- "learning_rate": 1.255526335013577e-06,
2417
- "loss": 0.4496905326843262,
2418
- "step": 1715
2419
- },
2420
- {
2421
- "epoch": 1.566656544184634,
2422
- "grad_norm": 2.9439361095428467,
2423
- "learning_rate": 1.2306818842696716e-06,
2424
- "loss": 0.4521991729736328,
2425
- "step": 1720
2426
- },
2427
- {
2428
- "epoch": 1.5712116610993014,
2429
- "grad_norm": 23.51557159423828,
2430
- "learning_rate": 1.206051166675284e-06,
2431
- "loss": 0.44781079292297366,
2432
- "step": 1725
2433
- },
2434
- {
2435
- "epoch": 1.575766778013969,
2436
- "grad_norm": 2.009943723678589,
2437
- "learning_rate": 1.181635578875826e-06,
2438
- "loss": 0.4488945007324219,
2439
- "step": 1730
2440
- },
2441
- {
2442
- "epoch": 1.5803218949286366,
2443
- "grad_norm": 4.892741680145264,
2444
- "learning_rate": 1.1574365053181152e-06,
2445
- "loss": 0.448914098739624,
2446
- "step": 1735
2447
- },
2448
- {
2449
- "epoch": 1.584877011843304,
2450
- "grad_norm": 2.3487164974212646,
2451
- "learning_rate": 1.1334553181718771e-06,
2452
- "loss": 0.4451763153076172,
2453
- "step": 1740
2454
- },
2455
- {
2456
- "epoch": 1.5894321287579714,
2457
- "grad_norm": 2.0576467514038086,
2458
- "learning_rate": 1.1096933772519408e-06,
2459
- "loss": 0.4390605926513672,
2460
- "step": 1745
2461
- },
2462
- {
2463
- "epoch": 1.593987245672639,
2464
- "grad_norm": 5.04032564163208,
2465
- "learning_rate": 1.086152029941122e-06,
2466
- "loss": 0.4442657470703125,
2467
- "step": 1750
2468
- },
2469
- {
2470
- "epoch": 1.5985423625873065,
2471
- "grad_norm": 3.1288936138153076,
2472
- "learning_rate": 1.0628326111138377e-06,
2473
- "loss": 0.44927182197570803,
2474
- "step": 1755
2475
- },
2476
- {
2477
- "epoch": 1.603097479501974,
2478
- "grad_norm": 2.3182742595672607,
2479
- "learning_rate": 1.0397364430603978e-06,
2480
- "loss": 0.45226001739501953,
2481
- "step": 1760
2482
- },
2483
- {
2484
- "epoch": 1.6076525964166413,
2485
- "grad_norm": 2.4209063053131104,
2486
- "learning_rate": 1.016864835412037e-06,
2487
- "loss": 0.4492661476135254,
2488
- "step": 1765
2489
- },
2490
- {
2491
- "epoch": 1.6122077133313089,
2492
- "grad_norm": 1.9613839387893677,
2493
- "learning_rate": 9.94219085066654e-07,
2494
- "loss": 0.4481070518493652,
2495
- "step": 1770
2496
- },
2497
- {
2498
- "epoch": 1.6167628302459764,
2499
- "grad_norm": 3.977729082107544,
2500
- "learning_rate": 9.718004761152689e-07,
2501
- "loss": 0.4445545196533203,
2502
- "step": 1775
2503
- },
2504
- {
2505
- "epoch": 1.6213179471606438,
2506
- "grad_norm": 2.3831753730773926,
2507
- "learning_rate": 9.496102797692125e-07,
2508
- "loss": 0.4480759620666504,
2509
- "step": 1780
2510
- },
2511
- {
2512
- "epoch": 1.6258730640753112,
2513
- "grad_norm": 1.8846925497055054,
2514
- "learning_rate": 9.276497542880431e-07,
2515
- "loss": 0.4429009437561035,
2516
- "step": 1785
2517
- },
2518
- {
2519
- "epoch": 1.6304281809899788,
2520
- "grad_norm": 4.497838497161865,
2521
- "learning_rate": 9.059201449082045e-07,
2522
- "loss": 0.4449130058288574,
2523
- "step": 1790
2524
- },
2525
- {
2526
- "epoch": 1.6349832979046464,
2527
- "grad_norm": 2.514615535736084,
2528
- "learning_rate": 8.844226837724079e-07,
2529
- "loss": 0.4506368160247803,
2530
- "step": 1795
2531
- },
2532
- {
2533
- "epoch": 1.6395384148193137,
2534
- "grad_norm": 2.240323066711426,
2535
- "learning_rate": 8.631585898597739e-07,
2536
- "loss": 0.4447455883026123,
2537
- "step": 1800
2538
- },
2539
- {
2540
- "epoch": 1.644093531733981,
2541
- "grad_norm": 2.0742299556732178,
2542
- "learning_rate": 8.421290689167056e-07,
2543
- "loss": 0.44998984336853026,
2544
- "step": 1805
2545
- },
2546
- {
2547
- "epoch": 1.6486486486486487,
2548
- "grad_norm": 1.7966265678405762,
2549
- "learning_rate": 8.213353133885211e-07,
2550
- "loss": 0.4495072841644287,
2551
- "step": 1810
2552
- },
2553
- {
2554
- "epoch": 1.6532037655633163,
2555
- "grad_norm": 2.6778430938720703,
2556
- "learning_rate": 8.0077850235184e-07,
2557
- "loss": 0.44173946380615237,
2558
- "step": 1815
2559
- },
2560
- {
2561
- "epoch": 1.6577588824779836,
2562
- "grad_norm": 1.9878642559051514,
2563
- "learning_rate": 7.804598014477255e-07,
2564
- "loss": 0.4415739059448242,
2565
- "step": 1820
2566
- },
2567
- {
2568
- "epoch": 1.662313999392651,
2569
- "grad_norm": 2.5896787643432617,
2570
- "learning_rate": 7.603803628155821e-07,
2571
- "loss": 0.44545326232910154,
2572
- "step": 1825
2573
- },
2574
- {
2575
- "epoch": 1.6668691163073186,
2576
- "grad_norm": 1.7139712572097778,
2577
- "learning_rate": 7.405413250278315e-07,
2578
- "loss": 0.45055904388427737,
2579
- "step": 1830
2580
- },
2581
- {
2582
- "epoch": 1.6714242332219862,
2583
- "grad_norm": 1.7959189414978027,
2584
- "learning_rate": 7.209438130253538e-07,
2585
- "loss": 0.4413741111755371,
2586
- "step": 1835
2587
- },
2588
- {
2589
- "epoch": 1.6759793501366536,
2590
- "grad_norm": 5.144044876098633,
2591
- "learning_rate": 7.015889380536939e-07,
2592
- "loss": 0.44967002868652345,
2593
- "step": 1840
2594
- },
2595
- {
2596
- "epoch": 1.680534467051321,
2597
- "grad_norm": 2.06825852394104,
2598
- "learning_rate": 6.824777976000513e-07,
2599
- "loss": 0.44098854064941406,
2600
- "step": 1845
2601
- },
2602
- {
2603
- "epoch": 1.6850895839659885,
2604
- "grad_norm": 1.9740947484970093,
2605
- "learning_rate": 6.636114753310496e-07,
2606
- "loss": 0.45333032608032225,
2607
- "step": 1850
2608
- },
2609
- {
2610
- "epoch": 1.689644700880656,
2611
- "grad_norm": 20.434785842895508,
2612
- "learning_rate": 6.449910410312927e-07,
2613
- "loss": 0.44796323776245117,
2614
- "step": 1855
2615
- },
2616
- {
2617
- "epoch": 1.6941998177953232,
2618
- "grad_norm": 2.0130579471588135,
2619
- "learning_rate": 6.266175505426958e-07,
2620
- "loss": 0.451617431640625,
2621
- "step": 1860
2622
- },
2623
- {
2624
- "epoch": 1.6987549347099908,
2625
- "grad_norm": 3.6143994331359863,
2626
- "learning_rate": 6.08492045704625e-07,
2627
- "loss": 0.4540760040283203,
2628
- "step": 1865
2629
- },
2630
- {
2631
- "epoch": 1.7033100516246584,
2632
- "grad_norm": 1.749821424484253,
2633
- "learning_rate": 5.906155542948172e-07,
2634
- "loss": 0.4482609748840332,
2635
- "step": 1870
2636
- },
2637
- {
2638
- "epoch": 1.7078651685393258,
2639
- "grad_norm": 2.197422981262207,
2640
- "learning_rate": 5.729890899710949e-07,
2641
- "loss": 0.4451118469238281,
2642
- "step": 1875
2643
- },
2644
- {
2645
- "epoch": 1.7124202854539932,
2646
- "grad_norm": 1.8987295627593994,
2647
- "learning_rate": 5.556136522139027e-07,
2648
- "loss": 0.44547395706176757,
2649
- "step": 1880
2650
- },
2651
- {
2652
- "epoch": 1.7169754023686608,
2653
- "grad_norm": 2.9210968017578125,
2654
- "learning_rate": 5.384902262696229e-07,
2655
- "loss": 0.44892401695251466,
2656
- "step": 1885
2657
- },
2658
- {
2659
- "epoch": 1.7215305192833283,
2660
- "grad_norm": 2.459737777709961,
2661
- "learning_rate": 5.216197830947095e-07,
2662
- "loss": 0.45287532806396485,
2663
- "step": 1890
2664
- },
2665
- {
2666
- "epoch": 1.7260856361979957,
2667
- "grad_norm": 2.4369258880615234,
2668
- "learning_rate": 5.05003279300637e-07,
2669
- "loss": 0.4450559139251709,
2670
- "step": 1895
2671
- },
2672
- {
2673
- "epoch": 1.730640753112663,
2674
- "grad_norm": 2.977600336074829,
2675
- "learning_rate": 4.886416570996505e-07,
2676
- "loss": 0.4451436996459961,
2677
- "step": 1900
2678
- },
2679
- {
2680
- "epoch": 1.7351958700273307,
2681
- "grad_norm": 2.3304171562194824,
2682
- "learning_rate": 4.7253584425134393e-07,
2683
- "loss": 0.4494274139404297,
2684
- "step": 1905
2685
- },
2686
- {
2687
- "epoch": 1.7397509869419983,
2688
- "grad_norm": 1.9524421691894531,
2689
- "learning_rate": 4.5668675401005113e-07,
2690
- "loss": 0.449497127532959,
2691
- "step": 1910
2692
- },
2693
- {
2694
- "epoch": 1.7443061038566656,
2695
- "grad_norm": 1.9427039623260498,
2696
- "learning_rate": 4.410952850730638e-07,
2697
- "loss": 0.4483028411865234,
2698
- "step": 1915
2699
- },
2700
- {
2701
- "epoch": 1.748861220771333,
2702
- "grad_norm": 3.3043453693389893,
2703
- "learning_rate": 4.2576232152966577e-07,
2704
- "loss": 0.4501804828643799,
2705
- "step": 1920
2706
- },
2707
- {
2708
- "epoch": 1.7534163376860006,
2709
- "grad_norm": 2.155041456222534,
2710
- "learning_rate": 4.106887328110082e-07,
2711
- "loss": 0.4523191452026367,
2712
- "step": 1925
2713
- },
2714
- {
2715
- "epoch": 1.7579714546006682,
2716
- "grad_norm": 2.140929937362671,
2717
- "learning_rate": 3.958753736408105e-07,
2718
- "loss": 0.4435746192932129,
2719
- "step": 1930
2720
- },
2721
- {
2722
- "epoch": 1.7625265715153355,
2723
- "grad_norm": 2.4108352661132812,
2724
- "learning_rate": 3.8132308398688813e-07,
2725
- "loss": 0.4462573051452637,
2726
- "step": 1935
2727
- },
2728
- {
2729
- "epoch": 1.767081688430003,
2730
- "grad_norm": 4.496824741363525,
2731
- "learning_rate": 3.6703268901353007e-07,
2732
- "loss": 0.4435447692871094,
2733
- "step": 1940
2734
- },
2735
- {
2736
- "epoch": 1.7716368053446705,
2737
- "grad_norm": 3.001497507095337,
2738
- "learning_rate": 3.530049990347068e-07,
2739
- "loss": 0.4490827560424805,
2740
- "step": 1945
2741
- },
2742
- {
2743
- "epoch": 1.776191922259338,
2744
- "grad_norm": 1.8158936500549316,
2745
- "learning_rate": 3.392408094681193e-07,
2746
- "loss": 0.44419255256652834,
2747
- "step": 1950
2748
- },
2749
- {
2750
- "epoch": 1.7807470391740055,
2751
- "grad_norm": 1.8862253427505493,
2752
- "learning_rate": 3.257409007901047e-07,
2753
- "loss": 0.44574751853942873,
2754
- "step": 1955
2755
- },
2756
- {
2757
- "epoch": 1.7853021560886728,
2758
- "grad_norm": 3.064157247543335,
2759
- "learning_rate": 3.125060384913725e-07,
2760
- "loss": 0.4461554527282715,
2761
- "step": 1960
2762
- },
2763
- {
2764
- "epoch": 1.7898572730033404,
2765
- "grad_norm": 3.538055658340454,
2766
- "learning_rate": 2.995369730336012e-07,
2767
- "loss": 0.45227828025817873,
2768
- "step": 1965
2769
- },
2770
- {
2771
- "epoch": 1.794412389918008,
2772
- "grad_norm": 1.7463148832321167,
2773
- "learning_rate": 2.8683443980688506e-07,
2774
- "loss": 0.4456148147583008,
2775
- "step": 1970
2776
- },
2777
- {
2778
- "epoch": 1.7989675068326754,
2779
- "grad_norm": 2.07248854637146,
2780
- "learning_rate": 2.7439915908803736e-07,
2781
- "loss": 0.4530907154083252,
2782
- "step": 1975
2783
- },
2784
- {
2785
- "epoch": 1.8035226237473427,
2786
- "grad_norm": 1.6984100341796875,
2787
- "learning_rate": 2.622318359997417e-07,
2788
- "loss": 0.44570345878601075,
2789
- "step": 1980
2790
- },
2791
- {
2792
- "epoch": 1.8080777406620103,
2793
- "grad_norm": 2.3377933502197266,
2794
- "learning_rate": 2.5033316047057664e-07,
2795
- "loss": 0.4517060279846191,
2796
- "step": 1985
2797
- },
2798
- {
2799
- "epoch": 1.812632857576678,
2800
- "grad_norm": 4.431271076202393,
2801
- "learning_rate": 2.387038071958897e-07,
2802
- "loss": 0.444257926940918,
2803
- "step": 1990
2804
- },
2805
- {
2806
- "epoch": 1.8171879744913453,
2807
- "grad_norm": 5.051254749298096,
2808
- "learning_rate": 2.2734443559954023e-07,
2809
- "loss": 0.4467195510864258,
2810
- "step": 1995
2811
- },
2812
- {
2813
- "epoch": 1.8217430914060126,
2814
- "grad_norm": 2.7545220851898193,
2815
- "learning_rate": 2.1625568979651012e-07,
2816
- "loss": 0.4499521732330322,
2817
- "step": 2000
2818
- },
2819
- {
2820
- "epoch": 1.8262982083206802,
2821
- "grad_norm": 2.4163920879364014,
2822
- "learning_rate": 2.054381985563786e-07,
2823
- "loss": 0.44539585113525393,
2824
- "step": 2005
2825
- },
2826
- {
2827
- "epoch": 1.8308533252353478,
2828
- "grad_norm": 2.1222238540649414,
2829
- "learning_rate": 1.9489257526766891e-07,
2830
- "loss": 0.445588493347168,
2831
- "step": 2010
2832
- },
2833
- {
2834
- "epoch": 1.8354084421500152,
2835
- "grad_norm": 1.582443356513977,
2836
- "learning_rate": 1.8461941790306637e-07,
2837
- "loss": 0.446135950088501,
2838
- "step": 2015
2839
- },
2840
- {
2841
- "epoch": 1.8399635590646826,
2842
- "grad_norm": 2.354520797729492,
2843
- "learning_rate": 1.7461930898551472e-07,
2844
- "loss": 0.44951620101928713,
2845
- "step": 2020
2846
- },
2847
- {
2848
- "epoch": 1.8445186759793502,
2849
- "grad_norm": 2.66642689704895,
2850
- "learning_rate": 1.6489281555517934e-07,
2851
- "loss": 0.443574857711792,
2852
- "step": 2025
2853
- },
2854
- {
2855
- "epoch": 1.8490737928940177,
2856
- "grad_norm": 3.7442924976348877,
2857
- "learning_rate": 1.5544048913730126e-07,
2858
- "loss": 0.44850635528564453,
2859
- "step": 2030
2860
- },
2861
- {
2862
- "epoch": 1.853628909808685,
2863
- "grad_norm": 1.5329631567001343,
2864
- "learning_rate": 1.4626286571091664e-07,
2865
- "loss": 0.4458890914916992,
2866
- "step": 2035
2867
- },
2868
- {
2869
- "epoch": 1.8581840267233525,
2870
- "grad_norm": 2.335324764251709,
2871
- "learning_rate": 1.373604656784694e-07,
2872
- "loss": 0.4490960597991943,
2873
- "step": 2040
2874
- },
2875
- {
2876
- "epoch": 1.86273914363802,
2877
- "grad_norm": 2.4603464603424072,
2878
- "learning_rate": 1.2873379383630203e-07,
2879
- "loss": 0.44408578872680665,
2880
- "step": 2045
2881
- },
2882
- {
2883
- "epoch": 1.8672942605526877,
2884
- "grad_norm": 2.4164867401123047,
2885
- "learning_rate": 1.2038333934603187e-07,
2886
- "loss": 0.44702978134155275,
2887
- "step": 2050
2888
- },
2889
- {
2890
- "epoch": 1.871849377467355,
2891
- "grad_norm": 4.425102710723877,
2892
- "learning_rate": 1.1230957570681211e-07,
2893
- "loss": 0.4475153923034668,
2894
- "step": 2055
2895
- },
2896
- {
2897
- "epoch": 1.8764044943820224,
2898
- "grad_norm": 2.0736005306243896,
2899
- "learning_rate": 1.0451296072848394e-07,
2900
- "loss": 0.44957666397094725,
2901
- "step": 2060
2902
- },
2903
- {
2904
- "epoch": 1.88095961129669,
2905
- "grad_norm": 2.4189772605895996,
2906
- "learning_rate": 9.699393650561883e-08,
2907
- "loss": 0.44267587661743163,
2908
- "step": 2065
2909
- },
2910
- {
2911
- "epoch": 1.8855147282113576,
2912
- "grad_norm": 4.224188327789307,
2913
- "learning_rate": 8.975292939244928e-08,
2914
- "loss": 0.4467939853668213,
2915
- "step": 2070
2916
- },
2917
- {
2918
- "epoch": 1.890069845126025,
2919
- "grad_norm": 1.827042818069458,
2920
- "learning_rate": 8.27903499786903e-08,
2921
- "loss": 0.4426234722137451,
2922
- "step": 2075
2923
- },
2924
- {
2925
- "epoch": 1.8946249620406923,
2926
- "grad_norm": 1.7865091562271118,
2927
- "learning_rate": 7.610659306626134e-08,
2928
- "loss": 0.4465123176574707,
2929
- "step": 2080
2930
- },
2931
- {
2932
- "epoch": 1.89918007895536,
2933
- "grad_norm": 1.8325212001800537,
2934
- "learning_rate": 6.970203764689931e-08,
2935
- "loss": 0.44530882835388186,
2936
- "step": 2085
2937
- },
2938
- {
2939
- "epoch": 1.9037351958700275,
2940
- "grad_norm": 1.6083518266677856,
2941
- "learning_rate": 6.357704688066457e-08,
2942
- "loss": 0.44170894622802737,
2943
- "step": 2090
2944
- },
2945
- {
2946
- "epoch": 1.9082903127846949,
2947
- "grad_norm": 2.245140314102173,
2948
- "learning_rate": 5.773196807535464e-08,
2949
- "loss": 0.448030948638916,
2950
- "step": 2095
2951
- },
2952
- {
2953
- "epoch": 1.9128454296993622,
2954
- "grad_norm": 1.7852219343185425,
2955
- "learning_rate": 5.2167132666806063e-08,
2956
- "loss": 0.4466597557067871,
2957
- "step": 2100
2958
- },
2959
- {
2960
- "epoch": 1.9174005466140298,
2961
- "grad_norm": 1.8668675422668457,
2962
- "learning_rate": 4.6882856200101135e-08,
2963
- "loss": 0.4466597557067871,
2964
- "step": 2105
2965
- },
2966
- {
2967
- "epoch": 1.9219556635286974,
2968
- "grad_norm": 2.011796712875366,
2969
- "learning_rate": 4.1879438311677176e-08,
2970
- "loss": 0.4484766960144043,
2971
- "step": 2110
2972
- },
2973
- {
2974
- "epoch": 1.9265107804433648,
2975
- "grad_norm": 2.214890241622925,
2976
- "learning_rate": 3.715716271233627e-08,
2977
- "loss": 0.4424152374267578,
2978
- "step": 2115
2979
- },
2980
- {
2981
- "epoch": 1.9310658973580321,
2982
- "grad_norm": 2.4538991451263428,
2983
- "learning_rate": 3.271629717115643e-08,
2984
- "loss": 0.44472498893737794,
2985
- "step": 2120
2986
- },
2987
- {
2988
- "epoch": 1.9356210142726997,
2989
- "grad_norm": 2.28452467918396,
2990
- "learning_rate": 2.8557093500308774e-08,
2991
- "loss": 0.45107216835021974,
2992
- "step": 2125
2993
- },
2994
- {
2995
- "epoch": 1.940176131187367,
2996
- "grad_norm": 1.7527275085449219,
2997
- "learning_rate": 2.4679787540779488e-08,
2998
- "loss": 0.443121337890625,
2999
- "step": 2130
3000
- },
3001
- {
3002
- "epoch": 1.9447312481020345,
3003
- "grad_norm": 1.5625219345092773,
3004
- "learning_rate": 2.1084599148994966e-08,
3005
- "loss": 0.4465321063995361,
3006
- "step": 2135
3007
- },
3008
- {
3009
- "epoch": 1.949286365016702,
3010
- "grad_norm": 2.579392671585083,
3011
- "learning_rate": 1.7771732184357905e-08,
3012
- "loss": 0.4496023178100586,
3013
- "step": 2140
3014
- },
3015
- {
3016
- "epoch": 1.9538414819313696,
3017
- "grad_norm": 1.7549206018447876,
3018
- "learning_rate": 1.4741374497686534e-08,
3019
- "loss": 0.4448676109313965,
3020
- "step": 2145
3021
- },
3022
- {
3023
- "epoch": 1.958396598846037,
3024
- "grad_norm": 5.741093158721924,
3025
- "learning_rate": 1.1993697920561486e-08,
3026
- "loss": 0.44547858238220217,
3027
- "step": 2150
3028
- },
3029
- {
3030
- "epoch": 1.9629517157607044,
3031
- "grad_norm": 2.295653820037842,
3032
- "learning_rate": 9.528858255584694e-09,
3033
- "loss": 0.4428717136383057,
3034
- "step": 2155
3035
- },
3036
- {
3037
- "epoch": 1.967506832675372,
3038
- "grad_norm": 2.983132839202881,
3039
- "learning_rate": 7.3469952675436774e-09,
3040
- "loss": 0.4444404125213623,
3041
- "step": 2160
3042
- },
3043
- {
3044
- "epoch": 1.9720619495900396,
3045
- "grad_norm": 2.4041531085968018,
3046
- "learning_rate": 5.448232675485665e-09,
3047
- "loss": 0.44472641944885255,
3048
- "step": 2165
3049
- },
3050
- {
3051
- "epoch": 1.976617066504707,
3052
- "grad_norm": 1.7280186414718628,
3053
- "learning_rate": 3.832678145704316e-09,
3054
- "loss": 0.44418935775756835,
3055
- "step": 2170
3056
- },
3057
- {
3058
- "epoch": 1.9811721834193743,
3059
- "grad_norm": 2.51718807220459,
3060
- "learning_rate": 2.500423285632381e-09,
3061
- "loss": 0.44251017570495604,
3062
- "step": 2175
3063
- },
3064
- {
3065
- "epoch": 1.9857273003340419,
3066
- "grad_norm": 2.018040180206299,
3067
- "learning_rate": 1.4515436386497439e-09,
3068
- "loss": 0.44551925659179686,
3069
- "step": 2180
3070
- },
3071
- {
3072
- "epoch": 1.9902824172487095,
3073
- "grad_norm": 2.1868982315063477,
3074
- "learning_rate": 6.860986797968538e-10,
3075
- "loss": 0.44702887535095215,
3076
- "step": 2185
3077
- },
3078
- {
3079
- "epoch": 1.9948375341633768,
3080
- "grad_norm": 2.0124709606170654,
3081
- "learning_rate": 2.0413181240519498e-10,
3082
- "loss": 0.44887866973876955,
3083
- "step": 2190
3084
- },
3085
- {
3086
- "epoch": 1.9993926510780442,
3087
- "grad_norm": 2.1505117416381836,
3088
- "learning_rate": 5.670365634258268e-12,
3089
- "loss": 0.4479702949523926,
3090
- "step": 2195
3091
- }
3092
- ],
3093
- "logging_steps": 5,
3094
- "max_steps": 2196,
3095
- "num_input_tokens_seen": 0,
3096
- "num_train_epochs": 2,
3097
- "save_steps": 200,
3098
- "stateful_callbacks": {
3099
- "TrainerControl": {
3100
- "args": {
3101
- "should_epoch_stop": false,
3102
- "should_evaluate": false,
3103
- "should_log": false,
3104
- "should_save": true,
3105
- "should_training_stop": true
3106
- },
3107
- "attributes": {}
3108
- }
3109
- },
3110
- "total_flos": 2.3603289861491838e+20,
3111
- "train_batch_size": 8,
3112
- "trial_name": null,
3113
- "trial_params": null
3114
- }
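
The learning_rate column in the deleted trainer_state.json above traces a linear warmup followed by a cosine decay, ending near 5.7e-12 at the last logged step (2195 of max_steps 2196). A minimal sketch of that schedule shape in Python; the peak learning rate and warmup fraction below are illustrative assumptions, not values recovered from this run:

import math

def warmup_cosine_lr(step, max_steps, peak_lr=2e-05, warmup_frac=0.05):
    # Linear warmup to peak_lr, then a half-cosine decay toward zero.
    warmup_steps = int(max_steps * warmup_frac)
    if step < warmup_steps:
        return peak_lr * step / max(1, warmup_steps)
    progress = (step - warmup_steps) / max(1, max_steps - warmup_steps)
    return peak_lr * 0.5 * (1.0 + math.cos(math.pi * progress))

# warmup_cosine_lr(2195, 2196) lands within an order of magnitude of the
# final logged learning_rate; the exact value depends on the trainer's
# internal step accounting.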
 
qwen3vl_8b_agentnetv1_fullsft_gb200/training_args.bin DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:e1b36131cc0286c12218ef134fa6916b868c8a2171f01c50266afdff7f461fdf
3
- size 9169
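
The three deleted lines above are a Git LFS pointer rather than the binary itself: the LFS spec version, the SHA-256 object id, and the object size in bytes (9169 here). A minimal sketch of reading such a pointer file; the path is hypothetical:

def parse_lfs_pointer(path):
    # Each pointer line is "key value"; return the recorded oid and size.
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields["oid"], int(fields["size"])

# oid, size = parse_lfs_pointer("training_args.bin")  # hypothetical path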
 
qwen3vl_8b_agentnetv1_fullsft_gb200/video_preprocessor_config.json DELETED
@@ -1,41 +0,0 @@
1
- {
2
- "crop_size": null,
3
- "data_format": "channels_first",
4
- "default_to_square": true,
5
- "device": null,
6
- "do_center_crop": null,
7
- "do_convert_rgb": true,
8
- "do_normalize": true,
9
- "do_rescale": true,
10
- "do_resize": true,
11
- "do_sample_frames": true,
12
- "fps": 2,
13
- "image_mean": [
14
- 0.5,
15
- 0.5,
16
- 0.5
17
- ],
18
- "image_std": [
19
- 0.5,
20
- 0.5,
21
- 0.5
22
- ],
23
- "input_data_format": null,
24
- "max_frames": 768,
25
- "merge_size": 2,
26
- "min_frames": 4,
27
- "num_frames": null,
28
- "pad_size": null,
29
- "patch_size": 16,
30
- "processor_class": "Qwen3VLProcessor",
31
- "resample": 3,
32
- "rescale_factor": 0.00392156862745098,
33
- "return_metadata": false,
34
- "size": {
35
- "longest_edge": 25165824,
36
- "shortest_edge": 4096
37
- },
38
- "temporal_patch_size": 2,
39
- "video_metadata": null,
40
- "video_processor_type": "Qwen3VLVideoProcessor"
41
- }
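
For scale: with patch_size 16 and merge_size 2 as in the config above, each sampled frame is split into 16x16 patches, and, following the Qwen-VL family convention of merging each 2x2 patch group into one vision token (an assumption here, not something this JSON states), the per-frame token count works out as in this rough sketch:

import math

def frame_tokens(height, width, patch_size=16, merge_size=2):
    # Spatial patches per side, then a 2x2 merge into vision tokens.
    patches_h = math.ceil(height / patch_size)
    patches_w = math.ceil(width / patch_size)
    return (patches_h // merge_size) * (patches_w // merge_size)

# frame_tokens(448, 448) == 196 tokens for a single 448x448 frame;
# temporal_patch_size=2 additionally pairs adjacent frames.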
 
qwen3vl_8b_agentnetv1_fullsft_gb200/vocab.json DELETED
The diff for this file is too large to render. See raw diff
 
qwen3vl_8b_agentnetv1_fullsft_gb200/zero_to_fp32.py DELETED
@@ -1,760 +0,0 @@
1
- #!/usr/bin/env python
2
-
3
- # Copyright (c) Microsoft Corporation.
4
- # SPDX-License-Identifier: Apache-2.0
5
-
6
- # DeepSpeed Team
7
-
8
- # This script extracts fp32 consolidated weights from ZeRO 1, 2 and 3 DeepSpeed checkpoints. It gets
9
- # copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
10
- # the future. Once extracted, the weights don't require DeepSpeed and can be used in any
11
- # application.
12
- #
13
- # example:
14
- # python zero_to_fp32.py . output_dir/
15
- # or
16
- # python zero_to_fp32.py . output_dir/ --safe_serialization
17
-
18
- import argparse
19
- import torch
20
- import glob
21
- import math
22
- import os
23
- import re
24
- import gc
25
- import json
26
- import numpy as np
27
- from tqdm import tqdm
28
- from collections import OrderedDict
29
- from dataclasses import dataclass
30
-
31
- # while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
32
- # DeepSpeed data structures it has to be available in the current python environment.
33
- from deepspeed.utils import logger
34
- from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
35
- FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
36
- FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
37
-
38
-
39
- @dataclass
40
- class zero_model_state:
41
- buffers: dict()
42
- param_shapes: dict()
43
- shared_params: list
44
- ds_version: int
45
- frozen_param_shapes: dict()
46
- frozen_param_fragments: dict()
47
-
48
-
49
- debug = 0
50
-
51
- # load to cpu
52
- device = torch.device('cpu')
53
-
54
-
55
- def atoi(text):
56
- return int(text) if text.isdigit() else text
57
-
58
-
59
- def natural_keys(text):
60
- '''
61
- alist.sort(key=natural_keys) sorts in human order
62
- http://nedbatchelder.com/blog/200712/human_sorting.html
63
- (See Toothy's implementation in the comments)
64
- '''
65
- return [atoi(c) for c in re.split(r'(\d+)', text)]
66
-
67
-
68
- def get_model_state_file(checkpoint_dir, zero_stage):
69
- if not os.path.isdir(checkpoint_dir):
70
- raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
71
-
72
- # there should be only one file
73
- if zero_stage <= 2:
74
- file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
75
- elif zero_stage == 3:
76
- file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
77
-
78
- if not os.path.exists(file):
79
- raise FileNotFoundError(f"can't find model states file at '{file}'")
80
-
81
- return file
82
-
83
-
84
- def get_checkpoint_files(checkpoint_dir, glob_pattern):
85
- # XXX: need to test that this simple glob rule works for multi-node setup too
86
- ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
87
-
88
- if len(ckpt_files) == 0:
89
- raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
90
-
91
- return ckpt_files
92
-
93
-
94
- def get_optim_files(checkpoint_dir):
95
- return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
96
-
97
-
98
- def get_model_state_files(checkpoint_dir):
99
- return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
100
-
101
-
102
- def parse_model_states(files):
103
- zero_model_states = []
104
- for file in files:
105
- state_dict = torch.load(file, map_location=device, weights_only=False)
106
-
107
- if BUFFER_NAMES not in state_dict:
108
- raise ValueError(f"{file} is not a model state checkpoint")
109
- buffer_names = state_dict[BUFFER_NAMES]
110
- if debug:
111
- print("Found buffers:", buffer_names)
112
-
113
- # recover just the buffers while restoring them to fp32 if they were saved in fp16
114
- buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
115
- param_shapes = state_dict[PARAM_SHAPES]
116
-
117
- # collect parameters that are included in param_shapes
118
- param_names = []
119
- for s in param_shapes:
120
- for name in s.keys():
121
- param_names.append(name)
122
-
123
- # update with frozen parameters
124
- frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
125
- if frozen_param_shapes is not None:
126
- if debug:
127
- print(f"Found frozen_param_shapes: {frozen_param_shapes}")
128
- param_names += list(frozen_param_shapes.keys())
129
-
130
- # handle shared params
131
- shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
132
-
133
- ds_version = state_dict.get(DS_VERSION, None)
134
-
135
- frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
136
-
137
- z_model_state = zero_model_state(buffers=buffers,
138
- param_shapes=param_shapes,
139
- shared_params=shared_params,
140
- ds_version=ds_version,
141
- frozen_param_shapes=frozen_param_shapes,
142
- frozen_param_fragments=frozen_param_fragments)
143
- zero_model_states.append(z_model_state)
144
-
145
- return zero_model_states
146
-
147
-
148
- def parse_optim_states(files, ds_checkpoint_dir):
149
- total_files = len(files)
150
- state_dicts = []
151
- for f in tqdm(files, desc='Loading checkpoint shards'):
152
- state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False)
153
- # immediately discard the two potentially huge optimizer states, as we only care about the fp32 master weights
154
- # and also handle the case where it was already removed by another helper script
155
- state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
156
- state_dicts.append(state_dict)
157
-
158
- if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
159
- raise ValueError(f"{files[0]} is not a zero checkpoint")
160
- zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
161
- world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
162
-
163
- # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
164
- # parameters can be different from data parallelism for non-expert parameters. So we can just
165
- # use the max of the partition_count to get the dp world_size.
166
-
167
- if type(world_size) is list:
168
- world_size = max(world_size)
169
-
170
- if world_size != total_files:
171
- raise ValueError(
172
- f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
173
- "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
174
- )
175
-
176
- # the groups are named differently in each stage
177
- if zero_stage <= 2:
178
- fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
179
- elif zero_stage == 3:
180
- fp32_groups_key = FP32_FLAT_GROUPS
181
- else:
182
- raise ValueError(f"unknown zero stage {zero_stage}")
183
-
184
- fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
185
- return zero_stage, world_size, fp32_flat_groups
186
-
187
-
188
- def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
189
- """
190
- Returns fp32 state_dict reconstructed from ds checkpoint
191
-
192
- Args:
193
- - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
194
-
195
- """
196
- print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
197
-
198
- optim_files = get_optim_files(ds_checkpoint_dir)
199
- zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
200
- print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
201
-
202
- model_files = get_model_state_files(ds_checkpoint_dir)
203
-
204
- zero_model_states = parse_model_states(model_files)
205
- print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
206
-
207
- if zero_stage <= 2:
208
- return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
209
- exclude_frozen_parameters)
210
- elif zero_stage == 3:
211
- return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
212
- exclude_frozen_parameters)
213
-
214
-
215
- def _zero2_merge_frozen_params(state_dict, zero_model_states):
216
- if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
217
- return
218
-
219
- frozen_param_shapes = zero_model_states[0].frozen_param_shapes
220
- frozen_param_fragments = zero_model_states[0].frozen_param_fragments
221
-
222
- if debug:
223
- num_elem = sum(s.numel() for s in frozen_param_shapes.values())
224
- print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
225
-
226
- wanted_params = len(frozen_param_shapes)
227
- wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
228
- avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
229
- print(f'Frozen params: Have {avail_numel} numels to process.')
230
- print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
231
-
232
- total_params = 0
233
- total_numel = 0
234
- for name, shape in frozen_param_shapes.items():
235
- total_params += 1
236
- unpartitioned_numel = shape.numel()
237
- total_numel += unpartitioned_numel
238
-
239
- state_dict[name] = frozen_param_fragments[name]
240
-
241
- if debug:
242
- print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
243
-
244
- print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
245
-
246
-
247
- def _has_callable(obj, fn):
248
- attr = getattr(obj, fn, None)
249
- return callable(attr)
250
-
251
-
252
- def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
253
- param_shapes = zero_model_states[0].param_shapes
254
-
255
- # Reconstruction protocol:
256
- #
257
- # XXX: document this
258
-
259
- if debug:
260
- for i in range(world_size):
261
- for j in range(len(fp32_flat_groups[0])):
262
- print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
263
-
264
- # XXX: memory usage doubles here (zero2)
265
- num_param_groups = len(fp32_flat_groups[0])
266
- merged_single_partition_of_fp32_groups = []
267
- for i in range(num_param_groups):
268
- merged_partitions = [sd[i] for sd in fp32_flat_groups]
269
- full_single_fp32_vector = torch.cat(merged_partitions, 0)
270
- merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
271
- avail_numel = sum(
272
- [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
273
-
274
- if debug:
275
- wanted_params = sum([len(shapes) for shapes in param_shapes])
276
- wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
277
- # not asserting if there is a mismatch due to possible padding
278
- print(f"Have {avail_numel} numels to process.")
279
- print(f"Need {wanted_numel} numels in {wanted_params} params.")
280
-
281
- # params
282
- # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
283
- # an out-of-core computing solution
284
- total_numel = 0
285
- total_params = 0
286
- for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
287
- offset = 0
288
- avail_numel = full_single_fp32_vector.numel()
289
- for name, shape in shapes.items():
290
-
291
- unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
292
- total_numel += unpartitioned_numel
293
- total_params += 1
294
-
295
- if debug:
296
- print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
297
- state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
298
- offset += unpartitioned_numel
299
-
300
- # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
301
- # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
302
- # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
303
- # live optimizer object, so we are checking that the numbers are within the right range
304
- align_to = 2 * world_size
305
-
306
- def zero2_align(x):
307
- return align_to * math.ceil(x / align_to)
308
-
309
- if debug:
310
- print(f"original offset={offset}, avail_numel={avail_numel}")
311
-
312
- offset = zero2_align(offset)
313
- avail_numel = zero2_align(avail_numel)
314
-
315
- if debug:
316
- print(f"aligned offset={offset}, avail_numel={avail_numel}")
317
-
318
- # Sanity check
319
- if offset != avail_numel:
320
- raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
321
-
322
- print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
323
-
324
-
325
- def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
326
- exclude_frozen_parameters):
327
- state_dict = OrderedDict()
328
-
329
- # buffers
330
- buffers = zero_model_states[0].buffers
331
- state_dict.update(buffers)
332
- if debug:
333
- print(f"added {len(buffers)} buffers")
334
-
335
- if not exclude_frozen_parameters:
336
- _zero2_merge_frozen_params(state_dict, zero_model_states)
337
-
338
- _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
339
-
340
- # recover shared parameters
341
- for pair in zero_model_states[0].shared_params:
342
- if pair[1] in state_dict:
343
- state_dict[pair[0]] = state_dict[pair[1]]
344
-
345
- return state_dict
346
-
347
-
348
- def zero3_partitioned_param_info(unpartitioned_numel, world_size):
349
- remainder = unpartitioned_numel % world_size
350
- padding_numel = (world_size - remainder) if remainder else 0
351
- partitioned_numel = math.ceil(unpartitioned_numel / world_size)
352
- return partitioned_numel, padding_numel
353
-
354
-
355
- def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
356
- if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
357
- return
358
-
359
- if debug:
360
- for i in range(world_size):
361
- num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
362
- print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
363
-
364
- frozen_param_shapes = zero_model_states[0].frozen_param_shapes
365
- wanted_params = len(frozen_param_shapes)
366
- wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
367
- avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
368
- print(f'Frozen params: Have {avail_numel} numels to process.')
369
- print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
370
-
371
- total_params = 0
372
- total_numel = 0
373
- for name, shape in zero_model_states[0].frozen_param_shapes.items():
374
- total_params += 1
375
- unpartitioned_numel = shape.numel()
376
- total_numel += unpartitioned_numel
377
-
378
- param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
379
- state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
380
-
381
- partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
382
-
383
- if debug:
384
- print(
385
- f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
386
- )
387
-
388
- print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
389
-
390
-
391
- class GatheredTensor:
392
- """
393
- A pseudo tensor that collects partitioned weights.
394
- It is more memory efficient when there are multiple groups.
395
- """
396
-
397
- def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape):
398
- self.flat_groups = flat_groups
399
- self.flat_groups_offset = flat_groups_offset
400
- self.offset = offset
401
- self.partitioned_numel = partitioned_numel
402
- self.shape = shape
403
- self.dtype = self.flat_groups[0][0].dtype
404
-
405
- def contiguous(self):
406
- """
407
- Merge partitioned weights from flat_groups into a single tensor.
408
- """
409
- end_idx = self.offset + self.partitioned_numel
410
- world_size = len(self.flat_groups)
411
- pad_flat_param_chunks = []
412
-
413
- for rank_i in range(world_size):
414
- # for each rank, we need to collect weights from related group/groups
415
- flat_groups_at_rank_i = self.flat_groups[rank_i]
416
- start_group_id = None
417
- end_group_id = None
418
- for group_id in range(len(self.flat_groups_offset)):
419
- if self.flat_groups_offset[group_id] <= self.offset < self.flat_groups_offset[group_id + 1]:
420
- start_group_id = group_id
421
- if self.flat_groups_offset[group_id] < end_idx <= self.flat_groups_offset[group_id + 1]:
422
- end_group_id = group_id
423
- break
424
- # collect weights from related group/groups
425
- for group_id in range(start_group_id, end_group_id + 1):
426
- flat_tensor = flat_groups_at_rank_i[group_id]
427
- start_offset = self.offset - self.flat_groups_offset[group_id]
428
- end_offset = min(end_idx, self.flat_groups_offset[group_id + 1]) - self.flat_groups_offset[group_id]
429
- pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset])
430
-
431
- # collect weights from all ranks
432
- pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0)
433
- param = pad_flat_param[:self.shape.numel()].view(self.shape).contiguous()
434
- return param
435
-
436
-
437
- def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
438
- param_shapes = zero_model_states[0].param_shapes
439
- avail_numel = sum([flat_group.numel() for flat_group in fp32_flat_groups[0]]) * world_size
440
-
441
- # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
442
- # param, re-consolidating each param, while dealing with padding if any
443
-
444
- # merge list of dicts, preserving order
445
- param_shapes = {k: v for d in param_shapes for k, v in d.items()}
446
-
447
- if debug:
448
- for i in range(world_size):
449
- print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
450
-
451
- wanted_params = len(param_shapes)
452
- wanted_numel = sum(shape.numel() for shape in param_shapes.values())
453
- # not asserting if there is a mismatch due to possible padding
454
- avail_numel = fp32_flat_groups[0].numel() * world_size
455
- print(f"Trainable params: Have {avail_numel} numels to process.")
456
- print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
457
-
458
- # params
459
- # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
460
- # an out-of-core computing solution
461
- offset = 0
462
- total_numel = 0
463
- total_params = 0
464
- flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]]))
465
- for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'):
466
- unpartitioned_numel = shape.numel()
467
- total_numel += unpartitioned_numel
468
- total_params += 1
469
- partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
470
-
471
- if debug:
472
- print(
473
- f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
474
- )
475
-
476
- # memory efficient tensor
477
- tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape)
478
- state_dict[name] = tensor
479
- offset += partitioned_numel
480
-
481
- offset *= world_size
482
-
483
- # Sanity check
484
- if offset != avail_numel:
485
- raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
486
-
487
- print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
488
-
489
-
490
- def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
491
- exclude_frozen_parameters):
492
- state_dict = OrderedDict()
493
-
494
- # buffers
495
- buffers = zero_model_states[0].buffers
496
- state_dict.update(buffers)
497
- if debug:
498
- print(f"added {len(buffers)} buffers")
499
-
500
- if not exclude_frozen_parameters:
501
- _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
502
-
503
- _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
504
-
505
- # recover shared parameters
506
- for pair in zero_model_states[0].shared_params:
507
- if pair[1] in state_dict:
508
- state_dict[pair[0]] = state_dict[pair[1]]
509
-
510
- return state_dict
511
-
512
-
513
- def to_torch_tensor(state_dict, return_empty_tensor=False):
514
- """
515
- Convert state_dict of GatheredTensor to torch tensor
516
- """
517
- torch_state_dict = {}
518
- converted_tensors = {}
519
- for name, tensor in state_dict.items():
520
- tensor_id = id(tensor)
521
- if tensor_id in converted_tensors: # shared tensors
522
- shared_tensor = torch_state_dict[converted_tensors[tensor_id]]
523
- torch_state_dict[name] = shared_tensor
524
- else:
525
- converted_tensors[tensor_id] = name
526
- if return_empty_tensor:
527
- torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype)
528
- else:
529
- torch_state_dict[name] = tensor.contiguous()
530
- return torch_state_dict
531
-
532
-
533
- def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
534
- tag=None,
535
- exclude_frozen_parameters=False,
536
- lazy_mode=False):
537
- """
538
- Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
539
- ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
540
- via a model hub.
541
-
542
- Args:
543
- - ``checkpoint_dir``: path to the desired checkpoint folder
544
- - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will attempt to load the tag from the 'latest' file, e.g., ``global_step14``
545
- - ``exclude_frozen_parameters``: exclude frozen parameters
546
- - ``lazy_mode``: get state_dict in lazy mode. It returns a dict of pseudo tensors instead of torch tensors, which is more memory efficient.
547
- Convert a pseudo tensor to a torch tensor by calling ``.contiguous()``
548
-
549
- Returns:
550
- - pytorch ``state_dict``
551
-
552
- A typical usage might be ::
553
-
554
- from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
555
- # do the training and checkpoint saving
556
- state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
557
- model = model.cpu() # move to cpu
558
- model.load_state_dict(state_dict)
559
- # submit to model hub or save the model to share with others
560
-
561
- In this example the ``model`` will no longer be usable in the deepspeed context of the same
562
- application, i.e. you will need to re-initialize the deepspeed engine, since
563
- ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
564
-
565
- If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
566
-
567
- Note: the above usage may not work if your application doesn't have sufficient free CPU memory.
568
- You may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
569
- the checkpoint. Or you can load state_dict in lazy mode ::
570
-
571
- from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
572
- state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True) # not on cpu
573
- for name, lazy_tensor in state_dict.item():
574
- tensor = lazy_tensor.contiguous() # to cpu
575
- print(name, tensor)
576
- # del tensor to release memory if it no longer in use
577
- """
-     if tag is None:
-         latest_path = os.path.join(checkpoint_dir, 'latest')
-         if os.path.isfile(latest_path):
-             with open(latest_path, 'r') as fd:
-                 tag = fd.read().strip()
-         else:
-             raise ValueError(f"Unable to find 'latest' file at {latest_path}")
-
-     ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
-
-     if not os.path.isdir(ds_checkpoint_dir):
-         raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
-
-     state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
-     if lazy_mode:
-         return state_dict
-     else:
-         return to_torch_tensor(state_dict)
-
-
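For orientation, the on-disk layout this function expects looks roughly like this (a hedged sketch; the exact shard file names inside the tag folder vary with ZeRO stage, precision, and world size):

    checkpoint-12/
        latest                      # one-line text file naming the tag, e.g. "global_step14"
        zero_to_fp32.py             # this script, saved alongside the checkpoint
        global_step14/
            mp_rank_00_model_states.pt
            zero_pp_rank_0_mp_rank_00_optim_states.pt
            ...
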
- def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
-                                                output_dir,
-                                                max_shard_size="5GB",
-                                                safe_serialization=False,
-                                                tag=None,
-                                                exclude_frozen_parameters=False):
-     """
-     Convert a ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
-     loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
-
-     Args:
-         - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
-         - ``output_dir``: directory for the pytorch fp32 state_dict output files
-         - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
-         - ``safe_serialization``: whether to save the model using ``safetensors`` or the traditional PyTorch way (that uses ``pickle``).
-         - ``tag``: checkpoint tag used as a unique identifier for the checkpoint, e.g., ``global_step14``. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder.
-         - ``exclude_frozen_parameters``: exclude frozen parameters
-     """
-
-     # Dependency pre-check
-     if safe_serialization:
-         try:
-             from safetensors.torch import save_file
-         except ImportError:
-             print('If you want to use `safe_serialization`, please `pip install safetensors`')
-             raise
-     if max_shard_size is not None:
-         try:
-             from huggingface_hub import split_torch_state_dict_into_shards
-         except ImportError:
-             print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
-             raise
-
-     # Convert the zero checkpoint to a state_dict
-     state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
-                                                           tag,
-                                                           exclude_frozen_parameters,
-                                                           lazy_mode=True)
-
-     # Shard the model if it is too big.
-     weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
-     if max_shard_size is not None:
-         filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
-         # a memory-efficient approach for sharding
-         empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True)
-         state_dict_split = split_torch_state_dict_into_shards(empty_state_dict,
-                                                               filename_pattern=filename_pattern,
-                                                               max_shard_size=max_shard_size)
-     else:
-         from collections import namedtuple
-         StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
-         state_dict_split = StateDictSplit(is_sharded=False,
-                                           filename_to_tensors={weights_name: list(state_dict.keys())})
-
-     # Save the model shard by shard
-     os.makedirs(output_dir, exist_ok=True)
-     filename_to_tensors = state_dict_split.filename_to_tensors.items()
-     for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
-         shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors}
-         shard_state_dict = to_torch_tensor(shard_state_dict)
-         output_path = os.path.join(output_dir, shard_file)
-         if safe_serialization:
-             save_file(shard_state_dict, output_path, metadata={"format": "pt"})
-         else:
-             torch.save(shard_state_dict, output_path)
-         # release the memory of the current shard
-         for tensor_name in list(shard_state_dict.keys()):
-             del state_dict[tensor_name]
-             del shard_state_dict[tensor_name]
-         del shard_state_dict
-         gc.collect()
-
-     # Save the index if sharded
-     if state_dict_split.is_sharded:
-         index = {
-             "metadata": state_dict_split.metadata,
-             "weight_map": state_dict_split.tensor_to_filename,
-         }
-         save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
-         save_index_file = os.path.join(output_dir, save_index_file)
-         with open(save_index_file, "w", encoding="utf-8") as f:
-             content = json.dumps(index, indent=2, sort_keys=True) + "\n"
-             f.write(content)
-
-
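A hedged usage sketch for the converter above, with illustrative paths (assuming this script is importable; the first argument is the folder that contains ``latest``):

    from zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict  # assumes script on sys.path

    convert_zero_checkpoint_to_fp32_state_dict(
        "path/checkpoint-12",           # contains 'latest' and the global_step*/ tag folder
        "path/checkpoint-12-output",    # receives model*.safetensors plus the index if sharded
        max_shard_size="5GB",
        safe_serialization=True,
    )
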
- def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
-     """
-     1. Put the provided model on cpu
-     2. Convert a ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
-     3. Load it into the provided model
-
-     Args:
-         - ``model``: the model object to update
-         - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
-         - ``tag``: checkpoint tag used as a unique identifier for the checkpoint, e.g., ``global_step14``. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder.
-
-     Returns:
-         - ``model``: modified model
-
-     Make sure you have plenty of CPU memory available before you call this function. If you don't
-     have enough, use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
-     conveniently placed for you in the checkpoint folder.
-
-     A typical usage might be ::
-
-         from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
-         model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
-         # submit to model hub or save the model to share with others
-
-     Note that once this has run, the ``model`` will no longer be usable in the deepspeed context
-     of the same application, i.e. you will need to re-initialize the deepspeed engine, since
-     ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
-     """
-     logger.info("Extracting fp32 weights")
-     state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
-
-     logger.info("Overwriting model with fp32 weights")
-     model = model.cpu()
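-     # strict=False tolerates key mismatches between the consolidated checkpoint
-     # and the live model (e.g. when frozen parameters were excluded at save time)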
-     model.load_state_dict(state_dict, strict=False)
-
-     return model
-
-
- if __name__ == "__main__":
-     parser = argparse.ArgumentParser()
-     parser.add_argument("checkpoint_dir",
-                         type=str,
-                         help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
-     parser.add_argument("output_dir",
-                         type=str,
-                         help="directory for the pytorch fp32 state_dict output files "
-                         "(e.g. path/checkpoint-12-output/)")
-     parser.add_argument(
-         "--max_shard_size",
-         type=str,
-         default="5GB",
-         help="The maximum size for a checkpoint before being sharded. Each checkpoint shard will then be of a size "
-         "lower than this value. If expressed as a string, it needs to be digits followed by a unit (like `5MB`). "
-         "We default it to 5GB so that models can run easily on free-tier Google Colab instances "
-         "without CPU OOM issues.")
-     parser.add_argument(
-         "--safe_serialization",
-         default=False,
-         action='store_true',
-         help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
-     parser.add_argument("-t",
-                         "--tag",
-                         type=str,
-                         default=None,
-                         help="checkpoint tag used as a unique identifier for the checkpoint, e.g., global_step1")
-     parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
-     parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
-     args = parser.parse_args()
-
-     debug = args.debug
-
-     convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
-                                                args.output_dir,
-                                                max_shard_size=args.max_shard_size,
-                                                safe_serialization=args.safe_serialization,
-                                                tag=args.tag,
-                                                exclude_frozen_parameters=args.exclude_frozen_parameters)
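The same conversion run offline, as the docstrings above recommend when CPU memory is tight (illustrative paths; the flags map one-to-one onto the arguments defined here):

    python zero_to_fp32.py path/checkpoint-12 path/checkpoint-12-output \
        --safe_serialization --max_shard_size 5GB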