owl10 committed
Commit a219443 · verified · 1 Parent(s): ac69033

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
added_tokens.json ADDED
@@ -0,0 +1,28 @@
+ {
+ "</think>": 151668,
+ "</tool_call>": 151658,
+ "</tool_response>": 151666,
+ "<think>": 151667,
+ "<tool_call>": 151657,
+ "<tool_response>": 151665,
+ "<|box_end|>": 151649,
+ "<|box_start|>": 151648,
+ "<|endoftext|>": 151643,
+ "<|file_sep|>": 151664,
+ "<|fim_middle|>": 151660,
+ "<|fim_pad|>": 151662,
+ "<|fim_prefix|>": 151659,
+ "<|fim_suffix|>": 151661,
+ "<|im_end|>": 151645,
+ "<|im_start|>": 151644,
+ "<|image_pad|>": 151655,
+ "<|object_ref_end|>": 151647,
+ "<|object_ref_start|>": 151646,
+ "<|quad_end|>": 151651,
+ "<|quad_start|>": 151650,
+ "<|repo_name|>": 151663,
+ "<|video_pad|>": 151656,
+ "<|vision_end|>": 151653,
+ "<|vision_pad|>": 151654,
+ "<|vision_start|>": 151652
+ }
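Note that added_tokens.json lists only the base Qwen special tokens (ids 151643-151668); the six camera-view tokens registered in args.json below do not appear here. A minimal sketch for checking the mapping against the shipped tokenizer, assuming the repo has been downloaded to a local directory "./checkpoint" (hypothetical path):

# Sketch: verify added_tokens.json against the tokenizer in this repo.
# "./checkpoint" is a hypothetical local clone of the repository.
import json
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./checkpoint")
with open("./checkpoint/added_tokens.json") as f:
    added = json.load(f)

for token, expected_id in added.items():
    assert tokenizer.convert_tokens_to_ids(token) == expected_id, token
print(f"all {len(added)} added tokens resolve to their recorded ids")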
args.json ADDED
@@ -0,0 +1,718 @@
+ {
+ "use_ray": false,
+ "ray_exp_name": null,
+ "device_groups": null,
+ "model": "/high_perf_store3/world-model/zhuzhenxin/ckpts/Qwen3-VL-2B-Instruct/",
+ "model_type": "qwen3_vl",
+ "model_revision": null,
+ "task_type": "causal_lm",
+ "torch_dtype": "bfloat16",
+ "attn_impl": null,
+ "new_special_tokens": [
+ "<FRONT_VIEW>",
+ "<FRONT_LEFT_VIEW>",
+ "<FRONT_RIGHT_VIEW>",
+ "<BACK_LEFT_VIEW>",
+ "<BACK_RIGHT_VIEW>",
+ "<BACK_VIEW>"
+ ],
+ "num_labels": null,
+ "problem_type": null,
+ "rope_scaling": {
+ "mrope_interleaved": true,
+ "mrope_section": [
+ 24,
+ 20,
+ 20
+ ],
+ "rope_type": "default"
+ },
+ "device_map": null,
+ "max_memory": {},
+ "max_model_len": null,
+ "local_repo_path": null,
+ "init_strategy": null,
+ "template": "qwen3_vl",
+ "system": "Generalist Autonomous Driving Agent\nRole: You are an advanced, multimodal AI brain for an autonomous vehicle, capable of Perception, Reasoning, and Planning. Your goal is to drive safely, follow instructions, and deeply understand the dynamic world around you.\n\nContext & Coordinate System\n- Ego-Centric View: You are at the origin (0,0). The X-axis represents the lateral distance (perpendicular), and the Y-axis represents the longitudinal distance (forward).\n- Inputs: You receive multi-view visual observations (<FRONT_VIEW>, <BACK_VIEW>, etc.), historical ego-motion, and vehicle states (velocity, acceleration).\n\nCore Capabilities\n1. **Driving & Planning**:\n - Objective: Generate a safe, comfortable, and feasible 3-second trajectory (6 waypoints, 0.5s interval).\n - Constraints: Strictly adhere to traffic rules, avoid collisions, and respect kinematic limits.\n - Output Format: A sequence of coordinates [(x1,y1), ..., (x6,y6)].\n\n2. **Reasoning & VQA** (Chain-of-Thought):\n - Tasks: Analyze traffic scenes, explain causal logic (e.g., \"Why stop?\"), identify hazards, and answer queries about the environment (weather, road layout, traffic lights).\n - Reasoning: Break down complex scenarios into step-by-step logic, grounding your answers in visual evidence.\n\n3. **Instruction Following & Grounding**:\n - Tasks: Execute navigation commands (e.g., \"Park behind the red truck\") and ground textual descriptions to specific visual regions or objects.\n\n4. **Perception & World Modeling** (Future & Current State):\n - Tasks: Detect and track objects, predict their future motion, and estimate 3D occupancy or scene geometry (Gaussian Splatting/Occ).\n - Understanding: Map semantic elements (lanes, crossings) and dynamic agents into a coherent world model.\n\nInstructions\n- For **Planning** tasks: Output the \"Trajectory\".\n- For **QA/Reasoning** tasks: Provide a clear, logical, and helpful text response.\n- For **Perception** tasks: Output structured descriptions or specific formats as requested.\n\nAlways prioritize safety and clarity in your responses.\n",
+ "max_length": 16384,
+ "truncation_strategy": "delete",
+ "max_pixels": null,
+ "agent_template": null,
+ "norm_bbox": null,
+ "use_chat_template": true,
+ "padding_side": "right",
+ "padding_free": true,
+ "loss_scale": "default",
+ "sequence_parallel_size": 1,
+ "template_backend": "swift",
+ "response_prefix": null,
+ "enable_thinking": null,
+ "add_non_thinking_prefix": true,
+ "dataset": [
+ "/high_perf_store3/world-model/yongkangli/ABCDEFG_NISHIDASHABI/A/B/UniDriveVLA/Bench2Drive/data/b2d_planning_qa_train_residual.jsonl",
+ "/high_perf_store3/world-model/yongkangli/Dataset_vqa/Orion_Data/train_converted_processed.jsonl",
+ "/high_perf_store3/world-model/yongkangli/B2D/Bench2DriveZoo-tcp-admlp/output_final_modified_finalview_processed.jsonl",
+ "/high_perf_store3/world-model/yongkangli/finevision_subset_cleaned.jsonl#1141184"
+ ],
+ "val_dataset": [],
+ "cached_dataset": [],
+ "cached_val_dataset": [],
+ "split_dataset_ratio": 0.01,
+ "data_seed": 42,
+ "dataset_num_proc": 32,
+ "load_from_cache_file": true,
+ "dataset_shuffle": true,
+ "val_dataset_shuffle": false,
+ "streaming": false,
+ "interleave_prob": null,
+ "stopping_strategy": "first_exhausted",
+ "shuffle_buffer_size": 1000,
+ "download_mode": "reuse_dataset_if_exists",
+ "columns": {},
+ "strict": false,
+ "remove_unused_columns": true,
+ "model_name": null,
+ "model_author": null,
+ "custom_dataset_info": [],
+ "quant_method": null,
+ "quant_bits": null,
+ "hqq_axis": null,
+ "bnb_4bit_compute_dtype": "bfloat16",
+ "bnb_4bit_quant_type": "nf4",
+ "bnb_4bit_use_double_quant": true,
+ "bnb_4bit_quant_storage": null,
+ "max_new_tokens": null,
+ "temperature": 0.9,
+ "top_k": 50,
+ "top_p": 0.9,
+ "repetition_penalty": 1.0,
+ "num_beams": 1,
+ "stream": false,
+ "stop_words": [],
+ "logprobs": false,
+ "top_logprobs": null,
+ "structured_outputs_regex": null,
+ "ckpt_dir": null,
+ "lora_modules": [],
+ "tuner_backend": "peft",
+ "train_type": "full",
+ "adapters": [],
+ "external_plugins": [],
+ "seed": 42,
+ "model_kwargs": {},
+ "load_args": false,
+ "load_data_args": false,
+ "packing": true,
+ "packing_length": 16384,
+ "packing_num_proc": 1,
+ "lazy_tokenize": false,
+ "custom_register_path": [],
+ "use_hf": false,
+ "hub_token": null,
+ "ddp_timeout": 18000000,
+ "ddp_backend": null,
+ "ignore_args_error": false,
+ "use_swift_lora": false,
+ "freeze_llm": false,
+ "freeze_vit": false,
+ "freeze_aligner": false,
+ "freeze_parameters": [],
+ "freeze_parameters_regex": null,
+ "freeze_parameters_ratio": 0.0,
+ "trainable_parameters": [],
+ "trainable_parameters_regex": null,
+ "adapter_load": null,
+ "target_modules": [
+ "all-linear"
+ ],
+ "target_regex": null,
+ "modules_to_save": [],
+ "lora_rank": 8,
+ "lora_alpha": 32,
+ "lora_dropout": 0.05,
+ "lora_bias": "none",
+ "lora_dtype": null,
+ "use_rslora": false,
+ "rlhf_type": null,
+ "ref_load": null,
+ "ref_adapter_load": null,
+ "beta": null,
+ "rpo_alpha": null,
+ "reference_free": false,
+ "label_smoothing": 0.0,
+ "f_divergence_type": "reverse_kl",
+ "loss_type": null,
+ "desirable_weight": 1.0,
+ "undesirable_weight": 1.0,
+ "calculate_KL": null,
+ "center_rewards_coefficient": null,
+ "teacher_model": null,
+ "teacher_model_type": null,
+ "teacher_model_revision": null,
+ "lmbda": 0.5,
+ "seq_kd": false,
+ "offload_teacher_model": false,
+ "sft_alpha": 0.0,
+ "generation_batch_size": null,
+ "steps_per_generation": null,
+ "num_generations": 8,
+ "num_generations_eval": null,
+ "max_completion_length": 512,
+ "importance_sampling_level": "token",
+ "tau_pos": 1.0,
+ "tau_neg": 1.05,
+ "epsilon": 0.2,
+ "epsilon_high": null,
+ "delta": null,
+ "use_vllm": true,
+ "vllm_mode": null,
+ "vllm_enable_prefix_caching": true,
+ "vllm_gpu_memory_utilization": 0.9,
+ "vllm_tensor_parallel_size": 1,
+ "vllm_max_model_len": null,
+ "vllm_enforce_eager": false,
+ "vllm_limit_mm_per_prompt": null,
+ "vllm_disable_cascade_attn": false,
+ "vllm_max_num_seqs": null,
+ "vllm_mm_processor_cache_gb": null,
+ "vllm_engine_kwargs": null,
+ "sleep_level": 0,
+ "offload_optimizer": false,
+ "offload_model": false,
+ "offload_bridge": false,
+ "vllm_server_base_url": null,
+ "vllm_server_host": null,
+ "vllm_server_port": [
+ 8000
+ ],
+ "vllm_server_timeout": 240.0,
+ "vllm_server_group_port": null,
+ "reward_funcs": [],
+ "reward_weights": null,
+ "cosine_min_len_value_wrong": -0.5,
+ "cosine_max_len_value_wrong": 0.0,
+ "cosine_min_len_value_correct": 1.0,
+ "cosine_max_len_value_correct": 0.5,
+ "cosine_max_len": null,
+ "repetition_n_grams": 3,
+ "repetition_max_penalty": -1.0,
+ "soft_max_length": null,
+ "soft_cache_length": null,
+ "dynamic_sample": false,
+ "max_resample_times": 3,
+ "overlong_filter": false,
+ "scale_rewards": "group",
+ "advantage_estimator": "grpo",
+ "kl_in_reward": false,
+ "wandb_log_unique_prompts": null,
+ "log_completions": false,
+ "rollout_importance_sampling_mode": null,
+ "rollout_importance_sampling_threshold": 2.0,
+ "log_rollout_offpolicy_metrics": false,
+ "off_policy_sequence_mask_delta": null,
+ "log_entropy": false,
+ "top_entropy_quantile": 1.0,
+ "reward_model": null,
+ "reward_model_plugin": null,
+ "sync_ref_model": false,
+ "ref_model_sync_steps": 512,
+ "ref_model_mixup_alpha": 0.6,
+ "async_generate": false,
+ "move_model_batches": null,
+ "multi_turn_scheduler": null,
+ "max_turns": null,
+ "completion_length_limit_scope": "per_round",
+ "vllm_server_pass_dataset": false,
+ "num_iterations": 1,
+ "check_model": true,
+ "padded_vocab_size": 151936,
+ "initialize_embedding": false,
+ "mlp_padding_free": false,
+ "load_safetensors": true,
+ "save_safetensors": true,
+ "ref_model": null,
+ "ref_adapters": [],
+ "merge_lora": true,
+ "max_shard_size": "5GB",
+ "train_dataloader_shuffle": true,
+ "dataloader_pin_memory": true,
+ "dataloader_persistent_workers": true,
+ "dataloader_prefetch_factor": 2,
+ "group_by_length": false,
+ "architectures": "Qwen3VLForConditionalGeneration",
+ "llm_architectures": "Qwen3VLForConditionalGeneration",
+ "max_epochs": 3,
+ "enable_dft_loss": false,
+ "enable_channel_loss": false,
+ "save_strategy": "steps",
+ "original_max_position_embeddings": null,
+ "partial_rotary_factor": null,
+ "use_shared_expert_gate": false,
+ "report_to": null,
+ "vit_gradient_checkpointing": true,
+ "vit_lr": null,
+ "aligner_lr": null,
+ "gradient_checkpointing_kwargs": null,
+ "linear_num_value_heads": null,
+ "linear_num_key_heads": null,
+ "linear_key_head_dim": null,
+ "linear_value_head_dim": null,
+ "linear_conv_kernel_dim": null,
+ "layer_types": null,
+ "mrope_interleaved": true,
+ "micro_batch_size": 1,
+ "global_batch_size": 128,
+ "recompute_granularity": "selective",
+ "recompute_method": null,
+ "recompute_num_layers": null,
+ "recompute_modules": [
+ "core_attn"
+ ],
+ "use_cpu_initialization": false,
+ "deterministic_mode": false,
+ "train_iters": null,
+ "log_interval": 5,
+ "tensorboard_dir": "/high_perf_store3/world-model/yongkangli/ms-swift-main/megatron_output/Qwen3-VL-2B-Instruct-3-7-b2d/v1-20260213-201159/runs",
+ "no_masked_softmax_fusion": false,
+ "no_bias_dropout_fusion": false,
+ "no_bias_swiglu_fusion": false,
+ "no_rope_fusion": false,
+ "no_gradient_accumulation_fusion": false,
+ "cross_entropy_loss_fusion": true,
+ "cross_entropy_fusion_impl": "native",
+ "calculate_per_token_loss": true,
+ "use_flash_attn": false,
+ "attention_backend": "flash",
+ "optimizer": "adam",
+ "optimizer_cpu_offload": false,
+ "optimizer_offload_fraction": 1.0,
+ "use_precision_aware_optimizer": false,
+ "main_grads_dtype": "fp32",
+ "main_params_dtype": "fp32",
+ "exp_avg_dtype": "fp32",
+ "exp_avg_sq_dtype": "fp32",
+ "dataloader_type": "cyclic",
+ "manual_gc": false,
+ "manual_gc_interval": 0,
+ "lr": 4e-05,
+ "lr_decay_style": "cosine",
+ "lr_decay_iters": null,
+ "lr_warmup_iters": 0,
+ "lr_warmup_fraction": 0.05,
+ "min_lr": 1e-06,
+ "weight_decay": 0.1,
+ "clip_grad": 1.0,
+ "adam_beta1": 0.9,
+ "adam_beta2": 0.95,
+ "adam_eps": 1e-08,
+ "sgd_momentum": 0.9,
+ "save": "/high_perf_store3/world-model/yongkangli/ms-swift-main/megatron_output/Qwen3-VL-2B-Instruct-3-7-b2d/v1-20260213-201159",
+ "save_interval": 500,
+ "save_retain_interval": null,
+ "no_save_optim": true,
+ "no_save_rng": true,
+ "load": null,
+ "no_load_optim": false,
+ "no_load_rng": false,
+ "finetune": true,
+ "ckpt_format": "torch_dist",
+ "no_initialization": true,
+ "auto_detect_ckpt_format": true,
+ "exit_on_missing_checkpoint": true,
+ "async_save": false,
+ "use_persistent_ckpt_worker": false,
+ "ckpt_fully_parallel_load": false,
+ "ckpt_assume_constant_structure": false,
+ "distributed_backend": "nccl",
+ "local_rank": 0,
+ "use_distributed_optimizer": true,
+ "tensor_model_parallel_size": 1,
+ "pipeline_model_parallel_size": 1,
+ "decoder_first_pipeline_num_layers": null,
+ "decoder_last_pipeline_num_layers": null,
+ "account_for_embedding_in_pipeline_split": false,
+ "account_for_loss_in_pipeline_split": false,
+ "sequence_parallel": true,
+ "context_parallel_size": 1,
+ "tp_comm_overlap": false,
+ "overlap_grad_reduce": false,
+ "overlap_param_gather": false,
+ "distributed_timeout_minutes": 300000,
+ "num_layers_per_virtual_pipeline_stage": null,
+ "num_virtual_stages_per_pipeline_rank": null,
+ "microbatch_group_size_per_virtual_pipeline_stage": null,
+ "pipeline_model_parallel_layout": null,
+ "num_layers": 28,
+ "hidden_size": 2048,
+ "ffn_hidden_size": 6144,
+ "num_attention_heads": 16,
+ "group_query_attention": true,
+ "num_query_groups": 8,
+ "softmax_type": "vanilla",
+ "window_size": null,
+ "window_attn_skip_freq": null,
+ "max_position_embeddings": 262144,
+ "position_embedding_type": "mrope",
+ "mrope_section": [
+ 24,
+ 20,
+ 20
+ ],
+ "rotary_base": 5000000,
+ "rotary_percent": 1.0,
+ "rotary_interleaved": false,
+ "normalization": "RMSNorm",
+ "norm_epsilon": 1e-06,
+ "swiglu": true,
+ "quick_geglu": false,
+ "activation_func_clamp_value": null,
+ "glu_linear_offset": 0.0,
+ "untie_embeddings_and_output_weights": false,
+ "disable_bias_linear": true,
+ "add_qkv_bias": false,
+ "attention_dropout": 0.0,
+ "hidden_dropout": 0.0,
+ "kv_channels": 128,
+ "qk_layernorm": true,
+ "qk_l2_norm": null,
+ "no_rope_freq": null,
+ "moe_apply_probs_on_input": null,
+ "transformer_impl": "transformer_engine",
+ "num_experts": null,
+ "moe_layer_freq": 1,
+ "moe_ffn_hidden_size": null,
+ "moe_shared_expert_intermediate_size": null,
+ "moe_router_topk": 2,
+ "moe_router_num_groups": null,
+ "moe_router_group_topk": null,
+ "moe_router_pre_softmax": false,
+ "moe_router_dtype": "fp32",
+ "moe_router_score_function": "softmax",
+ "moe_router_bias_update_rate": null,
+ "moe_router_enable_expert_bias": false,
+ "moe_router_topk_scaling_factor": null,
+ "moe_router_load_balancing_type": "aux_loss",
+ "expert_model_parallel_size": 1,
+ "expert_tensor_parallel_size": 1,
+ "moe_token_dispatcher_type": null,
+ "moe_enable_deepep": false,
+ "moe_grouped_gemm": true,
+ "moe_permute_fusion": false,
+ "moe_aux_loss_coeff": 0.0,
+ "moe_z_loss_coeff": null,
+ "moe_shared_expert_overlap": false,
+ "moe_layer_recompute": false,
+ "moe_expert_capacity_factor": null,
+ "moe_pad_expert_input_to_capacity": false,
+ "moe_token_drop_policy": null,
+ "multi_latent_attention": false,
+ "q_lora_rank": null,
+ "kv_lora_rank": 32,
+ "qk_head_dim": 128,
+ "qk_pos_emb_head_dim": 64,
+ "mtp_num_layers": null,
+ "mtp_loss_scaling_factor": 0.1,
+ "fp8_format": null,
+ "fp8_recipe": "delayed",
+ "fp8_amax_history_len": 1024,
+ "fp8_amax_compute_algo": "max",
+ "fp8_param_gather": false,
+ "fp16": false,
+ "bf16": true,
+ "apply_query_key_layer_scaling": false,
+ "attention_softmax_in_fp32": true,
+ "log_params_norm": false,
+ "log_throughput": false,
+ "tensorboard_log_interval": 1,
+ "tensorboard_queue_size": 50,
+ "log_timers_to_tensorboard": true,
+ "no_log_learning_rate_to_tensorboard": false,
+ "log_validation_ppl_to_tensorboard": true,
+ "log_memory_to_tensorboard": true,
+ "logging_level": null,
+ "wandb_project": "megatron-swift",
+ "wandb_exp_name": "/high_perf_store3/world-model/yongkangli/ms-swift-main/megatron_output/Qwen3-VL-2B-Instruct-3-7-b2d/v1-20260213-201159",
+ "wandb_save_dir": null,
+ "eval_iters": -1,
+ "eval_interval": 500,
+ "seq_length": 16384,
+ "num_workers": 32,
+ "no_data_sharding": false,
+ "megatron_extra_kwargs": {},
+ "add_version": true,
+ "rank": 0,
+ "global_world_size": 32,
+ "local_world_size": 8,
+ "model_suffix": "Qwen3-VL-2B-Instruct",
+ "model_info": "ModelInfo(model_type='qwen3_vl', model_dir='/high_perf_store3/world-model/zhuzhenxin/ckpts/Qwen3-VL-2B-Instruct', torch_dtype=torch.bfloat16, max_model_len=262144, quant_method=None, quant_bits=None, rope_scaling={'mrope_interleaved': True, 'mrope_section': [24, 20, 20], 'rope_type': 'default'}, is_moe_model=False, is_multimodal=True, config=None, task_type='causal_lm', num_labels=None)",
+ "model_meta": "ModelMeta(model_type='qwen3_vl', model_groups=[ModelGroup(models=[Model(ms_model_id='Qwen/Qwen3-VL-2B-Instruct', hf_model_id='Qwen/Qwen3-VL-2B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-VL-2B-Thinking', hf_model_id='Qwen/Qwen3-VL-2B-Thinking', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-VL-2B-Instruct-FP8', hf_model_id='Qwen/Qwen3-VL-2B-Instruct-FP8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-VL-2B-Thinking-FP8', hf_model_id='Qwen/Qwen3-VL-2B-Thinking-FP8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-VL-4B-Instruct', hf_model_id='Qwen/Qwen3-VL-4B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-VL-4B-Thinking', hf_model_id='Qwen/Qwen3-VL-4B-Thinking', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-VL-4B-Instruct-FP8', hf_model_id='Qwen/Qwen3-VL-4B-Instruct-FP8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-VL-4B-Thinking-FP8', hf_model_id='Qwen/Qwen3-VL-4B-Thinking-FP8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-VL-8B-Instruct', hf_model_id='Qwen/Qwen3-VL-8B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-VL-8B-Thinking', hf_model_id='Qwen/Qwen3-VL-8B-Thinking', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-VL-8B-Instruct-FP8', hf_model_id='Qwen/Qwen3-VL-8B-Instruct-FP8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-VL-8B-Thinking-FP8', hf_model_id='Qwen/Qwen3-VL-8B-Thinking-FP8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-VL-32B-Instruct', hf_model_id='Qwen/Qwen3-VL-32B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-VL-32B-Thinking', hf_model_id='Qwen/Qwen3-VL-32B-Thinking', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-VL-32B-Instruct-FP8', hf_model_id='Qwen/Qwen3-VL-32B-Instruct-FP8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-VL-32B-Thinking-FP8', hf_model_id='Qwen/Qwen3-VL-32B-Thinking-FP8', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[])], template='qwen3_vl', get_function=<function get_model_tokenizer_qwen3_vl at 0x7fc8e8576d40>, model_arch=MultiModelKeys(arch_name='qwen3_vl', embedding=None, module_list=None, lm_head=None, q_proj=None, k_proj=None, v_proj=None, o_proj=None, attention=None, mlp=None, down_proj=None, qkv_proj=None, qk_proj=None, qa_proj=None, qb_proj=None, kv_proj=None, kva_proj=None, kvb_proj=None, language_model=['model.language_model', 'lm_head'], aligner=['model.visual.merger', 'model.visual.deepstack_merger_list'], vision_tower=['model.visual'], generator=[]), architectures=['Qwen3VLForConditionalGeneration'], additional_saved_files=[], torch_dtype=None, is_multimodal=True, is_reward=False, is_reranker=False, task_type=None, ignore_patterns=None, requires=['transformers>=4.57', 'qwen_vl_utils>=0.0.14', 'decord'], tags=['vision', 'video'])",
+ "model_dir": "/high_perf_store3/world-model/zhuzhenxin/ckpts/Qwen3-VL-2B-Instruct",
+ "_val_dataset_exists": true,
+ "hub": "<class 'swift.hub.hub.MSHub'>",
+ "megatron_model_meta": "MegatronModelMeta(megatron_model_type='qwen3_vl', model_types=['qwen3_vl', 'qwen3_moe_vl'], is_multimodal=True, bridge_cls=<class 'swift.megatron.model.gpt_bridge.MultimodalGPTBridge'>, model_cls=<class 'swift.megatron.model.mm_gpt.qwen3_vl.Qwen3VLGPTModel'>, get_transformer_layer_spec=None, model_provider=<function model_provider at 0x7fc86522f240>, visual_cls=<class 'swift.megatron.model.mm_gpt.qwen3_vl.Qwen3VL_Vit'>, get_mtp_block_spec=None, extra_args_provider=None)",
+ "extra_args": {
+ "model_dir": "/high_perf_store3/world-model/zhuzhenxin/ckpts/Qwen3-VL-2B-Instruct",
+ "is_multimodal": true,
+ "hf_model_type": "qwen3_vl",
+ "use_ray": false,
+ "ray_exp_name": null,
+ "device_groups": null,
+ "model": "/high_perf_store3/world-model/zhuzhenxin/ckpts/Qwen3-VL-2B-Instruct/",
+ "model_type": "qwen3_vl",
+ "model_revision": null,
+ "task_type": "causal_lm",
+ "torch_dtype": "bfloat16",
+ "attn_impl": null,
+ "new_special_tokens": [
+ "<FRONT_VIEW>",
+ "<FRONT_LEFT_VIEW>",
+ "<FRONT_RIGHT_VIEW>",
+ "<BACK_LEFT_VIEW>",
+ "<BACK_RIGHT_VIEW>",
+ "<BACK_VIEW>"
+ ],
+ "num_labels": null,
+ "problem_type": null,
+ "rope_scaling": {
+ "mrope_interleaved": true,
+ "mrope_section": [
+ 24,
+ 20,
+ 20
+ ],
+ "rope_type": "default"
+ },
+ "device_map": null,
+ "max_memory": {},
+ "max_model_len": null,
+ "local_repo_path": null,
+ "init_strategy": null,
+ "template": "qwen3_vl",
+ "system": "Generalist Autonomous Driving Agent\nRole: You are an advanced, multimodal AI brain for an autonomous vehicle, capable of Perception, Reasoning, and Planning. Your goal is to drive safely, follow instructions, and deeply understand the dynamic world around you.\n\nContext & Coordinate System\n- Ego-Centric View: You are at the origin (0,0). The X-axis represents the lateral distance (perpendicular), and the Y-axis represents the longitudinal distance (forward).\n- Inputs: You receive multi-view visual observations (<FRONT_VIEW>, <BACK_VIEW>, etc.), historical ego-motion, and vehicle states (velocity, acceleration).\n\nCore Capabilities\n1. **Driving & Planning**:\n - Objective: Generate a safe, comfortable, and feasible 3-second trajectory (6 waypoints, 0.5s interval).\n - Constraints: Strictly adhere to traffic rules, avoid collisions, and respect kinematic limits.\n - Output Format: A sequence of coordinates [(x1,y1), ..., (x6,y6)].\n\n2. **Reasoning & VQA** (Chain-of-Thought):\n - Tasks: Analyze traffic scenes, explain causal logic (e.g., \"Why stop?\"), identify hazards, and answer queries about the environment (weather, road layout, traffic lights).\n - Reasoning: Break down complex scenarios into step-by-step logic, grounding your answers in visual evidence.\n\n3. **Instruction Following & Grounding**:\n - Tasks: Execute navigation commands (e.g., \"Park behind the red truck\") and ground textual descriptions to specific visual regions or objects.\n\n4. **Perception & World Modeling** (Future & Current State):\n - Tasks: Detect and track objects, predict their future motion, and estimate 3D occupancy or scene geometry (Gaussian Splatting/Occ).\n - Understanding: Map semantic elements (lanes, crossings) and dynamic agents into a coherent world model.\n\nInstructions\n- For **Planning** tasks: Output the \"Trajectory\".\n- For **QA/Reasoning** tasks: Provide a clear, logical, and helpful text response.\n- For **Perception** tasks: Output structured descriptions or specific formats as requested.\n\nAlways prioritize safety and clarity in your responses.\n",
+ "max_length": 16384,
+ "truncation_strategy": "delete",
+ "max_pixels": null,
+ "agent_template": null,
+ "norm_bbox": null,
+ "use_chat_template": true,
+ "padding_side": "right",
+ "padding_free": true,
+ "sequence_parallel_size": 1,
+ "template_backend": "swift",
+ "response_prefix": null,
+ "enable_thinking": null,
+ "add_non_thinking_prefix": true,
+ "dataset": [
+ "/high_perf_store3/world-model/yongkangli/ABCDEFG_NISHIDASHABI/A/B/UniDriveVLA/Bench2Drive/data/b2d_planning_qa_train_residual.jsonl",
+ "/high_perf_store3/world-model/yongkangli/Dataset_vqa/Orion_Data/train_converted_processed.jsonl",
+ "/high_perf_store3/world-model/yongkangli/B2D/Bench2DriveZoo-tcp-admlp/output_final_modified_finalview_processed.jsonl",
+ "/high_perf_store3/world-model/yongkangli/finevision_subset_cleaned.jsonl#1141184"
+ ],
+ "val_dataset": [],
+ "cached_dataset": [],
+ "cached_val_dataset": [],
+ "split_dataset_ratio": 0.01,
+ "data_seed": 42,
+ "dataset_num_proc": 32,
+ "load_from_cache_file": true,
+ "dataset_shuffle": true,
+ "val_dataset_shuffle": false,
+ "streaming": false,
+ "interleave_prob": null,
+ "stopping_strategy": "first_exhausted",
+ "shuffle_buffer_size": 1000,
+ "download_mode": "reuse_dataset_if_exists",
+ "columns": {},
+ "strict": false,
+ "remove_unused_columns": true,
+ "model_name": null,
+ "model_author": null,
+ "custom_dataset_info": [],
+ "quant_method": null,
+ "quant_bits": null,
+ "hqq_axis": null,
+ "bnb_4bit_compute_dtype": "bfloat16",
+ "bnb_4bit_quant_type": "nf4",
+ "bnb_4bit_use_double_quant": true,
+ "bnb_4bit_quant_storage": null,
+ "max_new_tokens": null,
+ "temperature": 0.9,
+ "top_k": 50,
+ "top_p": 0.9,
+ "repetition_penalty": 1.0,
+ "num_beams": 1,
+ "stream": false,
+ "stop_words": [],
+ "logprobs": false,
+ "top_logprobs": null,
+ "structured_outputs_regex": null,
+ "ckpt_dir": null,
+ "lora_modules": [],
+ "tuner_backend": "peft",
+ "train_type": "full",
+ "adapters": [],
+ "external_plugins": [],
+ "model_kwargs": {},
+ "load_args": false,
+ "load_data_args": false,
+ "packing": true,
+ "packing_length": 16384,
+ "packing_num_proc": 1,
+ "lazy_tokenize": false,
+ "custom_register_path": [],
+ "use_hf": false,
+ "hub_token": null,
+ "ddp_timeout": 18000000,
+ "ddp_backend": null,
+ "ignore_args_error": false,
+ "use_swift_lora": false,
+ "freeze_llm": false,
+ "freeze_vit": false,
+ "freeze_aligner": false,
+ "freeze_parameters": [],
+ "freeze_parameters_regex": null,
+ "freeze_parameters_ratio": 0.0,
+ "trainable_parameters": [],
+ "trainable_parameters_regex": null,
+ "adapter_load": null,
+ "target_modules": [
+ "all-linear"
+ ],
+ "target_regex": null,
+ "modules_to_save": [],
+ "lora_rank": 8,
+ "lora_alpha": 32,
+ "lora_dropout": 0.05,
+ "lora_bias": "none",
+ "lora_dtype": null,
+ "use_rslora": false,
+ "rlhf_type": null,
+ "ref_load": null,
+ "ref_adapter_load": null,
+ "beta": null,
+ "rpo_alpha": null,
+ "reference_free": false,
+ "label_smoothing": 0.0,
+ "f_divergence_type": "reverse_kl",
+ "loss_type": null,
+ "desirable_weight": 1.0,
+ "undesirable_weight": 1.0,
+ "calculate_KL": null,
+ "center_rewards_coefficient": null,
+ "teacher_model": null,
+ "teacher_model_type": null,
+ "teacher_model_revision": null,
+ "lmbda": 0.5,
+ "seq_kd": false,
+ "offload_teacher_model": false,
+ "sft_alpha": 0.0,
+ "generation_batch_size": null,
+ "steps_per_generation": null,
+ "num_generations": 8,
+ "num_generations_eval": null,
+ "max_completion_length": 512,
+ "importance_sampling_level": "token",
+ "tau_pos": 1.0,
+ "tau_neg": 1.05,
+ "epsilon": 0.2,
+ "epsilon_high": null,
+ "delta": null,
+ "use_vllm": true,
+ "vllm_mode": null,
+ "vllm_enable_prefix_caching": true,
+ "vllm_gpu_memory_utilization": 0.9,
+ "vllm_tensor_parallel_size": 1,
+ "vllm_max_model_len": null,
+ "vllm_enforce_eager": false,
+ "vllm_limit_mm_per_prompt": null,
+ "vllm_disable_cascade_attn": false,
+ "vllm_max_num_seqs": null,
+ "vllm_mm_processor_cache_gb": null,
+ "vllm_engine_kwargs": null,
+ "sleep_level": 0,
+ "offload_optimizer": false,
+ "offload_model": false,
+ "offload_bridge": false,
+ "vllm_server_base_url": null,
+ "vllm_server_host": null,
+ "vllm_server_port": [
+ 8000
+ ],
+ "vllm_server_timeout": 240.0,
+ "vllm_server_group_port": null,
+ "reward_funcs": [],
+ "reward_weights": null,
+ "cosine_min_len_value_wrong": -0.5,
+ "cosine_max_len_value_wrong": 0.0,
+ "cosine_min_len_value_correct": 1.0,
+ "cosine_max_len_value_correct": 0.5,
+ "cosine_max_len": null,
+ "repetition_n_grams": 3,
+ "repetition_max_penalty": -1.0,
+ "soft_max_length": null,
+ "soft_cache_length": null,
+ "dynamic_sample": false,
+ "max_resample_times": 3,
+ "overlong_filter": false,
+ "scale_rewards": "group",
+ "advantage_estimator": "grpo",
+ "kl_in_reward": false,
+ "wandb_log_unique_prompts": null,
+ "log_completions": false,
+ "rollout_importance_sampling_mode": null,
+ "rollout_importance_sampling_threshold": 2.0,
+ "log_rollout_offpolicy_metrics": false,
+ "off_policy_sequence_mask_delta": null,
+ "log_entropy": false,
+ "top_entropy_quantile": 1.0,
+ "reward_model": null,
+ "reward_model_plugin": null,
+ "sync_ref_model": false,
+ "ref_model_sync_steps": 512,
+ "ref_model_mixup_alpha": 0.6,
+ "async_generate": false,
+ "move_model_batches": null,
+ "multi_turn_scheduler": null,
+ "max_turns": null,
+ "completion_length_limit_scope": "per_round",
+ "vllm_server_pass_dataset": false,
+ "num_iterations": 1,
+ "check_model": true,
+ "padded_vocab_size": 151936,
+ "initialize_embedding": false,
+ "mlp_padding_free": false,
+ "load_safetensors": true,
+ "save_safetensors": true,
+ "ref_model": null,
+ "ref_adapters": [],
+ "merge_lora": true,
+ "max_shard_size": "5GB",
+ "train_dataloader_shuffle": true,
+ "dataloader_pin_memory": true,
+ "dataloader_persistent_workers": true,
+ "dataloader_prefetch_factor": 2,
+ "group_by_length": false,
+ "architectures": "Qwen3VLForConditionalGeneration",
+ "llm_architectures": "Qwen3VLForConditionalGeneration",
+ "max_epochs": 3,
+ "enable_dft_loss": false,
+ "enable_channel_loss": false,
+ "save_strategy": "steps",
+ "original_max_position_embeddings": null,
+ "partial_rotary_factor": null,
+ "use_shared_expert_gate": false,
+ "report_to": null,
+ "vit_gradient_checkpointing": true,
+ "vit_lr": null,
+ "aligner_lr": null,
+ "gradient_checkpointing_kwargs": null,
+ "linear_num_value_heads": null,
+ "linear_num_key_heads": null,
+ "linear_key_head_dim": null,
+ "linear_value_head_dim": null,
+ "linear_conv_kernel_dim": null,
+ "layer_types": null,
+ "mrope_interleaved": true,
+ "add_version": true
+ }
+ }
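Two arguments above deserve a note. new_special_tokens registers six camera-view markers on top of the base vocabulary, and padded_vocab_size (151936) leaves headroom above the 151,669 ids the base tokenizer defines, so the extra ids fit without growing the padded embedding. A sketch of the equivalent tokenizer-side operation in plain transformers (illustrative only; ms-swift performs this internally):

# Sketch: register the camera-view tokens the way transformers would.
# Illustrative, not the ms-swift code path.
from transformers import AutoTokenizer

VIEW_TOKENS = ["<FRONT_VIEW>", "<FRONT_LEFT_VIEW>", "<FRONT_RIGHT_VIEW>",
               "<BACK_LEFT_VIEW>", "<BACK_RIGHT_VIEW>", "<BACK_VIEW>"]

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-VL-2B-Instruct")
num_added = tokenizer.add_special_tokens(
    {"additional_special_tokens": VIEW_TOKENS},
    replace_additional_special_tokens=False)
print(num_added, len(tokenizer))  # 6 new ids, still below padded_vocab_size
# model.resize_token_embeddings(len(tokenizer))  # only needed past the padding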
chat_template.jinja ADDED
@@ -0,0 +1,120 @@
+ {%- if tools %}
+ {{- '<|im_start|>system\n' }}
+ {%- if messages[0].role == 'system' %}
+ {%- if messages[0].content is string %}
+ {{- messages[0].content }}
+ {%- else %}
+ {%- for content in messages[0].content %}
+ {%- if 'text' in content %}
+ {{- content.text }}
+ {%- endif %}
+ {%- endfor %}
+ {%- endif %}
+ {{- '\n\n' }}
+ {%- endif %}
+ {{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
+ {%- for tool in tools %}
+ {{- "\n" }}
+ {{- tool | tojson }}
+ {%- endfor %}
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
+ {%- else %}
+ {%- if messages[0].role == 'system' %}
+ {{- '<|im_start|>system\n' }}
+ {%- if messages[0].content is string %}
+ {{- messages[0].content }}
+ {%- else %}
+ {%- for content in messages[0].content %}
+ {%- if 'text' in content %}
+ {{- content.text }}
+ {%- endif %}
+ {%- endfor %}
+ {%- endif %}
+ {{- '<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- set image_count = namespace(value=0) %}
+ {%- set video_count = namespace(value=0) %}
+ {%- for message in messages %}
+ {%- if message.role == "user" %}
+ {{- '<|im_start|>' + message.role + '\n' }}
+ {%- if message.content is string %}
+ {{- message.content }}
+ {%- else %}
+ {%- for content in message.content %}
+ {%- if content.type == 'image' or 'image' in content or 'image_url' in content %}
+ {%- set image_count.value = image_count.value + 1 %}
+ {%- if add_vision_id %}Picture {{ image_count.value }}: {% endif -%}
+ <|vision_start|><|image_pad|><|vision_end|>
+ {%- elif content.type == 'video' or 'video' in content %}
+ {%- set video_count.value = video_count.value + 1 %}
+ {%- if add_vision_id %}Video {{ video_count.value }}: {% endif -%}
+ <|vision_start|><|video_pad|><|vision_end|>
+ {%- elif 'text' in content %}
+ {{- content.text }}
+ {%- endif %}
+ {%- endfor %}
+ {%- endif %}
+ {{- '<|im_end|>\n' }}
+ {%- elif message.role == "assistant" %}
+ {{- '<|im_start|>' + message.role + '\n' }}
+ {%- if message.content is string %}
+ {{- message.content }}
+ {%- else %}
+ {%- for content_item in message.content %}
+ {%- if 'text' in content_item %}
+ {{- content_item.text }}
+ {%- endif %}
+ {%- endfor %}
+ {%- endif %}
+ {%- if message.tool_calls %}
+ {%- for tool_call in message.tool_calls %}
+ {%- if (loop.first and message.content) or (not loop.first) %}
+ {{- '\n' }}
+ {%- endif %}
+ {%- if tool_call.function %}
+ {%- set tool_call = tool_call.function %}
+ {%- endif %}
+ {{- '<tool_call>\n{"name": "' }}
+ {{- tool_call.name }}
+ {{- '", "arguments": ' }}
+ {%- if tool_call.arguments is string %}
+ {{- tool_call.arguments }}
+ {%- else %}
+ {{- tool_call.arguments | tojson }}
+ {%- endif %}
+ {{- '}\n</tool_call>' }}
+ {%- endfor %}
+ {%- endif %}
+ {{- '<|im_end|>\n' }}
+ {%- elif message.role == "tool" %}
+ {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
+ {{- '<|im_start|>user' }}
+ {%- endif %}
+ {{- '\n<tool_response>\n' }}
+ {%- if message.content is string %}
+ {{- message.content }}
+ {%- else %}
+ {%- for content in message.content %}
+ {%- if content.type == 'image' or 'image' in content or 'image_url' in content %}
+ {%- set image_count.value = image_count.value + 1 %}
+ {%- if add_vision_id %}Picture {{ image_count.value }}: {% endif -%}
+ <|vision_start|><|image_pad|><|vision_end|>
+ {%- elif content.type == 'video' or 'video' in content %}
+ {%- set video_count.value = video_count.value + 1 %}
+ {%- if add_vision_id %}Video {{ video_count.value }}: {% endif -%}
+ <|vision_start|><|video_pad|><|vision_end|>
+ {%- elif 'text' in content %}
+ {{- content.text }}
+ {%- endif %}
+ {%- endfor %}
+ {%- endif %}
+ {{- '\n</tool_response>' }}
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
+ {{- '<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {%- if add_generation_prompt %}
+ {{- '<|im_start|>assistant\n' }}
+ {%- endif %}
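This is the standard Qwen3-VL ChatML template: an optional tools block in the system turn, per-message image/video counters that expand media entries into <|vision_start|>...<|vision_end|> spans, tool-call serialization, and tool responses folded into user turns. A minimal rendering sketch ("./checkpoint" is a hypothetical local clone; recent transformers versions pick up chat_template.jinja automatically):

# Sketch: render the template above into a prompt string.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./checkpoint")
messages = [
    {"role": "user", "content": [
        {"type": "image"},  # becomes <|vision_start|><|image_pad|><|vision_end|>
        {"type": "text", "text": "<FRONT_VIEW> What is directly ahead?"},
    ]},
]
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True)
print(prompt)  # ends with '<|im_start|>assistant\n'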
config.json ADDED
@@ -0,0 +1,70 @@
+ {
+ "architectures": [
+ "Qwen3VLForConditionalGeneration"
+ ],
+ "dtype": "bfloat16",
+ "hidden_size": 2048,
+ "image_token_id": 151655,
+ "model_type": "qwen3_vl",
+ "pad_token_id": 151643,
+ "text_config": {
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "bos_token_id": 151643,
+ "dtype": "bfloat16",
+ "eos_token_id": 151645,
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 2048,
+ "initializer_range": 0.02,
+ "intermediate_size": 6144,
+ "max_position_embeddings": 262144,
+ "model_type": "qwen3_vl_text",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 8,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": {
+ "mrope_interleaved": true,
+ "mrope_section": [
+ 24,
+ 20,
+ 20
+ ],
+ "rope_type": "default"
+ },
+ "rope_theta": 5000000,
+ "tie_word_embeddings": true,
+ "use_cache": true,
+ "vocab_size": 151936
+ },
+ "tie_word_embeddings": true,
+ "transformers_version": "4.57.6",
+ "video_token_id": 151656,
+ "vision_config": {
+ "deepstack_visual_indexes": [
+ 5,
+ 11,
+ 17
+ ],
+ "depth": 24,
+ "dtype": "bfloat16",
+ "hidden_act": "gelu_pytorch_tanh",
+ "hidden_size": 1024,
+ "in_channels": 3,
+ "initializer_range": 0.02,
+ "intermediate_size": 4096,
+ "model_type": "qwen3_vl",
+ "num_heads": 16,
+ "num_position_embeddings": 2304,
+ "out_hidden_size": 2048,
+ "pad_token_id": 151643,
+ "patch_size": 16,
+ "spatial_merge_size": 2,
+ "temporal_patch_size": 2
+ },
+ "vision_end_token_id": 151653,
+ "vision_start_token_id": 151652,
+ "vocab_size": 151936
+ }
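A few numbers worth reading off this config: the text stack is 28 layers of grouped-query attention with 16 query heads over 8 KV heads at head_dim 128, so the KV cache costs 2 x 8 x 128 = 2,048 bf16 values per token per layer, roughly 112 KiB per token across all layers. A sketch inspecting the loaded config (path hypothetical; the qwen3_vl config class needs transformers >= 4.57 per the model_meta above):

# Sketch: load and inspect the config above.
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("./checkpoint")
text, vision = cfg.text_config, cfg.vision_config
kv_vals = 2 * text.num_key_value_heads * text.head_dim      # K + V per layer
print(text.num_hidden_layers, text.hidden_size, kv_vals)    # 28 2048 2048
print(vision.patch_size, vision.spatial_merge_size)         # 16 2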
latest_checkpointed_iteration.txt ADDED
@@ -0,0 +1 @@
+ 4382
merges.txt ADDED
The diff for this file is too large to render.
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cca7cecb0fa3554b3ead41d46c1689af51ef480c6a8158cdecf1e5f7dfbe9d18
+ size 4255140280
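model.safetensors is checked in as a Git LFS pointer; the actual ~4.26 GB weight blob is fetched by LFS on clone or download. A sketch verifying a downloaded copy against the pointer's oid and size:

# Sketch: verify a downloaded model.safetensors against the LFS pointer.
import hashlib, os

path = "model.safetensors"  # local copy of the resolved blob
expected_oid = "cca7cecb0fa3554b3ead41d46c1689af51ef480c6a8158cdecf1e5f7dfbe9d18"
expected_size = 4255140280

assert os.path.getsize(path) == expected_size
sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)
assert sha.hexdigest() == expected_oid
print("pointer verified")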
preprocessor_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+ "size": {
+ "longest_edge": 16777216,
+ "shortest_edge": 65536
+ },
+ "patch_size": 16,
+ "temporal_patch_size": 2,
+ "merge_size": 2,
+ "image_mean": [
+ 0.5,
+ 0.5,
+ 0.5
+ ],
+ "image_std": [
+ 0.5,
+ 0.5,
+ 0.5
+ ],
+ "processor_class": "Qwen3VLProcessor",
+ "image_processor_type": "Qwen2VLImageProcessorFast"
+ }
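With patch_size 16 and merge_size 2, each image token ultimately covers a 32 x 32 pixel block, and shortest_edge/longest_edge here act as total-pixel budgets (65,536 = 256^2 up to 16,777,216 = 4,096^2), which is how Qwen's fast image processors interpret this size dict; treat that reading as an assumption rather than something the file states. A rough token-count sketch under that assumption:

# Sketch: rough visual-token count for an image under this config.
# Assumes shortest_edge/longest_edge are total-pixel bounds (Qwen-style);
# real resizing also rounds dimensions to multiples of patch * merge,
# which this sketch glosses over.
PATCH, MERGE = 16, 2
MIN_PIXELS, MAX_PIXELS = 65536, 16777216

def approx_visual_tokens(height: int, width: int) -> int:
    pixels = min(max(height * width, MIN_PIXELS), MAX_PIXELS)
    patches = pixels // (PATCH * PATCH)   # 16x16 patches before merging
    return patches // (MERGE * MERGE)     # 2x2 spatial merge -> one token

print(approx_visual_tokens(720, 1280))    # ~900 tokens for a 720p frame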
special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+ {
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "eos_token": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aeb13307a71acd8fe81861d94ad54ab689df773318809eed3cbe794b4492dae4
+ size 11422654
tokenizer_config.json ADDED
@@ -0,0 +1,240 @@
+ {
+ "add_bos_token": false,
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|im_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151645": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151646": {
+ "content": "<|object_ref_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|object_ref_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151648": {
+ "content": "<|box_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151649": {
+ "content": "<|box_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "<tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "</tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151665": {
+ "content": "<tool_response>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151666": {
+ "content": "</tool_response>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151667": {
+ "content": "<think>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151668": {
+ "content": "</think>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "bos_token": null,
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|im_end|>",
+ "errors": "replace",
+ "extra_special_tokens": {},
+ "model_max_length": 262144,
+ "pad_token": "<|endoftext|>",
+ "processor_class": "Qwen3VLProcessor",
+ "split_special_tokens": false,
+ "tokenizer_class": "Qwen2Tokenizer",
+ "unk_token": null
+ }
video_preprocessor_config.json ADDED
@@ -0,0 +1,41 @@
+ {
+ "crop_size": null,
+ "data_format": "channels_first",
+ "default_to_square": true,
+ "device": null,
+ "do_center_crop": null,
+ "do_convert_rgb": true,
+ "do_normalize": true,
+ "do_rescale": true,
+ "do_resize": true,
+ "do_sample_frames": true,
+ "fps": 2,
+ "image_mean": [
+ 0.5,
+ 0.5,
+ 0.5
+ ],
+ "image_std": [
+ 0.5,
+ 0.5,
+ 0.5
+ ],
+ "input_data_format": null,
+ "max_frames": 768,
+ "merge_size": 2,
+ "min_frames": 4,
+ "num_frames": null,
+ "pad_size": null,
+ "patch_size": 16,
+ "processor_class": "Qwen3VLProcessor",
+ "resample": 3,
+ "rescale_factor": 0.00392156862745098,
+ "return_metadata": false,
+ "size": {
+ "longest_edge": 25165824,
+ "shortest_edge": 4096
+ },
+ "temporal_patch_size": 2,
+ "video_metadata": null,
+ "video_processor_type": "Qwen3VLVideoProcessor"
+ }
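With do_sample_frames enabled, the frame count is duration-driven: 2 fps, clamped to [4, 768] frames, with temporal_patch_size 2 then folding frame pairs into single temporal patches. A sketch of that sampling rule (the exact rounding inside Qwen3VLVideoProcessor may differ; this is an approximation):

# Sketch: approximate frame sampling under this config.
FPS, MIN_FRAMES, MAX_FRAMES, TEMPORAL_PATCH = 2, 4, 768, 2

def sampled_frames(duration_s: float) -> int:
    n = round(duration_s * FPS)
    n = min(max(n, MIN_FRAMES), MAX_FRAMES)
    return n - n % TEMPORAL_PATCH  # keep frames pairable into temporal patches

print(sampled_frames(30.0))  # 60 frames -> 30 temporal patches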
vocab.json ADDED
The diff for this file is too large to render.