{
  "add_faster_video": false,
  "add_time_instruction": false,
  "anneal_start_block_size": 1,
  "architectures": [
    "DiffusionVL_Qwen2_5_VL_ForConditionalGeneration"
  ],
  "attention_dropout": 0.0,
  "bd3lm_antithetic_sampling": true,
  "bd3lm_attn_backend": "sdpa",
  "bd3lm_block_aligned_eos": true,
  "bd3lm_block_size": 8,
  "bd3lm_complementary_mask": false,
  "bd3lm_cross_attn": true,
  "bd3lm_ignore_bos": true,
  "bd3lm_mask_prob": 0.5,
  "bd3lm_noise_granularity": "block",
  "bd3lm_noise_type": "loglinear",
  "bd3lm_parameterization": "subs",
  "bd3lm_resample": false,
  "bd3lm_sampling_eps_max": 1.0,
  "bd3lm_sampling_eps_min": 0.001,
  "bd3lm_time_conditioning": false,
  "bd3lm_token_shift_prediction": false,
  "bd3lm_var_min": true,
  "bos_token_id": 151643,
  "enable_bd3lm": true,
  "enable_block_size_annealing": false,
  "enable_mtd": false,
  "enable_noise_level_annealing": false,
  "eos_token_id": 151645,
  "faster_token_stride": 10,
  "force_sample": false,
  "hidden_act": "silu",
  "hidden_size": 2048,
  "image_aspect_ratio": "pad",
  "image_crop_resolution": null,
  "image_grid_pinpoints": null,
  "image_split_resolution": null,
  "image_token_id": null,
  "initializer_range": 0.02,
  "intermediate_size": 11008,
  "layer_types": [
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention"
  ],
  "max_pixels": 262144,
  "max_position_embeddings": 128000,
  "max_window_layers": 70,
  "min_pixels": 147456,
  "mm_hidden_size": 1280,
  "mm_newline_position": "grid",
  "mm_patch_merge_type": "flat",
  "mm_projector_lr": null,
  "mm_projector_type": "qwen_merger",
  "mm_resampler_type": null,
  "mm_spatial_pool_mode": "bilinear",
  "mm_spatial_pool_stride": null,
  "mm_tunable_parts": "mm_vision_tower,mm_mlp_adapter,mm_language_model",
  "mm_use_im_patch_token": false,
  "mm_use_im_start_end": false,
  "mm_vision_select_feature": "patch",
  "mm_vision_select_layer": -2,
  "mm_vision_tower": "/path/to/Qwen2.5-VL-3B-Instruct-DiffusionVL",
  "mm_vision_tower_lr": 2e-06,
  "model_max_length": 8192,
  "model_type": "diffusionvl_qwenvl",
  "num_attention_heads": 16,
  "num_hidden_layers": 36,
  "num_key_value_heads": 2,
  "pos_skipping_range": 4096,
  "rms_norm_eps": 1e-06,
  "rope_scaling": {
    "mrope_section": [
      16,
      24,
      24
    ],
    "rope_type": "default",
    "type": "default"
  },
  "rope_theta": 1000000.0,
  "sliding_window": null,
  "text_config": {
    "architectures": [
      "Qwen2_5_VLForConditionalGeneration"
    ],
    "attention_dropout": 0.0,
    "bos_token_id": 151643,
    "eos_token_id": 151645,
    "hidden_act": "silu",
    "hidden_size": 2048,
    "image_token_id": null,
    "initializer_range": 0.02,
    "intermediate_size": 11008,
    "layer_types": [
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention",
      "full_attention"
    ],
    "max_position_embeddings": 128000,
    "max_window_layers": 70,
    "model_type": "qwen2_5_vl_text",
    "num_attention_heads": 16,
    "num_hidden_layers": 36,
    "num_key_value_heads": 2,
    "rms_norm_eps": 1e-06,
    "rope_scaling": {
      "mrope_section": [
        16,
        24,
        24
      ],
      "rope_type": "default",
      "type": "default"
    },
    "rope_theta": 1000000.0,
    "sliding_window": null,
    "tie_word_embeddings": true,
    "torch_dtype": "float32",
    "use_cache": true,
    "use_sliding_window": false,
    "video_token_id": null,
    "vision_end_token_id": 151653,
    "vision_start_token_id": 151652,
    "vision_token_id": 151654,
    "vocab_size": 151936
  },
  "tie_word_embeddings": true,
  "tokenizer_model_max_length": 8192,
  "tokenizer_padding_side": "right",
  "torch_dtype": "bfloat16",
  "transformers_version": "4.55.0",
  "use_cache": true,
  "use_mm_proj": true,
  "use_pos_skipping": false,
  "use_sliding_window": false,
  "video_token_id": null,
  "vision_config": {
    "depth": 32,
    "fullatt_block_indexes": [
      7,
      15,
      23,
      31
    ],
    "hidden_act": "silu",
    "hidden_size": 1280,
    "in_channels": 3,
    "in_chans": 3,
    "initializer_range": 0.02,
    "intermediate_size": 3420,
    "model_type": "",
    "num_heads": 16,
    "out_hidden_size": 2048,
    "patch_size": 14,
    "spatial_merge_size": 2,
    "spatial_patch_size": 14,
    "temporal_patch_size": 2,
    "tokens_per_second": 2,
    "torch_dtype": "float32",
    "window_size": 112
  },
  "vision_end_token_id": 151653,
  "vision_start_token_id": 151652,
  "vision_token_id": 151654,
  "vision_tower_pretrained": null,
  "vocab_size": 151936,
  "mask_token_id": 151671,
  "auto_map": {
    "AutoConfig": "configuration_diffusionvl_qwen2_5_vl.DiffusionVL_Qwen2_5_VL_Config",
    "AutoModelForCausalLM": "modeling_diffusionvl_qwen2_5_vl.DiffusionVL_Qwen2_5_VL_ForConditionalGeneration",
    "AutoProcessor": "processing_diffusionvl_qwen2_5_vl.DiffusionVL_Qwen2_5_VL_Processor"
  }
}