{
  "_name_or_path": "",
  "architectures": [
    "Gemma4VisionModel"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "chunk_size_feed_forward": 0,
  "default_output_length": 280,
  "dtype": "bfloat16",
  "global_head_dim": 72,
  "head_dim": 72,
  "hidden_activation": "gelu_pytorch_tanh",
  "hidden_size": 1152,
  "id2label": {
    "0": "LABEL_0",
    "1": "LABEL_1"
  },
  "initializer_range": 0.02,
  "intermediate_size": 4304,
  "is_encoder_decoder": false,
  "label2id": {
    "LABEL_0": 0,
    "LABEL_1": 1
  },
  "max_position_embeddings": 131072,
  "model_type": "gemma4_vision",
  "num_attention_heads": 16,
  "num_hidden_layers": 27,
  "num_key_value_heads": 16,
  "output_attentions": false,
  "output_hidden_states": false,
  "patch_size": 16,
  "pooling_kernel_size": 3,
  "position_embedding_size": 10240,
  "problem_type": null,
  "return_dict": true,
  "rms_norm_eps": 1e-06,
  "rope_parameters": {
    "rope_theta": 100.0,
    "rope_type": "default"
  },
  "standardize": true,
  "use_clipped_linears": false,
  "torch_dtype": "bfloat16",
  "_source_model": "google/gemma-4-31B-it",
  "_extraction_note": "Vision tower extracted from 31B model",
  "_verified_total_params": 569550384
}
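
A minimal sketch of how the numbers above tie together, assuming the config is saved locally as `config.json` (a hypothetical path for illustration): the per-head width times the head count must recover `hidden_size`, and equal query and key/value head counts indicate plain multi-head attention rather than grouped-query attention.

```python
import json

# Hypothetical local path: the JSON above saved to disk as config.json.
with open("config.json") as f:
    cfg = json.load(f)

# head_dim must tie out with hidden_size and the head count:
# 16 heads * 72 per head = 1152 hidden.
assert cfg["hidden_size"] == cfg["num_attention_heads"] * cfg["head_dim"]

# Equal query and key/value head counts mean standard multi-head
# attention (no grouped-query KV sharing) in this tower.
assert cfg["num_key_value_heads"] == cfg["num_attention_heads"]

print(f"{cfg['num_hidden_layers']} layers, "
      f"{cfg['hidden_size']}-d hidden, "
      f"{cfg['_verified_total_params'] / 1e6:.1f}M params")
```

With this config the script prints `27 layers, 1152-d hidden, 569.6M params`, matching `_verified_total_params`.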