Commit ·
419b2ef
0
Parent(s):
Preparing for release!
Browse filesCo-authored-by: Rocketknight1 <Rocketknight1@users.noreply.huggingface.co>
Co-authored-by: dougreid <dougreid@users.noreply.huggingface.co>
Co-authored-by: RyanMullins <RyanMullins@users.noreply.huggingface.co>
Co-authored-by: pcuenq <pcuenq@users.noreply.huggingface.co>
Co-authored-by: bebechien <bebechien@users.noreply.huggingface.co>
Co-authored-by: osanseviero <osanseviero@users.noreply.huggingface.co>
Co-authored-by: douglas reid <douglasreid@users.noreply.huggingface.co>
- .gitattributes +38 -0
- README.md +513 -0
- chat_template.jinja +266 -0
- config.json +176 -0
- generation_config.json +14 -0
- model-00001-of-00002.safetensors +3 -0
- model-00002-of-00002.safetensors +3 -0
- model.safetensors.index.json +0 -0
- processor_config.json +75 -0
- tokenizer.json +3 -0
- tokenizer_config.json +74 -0
.gitattributes
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
| 13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 28 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 29 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 30 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 31 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 32 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 33 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
tokenizer.json filter=lfs diff=lfs merge=lfs -text
|
| 37 |
+
model-00001-of-00002.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 38 |
+
model-00002-of-00002.safetensors filter=lfs diff=lfs merge=lfs -text
|
README.md
ADDED
|
@@ -0,0 +1,513 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
library_name: transformers
|
| 3 |
+
license: apache-2.0
|
| 4 |
+
license_link: https://ai.google.dev/gemma/docs/gemma_4_license
|
| 5 |
+
pipeline_tag: image-text-to-text
|
| 6 |
+
---
|
| 7 |
+
|
| 8 |
+
<div align="center">
|
| 9 |
+
<img src="https://ai.google.dev/gemma/images/gemma4_banner.png">
|
| 10 |
+
</div>
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
<p align="center">
|
| 14 |
+
<a href="https://huggingface.co/collections/google/gemma-4" target="_blank">Hugging Face</a> |
|
| 15 |
+
<a href="https://github.com/google-gemma" target="_blank">GitHub</a> |
|
| 16 |
+
<a href="https://blog.google/innovation-and-ai/technology/developers-tools/gemma-4/" target="_blank">Launch Blog</a> |
|
| 17 |
+
<a href="https://ai.google.dev/gemma/docs/core" target="_blank">Documentation</a>
|
| 18 |
+
<br>
|
| 19 |
+
<b>License</b>: <a href="https://ai.google.dev/gemma/docs/gemma_4_license" target="_blank">Apache 2.0</a> | <b>Authors</b>: <a href="https://deepmind.google/models/gemma/" target="_blank">Google DeepMind</a>
|
| 20 |
+
</p>
|
| 21 |
+
|
| 22 |
+
Gemma is a family of open models built by Google DeepMind. Gemma 4 models are multimodal, handling text and image input (with audio supported on small models) and generating text output. This release includes open-weights models in both pre-trained and instruction-tuned variants. Gemma 4 features a context window of up to 256K tokens and maintains multilingual support in over 140 languages.
|
| 23 |
+
|
| 24 |
+
Featuring both Dense and Mixture-of-Experts (MoE) architectures, Gemma 4 is well-suited for tasks like text generation, coding, and reasoning. The models are available in four distinct sizes: **E2B**, **E4B**, **26B A4B**, and **31B**. Their diverse sizes make them deployable in environments ranging from high-end phones to laptops and servers, democratizing access to state-of-the-art AI.
|
| 25 |
+
|
| 26 |
+
Gemma 4 introduces key **capability and architectural advancements**:
|
| 27 |
+
|
| 28 |
+
* **Reasoning** – All models in the family are designed as highly capable reasoners, with configurable thinking modes.
|
| 29 |
+
|
| 30 |
+
* **Extended Multimodalities** – Processes Text, Image with variable aspect ratio and resolution support (all models), Video, and Audio (featured natively on the E2B and E4B models).
|
| 31 |
+
|
| 32 |
+
* **Diverse & Efficient Architectures** – Offers Dense and Mixture-of-Experts (MoE) variants of different sizes for scalable deployment.
|
| 33 |
+
|
| 34 |
+
* **Optimized for On-Device** – Smaller models are specifically designed for efficient local execution on laptops and mobile devices.
|
| 35 |
+
|
| 36 |
+
* **Increased Context Window** – The small models feature a 128K context window, while the medium models support 256K.
|
| 37 |
+
|
| 38 |
+
* **Enhanced Coding & Agentic Capabilities** – Achieves notable improvements in coding benchmarks alongside native function-calling support, powering highly capable autonomous agents.
|
| 39 |
+
|
| 40 |
+
* **Native System Prompt Support** – Gemma 4 introduces native support for the `system` role, enabling more structured and controllable conversations.
|
| 41 |
+
|
| 42 |
+
## **Models Overview**
|
| 43 |
+
|
| 44 |
+
Gemma 4 models are designed to deliver frontier-level performance at each size, targeting deployment scenarios from mobile and edge devices (E2B, E4B) to consumer GPUs and workstations (26B A4B, 31B). They are well-suited for reasoning, agentic workflows, coding, and multimodal understanding.
|
| 45 |
+
|
| 46 |
+
The models employ a hybrid attention mechanism that interleaves local sliding window attention with full global attention, ensuring the final layer is always global. This hybrid design delivers the processing speed and low memory footprint of a lightweight model without sacrificing the deep awareness required for complex, long-context tasks. To optimize memory for long contexts, global layers feature unified Keys and Values, and apply Proportional RoPE (p-RoPE).
|
| 47 |
+
|
| 48 |
+
### Dense Models
|
| 49 |
+
|
| 50 |
+
| Property | E2B | E4B | 31B Dense |
|
| 51 |
+
| :---- | :---- | :---- | :---- |
|
| 52 |
+
| **Total Parameters** | 2.3B effective (5.1B with embeddings) | 4.5B effective (8B with embeddings) | 30.7B |
|
| 53 |
+
| **Layers** | 35 | 42 | 60 |
|
| 54 |
+
| **Sliding Window** | 512 tokens | 512 tokens | 1024 tokens |
|
| 55 |
+
| **Context Length** | 128K tokens | 128K tokens | 256K tokens |
|
| 56 |
+
| **Vocabulary Size** | 262K | 262K | 262K |
|
| 57 |
+
| **Supported Modalities** | Text, Image, Audio | Text, Image, Audio | Text, Image |
|
| 58 |
+
| **Vision Encoder Parameters** | *~150M* | *~150M* | *~550M* |
|
| 59 |
+
| **Audio Encoder Parameters** | *~300M* | *~300M* | No Audio |
|
| 60 |
+
|
| 61 |
+
The "E" in E2B and E4B stands for "effective" parameters. The smaller models incorporate Per-Layer Embeddings (PLE) to maximize parameter efficiency in on-device deployments. Rather than adding more layers or parameters to the model, PLE gives each decoder layer its own small embedding for every token. These embedding tables are large but are only used for quick lookups, which is why the effective parameter count is much smaller than the total.
|
| 62 |
+
|
| 63 |
+
### Mixture-of-Experts (MoE) Model
|
| 64 |
+
|
| 65 |
+
| Property | 26B A4B MoE |
|
| 66 |
+
| :---- | :---- |
|
| 67 |
+
| **Total Parameters** | 25.2B |
|
| 68 |
+
| **Active Parameters** | 3.8B |
|
| 69 |
+
| **Layers** | 30 |
|
| 70 |
+
| **Sliding Window** | 1024 tokens |
|
| 71 |
+
| **Context Length** | 256K tokens |
|
| 72 |
+
| **Vocabulary Size** | 262K |
|
| 73 |
+
| **Expert Count** | 8 active / 128 total and 1 shared |
|
| 74 |
+
| **Supported Modalities** | Text, Image |
|
| 75 |
+
| **Vision Encoder Parameters** | *~550M* |
|
| 76 |
+
|
| 77 |
+
The "A" in 26B A4B stands for "active parameters" in contrast to the total number of parameters the model contains. By only activating a 4B subset of parameters during inference, the Mixture-of-Experts model runs much faster than its 26B total might suggest. This makes it an excellent choice for fast inference compared to the dense 31B model since it runs almost as fast as a 4B-parameter model.
|
| 78 |
+
|
| 79 |
+
## **Benchmark Results**
|
| 80 |
+
|
| 81 |
+
These models were evaluated against a large collection of different datasets and metrics to cover different aspects of text generation. Evaluation results marked in the table are for instruction-tuned models.
|
| 82 |
+
|
| 83 |
+
| | Gemma 4 31B | Gemma 4 26B A4B | Gemma 4 E4B | Gemma 4 E2B | Gemma 3 27B (no think) |
|
| 84 |
+
| :---- | :---- | :---- | :---- | :---- | :---- |
|
| 85 |
+
| MMLU Pro | 85.2% | 82.6% | 69.4% | 60.0% | 67.6% |
|
| 86 |
+
| AIME 2026 no tools | 89.2% | 88.3% | 42.5% | 37.5% | 20.8% |
|
| 87 |
+
| LiveCodeBench v6 | 80.0% | 77.1% | 52.0% | 44.0% | 29.1% |
|
| 88 |
+
| Codeforces ELO | 2150 | 1718 | 940 | 633 | 110 |
|
| 89 |
+
| GPQA Diamond | 84.3% | 82.3% | 58.6% | 43.4% | 42.4% |
|
| 90 |
+
| Tau2 (average over 3) | 76.9% | 68.2% | 42.2% | 24.5% | 16.2% |
|
| 91 |
+
| HLE no tools | 19.5% | 8.7% | - | - | - |
|
| 92 |
+
| HLE with search | 26.5% | 17.2% | - | - | - |
|
| 93 |
+
| BigBench Extra Hard | 74.4% | 64.8% | 33.1% | 21.9% | 19.3% |
|
| 94 |
+
| MMMLU | 88.4% | 86.3% | 76.6% | 67.4% | 70.7% |
|
| 95 |
+
| **Vision** | | | | | |
|
| 96 |
+
| MMMU Pro | 76.9% | 73.8% | 52.6% | 44.2% | 49.7% |
|
| 97 |
+
| OmniDocBench 1.5 (average edit distance, lower is better) | 0.131 | 0.149 | 0.181 | 0.290 | 0.365 |
|
| 98 |
+
| MATH-Vision | 85.6% | 82.4% | 59.5% | 52.4% | 46.0% |
|
| 99 |
+
| MedXPertQA MM | 61.3% | 58.1% | 28.7% | 23.5% | - |
|
| 100 |
+
| **Audio** | | | | | |
|
| 101 |
+
| CoVoST | - | - | 35.54 | 33.47 | - |
|
| 102 |
+
| FLEURS (lower is better) | - | - | 0.08 | 0.09 | - |
|
| 103 |
+
| **Long Context** | | | | | |
|
| 104 |
+
| MRCR v2 8 needle 128k (average) | 66.4% | 44.1% | 25.4% | 19.1% | 13.5% |
|
| 105 |
+
|
| 106 |
+
## **Core Capabilities**
|
| 107 |
+
|
| 108 |
+
Gemma 4 models handle a broad range of tasks across text, vision, and audio. Key capabilities include:
|
| 109 |
+
|
| 110 |
+
* **Thinking** – Built-in reasoning mode that lets the model think step-by-step before answering.
|
| 111 |
+
* **Long Context** – Context windows of up to 128K tokens (E2B/E4B) and 256K tokens (26B A4B/31B).
|
| 112 |
+
* **Image Understanding** – Object detection, Document/PDF parsing, screen and UI understanding, chart comprehension, OCR (including multilingual), handwriting recognition, and pointing. Images can be processed at variable aspect ratios and resolutions.
|
| 113 |
+
* **Video Understanding** – Analyze video by processing sequences of frames.
|
| 114 |
+
* **Interleaved Multimodal Input** – Freely mix text and images in any order within a single prompt.
|
| 115 |
+
* **Function Calling** – Native support for structured tool use, enabling agentic workflows.
|
| 116 |
+
* **Coding** – Code generation, completion, and correction.
|
| 117 |
+
* **Multilingual** – Out-of-the-box support for 35+ languages, pre-trained on 140+ languages.
|
| 118 |
+
* **Audio** (E2B and E4B only) – Automatic speech recognition (ASR) and speech-to-translated-text translation across multiple languages.
|
| 119 |
+
|
| 120 |
+
## Getting Started
|
| 121 |
+
|
| 122 |
+
You can use all Gemma 4 models with the latest version of Transformers. To get started, install the necessary dependencies in your environment:
|
| 123 |
+
|
| 124 |
+
`pip install -U transformers torch accelerate`
|
| 125 |
+
|
| 126 |
+
Once you have everything installed, you can proceed to load the model with the code below:
|
| 127 |
+
|
| 128 |
+
```python
|
| 129 |
+
from transformers import AutoProcessor, AutoModelForCausalLM
|
| 130 |
+
|
| 131 |
+
MODEL_ID = "google/gemma-4-31B-it"
|
| 132 |
+
|
| 133 |
+
# Load model
|
| 134 |
+
processor = AutoProcessor.from_pretrained(MODEL_ID)
|
| 135 |
+
model = AutoModelForCausalLM.from_pretrained(
|
| 136 |
+
MODEL_ID,
|
| 137 |
+
dtype="auto",
|
| 138 |
+
device_map="auto"
|
| 139 |
+
)
|
| 140 |
+
```
|
| 141 |
+
|
| 142 |
+
Once the model is loaded, you can start generating output:
|
| 143 |
+
|
| 144 |
+
```python
|
| 145 |
+
# Prompt
|
| 146 |
+
messages = [
|
| 147 |
+
{"role": "system", "content": "You are a helpful assistant."},
|
| 148 |
+
{"role": "user", "content": "Write a short joke about saving RAM."},
|
| 149 |
+
]
|
| 150 |
+
|
| 151 |
+
# Process input
|
| 152 |
+
text = processor.apply_chat_template(
|
| 153 |
+
messages,
|
| 154 |
+
tokenize=False,
|
| 155 |
+
add_generation_prompt=True,
|
| 156 |
+
enable_thinking=False
|
| 157 |
+
)
|
| 158 |
+
inputs = processor(text=text, return_tensors="pt").to(model.device)
|
| 159 |
+
input_len = inputs["input_ids"].shape[-1]
|
| 160 |
+
|
| 161 |
+
# Generate output
|
| 162 |
+
outputs = model.generate(**inputs, max_new_tokens=1024)
|
| 163 |
+
response = processor.decode(outputs[0][input_len:], skip_special_tokens=False)
|
| 164 |
+
|
| 165 |
+
# Parse output
|
| 166 |
+
processor.parse_response(response)
|
| 167 |
+
```
|
| 168 |
+
|
| 169 |
+
To enable reasoning, set `enable_thinking=True` and the `parse_response` function will take care of parsing the thinking output.
|
| 170 |
+
|
| 171 |
+
Below, you will also find snippets for processing audio (E2B and E4B only), images, and video alongside text:
|
| 172 |
+
|
| 173 |
+
<details>
|
| 174 |
+
<summary>Code for processing Audio</summary>
|
| 175 |
+
|
| 176 |
+
Instead of using `AutoModelForCausalLM`, you can use `AutoModelForMultimodalLM` to process audio. To use it, make sure to install the following packages:
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
`pip install -U transformers torch librosa accelerate`
|
| 180 |
+
|
| 181 |
+
You can then load the model with the code below:
|
| 182 |
+
|
| 183 |
+
```python
|
| 184 |
+
from transformers import AutoProcessor, AutoModelForMultimodalLM
|
| 185 |
+
|
| 186 |
+
MODEL_ID = "google/gemma-4-E2B-it"
|
| 187 |
+
|
| 188 |
+
# Load model
|
| 189 |
+
processor = AutoProcessor.from_pretrained(MODEL_ID)
|
| 190 |
+
model = AutoModelForMultimodalLM.from_pretrained(
|
| 191 |
+
MODEL_ID,
|
| 192 |
+
dtype="auto",
|
| 193 |
+
device_map="auto"
|
| 194 |
+
)
|
| 195 |
+
```
|
| 196 |
+
|
| 197 |
+
Once the model is loaded, you can start generating output by directly referencing the audio URL in the prompt:
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
```python
|
| 201 |
+
# Prompt - add audio before text
|
| 202 |
+
messages = [
|
| 203 |
+
{
|
| 204 |
+
"role": "user",
|
| 205 |
+
"content": [
|
| 206 |
+
{"type": "audio", "audio": "https://raw.githubusercontent.com/google-gemma/cookbook/refs/heads/main/Demos/sample-data/journal1.wav"},
|
| 207 |
+
{"type": "text", "text": "Transcribe the following speech segment in its original language. Follow these specific instructions for formatting the answer:\n* Only output the transcription, with no newlines.\n* When transcribing numbers, write the digits, i.e. write 1.7 and not one point seven, and write 3 instead of three."},
|
| 208 |
+
]
|
| 209 |
+
}
|
| 210 |
+
]
|
| 211 |
+
|
| 212 |
+
# Process input
|
| 213 |
+
inputs = processor.apply_chat_template(
|
| 214 |
+
messages,
|
| 215 |
+
tokenize=True,
|
| 216 |
+
return_dict=True,
|
| 217 |
+
return_tensors="pt",
|
| 218 |
+
add_generation_prompt=True,
|
| 219 |
+
).to(model.device)
|
| 220 |
+
input_len = inputs["input_ids"].shape[-1]
|
| 221 |
+
|
| 222 |
+
# Generate output
|
| 223 |
+
outputs = model.generate(**inputs, max_new_tokens=512)
|
| 224 |
+
response = processor.decode(outputs[0][input_len:], skip_special_tokens=False)
|
| 225 |
+
|
| 226 |
+
# Parse output
|
| 227 |
+
processor.parse_response(response)
|
| 228 |
+
```
|
| 229 |
+
|
| 230 |
+
</details>
|
| 231 |
+
|
| 232 |
+
<details>
|
| 233 |
+
<summary>Code for processing Images</summary>
|
| 234 |
+
|
| 235 |
+
Instead of using `AutoModelForCausalLM`, you can use `AutoModelForMultimodalLM` to process images. To use it, make sure to install the following packages:
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
`pip install -U transformers torch torchvision accelerate`
|
| 239 |
+
|
| 240 |
+
You can then load the model with the code below:
|
| 241 |
+
|
| 242 |
+
```python
|
| 243 |
+
from transformers import AutoProcessor, AutoModelForMultimodalLM
|
| 244 |
+
|
| 245 |
+
MODEL_ID = "google/gemma-4-31B-it"
|
| 246 |
+
|
| 247 |
+
# Load model
|
| 248 |
+
processor = AutoProcessor.from_pretrained(MODEL_ID)
|
| 249 |
+
model = AutoModelForMultimodalLM.from_pretrained(
|
| 250 |
+
MODEL_ID,
|
| 251 |
+
dtype="auto",
|
| 252 |
+
device_map="auto"
|
| 253 |
+
)
|
| 254 |
+
```
|
| 255 |
+
|
| 256 |
+
Once the model is loaded, you can start generating output by directly referencing the image URL in the prompt:
|
| 257 |
+
|
| 258 |
+
|
| 259 |
+
```python
|
| 260 |
+
# Prompt - add image before text
|
| 261 |
+
messages = [
|
| 262 |
+
{
|
| 263 |
+
"role": "user", "content": [
|
| 264 |
+
{"type": "image", "url": "https://raw.githubusercontent.com/google-gemma/cookbook/refs/heads/main/Demos/sample-data/GoldenGate.png"},
|
| 265 |
+
{"type": "text", "text": "What is shown in this image?"}
|
| 266 |
+
]
|
| 267 |
+
}
|
| 268 |
+
]
|
| 269 |
+
|
| 270 |
+
# Process input
|
| 271 |
+
inputs = processor.apply_chat_template(
|
| 272 |
+
messages,
|
| 273 |
+
tokenize=True,
|
| 274 |
+
return_dict=True,
|
| 275 |
+
return_tensors="pt",
|
| 276 |
+
add_generation_prompt=True,
|
| 277 |
+
).to(model.device)
|
| 278 |
+
input_len = inputs["input_ids"].shape[-1]
|
| 279 |
+
|
| 280 |
+
# Generate output
|
| 281 |
+
outputs = model.generate(**inputs, max_new_tokens=512)
|
| 282 |
+
response = processor.decode(outputs[0][input_len:], skip_special_tokens=False)
|
| 283 |
+
|
| 284 |
+
# Parse output
|
| 285 |
+
processor.parse_response(response)
|
| 286 |
+
```
|
| 287 |
+
|
| 288 |
+
</details>
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
<details>
|
| 292 |
+
<summary>Code for processing Videos</summary>
|
| 293 |
+
|
| 294 |
+
Instead of using `AutoModelForCausalLM`, you can use `AutoModelForMultimodalLM` to process videos. To use it, make sure to install the following packages:
|
| 295 |
+
|
| 296 |
+
`pip install -U transformers torch torchvision torchcodec librosa accelerate`
|
| 297 |
+
|
| 298 |
+
You can then load the model with the code below:
|
| 299 |
+
|
| 300 |
+
```python
|
| 301 |
+
from transformers import AutoProcessor, AutoModelForMultimodalLM
|
| 302 |
+
|
| 303 |
+
MODEL_ID = "google/gemma-4-31B-it"
|
| 304 |
+
|
| 305 |
+
# Load model
|
| 306 |
+
processor = AutoProcessor.from_pretrained(MODEL_ID)
|
| 307 |
+
model = AutoModelForMultimodalLM.from_pretrained(
|
| 308 |
+
MODEL_ID,
|
| 309 |
+
dtype="auto",
|
| 310 |
+
device_map="auto"
|
| 311 |
+
)
|
| 312 |
+
```
|
| 313 |
+
|
| 314 |
+
Once the model is loaded, you can start generating output by directly referencing the video URL in the prompt:
|
| 315 |
+
|
| 316 |
+
|
| 317 |
+
```python
|
| 318 |
+
# Prompt - add video before text
|
| 319 |
+
messages = [
|
| 320 |
+
{
|
| 321 |
+
'role': 'user',
|
| 322 |
+
'content': [
|
| 323 |
+
{"type": "video", "video": "https://github.com/bebechien/gemma/raw/refs/heads/main/videos/ForBiggerBlazes.mp4"},
|
| 324 |
+
{'type': 'text', 'text': 'Describe this video.'}
|
| 325 |
+
]
|
| 326 |
+
}
|
| 327 |
+
]
|
| 328 |
+
|
| 329 |
+
# Process input
|
| 330 |
+
inputs = processor.apply_chat_template(
|
| 331 |
+
messages,
|
| 332 |
+
tokenize=True,
|
| 333 |
+
return_dict=True,
|
| 334 |
+
return_tensors="pt",
|
| 335 |
+
add_generation_prompt=True,
|
| 336 |
+
).to(model.device)
|
| 337 |
+
input_len = inputs["input_ids"].shape[-1]
|
| 338 |
+
|
| 339 |
+
# Generate output
|
| 340 |
+
outputs = model.generate(**inputs, max_new_tokens=512)
|
| 341 |
+
response = processor.decode(outputs[0][input_len:], skip_special_tokens=False)
|
| 342 |
+
|
| 343 |
+
# Parse output
|
| 344 |
+
processor.parse_response(response)
|
| 345 |
+
```
|
| 346 |
+
|
| 347 |
+
</details>
|
| 348 |
+
|
| 349 |
+
|
| 350 |
+
## **Best Practices**
|
| 351 |
+
|
| 352 |
+
For the best performance, use these configurations and best practices:
|
| 353 |
+
|
| 354 |
+
### 1. Sampling Parameters
|
| 355 |
+
|
| 356 |
+
Use the following standardized sampling configuration across all use cases:
|
| 357 |
+
|
| 358 |
+
* `temperature=1.0`
|
| 359 |
+
* `top_p=0.95`
|
| 360 |
+
* `top_k=64`
|
| 361 |
+
|
| 362 |
+
### 2. Thinking Mode Configuration
|
| 363 |
+
|
| 364 |
+
Compared to Gemma 3, the models use standard `system`, `assistant`, and `user` roles. To properly manage the thinking process, use the following control tokens:
|
| 365 |
+
|
| 366 |
+
* **Trigger Thinking:** Thinking is enabled by including the `<|think|>` token at the start of the system prompt. To disable thinking, remove the token.
|
| 367 |
+
* **Standard Generation:** When thinking is enabled, the model will output its internal reasoning followed by the final answer using this structure:
|
| 368 |
+
`<|channel>thought\n`**[Internal reasoning]**`<channel|>`
|
| 369 |
+
* **Disabled Thinking Behavior:** For all models except for the E2B and E4B variants, if thinking is disabled, the model will still generate the tags but with an empty thought block:
|
| 370 |
+
`<|channel>thought\n<channel|>`**[Final answer]**
|
| 371 |
+
|
| 372 |
+
> [!Note]
|
| 373 |
+
> Note that many libraries like Transformers and llama.cpp handle the complexities of the chat template for you.
|
| 374 |
+
|
| 375 |
+
### 3. Multi-Turn Conversations
|
| 376 |
+
|
| 377 |
+
* **No Thinking Content in History**: In multi-turn conversations, the historical model output should only include the final response. Thoughts from previous model turns must *not be added* before the next user turn begins.
|
| 378 |
+
|
| 379 |
+
### 4. Modality order
|
| 380 |
+
|
| 381 |
+
* For optimal performance with multimodal inputs, place image and/or audio content **before** the text in your prompt.
|
| 382 |
+
|
| 383 |
+
### 5. Variable Image Resolution
|
| 384 |
+
|
| 385 |
+
Aside from variable aspect ratios, Gemma 4 supports variable image resolution through a configurable visual token budget, which controls how many tokens are used to represent an image. A higher token budget preserves more visual detail at the cost of additional compute, while a lower budget enables faster inference for tasks that don't require fine-grained understanding.
|
| 386 |
+
|
| 387 |
+
* The supported token budgets are: **70**, **140**, **280**, **560**, and **1120**.
|
| 388 |
+
* Use *lower budgets* for classification, captioning, or video understanding, where faster inference and processing many frames outweigh fine-grained detail.
|
| 389 |
+
* Use *higher budgets* for tasks like OCR, document parsing, or reading small text.
|
| 390 |
+
|
| 391 |
+
### 6. Audio
|
| 392 |
+
|
| 393 |
+
Use the following prompt structures for audio processing:
|
| 394 |
+
|
| 395 |
+
* **Audio Speech Recognition (ASR)**
|
| 396 |
+
|
| 397 |
+
```text
|
| 398 |
+
Transcribe the following speech segment in {LANGUAGE} into {LANGUAGE} text.
|
| 399 |
+
|
| 400 |
+
Follow these specific instructions for formatting the answer:
|
| 401 |
+
* Only output the transcription, with no newlines.
|
| 402 |
+
* When transcribing numbers, write the digits, i.e. write 1.7 and not one point seven, and write 3 instead of three.
|
| 403 |
+
```
|
| 404 |
+
|
| 405 |
+
* **Automatic Speech Translation (AST)**
|
| 406 |
+
|
| 407 |
+
```text
|
| 408 |
+
Transcribe the following speech segment in {SOURCE_LANGUAGE}, then translate it into {TARGET_LANGUAGE}.
|
| 409 |
+
When formatting the answer, first output the transcription in {SOURCE_LANGUAGE}, then one newline, then output the string '{TARGET_LANGUAGE}: ', then the translation in {TARGET_LANGUAGE}.
|
| 410 |
+
```
|
| 411 |
+
|
| 412 |
+
### 7. Audio and Video Length
|
| 413 |
+
|
| 414 |
+
All models support image inputs and can process videos as frames whereas the E2B and E4B models also support audio inputs. Audio supports a maximum length of 30 seconds. Video supports a maximum of 60 seconds assuming the images are processed at one frame per second.
|
| 415 |
+
|
| 416 |
+
## **Model Data**
|
| 417 |
+
|
| 418 |
+
Data used for model training and how the data was processed.
|
| 419 |
+
|
| 420 |
+
### **Training Dataset**
|
| 421 |
+
|
| 422 |
+
Our pre-training dataset is a large-scale, diverse collection of data encompassing a wide range of domains and modalities, which includes web documents, code, images, audio, with a cutoff date of January 2025. Here are the key components:
|
| 423 |
+
|
| 424 |
+
* **Web Documents**: A diverse collection of web text ensures the model is exposed to a broad range of linguistic styles, topics, and vocabulary. The training dataset includes content in over 140 languages.
|
| 425 |
+
* **Code**: Exposing the model to code helps it to learn the syntax and patterns of programming languages, which improves its ability to generate code and understand code-related questions.
|
| 426 |
+
* **Mathematics**: Training on mathematical text helps the model learn logical reasoning, symbolic representation, and to address mathematical queries.
|
| 427 |
+
* **Images**: A wide range of images enables the model to perform image analysis and visual data extraction tasks.
|
| 428 |
+
|
| 429 |
+
The combination of these diverse data sources is crucial for training a powerful multimodal model that can handle a wide variety of different tasks and data formats.
|
| 430 |
+
|
| 431 |
+
### **Data Preprocessing**
|
| 432 |
+
|
| 433 |
+
Here are the key data cleaning and filtering methods applied to the training data:
|
| 434 |
+
|
| 435 |
+
* **CSAM Filtering**: Rigorous CSAM (Child Sexual Abuse Material) filtering was applied at multiple stages in the data preparation process to ensure the exclusion of harmful and illegal content.
|
| 436 |
+
* **Sensitive Data Filtering**: As part of making Gemma pre-trained models safe and reliable, automated techniques were used to filter out certain personal information and other sensitive data from training sets.
|
| 437 |
+
* **Additional methods**: Filtering based on content quality and safety in line with [our policies](https://ai.google/static/documents/ai-responsibility-update-published-february-2025.pdf).
|
| 438 |
+
|
| 439 |
+
## **Ethics and Safety**
|
| 440 |
+
|
| 441 |
+
As open models become central to enterprise infrastructure, provenance and security are paramount. Developed by Google DeepMind, Gemma 4 undergoes the same rigorous safety evaluations as our proprietary Gemini models.
|
| 442 |
+
|
| 443 |
+
### **Evaluation Approach**
|
| 444 |
+
|
| 445 |
+
Gemma 4 models were developed in partnership with internal safety and responsible AI teams. A range of automated as well as human evaluations were conducted to help improve model safety. These evaluations align with [Google’s AI principles](https://ai.google/principles/), as well as safety policies, which aim to prevent our generative AI models from generating harmful content, including:
|
| 446 |
+
|
| 447 |
+
* Content related to child sexual abuse material and exploitation
|
| 448 |
+
* Dangerous content (e.g., promoting suicide, or instructing in activities that could cause real-world harm)
|
| 449 |
+
* Sexually explicit content
|
| 450 |
+
* Hate speech (e.g., dehumanizing members of protected groups)
|
| 451 |
+
* Harassment (e.g., encouraging violence against people)
|
| 452 |
+
|
| 453 |
+
### **Evaluation Results**
|
| 454 |
+
|
| 455 |
+
For all areas of safety testing, we saw major improvements in all categories of content safety relative to previous Gemma models. Overall, Gemma 4 models show significantly stronger safety behavior than the Gemma 3 and 3n models, while keeping unjustified refusals low. All testing was conducted without safety filters to evaluate the model capabilities and behaviors. For both text-to-text and image-to-text, and across all model sizes, the models produced minimal policy violations and showed significant improvements over previous Gemma models' performance.
|
| 456 |
+
|
| 457 |
+
## **Usage and Limitations**
|
| 458 |
+
|
| 459 |
+
These models have certain limitations that users should be aware of.
|
| 460 |
+
|
| 461 |
+
### **Intended Usage**
|
| 462 |
+
|
| 463 |
+
Multimodal models (capable of processing vision, language, and/or audio) have a wide range of applications across various industries and domains. The following list of potential uses is not comprehensive. The purpose of this list is to provide contextual information about the possible use-cases that the model creators considered as part of model training and development.
|
| 464 |
+
|
| 465 |
+
* **Content Creation and Communication**
|
| 466 |
+
* **Text Generation**: These models can be used to generate creative text formats such as poems, scripts, code, marketing copy, and email drafts.
|
| 467 |
+
* **Chatbots and Conversational AI**: Power conversational interfaces for customer service, virtual assistants, or interactive applications.
|
| 468 |
+
* **Text Summarization**: Generate concise summaries of a text corpus, research papers, or reports.
|
| 469 |
+
* **Image Data Extraction**: These models can be used to extract, interpret, and summarize visual data for text communications.
|
| 470 |
+
* **Audio Processing and Interaction**: The smaller models (E2B and E4B) can analyze and interpret audio inputs, enabling voice-driven interactions and transcriptions.
|
| 471 |
+
* **Research and Education**
|
| 472 |
+
* **Natural Language Processing (NLP) and VLM Research**: These models can serve as a foundation for researchers to experiment with VLM and NLP techniques, develop algorithms, and contribute to the advancement of the field.
|
| 473 |
+
* **Language Learning Tools**: Support interactive language learning experiences, aiding in grammar correction or providing writing practice.
|
| 474 |
+
* **Knowledge Exploration**: Assist researchers in exploring large bodies of text by generating summaries or answering questions about specific topics.
|
| 475 |
+
|
| 476 |
+
### **Limitations**
|
| 477 |
+
|
| 478 |
+
* **Training Data**
|
| 479 |
+
* The quality and diversity of the training data significantly influence the model's capabilities. Biases or gaps in the training data can lead to limitations in the model's responses.
|
| 480 |
+
* The scope of the training dataset determines the subject areas the model can handle effectively.
|
| 481 |
+
* **Context and Task Complexity**
|
| 482 |
+
* Models perform well on tasks that can be framed with clear prompts and instructions. Open-ended or highly complex tasks might be challenging.
|
| 483 |
+
* A model's performance can be influenced by the amount of context provided (longer context generally leads to better outputs, up to a certain point).
|
| 484 |
+
* **Language Ambiguity and Nuance**
|
| 485 |
+
* Natural language is inherently complex. Models might struggle to grasp subtle nuances, sarcasm, or figurative language.
|
| 486 |
+
* **Factual Accuracy**
|
| 487 |
+
* Models generate responses based on information they learned from their training datasets, but they are not knowledge bases. They may generate incorrect or outdated factual statements.
|
| 488 |
+
* **Common Sense**
|
| 489 |
+
* Models rely on statistical patterns in language. They might lack the ability to apply common sense reasoning in certain situations.
|
| 490 |
+
|
| 491 |
+
### **Ethical Considerations and Risks**
|
| 492 |
+
|
| 493 |
+
The development of vision-language models (VLMs) raises several ethical concerns. In creating an open model, we have carefully considered the following:
|
| 494 |
+
|
| 495 |
+
* **Bias and Fairness**
|
| 496 |
+
* VLMs trained on large-scale, real-world text and image data can reflect socio-cultural biases embedded in the training material. Gemma 4 models underwent careful scrutiny, input data pre-processing, and post-training evaluations as reported in this card to help mitigate the risk of these biases.
|
| 497 |
+
* **Misinformation and Misuse**
|
| 498 |
+
* VLMs can be misused to generate text that is false, misleading, or harmful.
|
| 499 |
+
* Guidelines are provided for responsible use with the model, see the [Responsible Generative AI Toolkit](https://ai.google.dev/responsible).
|
| 500 |
+
* **Transparency and Accountability**
|
| 501 |
+
* This model card summarizes details on the models' architecture, capabilities, limitations, and evaluation processes.
|
| 502 |
+
* A responsibly developed open model offers the opportunity to share innovation by making VLM technology accessible to developers and researchers across the AI ecosystem.
|
| 503 |
+
|
| 504 |
+
**Risks identified and mitigations**:
|
| 505 |
+
|
| 506 |
+
* **Generation of harmful content**: Mechanisms and guidelines for content safety are essential. Developers are encouraged to exercise caution and implement appropriate content safety safeguards based on their specific product policies and application use cases.
|
| 507 |
+
* **Misuse for malicious purposes**: Technical limitations and developer and end-user education can help mitigate against malicious applications of VLMs. Educational resources and reporting mechanisms for users to flag misuse are provided.
|
| 508 |
+
* **Privacy violations**: Models were trained on data filtered for removal of certain personal information and other sensitive data. Developers are encouraged to adhere to privacy regulations with privacy-preserving techniques.
|
| 509 |
+
* **Perpetuation of biases**: It's encouraged to perform continuous monitoring (using evaluation metrics, human review) and the exploration of de-biasing techniques during model training, fine-tuning, and other use cases.
|
| 510 |
+
|
| 511 |
+
### **Benefits**
|
| 512 |
+
|
| 513 |
+
At the time of release, this family of models provides high-performance open vision-language model implementations designed from the ground up for responsible AI development compared to similarly sized models.
|
chat_template.jinja
ADDED
|
@@ -0,0 +1,266 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{%- macro format_parameters(properties, required) -%}
|
| 2 |
+
{%- set standard_keys = ['description', 'type', 'properties', 'required', 'nullable'] -%}
|
| 3 |
+
{%- set ns = namespace(found_first=false) -%}
|
| 4 |
+
{%- for key, value in properties | dictsort -%}
|
| 5 |
+
{%- set add_comma = false -%}
|
| 6 |
+
{%- if key not in standard_keys -%}
|
| 7 |
+
{%- if ns.found_first %},{% endif -%}
|
| 8 |
+
{%- set ns.found_first = true -%}
|
| 9 |
+
{{ key }}:{
|
| 10 |
+
{%- if value['description'] -%}
|
| 11 |
+
description:<|"|>{{ value['description'] }}<|"|>
|
| 12 |
+
{%- set add_comma = true -%}
|
| 13 |
+
{%- endif -%}
|
| 14 |
+
{%- if value['nullable'] %}
|
| 15 |
+
{%- if add_comma %},{%- else -%} {%- set add_comma = true -%} {% endif -%}
|
| 16 |
+
nullable:true
|
| 17 |
+
{%- endif -%}
|
| 18 |
+
{%- if value['type'] | upper == 'STRING' -%}
|
| 19 |
+
{%- if value['enum'] -%}
|
| 20 |
+
{%- if add_comma %},{%- else -%} {%- set add_comma = true -%} {% endif -%}
|
| 21 |
+
enum:{{ format_argument(value['enum']) }}
|
| 22 |
+
{%- endif -%}
|
| 23 |
+
{%- elif value['type'] | upper == 'OBJECT' -%}
|
| 24 |
+
,properties:{
|
| 25 |
+
{%- if value['properties'] is defined and value['properties'] is mapping -%}
|
| 26 |
+
{{- format_parameters(value['properties'], value['required'] | default([])) -}}
|
| 27 |
+
{%- elif value is mapping -%}
|
| 28 |
+
{{- format_parameters(value, value['required'] | default([])) -}}
|
| 29 |
+
{%- endif -%}
|
| 30 |
+
}
|
| 31 |
+
{%- if value['required'] -%}
|
| 32 |
+
,required:[
|
| 33 |
+
{%- for item in value['required'] | default([]) -%}
|
| 34 |
+
<|"|>{{- item -}}<|"|>
|
| 35 |
+
{%- if not loop.last %},{% endif -%}
|
| 36 |
+
{%- endfor -%}
|
| 37 |
+
]
|
| 38 |
+
{%- endif -%}
|
| 39 |
+
{%- elif value['type'] | upper == 'ARRAY' -%}
|
| 40 |
+
{%- if value['items'] is mapping and value['items'] -%}
|
| 41 |
+
,items:{
|
| 42 |
+
{%- set ns_items = namespace(found_first=false) -%}
|
| 43 |
+
{%- for item_key, item_value in value['items'] | dictsort -%}
|
| 44 |
+
{%- if item_value is not none -%}
|
| 45 |
+
{%- if ns_items.found_first %},{% endif -%}
|
| 46 |
+
{%- set ns_items.found_first = true -%}
|
| 47 |
+
{%- if item_key == 'properties' -%}
|
| 48 |
+
properties:{
|
| 49 |
+
{%- if item_value is mapping -%}
|
| 50 |
+
{{- format_parameters(item_value, value['items']['required'] | default([])) -}}
|
| 51 |
+
{%- endif -%}
|
| 52 |
+
}
|
| 53 |
+
{%- elif item_key == 'required' -%}
|
| 54 |
+
required:[
|
| 55 |
+
{%- for req_item in item_value -%}
|
| 56 |
+
<|"|>{{- req_item -}}<|"|>
|
| 57 |
+
{%- if not loop.last %},{% endif -%}
|
| 58 |
+
{%- endfor -%}
|
| 59 |
+
]
|
| 60 |
+
{%- elif item_key == 'type' -%}
|
| 61 |
+
{%- if item_value is string -%}
|
| 62 |
+
type:{{ format_argument(item_value | upper) }}
|
| 63 |
+
{%- else -%}
|
| 64 |
+
type:{{ format_argument(item_value | map('upper') | list) }}
|
| 65 |
+
{%- endif -%}
|
| 66 |
+
{%- else -%}
|
| 67 |
+
{{ item_key }}:{{ format_argument(item_value) }}
|
| 68 |
+
{%- endif -%}
|
| 69 |
+
{%- endif -%}
|
| 70 |
+
{%- endfor -%}
|
| 71 |
+
}
|
| 72 |
+
{%- endif -%}
|
| 73 |
+
{%- endif -%}
|
| 74 |
+
{%- if add_comma %},{%- else -%} {%- set add_comma = true -%} {% endif -%}
|
| 75 |
+
type:<|"|>{{ value['type'] | upper }}<|"|>}
|
| 76 |
+
{%- endif -%}
|
| 77 |
+
{%- endfor -%}
|
| 78 |
+
{%- endmacro -%}
|
| 79 |
+
{%- macro format_function_declaration(tool_data) -%}
|
| 80 |
+
declaration:{{- tool_data['function']['name'] -}}{description:<|"|>{{- tool_data['function']['description'] -}}<|"|>
|
| 81 |
+
{%- set params = tool_data['function']['parameters'] -%}
|
| 82 |
+
{%- if params -%}
|
| 83 |
+
,parameters:{
|
| 84 |
+
{%- if params['properties'] -%}
|
| 85 |
+
properties:{ {{- format_parameters(params['properties'], params['required']) -}} },
|
| 86 |
+
{%- endif -%}
|
| 87 |
+
{%- if params['required'] -%}
|
| 88 |
+
required:[
|
| 89 |
+
{%- for item in params['required'] -%}
|
| 90 |
+
<|"|>{{- item -}}<|"|>
|
| 91 |
+
{{- ',' if not loop.last -}}
|
| 92 |
+
{%- endfor -%}
|
| 93 |
+
],
|
| 94 |
+
{%- endif -%}
|
| 95 |
+
{%- if params['type'] -%}
|
| 96 |
+
type:<|"|>{{- params['type'] | upper -}}<|"|>}
|
| 97 |
+
{%- endif -%}
|
| 98 |
+
{%- endif -%}
|
| 99 |
+
{%- if 'response' in tool_data['function'] -%}
|
| 100 |
+
{%- set response_declaration = tool_data['function']['response'] -%}
|
| 101 |
+
,response:{
|
| 102 |
+
{%- if response_declaration['description'] -%}
|
| 103 |
+
description:<|"|>{{- response_declaration['description'] -}}<|"|>,
|
| 104 |
+
{%- endif -%}
|
| 105 |
+
{%- if response_declaration['type'] | upper == 'OBJECT' -%}
|
| 106 |
+
type:<|"|>{{- response_declaration['type'] | upper -}}<|"|>}
|
| 107 |
+
{%- endif -%}
|
| 108 |
+
{%- endif -%}
|
| 109 |
+
}
|
| 110 |
+
{%- endmacro -%}
|
| 111 |
+
{%- macro format_argument(argument, escape_keys=True) -%}
|
| 112 |
+
{%- if argument is string -%}
|
| 113 |
+
{{- '<|"|>' + argument + '<|"|>' -}}
|
| 114 |
+
{%- elif argument is boolean -%}
|
| 115 |
+
{{- 'true' if argument else 'false' -}}
|
| 116 |
+
{%- elif argument is mapping -%}
|
| 117 |
+
{{- '{' -}}
|
| 118 |
+
{%- set ns = namespace(found_first=false) -%}
|
| 119 |
+
{%- for key, value in argument | dictsort -%}
|
| 120 |
+
{%- if ns.found_first %},{% endif -%}
|
| 121 |
+
{%- set ns.found_first = true -%}
|
| 122 |
+
{%- if escape_keys -%}
|
| 123 |
+
{{- '<|"|>' + key + '<|"|>' -}}
|
| 124 |
+
{%- else -%}
|
| 125 |
+
{{- key -}}
|
| 126 |
+
{%- endif -%}
|
| 127 |
+
:{{- format_argument(value, escape_keys=escape_keys) -}}
|
| 128 |
+
{%- endfor -%}
|
| 129 |
+
{{- '}' -}}
|
| 130 |
+
{%- elif argument is sequence -%}
|
| 131 |
+
{{- '[' -}}
|
| 132 |
+
{%- for item in argument -%}
|
| 133 |
+
{{- format_argument(item, escape_keys=escape_keys) -}}
|
| 134 |
+
{%- if not loop.last %},{% endif -%}
|
| 135 |
+
{%- endfor -%}
|
| 136 |
+
{{- ']' -}}
|
| 137 |
+
{%- else -%}
|
| 138 |
+
{{- argument -}}
|
| 139 |
+
{%- endif -%}
|
| 140 |
+
{%- endmacro -%}
|
| 141 |
+
{%- macro strip_thinking(text) -%}
|
| 142 |
+
{%- set ns = namespace(result='') -%}
|
| 143 |
+
{%- for part in text.split('<channel|>') -%}
|
| 144 |
+
{%- if '<|channel>' in part -%}
|
| 145 |
+
{%- set ns.result = ns.result + part.split('<|channel>')[0] -%}
|
| 146 |
+
{%- else -%}
|
| 147 |
+
{%- set ns.result = ns.result + part -%}
|
| 148 |
+
{%- endif -%}
|
| 149 |
+
{%- endfor -%}
|
| 150 |
+
{{- ns.result | trim -}}
|
| 151 |
+
{%- endmacro -%}
|
| 152 |
+
|
| 153 |
+
{%- set ns = namespace(prev_message_type=None) -%}
|
| 154 |
+
{%- set loop_messages = messages -%}
|
| 155 |
+
{{ bos_token }}
|
| 156 |
+
{#- Handle System/Tool Definitions Block -#}
|
| 157 |
+
{%- if (enable_thinking is defined and enable_thinking) or tools or messages[0]['role'] in ['system', 'developer'] -%}
|
| 158 |
+
{{- '<|turn>system\n' -}}
|
| 159 |
+
|
| 160 |
+
{#- Inject Thinking token at the very top of the FIRST system turn -#}
|
| 161 |
+
{%- if enable_thinking is defined and enable_thinking -%}
|
| 162 |
+
{{- '<|think|>' -}}
|
| 163 |
+
{%- set ns.prev_message_type = 'think' -%}
|
| 164 |
+
{%- endif -%}
|
| 165 |
+
|
| 166 |
+
{%- if messages[0]['role'] in ['system', 'developer'] -%}
|
| 167 |
+
{{- messages[0]['content'] | trim -}}
|
| 168 |
+
{%- set loop_messages = messages[1:] -%}
|
| 169 |
+
{%- endif -%}
|
| 170 |
+
|
| 171 |
+
{%- if tools -%}
|
| 172 |
+
{%- for tool in tools %}
|
| 173 |
+
{{- '<|tool>' -}}
|
| 174 |
+
{{- format_function_declaration(tool) | trim -}}
|
| 175 |
+
{{- '<tool|>' -}}
|
| 176 |
+
{%- endfor %}
|
| 177 |
+
{%- set ns.prev_message_type = 'tool' -%}
|
| 178 |
+
{%- endif -%}
|
| 179 |
+
|
| 180 |
+
{{- '<turn|>\n' -}}
|
| 181 |
+
{%- endif %}
|
| 182 |
+
|
| 183 |
+
{#- Loop through messages -#}
|
| 184 |
+
{%- for message in loop_messages -%}
|
| 185 |
+
{%- set ns.prev_message_type = None -%}
|
| 186 |
+
{%- set role = 'model' if message['role'] == 'assistant' else message['role'] -%}
|
| 187 |
+
{{- '<|turn>' + role + '\n' }}
|
| 188 |
+
|
| 189 |
+
{%- if message['tool_calls'] -%}
|
| 190 |
+
{%- for tool_call in message['tool_calls'] -%}
|
| 191 |
+
{%- set function = tool_call['function'] -%}
|
| 192 |
+
{{- '<|tool_call>call:' + function['name'] + '{' -}}
|
| 193 |
+
{%- if function['arguments'] is mapping -%}
|
| 194 |
+
{%- set ns_args = namespace(found_first=false) -%}
|
| 195 |
+
{%- for key, value in function['arguments'] | dictsort -%}
|
| 196 |
+
{%- if ns_args.found_first %},{% endif -%}
|
| 197 |
+
{%- set ns_args.found_first = true -%}
|
| 198 |
+
{{- key -}}:{{- format_argument(value, escape_keys=False) -}}
|
| 199 |
+
{%- endfor -%}
|
| 200 |
+
{%- elif function['arguments'] is string -%}
|
| 201 |
+
{{- function['arguments'] -}}
|
| 202 |
+
{%- endif -%}
|
| 203 |
+
{{- '}<tool_call|>' -}}
|
| 204 |
+
{%- endfor -%}
|
| 205 |
+
{%- set ns.prev_message_type = 'tool_call' -%}
|
| 206 |
+
{%- endif -%}
|
| 207 |
+
|
| 208 |
+
{%- if message['tool_responses'] -%}
|
| 209 |
+
{#- Tool Response handling -#}
|
| 210 |
+
{%- for tool_response in message['tool_responses'] -%}
|
| 211 |
+
{{- '<|tool_response>' -}}
|
| 212 |
+
{%- if tool_response['response'] is mapping -%}
|
| 213 |
+
{{- 'response:' + tool_response['name'] | default('unknown') + '{' -}}
|
| 214 |
+
{%- for key, value in tool_response['response'] | dictsort -%}
|
| 215 |
+
{{- key -}}:{{- format_argument(value, escape_keys=False) -}}
|
| 216 |
+
{%- if not loop.last %},{% endif -%}
|
| 217 |
+
{%- endfor -%}
|
| 218 |
+
{{- '}' -}}
|
| 219 |
+
{%- else -%}
|
| 220 |
+
{{- 'response:' + tool_response['name'] | default('unknown') + '{value:' + format_argument(tool_response['response'], escape_keys=False) + '}' -}}
|
| 221 |
+
{%- endif -%}
|
| 222 |
+
{{- '<tool_response|>' -}}
|
| 223 |
+
{%- endfor -%}
|
| 224 |
+
{%- set ns.prev_message_type = 'tool_response' -%}
|
| 225 |
+
{%- endif -%}
|
| 226 |
+
|
| 227 |
+
{%- if message['content'] is string -%}
|
| 228 |
+
{%- if role == 'model' -%}
|
| 229 |
+
{{- strip_thinking(message['content']) -}}
|
| 230 |
+
{%- else -%}
|
| 231 |
+
{{- message['content'] | trim -}}
|
| 232 |
+
{%- endif -%}
|
| 233 |
+
{%- elif message['content'] is sequence -%}
|
| 234 |
+
{%- for item in message['content'] -%}
|
| 235 |
+
{%- if item['type'] == 'text' -%}
|
| 236 |
+
{%- if role == 'model' -%}
|
| 237 |
+
{{- strip_thinking(item['text']) -}}
|
| 238 |
+
{%- else -%}
|
| 239 |
+
{{- item['text'] | trim -}}
|
| 240 |
+
{%- endif -%}
|
| 241 |
+
{%- elif item['type'] == 'image' -%}
|
| 242 |
+
{{- '\n\n<|image|>\n\n' -}}
|
| 243 |
+
{%- set ns.prev_message_type = 'image' -%}
|
| 244 |
+
{%- elif item['type'] == 'audio' -%}
|
| 245 |
+
{{- '<|audio|>' -}}
|
| 246 |
+
{%- set ns.prev_message_type = 'audio' -%}
|
| 247 |
+
{%- elif item['type'] == 'video' -%}
|
| 248 |
+
{{- '\n\n<|video|>\n\n' -}}
|
| 249 |
+
{%- set ns.prev_message_type = 'video' -%}
|
| 250 |
+
{%- endif -%}
|
| 251 |
+
{%- endfor -%}
|
| 252 |
+
{%- endif -%}
|
| 253 |
+
|
| 254 |
+
{%- if not (message['tool_responses'] and not message['content']) -%}
|
| 255 |
+
{{- '<turn|>\n' -}}
|
| 256 |
+
{%- endif -%}
|
| 257 |
+
{%- endfor -%}
|
| 258 |
+
|
| 259 |
+
{%- if add_generation_prompt -%}
|
| 260 |
+
{%- if ns.prev_message_type != 'tool_response' -%}
|
| 261 |
+
{{- '<|turn>model\n' -}}
|
| 262 |
+
{%- endif -%}
|
| 263 |
+
{%- if not enable_thinking | default(false) -%}
|
| 264 |
+
{{- '<|channel>thought\n<channel|>' -}}
|
| 265 |
+
{%- endif -%}
|
| 266 |
+
{%- endif -%}
|
config.json
ADDED
|
@@ -0,0 +1,176 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"architectures": [
|
| 3 |
+
"Gemma4ForConditionalGeneration"
|
| 4 |
+
],
|
| 5 |
+
"audio_config": null,
|
| 6 |
+
"audio_token_id": 258881,
|
| 7 |
+
"boa_token_id": 256000,
|
| 8 |
+
"boi_token_id": 255999,
|
| 9 |
+
"dtype": "bfloat16",
|
| 10 |
+
"eoa_token_id": 258883,
|
| 11 |
+
"eoa_token_index": 258883,
|
| 12 |
+
"eoi_token_id": 258882,
|
| 13 |
+
"eos_token_id": [
|
| 14 |
+
1,
|
| 15 |
+
106
|
| 16 |
+
],
|
| 17 |
+
"image_token_id": 258880,
|
| 18 |
+
"initializer_range": 0.02,
|
| 19 |
+
"model_type": "gemma4",
|
| 20 |
+
"text_config": {
|
| 21 |
+
"attention_bias": false,
|
| 22 |
+
"attention_dropout": 0.0,
|
| 23 |
+
"attention_k_eq_v": true,
|
| 24 |
+
"bos_token_id": 2,
|
| 25 |
+
"dtype": "bfloat16",
|
| 26 |
+
"enable_moe_block": false,
|
| 27 |
+
"eos_token_id": 1,
|
| 28 |
+
"expert_intermediate_size": null,
|
| 29 |
+
"final_logit_softcapping": 30.0,
|
| 30 |
+
"global_head_dim": 512,
|
| 31 |
+
"head_dim": 256,
|
| 32 |
+
"hidden_activation": "gelu_pytorch_tanh",
|
| 33 |
+
"hidden_size": 5376,
|
| 34 |
+
"hidden_size_per_layer_input": 0,
|
| 35 |
+
"initializer_range": 0.02,
|
| 36 |
+
"intermediate_size": 21504,
|
| 37 |
+
"layer_types": [
|
| 38 |
+
"sliding_attention",
|
| 39 |
+
"sliding_attention",
|
| 40 |
+
"sliding_attention",
|
| 41 |
+
"sliding_attention",
|
| 42 |
+
"sliding_attention",
|
| 43 |
+
"full_attention",
|
| 44 |
+
"sliding_attention",
|
| 45 |
+
"sliding_attention",
|
| 46 |
+
"sliding_attention",
|
| 47 |
+
"sliding_attention",
|
| 48 |
+
"sliding_attention",
|
| 49 |
+
"full_attention",
|
| 50 |
+
"sliding_attention",
|
| 51 |
+
"sliding_attention",
|
| 52 |
+
"sliding_attention",
|
| 53 |
+
"sliding_attention",
|
| 54 |
+
"sliding_attention",
|
| 55 |
+
"full_attention",
|
| 56 |
+
"sliding_attention",
|
| 57 |
+
"sliding_attention",
|
| 58 |
+
"sliding_attention",
|
| 59 |
+
"sliding_attention",
|
| 60 |
+
"sliding_attention",
|
| 61 |
+
"full_attention",
|
| 62 |
+
"sliding_attention",
|
| 63 |
+
"sliding_attention",
|
| 64 |
+
"sliding_attention",
|
| 65 |
+
"sliding_attention",
|
| 66 |
+
"sliding_attention",
|
| 67 |
+
"full_attention",
|
| 68 |
+
"sliding_attention",
|
| 69 |
+
"sliding_attention",
|
| 70 |
+
"sliding_attention",
|
| 71 |
+
"sliding_attention",
|
| 72 |
+
"sliding_attention",
|
| 73 |
+
"full_attention",
|
| 74 |
+
"sliding_attention",
|
| 75 |
+
"sliding_attention",
|
| 76 |
+
"sliding_attention",
|
| 77 |
+
"sliding_attention",
|
| 78 |
+
"sliding_attention",
|
| 79 |
+
"full_attention",
|
| 80 |
+
"sliding_attention",
|
| 81 |
+
"sliding_attention",
|
| 82 |
+
"sliding_attention",
|
| 83 |
+
"sliding_attention",
|
| 84 |
+
"sliding_attention",
|
| 85 |
+
"full_attention",
|
| 86 |
+
"sliding_attention",
|
| 87 |
+
"sliding_attention",
|
| 88 |
+
"sliding_attention",
|
| 89 |
+
"sliding_attention",
|
| 90 |
+
"sliding_attention",
|
| 91 |
+
"full_attention",
|
| 92 |
+
"sliding_attention",
|
| 93 |
+
"sliding_attention",
|
| 94 |
+
"sliding_attention",
|
| 95 |
+
"sliding_attention",
|
| 96 |
+
"sliding_attention",
|
| 97 |
+
"full_attention"
|
| 98 |
+
],
|
| 99 |
+
"max_position_embeddings": 262144,
|
| 100 |
+
"model_type": "gemma4_text",
|
| 101 |
+
"num_attention_heads": 32,
|
| 102 |
+
"num_experts": null,
|
| 103 |
+
"num_global_key_value_heads": 4,
|
| 104 |
+
"num_hidden_layers": 60,
|
| 105 |
+
"num_key_value_heads": 16,
|
| 106 |
+
"num_kv_shared_layers": 0,
|
| 107 |
+
"pad_token_id": 0,
|
| 108 |
+
"rms_norm_eps": 1e-06,
|
| 109 |
+
"rope_parameters": {
|
| 110 |
+
"full_attention": {
|
| 111 |
+
"partial_rotary_factor": 0.25,
|
| 112 |
+
"rope_theta": 1000000.0,
|
| 113 |
+
"rope_type": "proportional"
|
| 114 |
+
},
|
| 115 |
+
"sliding_attention": {
|
| 116 |
+
"rope_theta": 10000.0,
|
| 117 |
+
"rope_type": "default"
|
| 118 |
+
}
|
| 119 |
+
},
|
| 120 |
+
"sliding_window": 1024,
|
| 121 |
+
"tie_word_embeddings": true,
|
| 122 |
+
"top_k_experts": null,
|
| 123 |
+
"use_bidirectional_attention": "vision",
|
| 124 |
+
"use_cache": true,
|
| 125 |
+
"use_double_wide_mlp": false,
|
| 126 |
+
"vocab_size": 262144,
|
| 127 |
+
"vocab_size_per_layer_input": 262144
|
| 128 |
+
},
|
| 129 |
+
"tie_word_embeddings": true,
|
| 130 |
+
"transformers_version": "5.5.0.dev0",
|
| 131 |
+
"video_token_id": 258884,
|
| 132 |
+
"vision_config": {
|
| 133 |
+
"_name_or_path": "",
|
| 134 |
+
"architectures": null,
|
| 135 |
+
"attention_bias": false,
|
| 136 |
+
"attention_dropout": 0.0,
|
| 137 |
+
"chunk_size_feed_forward": 0,
|
| 138 |
+
"default_output_length": 280,
|
| 139 |
+
"dtype": "bfloat16",
|
| 140 |
+
"global_head_dim": 72,
|
| 141 |
+
"head_dim": 72,
|
| 142 |
+
"hidden_activation": "gelu_pytorch_tanh",
|
| 143 |
+
"hidden_size": 1152,
|
| 144 |
+
"id2label": {
|
| 145 |
+
"0": "LABEL_0",
|
| 146 |
+
"1": "LABEL_1"
|
| 147 |
+
},
|
| 148 |
+
"initializer_range": 0.02,
|
| 149 |
+
"intermediate_size": 4304,
|
| 150 |
+
"is_encoder_decoder": false,
|
| 151 |
+
"label2id": {
|
| 152 |
+
"LABEL_0": 0,
|
| 153 |
+
"LABEL_1": 1
|
| 154 |
+
},
|
| 155 |
+
"max_position_embeddings": 131072,
|
| 156 |
+
"model_type": "gemma4_vision",
|
| 157 |
+
"num_attention_heads": 16,
|
| 158 |
+
"num_hidden_layers": 27,
|
| 159 |
+
"num_key_value_heads": 16,
|
| 160 |
+
"output_attentions": false,
|
| 161 |
+
"output_hidden_states": false,
|
| 162 |
+
"patch_size": 16,
|
| 163 |
+
"pooling_kernel_size": 3,
|
| 164 |
+
"position_embedding_size": 10240,
|
| 165 |
+
"problem_type": null,
|
| 166 |
+
"return_dict": true,
|
| 167 |
+
"rms_norm_eps": 1e-06,
|
| 168 |
+
"rope_parameters": {
|
| 169 |
+
"rope_theta": 100.0,
|
| 170 |
+
"rope_type": "default"
|
| 171 |
+
},
|
| 172 |
+
"standardize": true,
|
| 173 |
+
"use_clipped_linears": false
|
| 174 |
+
},
|
| 175 |
+
"vision_soft_tokens_per_image": 280
|
| 176 |
+
}
|
generation_config.json
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"bos_token_id": 2,
|
| 3 |
+
"do_sample": true,
|
| 4 |
+
"eos_token_id": [
|
| 5 |
+
1,
|
| 6 |
+
106,
|
| 7 |
+
50
|
| 8 |
+
],
|
| 9 |
+
"pad_token_id": 0,
|
| 10 |
+
"temperature": 1.0,
|
| 11 |
+
"top_k": 64,
|
| 12 |
+
"top_p": 0.95,
|
| 13 |
+
"transformers_version": "5.5.0.dev0"
|
| 14 |
+
}
|
model-00001-of-00002.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:eeef8791537bc04f110967c513149e037d2a9ae97d49add7291ebfa62806bbfa
|
| 3 |
+
size 49784788364
|
model-00002-of-00002.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:018912220f559f7025d60333e0996183cd538aa77ad6f4988a89ce47be681f10
|
| 3 |
+
size 12761549884
|
model.safetensors.index.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
processor_config.json
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"audio_ms_per_token": 40,
|
| 3 |
+
"audio_seq_length": 750,
|
| 4 |
+
"feature_extractor": {
|
| 5 |
+
"dither": 0.0,
|
| 6 |
+
"feature_extractor_type": "Gemma4AudioFeatureExtractor",
|
| 7 |
+
"feature_size": 128,
|
| 8 |
+
"fft_length": 512,
|
| 9 |
+
"fft_overdrive": false,
|
| 10 |
+
"frame_length": 320,
|
| 11 |
+
"hop_length": 160,
|
| 12 |
+
"input_scale_factor": 1.0,
|
| 13 |
+
"max_frequency": 8000.0,
|
| 14 |
+
"mel_floor": 0.001,
|
| 15 |
+
"min_frequency": 0.0,
|
| 16 |
+
"padding_side": "right",
|
| 17 |
+
"padding_value": 0.0,
|
| 18 |
+
"per_bin_mean": null,
|
| 19 |
+
"per_bin_stddev": null,
|
| 20 |
+
"preemphasis": 0.0,
|
| 21 |
+
"preemphasis_htk_flavor": true,
|
| 22 |
+
"return_attention_mask": true,
|
| 23 |
+
"sampling_rate": 16000
|
| 24 |
+
},
|
| 25 |
+
"image_processor": {
|
| 26 |
+
"do_convert_rgb": true,
|
| 27 |
+
"do_normalize": false,
|
| 28 |
+
"do_rescale": true,
|
| 29 |
+
"do_resize": true,
|
| 30 |
+
"image_mean": [
|
| 31 |
+
0.0,
|
| 32 |
+
0.0,
|
| 33 |
+
0.0
|
| 34 |
+
],
|
| 35 |
+
"image_processor_type": "Gemma4ImageProcessor",
|
| 36 |
+
"image_seq_length": 280,
|
| 37 |
+
"image_std": [
|
| 38 |
+
1.0,
|
| 39 |
+
1.0,
|
| 40 |
+
1.0
|
| 41 |
+
],
|
| 42 |
+
"max_soft_tokens": 280,
|
| 43 |
+
"patch_size": 16,
|
| 44 |
+
"pooling_kernel_size": 3,
|
| 45 |
+
"resample": 3,
|
| 46 |
+
"rescale_factor": 0.00392156862745098
|
| 47 |
+
},
|
| 48 |
+
"image_seq_length": 280,
|
| 49 |
+
"processor_class": "Gemma4Processor",
|
| 50 |
+
"video_processor": {
|
| 51 |
+
"do_convert_rgb": true,
|
| 52 |
+
"do_normalize": true,
|
| 53 |
+
"do_rescale": true,
|
| 54 |
+
"do_resize": true,
|
| 55 |
+
"do_sample_frames": true,
|
| 56 |
+
"image_mean": [
|
| 57 |
+
0.0,
|
| 58 |
+
0.0,
|
| 59 |
+
0.0
|
| 60 |
+
],
|
| 61 |
+
"image_std": [
|
| 62 |
+
1.0,
|
| 63 |
+
1.0,
|
| 64 |
+
1.0
|
| 65 |
+
],
|
| 66 |
+
"max_soft_tokens": 70,
|
| 67 |
+
"num_frames": 32,
|
| 68 |
+
"patch_size": 16,
|
| 69 |
+
"pooling_kernel_size": 3,
|
| 70 |
+
"resample": 3,
|
| 71 |
+
"rescale_factor": 0.00392156862745098,
|
| 72 |
+
"return_metadata": false,
|
| 73 |
+
"video_processor_type": "Gemma4VideoProcessor"
|
| 74 |
+
}
|
| 75 |
+
}
|
tokenizer.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:cc8d3a0ce36466ccc1278bf987df5f71db1719b9ca6b4118264f45cb627bfe0f
|
| 3 |
+
size 32169626
|
tokenizer_config.json
ADDED
|
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"audio_token": "<|audio|>",
|
| 3 |
+
"backend": "tokenizers",
|
| 4 |
+
"boa_token": "<|audio>",
|
| 5 |
+
"boi_token": "<|image>",
|
| 6 |
+
"bos_token": "<bos>",
|
| 7 |
+
"eoa_token": "<audio|>",
|
| 8 |
+
"eoc_token": "<channel|>",
|
| 9 |
+
"eoi_token": "<image|>",
|
| 10 |
+
"eos_token": "<eos>",
|
| 11 |
+
"eot_token": "<turn|>",
|
| 12 |
+
"escape_token": "<|\"|>",
|
| 13 |
+
"etc_token": "<tool_call|>",
|
| 14 |
+
"etd_token": "<tool|>",
|
| 15 |
+
"etr_token": "<tool_response|>",
|
| 16 |
+
"extra_special_tokens": [
|
| 17 |
+
"<|video|>"
|
| 18 |
+
],
|
| 19 |
+
"image_token": "<|image|>",
|
| 20 |
+
"mask_token": "<mask>",
|
| 21 |
+
"model_max_length": 1000000000000000019884624838656,
|
| 22 |
+
"pad_token": "<pad>",
|
| 23 |
+
"padding_side": "left",
|
| 24 |
+
"processor_class": "Gemma4Processor",
|
| 25 |
+
"response_schema": {
|
| 26 |
+
"type": "object",
|
| 27 |
+
"properties": {
|
| 28 |
+
"role": {
|
| 29 |
+
"const": "assistant"
|
| 30 |
+
},
|
| 31 |
+
"thinking": {
|
| 32 |
+
"type": "string"
|
| 33 |
+
},
|
| 34 |
+
"content": {
|
| 35 |
+
"type": "string"
|
| 36 |
+
},
|
| 37 |
+
"tool_calls": {
|
| 38 |
+
"x-regex-iterator": "<\\|tool_call>(.*?)<tool_call\\|>",
|
| 39 |
+
"type": "array",
|
| 40 |
+
"items": {
|
| 41 |
+
"type": "object",
|
| 42 |
+
"properties": {
|
| 43 |
+
"type": {
|
| 44 |
+
"const": "function"
|
| 45 |
+
},
|
| 46 |
+
"function": {
|
| 47 |
+
"type": "object",
|
| 48 |
+
"x-regex": "call\\:(?P<name>\\w+)(?P<arguments>\\{.*\\})",
|
| 49 |
+
"properties": {
|
| 50 |
+
"name": {
|
| 51 |
+
"type": "string"
|
| 52 |
+
},
|
| 53 |
+
"arguments": {
|
| 54 |
+
"type": "object",
|
| 55 |
+
"x-parser": "gemma4-tool-call",
|
| 56 |
+
"additionalProperties": {}
|
| 57 |
+
}
|
| 58 |
+
}
|
| 59 |
+
}
|
| 60 |
+
}
|
| 61 |
+
}
|
| 62 |
+
}
|
| 63 |
+
},
|
| 64 |
+
"x-regex": "(\\<\\|channel\\>thought\\n(?P<thinking>.*?)\\<channel\\|\\>)?(?P<content>(?:(?!\\<\\|tool_call\\>)(?!\\<turn\\|\\>).)+)?(?P<tool_calls>\\<\\|tool_call\\>.*\\<tool_call\\|\\>)?(?:\\<turn\\|\\>)?"
|
| 65 |
+
},
|
| 66 |
+
"soc_token": "<|channel>",
|
| 67 |
+
"sot_token": "<|turn>",
|
| 68 |
+
"stc_token": "<|tool_call>",
|
| 69 |
+
"std_token": "<|tool>",
|
| 70 |
+
"str_token": "<|tool_response>",
|
| 71 |
+
"think_token": "<|think|>",
|
| 72 |
+
"tokenizer_class": "GemmaTokenizer",
|
| 73 |
+
"unk_token": "<unk>"
|
| 74 |
+
}
|