Add files using upload-large-folder tool
Browse files- .gitattributes +1 -0
- README.md +836 -0
- chat_template.jinja +6 -0
- config.json +36 -0
- generation_config.json +11 -0
- images/logo_alia_2.png +0 -0
- model-00001-of-00017.safetensors +3 -0
- model-00002-of-00017.safetensors +3 -0
- model-00003-of-00017.safetensors +3 -0
- model-00004-of-00017.safetensors +3 -0
- model-00005-of-00017.safetensors +3 -0
- model-00006-of-00017.safetensors +3 -0
- model-00007-of-00017.safetensors +3 -0
- model-00008-of-00017.safetensors +3 -0
- model-00009-of-00017.safetensors +3 -0
- model-00010-of-00017.safetensors +3 -0
- model-00011-of-00017.safetensors +3 -0
- model-00012-of-00017.safetensors +3 -0
- model-00013-of-00017.safetensors +3 -0
- model-00014-of-00017.safetensors +3 -0
- model-00015-of-00017.safetensors +3 -0
- model-00016-of-00017.safetensors +3 -0
- model-00017-of-00017.safetensors +3 -0
- model.safetensors.index.json +442 -0
- special_tokens_map.json +30 -0
- tokenizer.json +3 -0
- tokenizer.model +3 -0
- tokenizer_config.json +1100 -0
.gitattributes
CHANGED
|
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
tokenizer.json filter=lfs diff=lfs merge=lfs -text
|
README.md
ADDED
|
@@ -0,0 +1,836 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
license: apache-2.0
|
| 3 |
+
library_name: transformers
|
| 4 |
+
pipeline_tag: text-generation
|
| 5 |
+
language:
|
| 6 |
+
- ca
|
| 7 |
+
- en
|
| 8 |
+
- es
|
| 9 |
+
- eu
|
| 10 |
+
- gl
|
| 11 |
+
datasets:
|
| 12 |
+
- CohereLabs/aya_dataset
|
| 13 |
+
- projecte-aina/CoQCat
|
| 14 |
+
- databricks/databricks-dolly-15k
|
| 15 |
+
- projecte-aina/dolly3k_ca
|
| 16 |
+
- projecte-aina/MentorES
|
| 17 |
+
- projecte-aina/MentorCA
|
| 18 |
+
- HuggingFaceH4/no_robots
|
| 19 |
+
- projecte-aina/RAG_Multilingual
|
| 20 |
+
- Unbabel/TowerBlocks-v0.2
|
| 21 |
+
- OpenAssistant/oasst2
|
| 22 |
+
- open-r1/OpenR1-Math-220k
|
| 23 |
+
- HuggingFaceFW/fineweb-edu
|
| 24 |
+
- allenai/WildChat-1M
|
| 25 |
+
base_model:
|
| 26 |
+
- BSC-LT/ALIA-40b
|
| 27 |
+
---
|
| 28 |
+
|
| 29 |
+

|
| 30 |
+
|
| 31 |
+
> [!NOTE] **Sampling Parameters:** For optimal performance, we recommend using temperatures close to zero (0 - 0.2). Additionally, we advise against using any type of repetition penalty, as from our experience, [it negatively impacts instructed model's responses](https://www.reddit.com/r/LocalLLaMA/comments/1g383mq/repetition_penalties_are_terribly_implemented_a/).
|
| 32 |
+
|
| 33 |
+
# ALIA-40b-instruct Model Card
|
| 34 |
+
ALIA-40b-instruct-2601 is the latest release in the ALIA model family. While development is ongoing and further updates are expected, this version already incorporates several notable improvements over previous releases.
|
| 35 |
+
|
| 36 |
+
### Main improvements
|
| 37 |
+
- **Instruction Following:** Enhanced alignment and instruction-tuning, leading to more reliable adherence to user intent across a wide range of tasks.
|
| 38 |
+
- **Input Robustness:** Strengthened resilience to noisy, ambiguous, or malformed user inputs, resulting in more stable and predictable responses.
|
| 39 |
+
- **Safety**: Improved safety alignment, reducing the likelihood of generating responses related to sensitive or restricted topics and improving resistance to attacks, while maintaining helpfulness on allowed content.
|
| 40 |
+
- **Long-Context**: Improved long-context capabilities compared to the previous version, ALIA-40b-instruct-2512.
|
| 41 |
+
|
| 42 |
+
The ALIA-40b-instruct model is an instructed variant of a context-extended [base ALIA-40b model](https://huggingface.co/BSC-LT/ALIA-40b), which was pre-trained from scratch on 9.83 trillion tokens of carefully curated data spanning 35 European languages (including code). This instructed version is optimized to follow user prompts and engage in dialogue. It supports a broad range of languages (e.g. Spanish, Catalan, Basque, English, etc.) and is capable of text generation, translation, summarization, and question-answering in these languages. This version has also gone through a preliminary alignment phase for helpfulness and safety with synthetically generated preference pairs.
|
| 43 |
+
|
| 44 |
+
In keeping with our commitment to open-source development, all tools and sources used to process and create the training data are open-licensed. For clarity, our definition of open-licensed excludes any source, tool, model, or dataset whose terms of use impose restrictive conditions that impede standard open reuse.
|
| 45 |
+
|
| 46 |
+
This model is released under the permissive [Apache 2.0 license](https://www.apache.org/licenses/LICENSE-2.0). Along with the open weights, all training scripts and configuration files are made publicly available in [this GitHub repository](https://github.com/langtech-bsc/alia).
|
| 47 |
+
|
| 48 |
+
To visit the model cards of other model versions, please refer to the [Model Index](https://huggingface.co/BSC-LT/ALIA-40b-instruct-2601#model-index).
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
---
|
| 52 |
+
|
| 53 |
+
## Model Details
|
| 54 |
+
|
| 55 |
+
### Description
|
| 56 |
+
|
| 57 |
+
The ALIA-40b is a transformer-based, decoder-only language model that was pre-trained from scratch on 9.37 trillion tokens of meticulously curated data. It subsequently underwent continued pretraining on an additional 424 billion high-quality tokens, and was further extended with a supplementary 39 billion tokens drawn from a similarly diverse mixture, totalling 9.83 trillion tokens.
|
| 58 |
+
|
| 59 |
+
ALIA-40b-Instruct is an instructed variant of this latest ALIA-40b version. Its development process comprises three consecutive stages, each targeting a specific capability: (1) long-context adaptation to extend the model's context window, (2) supervised fine-tuning to improve instruction following capabilities, and (3) an alignment stage to better match human preferences and safety.
|
| 60 |
+
|
| 61 |
+
After the long-context adaptation, the post-training process begins with a supervised fine-tuning (SFT) stage, performed over 808k conversation samples to strengthen instruction following and
|
| 62 |
+
add conversational capabilities.
|
| 63 |
+
|
| 64 |
+
In the third stage, the model is aligned with human preferences through Direct Preference Optimization (DPO) using a mixture of 368k preference pairs. Of this mixture, approximately 82% of the pairs target general model helpfulness, while 18% focus on response safety.
|
| 65 |
+
|
| 66 |
+
Although the base model is highly multilingual, the post-training process concentrated primarily on Spanish, Catalan, Basque, Galician, and English. We also incorporated data from other related languages where inclusion empirically improved the performance on the target languages. However, performance in those additional languages is not guaranteed due to the limited amount of available data and the scarcity of evaluation resources.
|
| 67 |
+
|
| 68 |
+
### Hyperparameters
|
| 69 |
+
|
| 70 |
+
Here we list the specific hyperparameters used during the different training stages.
|
| 71 |
+
|
| 72 |
+
#### Long context CPT
|
| 73 |
+
|
| 74 |
+
| Hyperparameter | Value |
|
| 75 |
+
| --- | --- |
|
| 76 |
+
| Learning rate | 9e-7 |
|
| 77 |
+
| LR Scheduler | Constant |
|
| 78 |
+
| Tokens per update | 4M |
|
| 79 |
+
| Training tokens (4k → 32k) | 2B |
|
| 80 |
+
| Training tokens (32k → 160k) | 36.8B |
|
| 81 |
+
|
| 82 |
+
#### Supervised Fine-Tuning (SFT)
|
| 83 |
+
| Hyperparameter | Value |
|
| 84 |
+
| --- | --- |
|
| 85 |
+
| Learning rate | 5e-6 |
|
| 86 |
+
| Batch size | 1024 |
|
| 87 |
+
| Epochs | 1 |
|
| 88 |
+
| LR Scheduler | Cosine |
|
| 89 |
+
| Warmup Ratio | 0.03 |
|
| 90 |
+
| NEFTune Noise Alpha | 5 |
|
| 91 |
+
| Number of Samples | 807,750 |
|
| 92 |
+
|
| 93 |
+
#### Alignment
|
| 94 |
+
| Hyperparameter | Value |
|
| 95 |
+
| --- | --- |
|
| 96 |
+
| Learning rate | 2e-6 |
|
| 97 |
+
| Batch size | 1024 |
|
| 98 |
+
| Epochs | 2 |
|
| 99 |
+
| Beta | 0.1 |
|
| 100 |
+
| LR Scheduler | Linear |
|
| 101 |
+
| Number of samples | 368,475 |
|
| 102 |
+
|
| 103 |
+
### Architecture
|
| 104 |
+
|
| 105 |
+
| Attribute | Value |
|
| 106 |
+
| --- | --- |
|
| 107 |
+
| Total Parameters | 40,433,885,184 |
|
| 108 |
+
| Embedding Parameters | 2,097,152,000 |
|
| 109 |
+
| Layers | 48 |
|
| 110 |
+
| Hidden size | 8,192 |
|
| 111 |
+
| Attention heads | 64 |
|
| 112 |
+
| Context length | 163,840 |
|
| 113 |
+
| Vocabulary size | 256,000 |
|
| 114 |
+
| Precision | bfloat16 |
|
| 115 |
+
| Embedding type | RoPE |
|
| 116 |
+
| Activation Function | SwiGLU |
|
| 117 |
+
| Layer normalization | RMS Norm |
|
| 118 |
+
| Flash attention | ✅ |
|
|
| 119 |
+
| Grouped Query Attention | ✅ |
|
|
| 120 |
+
| Num. query groups | 8 |
|
| 121 |
+
|
| 122 |
+
---
|
| 123 |
+
|
| 124 |
+
## Intended Use
|
| 125 |
+
|
| 126 |
+
### Direct Use
|
| 127 |
+
|
| 128 |
+
ALIA-40b-instruct is intended for research and development purposes as a general-purpose multilingual assistant. It can be used to generate text, answer questions, translate between supported languages, and follow user instructions in those languages. As noted by the ALIA-40b base card, the ALIA family is aimed at both research and commercial use in any of the covered languages. In practice, ALIA-40b-instruct is best suited for tasks like multilingual chatbots, summarization, translation, and content generation, provided users are aware of its limitations.
|
| 129 |
+
|
| 130 |
+
### Out-of-scope Use
|
| 131 |
+
|
| 132 |
+
The model is not intended for malicious activities, such as harming others or violating human rights. Any downstream application must comply with current laws and regulations. Irresponsible usage in production environments without proper risk assessment and mitigation is also discouraged.
|
| 133 |
+
|
| 134 |
+
---
|
| 135 |
+
|
| 136 |
+
## Hardware and Software
|
| 137 |
+
|
| 138 |
+
### Training Framework
|
| 139 |
+
|
| 140 |
+
The post-training process was conducted using two complementary frameworks, each selected to best support its corresponding stage:
|
| 141 |
+
|
| 142 |
+
- Supervised Fine-Tuning (SFT): Conducted with an internal fork of the FastChat codebase, adapted to our infrastructure and optimized for stability and efficiency in our use case.
|
| 143 |
+
- Alignment Stage: Implemented with the TRL (Transformers Reinforcement Learning) library, applied to preference-pair training to achieve preliminary alignment with human preferences.
|
| 144 |
+
|
| 145 |
+
### Compute Infrastructure
|
| 146 |
+
|
| 147 |
+
All models were trained on [MareNostrum 5](https://www.bsc.es/ca/marenostrum/marenostrum-5), a pre-exascale EuroHPC supercomputer hosted and
|
| 148 |
+
operated by Barcelona Supercomputing Center.
|
| 149 |
+
|
| 150 |
+
The accelerated partition is composed of 1,120 nodes with the following specifications:
|
| 151 |
+
|
| 152 |
+
- 4x Nvidia Hopper GPUs with 64GB HBM2 memory
|
| 153 |
+
- 2x Intel Sapphire Rapids 8460Y+ at 2.3Ghz and 32c each (64 cores)
|
| 154 |
+
- 4x NDR200 (BW per node 800Gb/s)
|
| 155 |
+
- 512 GB of Main memory (DDR5)
|
| 156 |
+
- 460GB of NVMe storage
|
| 157 |
+
|
| 158 |
+
The table below specifies the number of nodes and GPUs employed for each post-training stage:
|
| 159 |
+
|
| 160 |
+
| Phase | Nodes | GPUs |
|
| 161 |
+
| --- | --- | --- |
|
| 162 |
+
| SFT | 16 | 64 |
|
| 163 |
+
| Alignment | 16 | 64 |
|
| 164 |
+
|
| 165 |
+
---
|
| 166 |
+
|
| 167 |
+
## How to use
|
| 168 |
+
|
| 169 |
+
The model can be used either directly in Python using the `transformers` library or deployed as a service and used through standard API calls.
|
| 170 |
+
|
| 171 |
+
While the former gives the most control over the inference process, it requires the code to be executed on a machine with a sufficiently powerful GPU to run the model locally, and is more error-prone than the alternative.
|
| 172 |
+
We therefore strongly recommend the latter, as deploying the model as a service can be done either locally or on a remote server and makes the model available to multiple clients in parallel among other advantages.
|
| 173 |
+
|
| 174 |
+
Unless you have very specific needs (e.g. for research) that require adapting the inference process, it is preferable to follow the "deployment as a service" guidelines below.
|
| 175 |
+
|
| 176 |
+
### Local inference with Python / transformers
|
| 177 |
+
The instruction-following models utilize the widely adopted ChatML template to structure conversational inputs and outputs.
|
| 178 |
+
|
| 179 |
+
Using this standardized chat format ensures a consistent and enhanced conversational experience. The template can be easily applied through the tokenizer's built-in functions, as illustrated in the example snippet below:
|
| 180 |
+
|
| 181 |
+
```python
|
| 182 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM
|
| 183 |
+
import torch
|
| 184 |
+
|
| 185 |
+
model_id = "BSC-LT/ALIA-40b-instruct-2601"
|
| 186 |
+
|
| 187 |
+
text = "At what temperature does water boil?"
|
| 188 |
+
|
| 189 |
+
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
| 190 |
+
model = AutoModelForCausalLM.from_pretrained(
|
| 191 |
+
model_id,
|
| 192 |
+
device_map="auto",
|
| 193 |
+
torch_dtype=torch.bfloat16
|
| 194 |
+
)
|
| 195 |
+
|
| 196 |
+
message = [ { "role": "user", "content": text } ]
|
| 197 |
+
|
| 198 |
+
prompt = tokenizer.apply_chat_template(
|
| 199 |
+
message,
|
| 200 |
+
tokenize=False,
|
| 201 |
+
add_generation_prompt=True,
|
| 202 |
+
)
|
| 203 |
+
|
| 204 |
+
inputs = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt")
|
| 205 |
+
outputs = model.generate(input_ids=inputs.to(model.device), max_new_tokens=200)
|
| 206 |
+
|
| 207 |
+
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
|
| 208 |
+
|
| 209 |
+
```
|
| 210 |
+
|
| 211 |
+
---
|
| 212 |
+
Using this template, each turn in the conversation is preceded by a `<|im_start|>` delimiter indicating the beginning of a message, followed by the role of the entity
|
| 213 |
+
(either `user`, for content supplied by the user, or `assistant` for the model's responses), and finished with the `<|im_end|>` token:
|
| 214 |
+
|
| 215 |
+
```
|
| 216 |
+
<s><|im_start|>user
|
| 217 |
+
At what temperature does water boil?<|im_end|>
|
| 218 |
+
<|im_start|>assistant
|
| 219 |
+
Water turns into vapor at 100Β°C.<|im_end|>
|
| 220 |
+
```
|
| 221 |
+
Loading the model with transformers' `AutoModelForCausalLM` guarantees that adequate sampling parameters are used during generation. If using alternative inference libraries such as vLLM, Ollama, or SGLang, it is crucial to verify that optimal parameters are used. To this end, in order to ensure optimal results, we recommend using **temperatures around 0-0.2** without any type of repetition penalties applied.
|
| 222 |
+
|
| 223 |
+
---
|
| 224 |
+
|
| 225 |
+
### Deployment as service and remote use (Messages API)
|
| 226 |
+
|
| 227 |
+
In our experience, `vllm` works well for deploying the full unquantized version of the model, whereas `llama.cpp` is appropriate for the quantized (GGUF) version.
|
| 228 |
+
**We strongly discourage using Ollama, as we have encountered compatibility issues that may seriously degrade the model's performance.**
|
| 229 |
+
|
| 230 |
+
The easiest and most reliable way to have a working deployment of ALIA-40b-instruct is through the "Deploy / HF Inference Endpoints" option directly on the Hugging Face model page.
|
| 231 |
+
This automatically creates a functioning endpoint, using vllm or llama.cpp according to the model variant, with an appropriately dimensioned GPU.
|
| 232 |
+
While there are additional settings available for the endpoint, we found the standard configuration proposed by Hugging Face to be a reasonable starting point.
|
| 233 |
+
|
| 234 |
+
Once the endpoint is running, the model can be easily called using OpenAI's "Messages API" (the de facto standard API for LLM use). By using this API the chat template is applied automatically by the service, requiring no explicit configuration on the client side. The endpoint's configuration page on Hugging Face also provides a "Playground" for testing and API examples, as well as a simple chat interface.
|
| 235 |
+
|
| 236 |
+
Example usage:
|
| 237 |
+
```python
|
| 238 |
+
# pip install openai
|
| 239 |
+
|
| 240 |
+
from openai import OpenAI
|
| 241 |
+
|
| 242 |
+
client = OpenAI(
|
| 243 |
+
base_url = YOUR_ENDPOINT_URL,
|
| 244 |
+
api_key = YOUR_HF_TOKEN
|
| 245 |
+
)
|
| 246 |
+
|
| 247 |
+
chat_completion = client.chat.completions.create(
|
| 248 |
+
model = "BSC-LT/ALIA-40b-instruct-2601",
|
| 249 |
+
messages = [
|
| 250 |
+
{
|
| 251 |
+
"role": "user",
|
| 252 |
+
"content": "What is deep learning?"
|
| 253 |
+
}
|
| 254 |
+
],
|
| 255 |
+
stream = True,
|
| 256 |
+
max_tokens = 1000,
|
| 257 |
+
temperature=0.1
|
| 258 |
+
)
|
| 259 |
+
|
| 260 |
+
print(chat_completion.choices[0].message.content)
|
| 261 |
+
```
|
| 262 |
+
|
| 263 |
+
The model can also be deployed locally or on any server infrastructure with sufficient GPUs, using vllm or llama.cpp. We recommend an initial deployment on Hugging Face as a point of reference and comparison to make sure the model is behaving as expected in the desired deployment setup.
|
| 264 |
+
|
| 265 |
+
> [!NOTE]
> To check that your endpoint is working correctly, you can try to replicate the examples contained in this [Colab Notebook](https://colab.research.google.com/drive/1Yp1IWpsuQdTUxYM6KmmmoqYK0dUyZ2IO)
|
| 266 |
+
---
|
| 267 |
+
|
| 268 |
+
### Instruction Tuning Data
|
| 269 |
+
The dataset used in the supervised fine-tuning stage consists of 808k conversations. The training mixture is obtained by combining a selection of (human and synthetic) permissive-licensed datasets, with a collection of synthetic conversations **curated in-house**.
|
| 270 |
+
|
| 271 |
+
The synthetic conversations are generated using [DeepSeek-V3-0324](https://huggingface.co/deepseek-ai/DeepSeek-V3-0324), leveraging seed data and prompts from pre-training corpora, as well as other openly available instruction datasets.
|
| 272 |
+
|
| 273 |
+
The table below provides a detailed breakdown of the datasets included in this mixture, specifying their language and contribution to the overall corpus:
|
| 274 |
+
|
| 275 |
+
| **Dataset** | **ca** | **en** | **es** | **eu** | **gl** | **pt** | **Total Conversations** |
|
| 276 |
+
| --- | --- | --- | --- | --- | --- | --- | --- |
|
| 277 |
+
| aya-dataset | | 3940 | 3851 | 939 | | 8995 | 17725 |
|
| 278 |
+
| coqcat-train | 4797 | | | | | | 4797 |
|
| 279 |
+
| databricks-dolly-15k | | 15007 | | | | | 15007 |
|
| 280 |
+
| dolly-ca | 3232 | | | | | | 3232 |
|
| 281 |
+
| flores-dev | 986 | 1037 | 1964 | 493 | 505 | | 4985 |
|
| 282 |
+
| mentor-ca | 7119 | | | | | | 7119 |
|
| 283 |
+
| mentor-es | | | 7122 | | | | 7122 |
|
| 284 |
+
| no-robots | | 9477 | | | | | 9477 |
|
| 285 |
+
| rag-multilingual | 16043 | 14996 | 11263 | | | | 42302 |
|
| 286 |
+
| tower-blocks | | 7762 | 1000 | | | 1000 | 9762 |
|
| 287 |
+
| **oasst2_self-identity-rephrase** | 7 | 1074 | 447 | 8 | | | 1536 |
|
| 288 |
+
| **self-identity** | 1900 | 1978 | 1943 | 1927 | 1880 | | 9628 |
|
| 289 |
+
| open-r1-math | | 92960 | | | | | 92960 |
|
| 290 |
+
| **open-r1-math_translated** | 46357 | | 92601 | 46361 | 46431 | 46434 | 278184 |
|
| 291 |
+
| **fineweb-edu_qa** | 23374 | 20803 | 23311 | 22283 | 22307 | | 112078 |
|
| 292 |
+
| **wildchat-curated-deepseekv3** | | 173948 | 17888 | | | | 191836 |
|
| 293 |
+
| **Total** | **103815** | **342982** | **161390** | **72011** | **71123** | **56429** | **807750** |
|
| 294 |
+
|
| 295 |
+
#### Detailed SFT Data Sources:
|
| 296 |
+
|
| 297 |
+
The following table provides a detailed overview of the supervised fine-tuning data sources, including the dataset name, generation method, license and a brief description of each:
|
| 298 |
+
|
| 299 |
+
|
| 300 |
+
<details>
|
| 301 |
+
<summary>SFT Datasets</summary>
|
| 302 |
+
|
| 303 |
+
<table>
|
| 304 |
+
<tr>
|
| 305 |
+
<th>Dataset</th>
|
| 306 |
+
<th>Generation Method</th>
|
| 307 |
+
<th>License</th>
|
| 308 |
+
<th>Description</th>
|
| 309 |
+
</tr>
|
| 310 |
+
<tr>
|
| 311 |
+
<td>aya-dataset</td>
|
| 312 |
+
<td>Human Crowdsourced</td>
|
| 313 |
+
<td>Apache-2.0</td>
|
| 314 |
+
<td><a href="https://huggingface.co/datasets/CohereLabs/aya_dataset">aya_dataset</a> for the languages of interest.*</td>
|
| 315 |
+
</tr>
|
| 316 |
+
<tr>
|
| 317 |
+
<td>coqcat-train</td>
|
| 318 |
+
<td>Human Annotation</td>
|
| 319 |
+
<td>CC-BY-NC-ND-4.0</td>
|
| 320 |
+
<td><a href="https://huggingface.co/datasets/projecte-aina/CoQCat">CoQCat</a> train split, formatted using conversational templates.</td>
|
| 321 |
+
</tr>
|
| 322 |
+
<tr>
|
| 323 |
+
<td>databricks-dolly-15k</td>
|
| 324 |
+
<td>Human Annotation</td>
|
| 325 |
+
<td>CC-BY-SA-3.0</td>
|
| 326 |
+
<td><a href="https://huggingface.co/datasets/databricks/databricks-dolly-15k">databricks-dolly-15k</a> dataset.*</td>
|
| 327 |
+
</tr>
|
| 328 |
+
<tr>
|
| 329 |
+
<td>dolly-ca</td>
|
| 330 |
+
<td>Human Translation</td>
|
| 331 |
+
<td>CC-BY-SA-3.0</td>
|
| 332 |
+
<td><a href="https://huggingface.co/datasets/projecte-aina/dolly3k_ca">dolly3k_ca</a> dataset.</td>
|
| 333 |
+
</tr>
|
| 334 |
+
<tr>
|
| 335 |
+
<td>flores-dev</td>
|
| 336 |
+
<td>Human</td>
|
| 337 |
+
<td>CC-BY-SA-4.0</td>
|
| 338 |
+
<td>Flores-200 dev split, formatted using conversational templates.</td>
|
| 339 |
+
</tr>
|
| 340 |
+
<tr>
|
| 341 |
+
<td>mentor-es</td>
|
| 342 |
+
<td>Human Annotation</td>
|
| 343 |
+
<td>CC-BY-4.0</td>
|
| 344 |
+
<td><a href="https://huggingface.co/datasets/projecte-aina/MentorES">MentorES</a> dataset.</td>
|
| 345 |
+
</tr>
|
| 346 |
+
<tr>
|
| 347 |
+
<td>mentor-ca</td>
|
| 348 |
+
<td>Machine Translation</td>
|
| 349 |
+
<td>CC-BY-4.0</td>
|
| 350 |
+
<td><a href="https://huggingface.co/datasets/projecte-aina/MentorCA">MentorCA</a> dataset. Machine translated version of MentorES.</td>
|
| 351 |
+
</tr>
|
| 352 |
+
<tr>
|
| 353 |
+
<td>no-robots</td>
|
| 354 |
+
<td>Human Annotation</td>
|
| 355 |
+
<td>CC-BY-NC-4.0</td>
|
| 356 |
+
<td><a href="https://huggingface.co/datasets/HuggingFaceH4/no_robots">no_robots</a> dataset.*</td>
|
| 357 |
+
</tr>
|
| 358 |
+
<tr>
|
| 359 |
+
<td>rag-multilingual</td>
|
| 360 |
+
<td>Synthetic</td>
|
| 361 |
+
<td>CC-BY-SA-4.0</td>
|
| 362 |
+
<td><a href="https://huggingface.co/datasets/projecte-aina/RAG_Multilingual">RAG_Multilingual</a> dataset. Synthetic QA dataset generated with Mixtral8x7b.</td>
|
| 363 |
+
</tr>
|
| 364 |
+
<tr>
|
| 365 |
+
<td>tower-blocks</td>
|
| 366 |
+
<td>Mixture</td>
|
| 367 |
+
<td>Various licenses (only open licensed instances are used)</td>
|
| 368 |
+
<td><a href="https://huggingface.co/datasets/Unbabel/TowerBlocks-v0.2">TowerBlocks-v0.2</a> filtered by subdataset license and the languages of interest.*</td>
|
| 369 |
+
</tr>
|
| 370 |
+
<tr>
|
| 371 |
+
<td>oasst2_self-identity-rephrase</td>
|
| 372 |
+
<td>Human Crowdsourced / Synthetic</td>
|
| 373 |
+
<td>Apache-2.0</td>
|
| 374 |
+
<td>Identity instances from <a href="https://huggingface.co/datasets/OpenAssistant/oasst2">oasst2</a> dataset for the languages of interest. Subsequently rephrased to adapt the model's identity information to our case using DeepSeek-V3-0324.</td>
|
| 375 |
+
</tr>
|
| 376 |
+
<tr>
|
| 377 |
+
<td>self-identity</td>
|
| 378 |
+
<td>Synthetic</td>
|
| 379 |
+
<td>Apache-2.0 (internal)</td>
|
| 380 |
+
<td>Conversations involving self-identity information of the model, synthetically curated using DeepSeek-V3-0324.</td>
|
| 381 |
+
</tr>
|
| 382 |
+
<tr>
|
| 383 |
+
<td>open-r1-math</td>
|
| 384 |
+
<td>Synthetic</td>
|
| 385 |
+
<td>Apache-2.0</td>
|
| 386 |
+
<td>Default 93k split of the <a href="https://huggingface.co/datasets/open-r1/OpenR1-Math-220k">OpenR1-Math-220k</a> dataset.*</td>
|
| 387 |
+
</tr>
|
| 388 |
+
<tr>
|
| 389 |
+
<td>open-r1-math_translated</td>
|
| 390 |
+
<td>Synthetic</td>
|
| 391 |
+
<td>Apache-2.0 (internal)</td>
|
| 392 |
+
<td>OpenR1-Math-220k default split translated to the languages of interest with DeepSeek-V3-0324.</td>
|
| 393 |
+
</tr>
|
| 394 |
+
<tr>
|
| 395 |
+
<td>fineweb-edu_qa</td>
|
| 396 |
+
<td>Synthetic</td>
|
| 397 |
+
<td>Apache-2.0 (internal)</td>
|
| 398 |
+
<td>QA conversations created by prompting DeepSeek-V3-0324 with the highest quality documents of <a href="https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu">FineWeb-Edu</a>. Subsequently filtered with the same model to ensure self-contained question-answering pairs meet quality thresholds.</td>
|
| 399 |
+
</tr>
|
| 400 |
+
<tr>
|
| 401 |
+
<td>wildchat-curated-deepseekv3</td>
|
| 402 |
+
<td>Human / Synthetic</td>
|
| 403 |
+
<td>Apache-2.0 (internal)</td>
|
| 404 |
+
<td>Human prompts from the <a href="https://huggingface.co/datasets/allenai/WildChat-1M">WildChat-1M</a> dataset together with responses generated with DeepSeek-V3-0324.</td>
|
| 405 |
+
</tr>
|
| 406 |
+
</table>
|
| 407 |
+
|
| 408 |
+
<p>*All externally sourced datasets have undergone a sanity check using shallow rule-based filtering to discard incorrect or low-quality samples and ensure conversational quality.</p>
|
| 409 |
+
|
| 410 |
+
</details>
|
| 411 |
+
|
| 412 |
+
|
| 413 |
+
### Alignment Data
|
| 414 |
+
|
| 415 |
+
The alignment data was synthetically generated from a corpus of approximately 403k prompts designed to improve both helpfulness and safety.
|
| 416 |
+
|
| 417 |
+
- **Helpfulness**: Prompts include instruction following, mathematics, question answering, and reasoning tasks across Catalan, Spanish, English, Euskera, and Galician. Additionally, M-Personas conversations, a resource specifically generated for this project, were incorporated and will also be released.
|
| 418 |
+
- **Safety**: Prompts were synthetically generated from seed prompts written by human annotators, covering nine harm categories to ensure broad coverage of safety-related scenarios.
|
| 419 |
+
|
| 420 |
+
Following approaches similar to UltraFeedback and PKU, each instruction underwent the following process:
|
| 421 |
+
|
| 422 |
+
1. Multiple responses were produced using a pool of permissively licensed models (see [Model Pool](#model-pool-for-synthetic-data-generation)) on helpfulness or safety, depending on the prompt.
|
| 423 |
+
2. These responses were rated by a judge (Deepseek-V3-0324). Helpfulness responses were given an overall rating, while safety responses were given a score based on their level of severity over a list of harm categories.
|
| 424 |
+
3. Preference pairs were constructed from these ratings. This phase should be considered preliminary, as future versions of the model will incorporate human annotators to refine and curate the generation and evaluation pipeline.
|
| 425 |
+
|
| 426 |
+
The table below presents the distribution of helpfulness prompts by language, detailing the number of examples contributed from each language:
|
| 427 |
+
|
| 428 |
+
| Dataset | ca | en | es | eu | gl | Total |
|
| 429 |
+
| ----------------------- | ---------: | ----------: | ---------: | ---------: | ---------: | ----------: |
|
| 430 |
+
| aya | 0 | 2 586 | 3 019 | 902 | 0 | 6 507 |
|
| 431 |
+
| coqcat | 4 448 | 0 | 0 | 0 | 0 | 4 448 |
|
| 432 |
+
| dolly | 0 | 9 925 | 0 | 0 | 0 | 9 925 |
|
| 433 |
+
| dolly-ca | 2 971 | 0 | 0 | 0 | 0 | 2 971 |
|
| 434 |
+
| flores-dev | 1 219 | 589 | 1 786 | 357 | 457 | 4 408 |
|
| 435 |
+
| identity | 2 924 | 20 120 | 15 720 | 2 396 | 2 276 | 43 436 |
|
| 436 |
+
| m-personas | 2 674 | 1 215 | 2 852 | 2 791 | 2 530 | 12 062 |
|
| 437 |
+
| mentor-ca | 6 517 | 0 | 0 | 0 | 0 | 6 517 |
|
| 438 |
+
| mentor-es | 0 | 0 | 6 007 | 0 | 0 | 6 007 |
|
| 439 |
+
| new_open-orca | 0 | 15 528 | 0 | 0 | 0 | 15 528 |
|
| 440 |
+
| no-robots-system-prompt | 0 | 5 913 | 0 | 0 | 0 | 5 913 |
|
| 441 |
+
| oasst-ca | 2 195 | 0 | 0 | 0 | 0 | 2 195 |
|
| 442 |
+
| persona-generic | 8 849 | 0 | 9 464 | 8 899 | 8 588 | 35 800 |
|
| 443 |
+
| persona-reasoning | 8 721 | 0 | 9 501 | 8 977 | 8 474 | 35 673 |
|
| 444 |
+
| rag-multilingual | 15 072 | 10 003 | 9 955 | 0 | 0 | 35 030 |
|
| 445 |
+
| tower-blocks | 0 | 4 126 | 692 | 0 | 0 | 4 818 |
|
| 446 |
+
| **Total** | **55 590** | **70 005** | **58 996** | **24 322** | **22 325** | **231 238** |
|
| 447 |
+
|
| 448 |
+
The following table summarizes the safety prompts included in the alignment dataset by language and number of instances, covering the nine harm categories:
|
| 449 |
+
|
| 450 |
+
| **Language** | Instances |
|
| 451 |
+
| --- | --- |
|
| 452 |
+
| ca | 21074 |
|
| 453 |
+
| es | 20887 |
|
| 454 |
+
| en | 6370 |
|
| 455 |
+
| eu | 13459 |
|
| 456 |
+
| gl | 9951 |
|
| 457 |
+
|
| 458 |
+
#### Model Pool for Synthetic Data Generation
|
| 459 |
+
|
| 460 |
+
In the table below, we list the permissively licensed models that were used to generate the synthetic datasets for alignment:
|
| 461 |
+
|
| 462 |
+
|
| 463 |
+
|
| 464 |
+
<details>
|
| 465 |
+
<summary>Model Pool </summary>
|
| 466 |
+
|
| 467 |
+
|
| 468 |
+
<table>
|
| 469 |
+
<tr>
|
| 470 |
+
<th>Family</th>
|
| 471 |
+
<th>Model Name</th>
|
| 472 |
+
<th>Size (B)</th>
|
| 473 |
+
<th>Variant</th>
|
| 474 |
+
<th>License</th>
|
| 475 |
+
</tr>
|
| 476 |
+
<tr>
|
| 477 |
+
<td>EuroLLM</td>
|
| 478 |
+
<td>EuroLLM_9B_Instruct</td>
|
| 479 |
+
<td>9</td>
|
| 480 |
+
<td>instructed</td>
|
| 481 |
+
<td>Apache 2.0</td>
|
| 482 |
+
</tr>
|
| 483 |
+
<tr>
|
| 484 |
+
<td>Deepseek</td>
|
| 485 |
+
<td>DeepSeek-V3-0324</td>
|
| 486 |
+
<td>685</td>
|
| 487 |
+
<td>aligned</td>
|
| 488 |
+
<td>MIT</td>
|
| 489 |
+
</tr>
|
| 490 |
+
<tr>
|
| 491 |
+
<td>Qwen</td>
|
| 492 |
+
<td>Qwen3-235B-A22B</td>
|
| 493 |
+
<td>235</td>
|
| 494 |
+
<td>aligned</td>
|
| 495 |
+
<td>Apache 2.0</td>
|
| 496 |
+
</tr>
|
| 497 |
+
<tr>
|
| 498 |
+
<td></td>
|
| 499 |
+
<td>Qwen3-30B-A3B</td>
|
| 500 |
+
<td>30</td>
|
| 501 |
+
<td>aligned</td>
|
| 502 |
+
<td>Apache 2.0</td>
|
| 503 |
+
</tr>
|
| 504 |
+
<tr>
|
| 505 |
+
<td></td>
|
| 506 |
+
<td>Qwen3-32B</td>
|
| 507 |
+
<td>32</td>
|
| 508 |
+
<td>aligned</td>
|
| 509 |
+
<td>Apache 2.0</td>
|
| 510 |
+
</tr>
|
| 511 |
+
<tr>
|
| 512 |
+
<td></td>
|
| 513 |
+
<td>Qwen3-14B</td>
|
| 514 |
+
<td>14</td>
|
| 515 |
+
<td>aligned</td>
|
| 516 |
+
<td>Apache 2.0</td>
|
| 517 |
+
</tr>
|
| 518 |
+
<tr>
|
| 519 |
+
<td></td>
|
| 520 |
+
<td>Qwen3-8B</td>
|
| 521 |
+
<td>8</td>
|
| 522 |
+
<td>aligned</td>
|
| 523 |
+
<td>Apache 2.0</td>
|
| 524 |
+
</tr>
|
| 525 |
+
<tr>
|
| 526 |
+
<td>Mistral</td>
|
| 527 |
+
<td>Mixtral-8x7B-Instruct-v0.1</td>
|
| 528 |
+
<td>56</td>
|
| 529 |
+
<td>aligned</td>
|
| 530 |
+
<td>Apache 2.0</td>
|
| 531 |
+
</tr>
|
| 532 |
+
<tr>
|
| 533 |
+
<td></td>
|
| 534 |
+
<td>Mistral-7B-Instruct-v0.3</td>
|
| 535 |
+
<td>7</td>
|
| 536 |
+
<td>aligned</td>
|
| 537 |
+
<td>Apache 2.0</td>
|
| 538 |
+
</tr>
|
| 539 |
+
<tr>
|
| 540 |
+
<td></td>
|
| 541 |
+
<td>Mistral-Small-24B-Instruct-2501</td>
|
| 542 |
+
<td>24</td>
|
| 543 |
+
<td>aligned</td>
|
| 544 |
+
<td>Apache 2.0</td>
|
| 545 |
+
</tr>
|
| 546 |
+
<tr>
|
| 547 |
+
<td></td>
|
| 548 |
+
<td>Mistral-Nemo-Instruct-2407</td>
|
| 549 |
+
<td>12</td>
|
| 550 |
+
<td>instructed</td>
|
| 551 |
+
<td>Apache 2.0</td>
|
| 552 |
+
</tr>
|
| 553 |
+
<tr>
|
| 554 |
+
<td>OLMO</td>
|
| 555 |
+
<td>OLMo-2-0325-32B-SFT</td>
|
| 556 |
+
<td>32</td>
|
| 557 |
+
<td>instructed</td>
|
| 558 |
+
<td>Apache 2.0</td>
|
| 559 |
+
</tr>
|
| 560 |
+
<tr>
|
| 561 |
+
<td></td>
|
| 562 |
+
<td>OLMo-2-1124-13B-SFT</td>
|
| 563 |
+
<td>13</td>
|
| 564 |
+
<td>instructed</td>
|
| 565 |
+
<td>Apache 2.0</td>
|
| 566 |
+
</tr>
|
| 567 |
+
<tr>
|
| 568 |
+
<td></td>
|
| 569 |
+
<td>OLMo-2-1124-7B-SFT</td>
|
| 570 |
+
<td>7</td>
|
| 571 |
+
<td>instructed</td>
|
| 572 |
+
<td>Apache 2.0</td>
|
| 573 |
+
</tr>
|
| 574 |
+
<tr>
|
| 575 |
+
<td>FLOR_BSC</td>
|
| 576 |
+
<td>Aitana_6_3B_BSC_Instructed</td>
|
| 577 |
+
<td>6.3</td>
|
| 578 |
+
<td>instructed</td>
|
| 579 |
+
<td>Apache 2.0</td>
|
| 580 |
+
</tr>
|
| 581 |
+
<tr>
|
| 582 |
+
<td></td>
|
| 583 |
+
<td>Flor_6_3B_Instruct</td>
|
| 584 |
+
<td>6.3</td>
|
| 585 |
+
<td>instructed</td>
|
| 586 |
+
<td>Apache 2.0</td>
|
| 587 |
+
</tr>
|
| 588 |
+
<tr>
|
| 589 |
+
<td>Salamandra</td>
|
| 590 |
+
<td>Salamandra-40b_pre-1.0_sft-1.0_hh_rlhf_ali</td>
|
| 591 |
+
<td>40</td>
|
| 592 |
+
<td>instructed</td>
|
| 593 |
+
<td>Apache 2.0</td>
|
| 594 |
+
</tr>
|
| 595 |
+
<tr>
|
| 596 |
+
<td></td>
|
| 597 |
+
<td>Salamandra-40b_pre-1.0_sft-1.0_hh_rlhf_tox</td>
|
| 598 |
+
<td>40</td>
|
| 599 |
+
<td>instructed</td>
|
| 600 |
+
<td>Apache 2.0</td>
|
| 601 |
+
</tr>
|
| 602 |
+
<tr>
|
| 603 |
+
<td></td>
|
| 604 |
+
<td>Salamandra-2b_pre-1.2_sft-1.0_hh_rlhf_ali</td>
|
| 605 |
+
<td>2</td>
|
| 606 |
+
<td>instructed</td>
|
| 607 |
+
<td>Apache 2.0</td>
|
| 608 |
+
</tr>
|
| 609 |
+
<tr>
|
| 610 |
+
<td></td>
|
| 611 |
+
<td>Salamandra-7b_pre-1.2_sft-1.0_hh_rlhf_ali</td>
|
| 612 |
+
<td>7</td>
|
| 613 |
+
<td>instructed</td>
|
| 614 |
+
<td>Apache 2.0</td>
|
| 615 |
+
</tr>
|
| 616 |
+
<tr>
|
| 617 |
+
<td></td>
|
| 618 |
+
<td>Salamandra-2b_pre-1.2_sft-1.0_hh_rlhf_tox</td>
|
| 619 |
+
<td>2</td>
|
| 620 |
+
<td>instructed</td>
|
| 621 |
+
<td>Apache 2.0</td>
|
| 622 |
+
</tr>
|
| 623 |
+
<tr>
|
| 624 |
+
<td></td>
|
| 625 |
+
<td>Salamandra-7b_pre-1.2_sft-1.0_hh_rlhf_tox</td>
|
| 626 |
+
<td>7</td>
|
| 627 |
+
<td>instructed</td>
|
| 628 |
+
<td>Apache 2.0</td>
|
| 629 |
+
</tr>
|
| 630 |
+
</table>
|
| 631 |
+
|
| 632 |
+
</details>
|
| 633 |
+
|
| 634 |
+
|
| 635 |
+
## Evaluation
|
| 636 |
+
|
| 637 |
+
### Gold-standard benchmarks
|
| 638 |
+
|
| 639 |
+
Evaluation is done using the Language Model Evaluation Harness (Gao et al., 2024). We evaluate on a set of tasks taken from [SpanishBench](https://github.com/EleutherAI/lm-evaluation-harness/tree/main/lm_eval/tasks/spanish_bench), [CatalanBench](https://github.com/EleutherAI/lm-evaluation-harness/tree/main/lm_eval/tasks/catalan_bench), [BasqueBench](https://github.com/EleutherAI/lm-evaluation-harness/tree/main/lm_eval/tasks/basque_bench) and [GalicianBench](https://github.com/EleutherAI/lm-evaluation-harness/tree/main/lm_eval/tasks/galician_bench), as well as existing English tasks available in the LM Evaluation Harness. These benchmarks include both new and existing tasks and datasets. The tables below report results for a representative selection of evaluation datasets, capturing the model's performance across a variety of tasks within these benchmarks.
|
| 640 |
+
|
| 641 |
+
Only tasks that are human-generated, human-translated, or involve strong human-in-the-loop process (i.e., machine translation followed by professional revision or machine generation followed by human revision and annotation) were used. This approach explains the variation in the number of tasks reported across languages. As additional high-quality tasks are published, we will update the evaluation results accordingly. We also plan to expand evaluation to other languages, provided that the datasets meet our quality standards.
|
| 642 |
+
|
| 643 |
+
During the implementation of the evaluation we observed a series of issues worth considering when replicating and interpreting the results presented. These issues include ±1.5% variances in performance in some tasks depending on the version of the `transformers` library used, and depending on the use (or lack of use) of tensor parallelism when loading a model. When implementing existing tasks, we carry out a comprehensive quality evaluation of the dataset, the Harness task itself, and what kind of input models see during evaluation. Our implementation (see links above) addresses multiple existing problems such as errors in datasets and prompts, and lack of pre-processing. All this means that results will vary if using other Harness implementations, and may slightly vary depending on the replication setup.
|
| 644 |
+
|
| 645 |
+
It should be noted that these results are subject to all the drawbacks of every current gold-standard evaluation, and that the figures do not fully represent the model's capabilities and potential. We thus advise caution when reading and interpreting the results.
|
| 646 |
+
|
| 647 |
+
All results reported below correspond to a 0-shot evaluation setting.
|
| 648 |
+
|
| 649 |
+
### Spanish
|
| 650 |
+
|
| 651 |
+
WiP
|
| 652 |
+
|
| 653 |
+
### Catalan
|
| 654 |
+
|
| 655 |
+
WiP
|
| 656 |
+
|
| 657 |
+
### Basque
|
| 658 |
+
|
| 659 |
+
WiP
|
| 660 |
+
|
| 661 |
+
### Galician
|
| 662 |
+
|
| 663 |
+
WiP
|
| 664 |
+
|
| 665 |
+
### English
|
| 666 |
+
|
| 667 |
+
WiP
|
| 668 |
+
|
| 669 |
+
### LLM-as-a-judge
|
| 670 |
+
|
| 671 |
+
We use [Prometheus-2 8x7B](https://huggingface.co/prometheus-eval/prometheus-8x7b-v2.0) as a judge to evaluate the responses of the model. Tasks are created from existing multilingual evaluation datasets covering the same categories as the ones measured in our gold-standard benchmarks. We randomly select a subset of 250 instances per language from the `test` set of each source dataset. To evaluate the responses of our model, we use task-specific criteria developed in-house for the _LLM-judge_ to use. Each criterion is measured either as a 5-point Likert scale or as a binary task depending on the idiosyncrasy of the task and criterion.
|
| 672 |
+
|
| 673 |
+
Prompts for each task are created in various ways to score the model's robustness in addition to these criteria. This is done by presenting the same source instance within three different prompts. We then calculate the variance between the scores assigned by the _LLM-judge_ to our model's responses to the three prompt styles and average it across all instances. Prompts are human translated to all languages measured. We do not provide the _LLM-judge_ with a reference answer.
|
| 674 |
+
|
| 675 |
+
The _judge_ prompt we use during evaluation is the same used to fine tune the Prometheus-2 family. We keep the _judge_ prompt and criteria used to present the _LLM-judge_ with the task prompts and model responses in English for evaluation across languages. The _judge_ prompt used is:
|
| 676 |
+
|
| 677 |
+
```python
|
| 678 |
+
"You are a fair judge assistant tasked with providing clear, objective feedback based on specific criteria, ensuring each assessment reflects the absolute standards set for performance.
|
| 679 |
+
|
| 680 |
+
###Task Description:
|
| 681 |
+
An instruction (might include an Input inside it), a response to evaluate, and a score rubric representing a evaluation criteria are given.
|
| 682 |
+
1. Write a detailed feedback that assess the quality of the response strictly based on the given score rubric, not evaluating in general.
|
| 683 |
+
2. After writing a feedback, write a score that is an integer between {a} and {b}. You should refer to the score rubric.
|
| 684 |
+
3. The output format should look as follows: \"Feedback: (write a feedback for criteria) [RESULT] (an integer number between {a} and {b})\"
|
| 685 |
+
4. Please do not generate any other opening, closing, and explanations.
|
| 686 |
+
|
| 687 |
+
###The instruction to evaluate:
|
| 688 |
+
{input}
|
| 689 |
+
|
| 690 |
+
###Response to evaluate:
|
| 691 |
+
{prediction}
|
| 692 |
+
|
| 693 |
+
###Score Rubrics:
|
| 694 |
+
{criteria}
|
| 695 |
+
|
| 696 |
+
###Feedback:"
|
| 697 |
+
```
|
| 698 |
+
|
| 699 |
+
As an example, prompts for the Math task in English are based on instances from [MGSM](https://huggingface.co/datasets/juletxara/mgsm), and each instance is presented within these prompts:
|
| 700 |
+
|
| 701 |
+
```python
|
| 702 |
+
"en": [
|
| 703 |
+
("I need help with this math problem: \"", "\" Give me the answer step by step and also the final result separately."),
|
| 704 |
+
("Can you please help me answer this? \"", "\" Explain the answer and give me the final result as well. Thanks."),
|
| 705 |
+
("Help me with this problem: \"", "\" I need the answer explained and the final result separately.")
|
| 706 |
+
]
|
| 707 |
+
```
|
| 708 |
+
|
| 709 |
+
|
| 710 |
+
This task is then evaluated by the _LLM-judge_ using two criteria, reasoning capability (5-point Likert) and mathematical correctness (binary):
|
| 711 |
+
|
| 712 |
+
```python
|
| 713 |
+
reasoning_capability_criteria = {
|
| 714 |
+
"reasoning_capability": """
|
| 715 |
+
[Does the model's answer demonstrate reasoning capability?]
|
| 716 |
+
Score 1: The answer demonstrates poor reasoning, with illogical arguments or conclusions that do not follow from the provided information.
|
| 717 |
+
Score 2: The answer shows weak reasoning, with some logical connections but also contains significant flaws or gaps in the argumentation.
|
| 718 |
+
Score 3: The answer demonstrates adequate reasoning, with generally logical arguments, but may have minor flaws or a lack of depth in the reasoning process.
|
| 719 |
+
Score 4: The answer shows strong reasoning, with well-structured arguments and conclusions that logically follow from the information provided.
|
| 720 |
+
Score 5: The answer demonstrates exceptional reasoning, with clear, coherent, and insightful arguments that are logically sound and well-supported by the information provided."""
|
| 721 |
+
}
|
| 722 |
+
|
| 723 |
+
mathematical_correctness_binary_criteria = {
|
| 724 |
+
"mathematical_correctness_binary": """
|
| 725 |
+
[Is the model's answer mathematically correct?]
|
| 726 |
+
Score 0: The answer contains mathematical errors that render the solution incorrect or unreliable.
|
| 727 |
+
Score 1: The answer is mathematically correct, with accurate calculations and appropriate use of mathematical concepts."""
|
| 728 |
+
}
|
| 729 |
+
```
|
| 730 |
+
|
| 731 |
+
#### Multilingual results
|
| 732 |
+
|
| 733 |
+
WiP
|
| 734 |
+
|
| 735 |
+
### Long Context Evaluation
|
| 736 |
+
|
| 737 |
+
To assess the long-context capabilities of our model, we performed a "needle in a haystack" test with the following configuration:
|
| 738 |
+
|
| 739 |
+
- **Needle Phrase**: *"The best thing to do in San Francisco is eat a sandwich and sit in Dolores Park on a sunny day."*
|
| 740 |
+
- **System Prompt**: *"You are a helpful AI bot that answers questions for a user. Keep your response short and direct"*
|
| 741 |
+
- **Retrieval Question**: *"What is the best thing to do in San Francisco?"*
|
| 742 |
+
- **Evaluator**: [prometheus-8x7b-v2.0](https://huggingface.co/prometheus-eval/prometheus-8x7b-v2.0), used as the evaluation judge to determine whether the model correctly retrieved and utilized the long-context information.
|
| 743 |
+
|
| 744 |
+
This test specifically targets the model's ability to retain and access information across very long sequences, providing a benchmark for evaluating its extended-context reasoning and retrieval performance.
|
| 745 |
+
|
| 746 |
+

|
| 747 |
+
|
| 748 |
+
It is important to note that strong performance in the "needle in a haystack" test does not guarantee retention of short-context performance across larger tasks. This evaluation is therefore limited in scope. We are actively working on developing more robust metrics and evaluation protocols to further enhance the model's long-context capabilities.
|
| 749 |
+
|
| 750 |
+
|
| 751 |
+
---
|
| 752 |
+
|
| 753 |
+
## Ethical Considerations and Limitations
|
| 754 |
+
|
| 755 |
+
The ALIA-40b-instruct model is an instruction-tuned variant with preliminary alignment. It has several limitations that users should be aware of. Ongoing work is addressing these areas, including comprehensive evaluation of societal and cognitive biases as well as safety.
|
| 756 |
+
|
| 757 |
+
### Functional Limitations:
|
| 758 |
+
|
| 759 |
+
- No Function Calling: The model cannot natively execute or call external functions/APIs. Tasks requiring plugin calls or tool execution must be implemented outside the model.
|
| 760 |
+
- Reasoning & Math: The model is not guaranteed to perform robust chain-of-thought reasoning or advanced mathematics. Complex logical puzzles or multi-step inferences may fail or produce inconsistent answers.
|
| 761 |
+
- Code Generation: Although exposed to code during pretraining, ALIA-40b-Instruct is not a specialized code-generation model. It may produce code-like text, but outputs should be verified and tested before use in production codebases.
|
| 762 |
+
- Agentive Capabilities: The model does not have agentive or autonomous action capabilities. It cannot act as an autonomous agent or execute multi-step workflows.
|
| 763 |
+
|
| 764 |
+
### Bias and Harm:
|
| 765 |
+
|
| 766 |
+
WiP
|
| 767 |
+
|
| 768 |
+
### Safety and Alignment:
|
| 769 |
+
|
| 770 |
+
Alignment has been substantially enhanced compared to earlier versions, though it is not yet complete. As a result, the model may still exhibit unsafe behavior in certain edge cases, including responding to malicious prompts or generating disallowed content. To evaluate the model's vulnerabilities, we conduct a Red Teaming assessment using adversarial prompt datasets written by our annotation team, and with [DeepseekV3-0324](https://huggingface.co/deepseek-ai/DeepSeek-V3-0324) serving as the moderator model (LLM-as-a-judge with a judge prompt also validated by our annotation team). This evaluation is carried out in Spanish, Catalan, English, Basque, and Galician. Results yielded an average attack success rate of 13.3%.
|
| 771 |
+
|
| 772 |
+
Additional filtering, human oversight, and alignment steps are essential. We are actively working to improve and assess the model's safety, including human annotation and evaluation, as well as the development of multilingual safety datasets. A comprehensive report will be provided in subsequent updates.
|
| 773 |
+
|
| 774 |
+
### Recommendations:
|
| 775 |
+
|
| 776 |
+
Developers should implement additional safety filters, human oversight, targeted evaluation suites, and secondary evaluation models when deploying this model. Do not deploy ALIA-40b-Instruct in critical applications without extensive testing and mitigation. Users are responsible for assessing and mitigating harmful behavior or misinformation resulting from model outputs, and ensuring compliance with applicable regulations, including those governing the use of Artificial Intelligence.
|
| 777 |
+
|
| 778 |
+
---
|
| 779 |
+
|
| 780 |
+
## Additional information
|
| 781 |
+
|
| 782 |
+
### Author
|
| 783 |
+
The Language Technologies Lab from Barcelona Supercomputing Center.
|
| 784 |
+
|
| 785 |
+
### Contact
|
| 786 |
+
For further information, please send an email to <langtech@bsc.es>.
|
| 787 |
+
|
| 788 |
+
### Copyright
|
| 789 |
+
Copyright(c) 2026 by Language Technologies Lab, Barcelona Supercomputing Center.
|
| 790 |
+
|
| 791 |
+
### Funding
|
| 792 |
+
This work is funded by the Ministerio para la Transformación Digital y de la Función Pública - Funded by EU – NextGenerationEU within the framework of the project Modelos del Lenguaje.
|
| 793 |
+
|
| 794 |
+
This work has been promoted and supported by the Government of Catalonia through the Aina Project.
|
| 795 |
+
|
| 796 |
+
### Acknowledgements
|
| 797 |
+
|
| 798 |
+
This project has benefited from the contributions of numerous teams and institutions, mainly through data contributions, knowledge transfer or technical support.
|
| 799 |
+
|
| 800 |
+
We are especially grateful to our ILENIA project partners: CENID, HiTZ and CiTIUS for their participation. We also extend our genuine gratitude to the Spanish Senate and Congress, Fundación Dialnet, and the "Instituto Universitario de Sistemas Inteligentes y Aplicaciones Numéricas en Ingeniería (SIANI)" of the University of Las Palmas de Gran Canaria. Many other institutions have been involved in the project. Our thanks to Òmnium Cultural, Parlament de Catalunya, Institut d'Estudis Aranesos, Racó Català, Vilaweb, ACN, Nació Digital, El món and Aquí Berguedà. We thank the Welsh government, DFKI, Occiglot project, especially Malte Ostendorff, and The Common Crawl Foundation, especially Pedro Ortiz, for their collaboration.
|
| 801 |
+
|
| 802 |
+
We would also like to give special thanks to the NVIDIA team, with whom we have met regularly, especially to: Ignacio Sarasua, Adam Henryk Grzywaczewski, Oleg Sudakov, Sergio Perez, Miguel Martinez, Felipe Soares and Meriem Bendris. Their constant support has been especially appreciated throughout the entire process.
|
| 803 |
+
|
| 804 |
+
Their valuable efforts have been instrumental in the development of this work.
|
| 805 |
+
|
| 806 |
+
### Disclaimer
|
| 807 |
+
Be aware that the model may contain biases or other unintended distortions.
|
| 808 |
+
When third parties deploy systems or provide services based on this model, or use the model themselves,
|
| 809 |
+
they bear the responsibility for mitigating any associated risks and ensuring compliance with applicable regulations,
|
| 810 |
+
including those governing the use of Artificial Intelligence.
|
| 811 |
+
|
| 812 |
+
The Barcelona Supercomputing Center, as the owner and creator of the model, shall not be held liable for any outcomes resulting from third-party use.
|
| 813 |
+
|
| 814 |
+
### Citation
|
| 815 |
+
```
|
| 816 |
+
@misc{gonzalezagirre2025salamandratechnicalreport,
|
| 817 |
+
title={Salamandra Technical Report},
|
| 818 |
+
author={Aitor Gonzalez-Agirre and Marc Pàmies and Joan Llop and Irene Baucells and Severino Da Dalt and Daniel Tamayo and José Javier Saiz and Ferran Espuña and Jaume Prats and Javier Aula-Blasco and Mario Mina and Adrián Rubio and Alexander Shvets and Anna Sallés and Iñaki Lacunza and Iñigo Pikabea and Jorge Palomar and Júlia Falcão and Lucía Tormo and Luis Vasquez-Reina and Montserrat Marimon and Valle Ruíz-Fernández and Marta Villegas},
|
| 819 |
+
year={2025},
|
| 820 |
+
eprint={2502.08489},
|
| 821 |
+
archivePrefix={arXiv},
|
| 822 |
+
primaryClass={cs.CL},
|
| 823 |
+
url={https://arxiv.org/abs/2502.08489},
|
| 824 |
+
}
|
| 825 |
+
```
|
| 826 |
+
|
| 827 |
+
### License
|
| 828 |
+
[Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)
|
| 829 |
+
|
| 830 |
+
|
| 831 |
+
## Model Index
|
| 832 |
+
|Model|Base|Instruct|
|
| 833 |
+
|:---:|:---:|:---:|
|
| 834 |
+
|2b| [Link](https://huggingface.co/BSC-LT/salamandra-2b) | [Link](https://huggingface.co/BSC-LT/salamandra-2b-instruct) |
|
| 835 |
+
|7b| [Link](https://huggingface.co/BSC-LT/salamandra-7b) | [Link](https://huggingface.co/BSC-LT/salamandra-7b-instruct) |
|
| 836 |
+
|40b| [Link](https://huggingface.co/BSC-LT/ALIA-40b) | [Link](https://huggingface.co/BSC-LT/ALIA-40b-instruct-2601) |
|
chat_template.jinja
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{{- bos_token }}{%- if messages[0]['role'] == 'system' %}{%- set system_message = messages[0]['content'] %}{%- set messages = messages[1:] %}{{ '<|im_start|>system
|
| 2 |
+
' + system_message + '<|im_end|>
|
| 3 |
+
' }}{%- endif %}{% for message in messages %}{%- if (message['role'] != 'user') and (message['role'] != 'assistant')%}{{ raise_exception('Only user and assistant roles are suported after the initial optional system message.') }}{% endif %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('After the optional system message, conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{{'<|im_start|>' + message['role'] + '
|
| 4 |
+
' + message['content'] + '<|im_end|>' + '
|
| 5 |
+
'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant
|
| 6 |
+
' }}{% endif %}
|
config.json
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"architectures": [
|
| 3 |
+
"LlamaForCausalLM"
|
| 4 |
+
],
|
| 5 |
+
"attention_bias": false,
|
| 6 |
+
"attention_dropout": 0.0,
|
| 7 |
+
"bos_token_id": 1,
|
| 8 |
+
"dtype": "bfloat16",
|
| 9 |
+
"eos_token_id": 5,
|
| 10 |
+
"head_dim": 128,
|
| 11 |
+
"hidden_act": "silu",
|
| 12 |
+
"hidden_size": 8192,
|
| 13 |
+
"initializer_range": 0.02,
|
| 14 |
+
"intermediate_size": 24576,
|
| 15 |
+
"max_position_embeddings": 163840,
|
| 16 |
+
"mlp_bias": false,
|
| 17 |
+
"model_type": "llama",
|
| 18 |
+
"num_attention_heads": 64,
|
| 19 |
+
"num_hidden_layers": 48,
|
| 20 |
+
"num_key_value_heads": 8,
|
| 21 |
+
"pad_token_id": 0,
|
| 22 |
+
"pretraining_tp": 1,
|
| 23 |
+
"rms_norm_eps": 1e-05,
|
| 24 |
+
"rope_scaling": {
|
| 25 |
+
"factor": 40.0,
|
| 26 |
+
"high_freq_factor": 4.0,
|
| 27 |
+
"low_freq_factor": 1.0,
|
| 28 |
+
"original_max_position_embeddings": 4096,
|
| 29 |
+
"rope_type": "llama3"
|
| 30 |
+
},
|
| 31 |
+
"rope_theta": 10000.0,
|
| 32 |
+
"tie_word_embeddings": false,
|
| 33 |
+
"transformers_version": "4.57.1",
|
| 34 |
+
"use_cache": true,
|
| 35 |
+
"vocab_size": 256000
|
| 36 |
+
}
|
generation_config.json
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"_from_model_config": true,
|
| 3 |
+
"bos_token_id": 1,
|
| 4 |
+
"eos_token_id": [
|
| 5 |
+
5,
|
| 6 |
+
2
|
| 7 |
+
],
|
| 8 |
+
"pad_token_id": 0,
|
| 9 |
+
"do_sample": false,
|
| 10 |
+
"transformers_version": "4.57.1"
|
| 11 |
+
}
|
images/logo_alia_2.png
ADDED
|
model-00001-of-00017.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:11c73712675c9838c9fd135056349f41bc2bae5ae8e79295bf8e63ce62de9153
|
| 3 |
+
size 4898947792
|
model-00002-of-00017.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f2bb38a2245a3f8fb91bcc7ae11a8fdae6d84a136c05e947aad899b57a863b38
|
| 3 |
+
size 4932603064
|
model-00003-of-00017.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ce8deed83ed9d469676918b698922139923d10e200b2067692865efb901ed11e
|
| 3 |
+
size 4932636064
|
model-00004-of-00017.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2939fd649c78b13ec1ef89408bda06ca33709946eceec293b9fac94146870787
|
| 3 |
+
size 4831940128
|
model-00005-of-00017.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2fc663ce4fb8e1c5a03646b83762a1f5132d2e76ae0c0cae367ad4659cb4bd4c
|
| 3 |
+
size 4932603096
|
model-00006-of-00017.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:5e559e95174f92ef0241a07ea46ad2102ef9a7ca0aa70ec774f4522d5039bea8
|
| 3 |
+
size 4932603088
|
model-00007-of-00017.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:7fb635e4cb7087db32ea3c149242e57cc1051db41b7f58e525d6048f9d29c022
|
| 3 |
+
size 4932636096
|
model-00008-of-00017.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:590d279f429a2c0667db35c733e3dc4aa925bb1ee24beb91a4da279f7ac6291d
|
| 3 |
+
size 4831940160
|
model-00009-of-00017.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:8a3dfd3d397532601faef36904caa4fadd3bf52fd36a08bf7f83c6cc164fd278
|
| 3 |
+
size 4932603096
|
model-00010-of-00017.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:095e68da60ef0d62aee61cfc71afc66dc81bc1242617374b6188b8637b4a1bc3
|
| 3 |
+
size 4932603088
|
model-00011-of-00017.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:003586385f3c546baa0d6870aa854e439904e61da11879d6284f991ef4961dc4
|
| 3 |
+
size 4932636096
|
model-00012-of-00017.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ec72bd061596e8c0996db0f934d4127f07c8488138aa88bc118ae3aebae2601b
|
| 3 |
+
size 4831940160
|
model-00013-of-00017.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:43d07c067bdbee91cd057ad399917e3b94d451589467f1372a2cc42a3811777b
|
| 3 |
+
size 4932603096
|
model-00014-of-00017.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:317732d9ec6b70faa39311d369b9999a55c1af9ca358f42901f40199f6735e55
|
| 3 |
+
size 4932603088
|
model-00015-of-00017.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ebf8546b9de8248eb9e5218ea1f8d47374e95cad739229d985a07a75c1a54f5f
|
| 3 |
+
size 4932636096
|
model-00016-of-00017.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e8705ce0e6a1bd41ca2d7e20067b0f91e3cae1248ae2e30ba56aa9eaa49589fd
|
| 3 |
+
size 3019983016
|
model-00017-of-00017.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c983c494e1dc0f0d7af75afa18c53d4e005c87d7fc820b1cc9c08ae3fedf23a2
|
| 3 |
+
size 4194304128
|
model.safetensors.index.json
ADDED
|
@@ -0,0 +1,442 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"metadata": {
|
| 3 |
+
"total_size": 80867770368
|
| 4 |
+
},
|
| 5 |
+
"weight_map": {
|
| 6 |
+
"lm_head.weight": "model-00017-of-00017.safetensors",
|
| 7 |
+
"model.embed_tokens.weight": "model-00001-of-00017.safetensors",
|
| 8 |
+
"model.layers.0.input_layernorm.weight": "model-00002-of-00017.safetensors",
|
| 9 |
+
"model.layers.0.mlp.down_proj.weight": "model-00002-of-00017.safetensors",
|
| 10 |
+
"model.layers.0.mlp.gate_proj.weight": "model-00001-of-00017.safetensors",
|
| 11 |
+
"model.layers.0.mlp.up_proj.weight": "model-00002-of-00017.safetensors",
|
| 12 |
+
"model.layers.0.post_attention_layernorm.weight": "model-00002-of-00017.safetensors",
|
| 13 |
+
"model.layers.0.self_attn.k_proj.weight": "model-00001-of-00017.safetensors",
|
| 14 |
+
"model.layers.0.self_attn.o_proj.weight": "model-00001-of-00017.safetensors",
|
| 15 |
+
"model.layers.0.self_attn.q_proj.weight": "model-00001-of-00017.safetensors",
|
| 16 |
+
"model.layers.0.self_attn.v_proj.weight": "model-00001-of-00017.safetensors",
|
| 17 |
+
"model.layers.1.input_layernorm.weight": "model-00002-of-00017.safetensors",
|
| 18 |
+
"model.layers.1.mlp.down_proj.weight": "model-00002-of-00017.safetensors",
|
| 19 |
+
"model.layers.1.mlp.gate_proj.weight": "model-00002-of-00017.safetensors",
|
| 20 |
+
"model.layers.1.mlp.up_proj.weight": "model-00002-of-00017.safetensors",
|
| 21 |
+
"model.layers.1.post_attention_layernorm.weight": "model-00002-of-00017.safetensors",
|
| 22 |
+
"model.layers.1.self_attn.k_proj.weight": "model-00002-of-00017.safetensors",
|
| 23 |
+
"model.layers.1.self_attn.o_proj.weight": "model-00002-of-00017.safetensors",
|
| 24 |
+
"model.layers.1.self_attn.q_proj.weight": "model-00002-of-00017.safetensors",
|
| 25 |
+
"model.layers.1.self_attn.v_proj.weight": "model-00002-of-00017.safetensors",
|
| 26 |
+
"model.layers.10.input_layernorm.weight": "model-00005-of-00017.safetensors",
|
| 27 |
+
"model.layers.10.mlp.down_proj.weight": "model-00005-of-00017.safetensors",
|
| 28 |
+
"model.layers.10.mlp.gate_proj.weight": "model-00005-of-00017.safetensors",
|
| 29 |
+
"model.layers.10.mlp.up_proj.weight": "model-00005-of-00017.safetensors",
|
| 30 |
+
"model.layers.10.post_attention_layernorm.weight": "model-00005-of-00017.safetensors",
|
| 31 |
+
"model.layers.10.self_attn.k_proj.weight": "model-00004-of-00017.safetensors",
|
| 32 |
+
"model.layers.10.self_attn.o_proj.weight": "model-00004-of-00017.safetensors",
|
| 33 |
+
"model.layers.10.self_attn.q_proj.weight": "model-00004-of-00017.safetensors",
|
| 34 |
+
"model.layers.10.self_attn.v_proj.weight": "model-00004-of-00017.safetensors",
|
| 35 |
+
"model.layers.11.input_layernorm.weight": "model-00005-of-00017.safetensors",
|
| 36 |
+
"model.layers.11.mlp.down_proj.weight": "model-00005-of-00017.safetensors",
|
| 37 |
+
"model.layers.11.mlp.gate_proj.weight": "model-00005-of-00017.safetensors",
|
| 38 |
+
"model.layers.11.mlp.up_proj.weight": "model-00005-of-00017.safetensors",
|
| 39 |
+
"model.layers.11.post_attention_layernorm.weight": "model-00005-of-00017.safetensors",
|
| 40 |
+
"model.layers.11.self_attn.k_proj.weight": "model-00005-of-00017.safetensors",
|
| 41 |
+
"model.layers.11.self_attn.o_proj.weight": "model-00005-of-00017.safetensors",
|
| 42 |
+
"model.layers.11.self_attn.q_proj.weight": "model-00005-of-00017.safetensors",
|
| 43 |
+
"model.layers.11.self_attn.v_proj.weight": "model-00005-of-00017.safetensors",
|
| 44 |
+
"model.layers.12.input_layernorm.weight": "model-00005-of-00017.safetensors",
|
| 45 |
+
"model.layers.12.mlp.down_proj.weight": "model-00005-of-00017.safetensors",
|
| 46 |
+
"model.layers.12.mlp.gate_proj.weight": "model-00005-of-00017.safetensors",
|
| 47 |
+
"model.layers.12.mlp.up_proj.weight": "model-00005-of-00017.safetensors",
|
| 48 |
+
"model.layers.12.post_attention_layernorm.weight": "model-00005-of-00017.safetensors",
|
| 49 |
+
"model.layers.12.self_attn.k_proj.weight": "model-00005-of-00017.safetensors",
|
| 50 |
+
"model.layers.12.self_attn.o_proj.weight": "model-00005-of-00017.safetensors",
|
| 51 |
+
"model.layers.12.self_attn.q_proj.weight": "model-00005-of-00017.safetensors",
|
| 52 |
+
"model.layers.12.self_attn.v_proj.weight": "model-00005-of-00017.safetensors",
|
| 53 |
+
"model.layers.13.input_layernorm.weight": "model-00006-of-00017.safetensors",
|
| 54 |
+
"model.layers.13.mlp.down_proj.weight": "model-00006-of-00017.safetensors",
|
| 55 |
+
"model.layers.13.mlp.gate_proj.weight": "model-00005-of-00017.safetensors",
|
| 56 |
+
"model.layers.13.mlp.up_proj.weight": "model-00006-of-00017.safetensors",
|
| 57 |
+
"model.layers.13.post_attention_layernorm.weight": "model-00006-of-00017.safetensors",
|
| 58 |
+
"model.layers.13.self_attn.k_proj.weight": "model-00005-of-00017.safetensors",
|
| 59 |
+
"model.layers.13.self_attn.o_proj.weight": "model-00005-of-00017.safetensors",
|
| 60 |
+
"model.layers.13.self_attn.q_proj.weight": "model-00005-of-00017.safetensors",
|
| 61 |
+
"model.layers.13.self_attn.v_proj.weight": "model-00005-of-00017.safetensors",
|
| 62 |
+
"model.layers.14.input_layernorm.weight": "model-00006-of-00017.safetensors",
|
| 63 |
+
"model.layers.14.mlp.down_proj.weight": "model-00006-of-00017.safetensors",
|
| 64 |
+
"model.layers.14.mlp.gate_proj.weight": "model-00006-of-00017.safetensors",
|
| 65 |
+
"model.layers.14.mlp.up_proj.weight": "model-00006-of-00017.safetensors",
|
| 66 |
+
"model.layers.14.post_attention_layernorm.weight": "model-00006-of-00017.safetensors",
|
| 67 |
+
"model.layers.14.self_attn.k_proj.weight": "model-00006-of-00017.safetensors",
|
| 68 |
+
"model.layers.14.self_attn.o_proj.weight": "model-00006-of-00017.safetensors",
|
| 69 |
+
"model.layers.14.self_attn.q_proj.weight": "model-00006-of-00017.safetensors",
|
| 70 |
+
"model.layers.14.self_attn.v_proj.weight": "model-00006-of-00017.safetensors",
|
| 71 |
+
"model.layers.15.input_layernorm.weight": "model-00006-of-00017.safetensors",
|
| 72 |
+
"model.layers.15.mlp.down_proj.weight": "model-00006-of-00017.safetensors",
|
| 73 |
+
"model.layers.15.mlp.gate_proj.weight": "model-00006-of-00017.safetensors",
|
| 74 |
+
"model.layers.15.mlp.up_proj.weight": "model-00006-of-00017.safetensors",
|
| 75 |
+
"model.layers.15.post_attention_layernorm.weight": "model-00006-of-00017.safetensors",
|
| 76 |
+
"model.layers.15.self_attn.k_proj.weight": "model-00006-of-00017.safetensors",
|
| 77 |
+
"model.layers.15.self_attn.o_proj.weight": "model-00006-of-00017.safetensors",
|
| 78 |
+
"model.layers.15.self_attn.q_proj.weight": "model-00006-of-00017.safetensors",
|
| 79 |
+
"model.layers.15.self_attn.v_proj.weight": "model-00006-of-00017.safetensors",
|
| 80 |
+
"model.layers.16.input_layernorm.weight": "model-00007-of-00017.safetensors",
|
| 81 |
+
"model.layers.16.mlp.down_proj.weight": "model-00007-of-00017.safetensors",
|
| 82 |
+
"model.layers.16.mlp.gate_proj.weight": "model-00006-of-00017.safetensors",
|
| 83 |
+
"model.layers.16.mlp.up_proj.weight": "model-00006-of-00017.safetensors",
|
| 84 |
+
"model.layers.16.post_attention_layernorm.weight": "model-00007-of-00017.safetensors",
|
| 85 |
+
"model.layers.16.self_attn.k_proj.weight": "model-00006-of-00017.safetensors",
|
| 86 |
+
"model.layers.16.self_attn.o_proj.weight": "model-00006-of-00017.safetensors",
|
| 87 |
+
"model.layers.16.self_attn.q_proj.weight": "model-00006-of-00017.safetensors",
|
| 88 |
+
"model.layers.16.self_attn.v_proj.weight": "model-00006-of-00017.safetensors",
|
| 89 |
+
"model.layers.17.input_layernorm.weight": "model-00007-of-00017.safetensors",
|
| 90 |
+
"model.layers.17.mlp.down_proj.weight": "model-00007-of-00017.safetensors",
|
| 91 |
+
"model.layers.17.mlp.gate_proj.weight": "model-00007-of-00017.safetensors",
|
| 92 |
+
"model.layers.17.mlp.up_proj.weight": "model-00007-of-00017.safetensors",
|
| 93 |
+
"model.layers.17.post_attention_layernorm.weight": "model-00007-of-00017.safetensors",
|
| 94 |
+
"model.layers.17.self_attn.k_proj.weight": "model-00007-of-00017.safetensors",
|
| 95 |
+
"model.layers.17.self_attn.o_proj.weight": "model-00007-of-00017.safetensors",
|
| 96 |
+
"model.layers.17.self_attn.q_proj.weight": "model-00007-of-00017.safetensors",
|
| 97 |
+
"model.layers.17.self_attn.v_proj.weight": "model-00007-of-00017.safetensors",
|
| 98 |
+
"model.layers.18.input_layernorm.weight": "model-00007-of-00017.safetensors",
|
| 99 |
+
"model.layers.18.mlp.down_proj.weight": "model-00007-of-00017.safetensors",
|
| 100 |
+
"model.layers.18.mlp.gate_proj.weight": "model-00007-of-00017.safetensors",
|
| 101 |
+
"model.layers.18.mlp.up_proj.weight": "model-00007-of-00017.safetensors",
|
| 102 |
+
"model.layers.18.post_attention_layernorm.weight": "model-00007-of-00017.safetensors",
|
| 103 |
+
"model.layers.18.self_attn.k_proj.weight": "model-00007-of-00017.safetensors",
|
| 104 |
+
"model.layers.18.self_attn.o_proj.weight": "model-00007-of-00017.safetensors",
|
| 105 |
+
"model.layers.18.self_attn.q_proj.weight": "model-00007-of-00017.safetensors",
|
| 106 |
+
"model.layers.18.self_attn.v_proj.weight": "model-00007-of-00017.safetensors",
|
| 107 |
+
"model.layers.19.input_layernorm.weight": "model-00007-of-00017.safetensors",
|
| 108 |
+
"model.layers.19.mlp.down_proj.weight": "model-00007-of-00017.safetensors",
|
| 109 |
+
"model.layers.19.mlp.gate_proj.weight": "model-00007-of-00017.safetensors",
|
| 110 |
+
"model.layers.19.mlp.up_proj.weight": "model-00007-of-00017.safetensors",
|
| 111 |
+
"model.layers.19.post_attention_layernorm.weight": "model-00007-of-00017.safetensors",
|
| 112 |
+
"model.layers.19.self_attn.k_proj.weight": "model-00007-of-00017.safetensors",
|
| 113 |
+
"model.layers.19.self_attn.o_proj.weight": "model-00007-of-00017.safetensors",
|
| 114 |
+
"model.layers.19.self_attn.q_proj.weight": "model-00007-of-00017.safetensors",
|
| 115 |
+
"model.layers.19.self_attn.v_proj.weight": "model-00007-of-00017.safetensors",
|
| 116 |
+
"model.layers.2.input_layernorm.weight": "model-00002-of-00017.safetensors",
|
| 117 |
+
"model.layers.2.mlp.down_proj.weight": "model-00002-of-00017.safetensors",
|
| 118 |
+
"model.layers.2.mlp.gate_proj.weight": "model-00002-of-00017.safetensors",
|
| 119 |
+
"model.layers.2.mlp.up_proj.weight": "model-00002-of-00017.safetensors",
|
| 120 |
+
"model.layers.2.post_attention_layernorm.weight": "model-00002-of-00017.safetensors",
|
| 121 |
+
"model.layers.2.self_attn.k_proj.weight": "model-00002-of-00017.safetensors",
|
| 122 |
+
"model.layers.2.self_attn.o_proj.weight": "model-00002-of-00017.safetensors",
|
| 123 |
+
"model.layers.2.self_attn.q_proj.weight": "model-00002-of-00017.safetensors",
|
| 124 |
+
"model.layers.2.self_attn.v_proj.weight": "model-00002-of-00017.safetensors",
|
| 125 |
+
"model.layers.20.input_layernorm.weight": "model-00008-of-00017.safetensors",
|
| 126 |
+
"model.layers.20.mlp.down_proj.weight": "model-00008-of-00017.safetensors",
|
| 127 |
+
"model.layers.20.mlp.gate_proj.weight": "model-00008-of-00017.safetensors",
|
| 128 |
+
"model.layers.20.mlp.up_proj.weight": "model-00008-of-00017.safetensors",
|
| 129 |
+
"model.layers.20.post_attention_layernorm.weight": "model-00008-of-00017.safetensors",
|
| 130 |
+
"model.layers.20.self_attn.k_proj.weight": "model-00008-of-00017.safetensors",
|
| 131 |
+
"model.layers.20.self_attn.o_proj.weight": "model-00008-of-00017.safetensors",
|
| 132 |
+
"model.layers.20.self_attn.q_proj.weight": "model-00008-of-00017.safetensors",
|
| 133 |
+
"model.layers.20.self_attn.v_proj.weight": "model-00008-of-00017.safetensors",
|
| 134 |
+
"model.layers.21.input_layernorm.weight": "model-00008-of-00017.safetensors",
|
| 135 |
+
"model.layers.21.mlp.down_proj.weight": "model-00008-of-00017.safetensors",
|
| 136 |
+
"model.layers.21.mlp.gate_proj.weight": "model-00008-of-00017.safetensors",
|
| 137 |
+
"model.layers.21.mlp.up_proj.weight": "model-00008-of-00017.safetensors",
|
| 138 |
+
"model.layers.21.post_attention_layernorm.weight": "model-00008-of-00017.safetensors",
|
| 139 |
+
"model.layers.21.self_attn.k_proj.weight": "model-00008-of-00017.safetensors",
|
| 140 |
+
"model.layers.21.self_attn.o_proj.weight": "model-00008-of-00017.safetensors",
|
| 141 |
+
"model.layers.21.self_attn.q_proj.weight": "model-00008-of-00017.safetensors",
|
| 142 |
+
"model.layers.21.self_attn.v_proj.weight": "model-00008-of-00017.safetensors",
|
| 143 |
+
"model.layers.22.input_layernorm.weight": "model-00008-of-00017.safetensors",
|
| 144 |
+
"model.layers.22.mlp.down_proj.weight": "model-00008-of-00017.safetensors",
|
| 145 |
+
"model.layers.22.mlp.gate_proj.weight": "model-00008-of-00017.safetensors",
|
| 146 |
+
"model.layers.22.mlp.up_proj.weight": "model-00008-of-00017.safetensors",
|
| 147 |
+
"model.layers.22.post_attention_layernorm.weight": "model-00008-of-00017.safetensors",
|
| 148 |
+
"model.layers.22.self_attn.k_proj.weight": "model-00008-of-00017.safetensors",
|
| 149 |
+
"model.layers.22.self_attn.o_proj.weight": "model-00008-of-00017.safetensors",
|
| 150 |
+
"model.layers.22.self_attn.q_proj.weight": "model-00008-of-00017.safetensors",
|
| 151 |
+
"model.layers.22.self_attn.v_proj.weight": "model-00008-of-00017.safetensors",
|
| 152 |
+
"model.layers.23.input_layernorm.weight": "model-00009-of-00017.safetensors",
|
| 153 |
+
"model.layers.23.mlp.down_proj.weight": "model-00009-of-00017.safetensors",
|
| 154 |
+
"model.layers.23.mlp.gate_proj.weight": "model-00009-of-00017.safetensors",
|
| 155 |
+
"model.layers.23.mlp.up_proj.weight": "model-00009-of-00017.safetensors",
|
| 156 |
+
"model.layers.23.post_attention_layernorm.weight": "model-00009-of-00017.safetensors",
|
| 157 |
+
"model.layers.23.self_attn.k_proj.weight": "model-00008-of-00017.safetensors",
|
| 158 |
+
"model.layers.23.self_attn.o_proj.weight": "model-00008-of-00017.safetensors",
|
| 159 |
+
"model.layers.23.self_attn.q_proj.weight": "model-00008-of-00017.safetensors",
|
| 160 |
+
"model.layers.23.self_attn.v_proj.weight": "model-00008-of-00017.safetensors",
|
| 161 |
+
"model.layers.24.input_layernorm.weight": "model-00009-of-00017.safetensors",
|
| 162 |
+
"model.layers.24.mlp.down_proj.weight": "model-00009-of-00017.safetensors",
|
| 163 |
+
"model.layers.24.mlp.gate_proj.weight": "model-00009-of-00017.safetensors",
|
| 164 |
+
"model.layers.24.mlp.up_proj.weight": "model-00009-of-00017.safetensors",
|
| 165 |
+
"model.layers.24.post_attention_layernorm.weight": "model-00009-of-00017.safetensors",
|
| 166 |
+
"model.layers.24.self_attn.k_proj.weight": "model-00009-of-00017.safetensors",
|
| 167 |
+
"model.layers.24.self_attn.o_proj.weight": "model-00009-of-00017.safetensors",
|
| 168 |
+
"model.layers.24.self_attn.q_proj.weight": "model-00009-of-00017.safetensors",
|
| 169 |
+
"model.layers.24.self_attn.v_proj.weight": "model-00009-of-00017.safetensors",
|
| 170 |
+
"model.layers.25.input_layernorm.weight": "model-00009-of-00017.safetensors",
|
| 171 |
+
"model.layers.25.mlp.down_proj.weight": "model-00009-of-00017.safetensors",
|
| 172 |
+
"model.layers.25.mlp.gate_proj.weight": "model-00009-of-00017.safetensors",
|
| 173 |
+
"model.layers.25.mlp.up_proj.weight": "model-00009-of-00017.safetensors",
|
| 174 |
+
"model.layers.25.post_attention_layernorm.weight": "model-00009-of-00017.safetensors",
|
| 175 |
+
"model.layers.25.self_attn.k_proj.weight": "model-00009-of-00017.safetensors",
|
| 176 |
+
"model.layers.25.self_attn.o_proj.weight": "model-00009-of-00017.safetensors",
|
| 177 |
+
"model.layers.25.self_attn.q_proj.weight": "model-00009-of-00017.safetensors",
|
| 178 |
+
"model.layers.25.self_attn.v_proj.weight": "model-00009-of-00017.safetensors",
|
| 179 |
+
"model.layers.26.input_layernorm.weight": "model-00010-of-00017.safetensors",
|
| 180 |
+
"model.layers.26.mlp.down_proj.weight": "model-00010-of-00017.safetensors",
|
| 181 |
+
"model.layers.26.mlp.gate_proj.weight": "model-00009-of-00017.safetensors",
|
| 182 |
+
"model.layers.26.mlp.up_proj.weight": "model-00010-of-00017.safetensors",
|
| 183 |
+
"model.layers.26.post_attention_layernorm.weight": "model-00010-of-00017.safetensors",
|
| 184 |
+
"model.layers.26.self_attn.k_proj.weight": "model-00009-of-00017.safetensors",
|
| 185 |
+
"model.layers.26.self_attn.o_proj.weight": "model-00009-of-00017.safetensors",
|
| 186 |
+
"model.layers.26.self_attn.q_proj.weight": "model-00009-of-00017.safetensors",
|
| 187 |
+
"model.layers.26.self_attn.v_proj.weight": "model-00009-of-00017.safetensors",
|
| 188 |
+
"model.layers.27.input_layernorm.weight": "model-00010-of-00017.safetensors",
|
| 189 |
+
"model.layers.27.mlp.down_proj.weight": "model-00010-of-00017.safetensors",
|
| 190 |
+
"model.layers.27.mlp.gate_proj.weight": "model-00010-of-00017.safetensors",
|
| 191 |
+
"model.layers.27.mlp.up_proj.weight": "model-00010-of-00017.safetensors",
|
| 192 |
+
"model.layers.27.post_attention_layernorm.weight": "model-00010-of-00017.safetensors",
|
| 193 |
+
"model.layers.27.self_attn.k_proj.weight": "model-00010-of-00017.safetensors",
|
| 194 |
+
"model.layers.27.self_attn.o_proj.weight": "model-00010-of-00017.safetensors",
|
| 195 |
+
"model.layers.27.self_attn.q_proj.weight": "model-00010-of-00017.safetensors",
|
| 196 |
+
"model.layers.27.self_attn.v_proj.weight": "model-00010-of-00017.safetensors",
|
| 197 |
+
"model.layers.28.input_layernorm.weight": "model-00010-of-00017.safetensors",
|
| 198 |
+
"model.layers.28.mlp.down_proj.weight": "model-00010-of-00017.safetensors",
|
| 199 |
+
"model.layers.28.mlp.gate_proj.weight": "model-00010-of-00017.safetensors",
|
| 200 |
+
"model.layers.28.mlp.up_proj.weight": "model-00010-of-00017.safetensors",
|
| 201 |
+
"model.layers.28.post_attention_layernorm.weight": "model-00010-of-00017.safetensors",
|
| 202 |
+
"model.layers.28.self_attn.k_proj.weight": "model-00010-of-00017.safetensors",
|
| 203 |
+
"model.layers.28.self_attn.o_proj.weight": "model-00010-of-00017.safetensors",
|
| 204 |
+
"model.layers.28.self_attn.q_proj.weight": "model-00010-of-00017.safetensors",
|
| 205 |
+
"model.layers.28.self_attn.v_proj.weight": "model-00010-of-00017.safetensors",
|
| 206 |
+
"model.layers.29.input_layernorm.weight": "model-00011-of-00017.safetensors",
|
| 207 |
+
"model.layers.29.mlp.down_proj.weight": "model-00011-of-00017.safetensors",
|
| 208 |
+
"model.layers.29.mlp.gate_proj.weight": "model-00010-of-00017.safetensors",
|
| 209 |
+
"model.layers.29.mlp.up_proj.weight": "model-00010-of-00017.safetensors",
|
| 210 |
+
"model.layers.29.post_attention_layernorm.weight": "model-00011-of-00017.safetensors",
|
| 211 |
+
"model.layers.29.self_attn.k_proj.weight": "model-00010-of-00017.safetensors",
|
| 212 |
+
"model.layers.29.self_attn.o_proj.weight": "model-00010-of-00017.safetensors",
|
| 213 |
+
"model.layers.29.self_attn.q_proj.weight": "model-00010-of-00017.safetensors",
|
| 214 |
+
"model.layers.29.self_attn.v_proj.weight": "model-00010-of-00017.safetensors",
|
| 215 |
+
"model.layers.3.input_layernorm.weight": "model-00003-of-00017.safetensors",
|
| 216 |
+
"model.layers.3.mlp.down_proj.weight": "model-00003-of-00017.safetensors",
|
| 217 |
+
"model.layers.3.mlp.gate_proj.weight": "model-00002-of-00017.safetensors",
|
| 218 |
+
"model.layers.3.mlp.up_proj.weight": "model-00002-of-00017.safetensors",
|
| 219 |
+
"model.layers.3.post_attention_layernorm.weight": "model-00003-of-00017.safetensors",
|
| 220 |
+
"model.layers.3.self_attn.k_proj.weight": "model-00002-of-00017.safetensors",
|
| 221 |
+
"model.layers.3.self_attn.o_proj.weight": "model-00002-of-00017.safetensors",
|
| 222 |
+
"model.layers.3.self_attn.q_proj.weight": "model-00002-of-00017.safetensors",
|
| 223 |
+
"model.layers.3.self_attn.v_proj.weight": "model-00002-of-00017.safetensors",
|
| 224 |
+
"model.layers.30.input_layernorm.weight": "model-00011-of-00017.safetensors",
|
| 225 |
+
"model.layers.30.mlp.down_proj.weight": "model-00011-of-00017.safetensors",
|
| 226 |
+
"model.layers.30.mlp.gate_proj.weight": "model-00011-of-00017.safetensors",
|
| 227 |
+
"model.layers.30.mlp.up_proj.weight": "model-00011-of-00017.safetensors",
|
| 228 |
+
"model.layers.30.post_attention_layernorm.weight": "model-00011-of-00017.safetensors",
|
| 229 |
+
"model.layers.30.self_attn.k_proj.weight": "model-00011-of-00017.safetensors",
|
| 230 |
+
"model.layers.30.self_attn.o_proj.weight": "model-00011-of-00017.safetensors",
|
| 231 |
+
"model.layers.30.self_attn.q_proj.weight": "model-00011-of-00017.safetensors",
|
| 232 |
+
"model.layers.30.self_attn.v_proj.weight": "model-00011-of-00017.safetensors",
|
| 233 |
+
"model.layers.31.input_layernorm.weight": "model-00011-of-00017.safetensors",
|
| 234 |
+
"model.layers.31.mlp.down_proj.weight": "model-00011-of-00017.safetensors",
|
| 235 |
+
"model.layers.31.mlp.gate_proj.weight": "model-00011-of-00017.safetensors",
|
| 236 |
+
"model.layers.31.mlp.up_proj.weight": "model-00011-of-00017.safetensors",
|
| 237 |
+
"model.layers.31.post_attention_layernorm.weight": "model-00011-of-00017.safetensors",
|
| 238 |
+
"model.layers.31.self_attn.k_proj.weight": "model-00011-of-00017.safetensors",
|
| 239 |
+
"model.layers.31.self_attn.o_proj.weight": "model-00011-of-00017.safetensors",
|
| 240 |
+
"model.layers.31.self_attn.q_proj.weight": "model-00011-of-00017.safetensors",
|
| 241 |
+
"model.layers.31.self_attn.v_proj.weight": "model-00011-of-00017.safetensors",
|
| 242 |
+
"model.layers.32.input_layernorm.weight": "model-00011-of-00017.safetensors",
|
| 243 |
+
"model.layers.32.mlp.down_proj.weight": "model-00011-of-00017.safetensors",
|
| 244 |
+
"model.layers.32.mlp.gate_proj.weight": "model-00011-of-00017.safetensors",
|
| 245 |
+
"model.layers.32.mlp.up_proj.weight": "model-00011-of-00017.safetensors",
|
| 246 |
+
"model.layers.32.post_attention_layernorm.weight": "model-00011-of-00017.safetensors",
|
| 247 |
+
"model.layers.32.self_attn.k_proj.weight": "model-00011-of-00017.safetensors",
|
| 248 |
+
"model.layers.32.self_attn.o_proj.weight": "model-00011-of-00017.safetensors",
|
| 249 |
+
"model.layers.32.self_attn.q_proj.weight": "model-00011-of-00017.safetensors",
|
| 250 |
+
"model.layers.32.self_attn.v_proj.weight": "model-00011-of-00017.safetensors",
|
| 251 |
+
"model.layers.33.input_layernorm.weight": "model-00012-of-00017.safetensors",
|
| 252 |
+
"model.layers.33.mlp.down_proj.weight": "model-00012-of-00017.safetensors",
|
| 253 |
+
"model.layers.33.mlp.gate_proj.weight": "model-00012-of-00017.safetensors",
|
| 254 |
+
"model.layers.33.mlp.up_proj.weight": "model-00012-of-00017.safetensors",
|
| 255 |
+
"model.layers.33.post_attention_layernorm.weight": "model-00012-of-00017.safetensors",
|
| 256 |
+
"model.layers.33.self_attn.k_proj.weight": "model-00012-of-00017.safetensors",
|
| 257 |
+
"model.layers.33.self_attn.o_proj.weight": "model-00012-of-00017.safetensors",
|
| 258 |
+
"model.layers.33.self_attn.q_proj.weight": "model-00012-of-00017.safetensors",
|
| 259 |
+
"model.layers.33.self_attn.v_proj.weight": "model-00012-of-00017.safetensors",
|
| 260 |
+
"model.layers.34.input_layernorm.weight": "model-00012-of-00017.safetensors",
|
| 261 |
+
"model.layers.34.mlp.down_proj.weight": "model-00012-of-00017.safetensors",
|
| 262 |
+
"model.layers.34.mlp.gate_proj.weight": "model-00012-of-00017.safetensors",
|
| 263 |
+
"model.layers.34.mlp.up_proj.weight": "model-00012-of-00017.safetensors",
|
| 264 |
+
"model.layers.34.post_attention_layernorm.weight": "model-00012-of-00017.safetensors",
|
| 265 |
+
"model.layers.34.self_attn.k_proj.weight": "model-00012-of-00017.safetensors",
|
| 266 |
+
"model.layers.34.self_attn.o_proj.weight": "model-00012-of-00017.safetensors",
|
| 267 |
+
"model.layers.34.self_attn.q_proj.weight": "model-00012-of-00017.safetensors",
|
| 268 |
+
"model.layers.34.self_attn.v_proj.weight": "model-00012-of-00017.safetensors",
|
| 269 |
+
"model.layers.35.input_layernorm.weight": "model-00012-of-00017.safetensors",
|
| 270 |
+
"model.layers.35.mlp.down_proj.weight": "model-00012-of-00017.safetensors",
|
| 271 |
+
"model.layers.35.mlp.gate_proj.weight": "model-00012-of-00017.safetensors",
|
| 272 |
+
"model.layers.35.mlp.up_proj.weight": "model-00012-of-00017.safetensors",
|
| 273 |
+
"model.layers.35.post_attention_layernorm.weight": "model-00012-of-00017.safetensors",
|
| 274 |
+
"model.layers.35.self_attn.k_proj.weight": "model-00012-of-00017.safetensors",
|
| 275 |
+
"model.layers.35.self_attn.o_proj.weight": "model-00012-of-00017.safetensors",
|
| 276 |
+
"model.layers.35.self_attn.q_proj.weight": "model-00012-of-00017.safetensors",
|
| 277 |
+
"model.layers.35.self_attn.v_proj.weight": "model-00012-of-00017.safetensors",
|
| 278 |
+
"model.layers.36.input_layernorm.weight": "model-00013-of-00017.safetensors",
|
| 279 |
+
"model.layers.36.mlp.down_proj.weight": "model-00013-of-00017.safetensors",
|
| 280 |
+
"model.layers.36.mlp.gate_proj.weight": "model-00013-of-00017.safetensors",
|
| 281 |
+
"model.layers.36.mlp.up_proj.weight": "model-00013-of-00017.safetensors",
|
| 282 |
+
"model.layers.36.post_attention_layernorm.weight": "model-00013-of-00017.safetensors",
|
| 283 |
+
"model.layers.36.self_attn.k_proj.weight": "model-00012-of-00017.safetensors",
|
| 284 |
+
"model.layers.36.self_attn.o_proj.weight": "model-00012-of-00017.safetensors",
|
| 285 |
+
"model.layers.36.self_attn.q_proj.weight": "model-00012-of-00017.safetensors",
|
| 286 |
+
"model.layers.36.self_attn.v_proj.weight": "model-00012-of-00017.safetensors",
|
| 287 |
+
"model.layers.37.input_layernorm.weight": "model-00013-of-00017.safetensors",
|
| 288 |
+
"model.layers.37.mlp.down_proj.weight": "model-00013-of-00017.safetensors",
|
| 289 |
+
"model.layers.37.mlp.gate_proj.weight": "model-00013-of-00017.safetensors",
|
| 290 |
+
"model.layers.37.mlp.up_proj.weight": "model-00013-of-00017.safetensors",
|
| 291 |
+
"model.layers.37.post_attention_layernorm.weight": "model-00013-of-00017.safetensors",
|
| 292 |
+
"model.layers.37.self_attn.k_proj.weight": "model-00013-of-00017.safetensors",
|
| 293 |
+
"model.layers.37.self_attn.o_proj.weight": "model-00013-of-00017.safetensors",
|
| 294 |
+
"model.layers.37.self_attn.q_proj.weight": "model-00013-of-00017.safetensors",
|
| 295 |
+
"model.layers.37.self_attn.v_proj.weight": "model-00013-of-00017.safetensors",
|
| 296 |
+
"model.layers.38.input_layernorm.weight": "model-00013-of-00017.safetensors",
|
| 297 |
+
"model.layers.38.mlp.down_proj.weight": "model-00013-of-00017.safetensors",
|
| 298 |
+
"model.layers.38.mlp.gate_proj.weight": "model-00013-of-00017.safetensors",
|
| 299 |
+
"model.layers.38.mlp.up_proj.weight": "model-00013-of-00017.safetensors",
|
| 300 |
+
"model.layers.38.post_attention_layernorm.weight": "model-00013-of-00017.safetensors",
|
| 301 |
+
"model.layers.38.self_attn.k_proj.weight": "model-00013-of-00017.safetensors",
|
| 302 |
+
"model.layers.38.self_attn.o_proj.weight": "model-00013-of-00017.safetensors",
|
| 303 |
+
"model.layers.38.self_attn.q_proj.weight": "model-00013-of-00017.safetensors",
|
| 304 |
+
"model.layers.38.self_attn.v_proj.weight": "model-00013-of-00017.safetensors",
|
| 305 |
+
"model.layers.39.input_layernorm.weight": "model-00014-of-00017.safetensors",
|
| 306 |
+
"model.layers.39.mlp.down_proj.weight": "model-00014-of-00017.safetensors",
|
| 307 |
+
"model.layers.39.mlp.gate_proj.weight": "model-00013-of-00017.safetensors",
|
| 308 |
+
"model.layers.39.mlp.up_proj.weight": "model-00014-of-00017.safetensors",
|
| 309 |
+
"model.layers.39.post_attention_layernorm.weight": "model-00014-of-00017.safetensors",
|
| 310 |
+
"model.layers.39.self_attn.k_proj.weight": "model-00013-of-00017.safetensors",
|
| 311 |
+
"model.layers.39.self_attn.o_proj.weight": "model-00013-of-00017.safetensors",
|
| 312 |
+
"model.layers.39.self_attn.q_proj.weight": "model-00013-of-00017.safetensors",
|
| 313 |
+
"model.layers.39.self_attn.v_proj.weight": "model-00013-of-00017.safetensors",
|
| 314 |
+
"model.layers.4.input_layernorm.weight": "model-00003-of-00017.safetensors",
|
| 315 |
+
"model.layers.4.mlp.down_proj.weight": "model-00003-of-00017.safetensors",
|
| 316 |
+
"model.layers.4.mlp.gate_proj.weight": "model-00003-of-00017.safetensors",
|
| 317 |
+
"model.layers.4.mlp.up_proj.weight": "model-00003-of-00017.safetensors",
|
| 318 |
+
"model.layers.4.post_attention_layernorm.weight": "model-00003-of-00017.safetensors",
|
| 319 |
+
"model.layers.4.self_attn.k_proj.weight": "model-00003-of-00017.safetensors",
|
| 320 |
+
"model.layers.4.self_attn.o_proj.weight": "model-00003-of-00017.safetensors",
|
| 321 |
+
"model.layers.4.self_attn.q_proj.weight": "model-00003-of-00017.safetensors",
|
| 322 |
+
"model.layers.4.self_attn.v_proj.weight": "model-00003-of-00017.safetensors",
|
| 323 |
+
"model.layers.40.input_layernorm.weight": "model-00014-of-00017.safetensors",
|
| 324 |
+
"model.layers.40.mlp.down_proj.weight": "model-00014-of-00017.safetensors",
|
| 325 |
+
"model.layers.40.mlp.gate_proj.weight": "model-00014-of-00017.safetensors",
|
| 326 |
+
"model.layers.40.mlp.up_proj.weight": "model-00014-of-00017.safetensors",
|
| 327 |
+
"model.layers.40.post_attention_layernorm.weight": "model-00014-of-00017.safetensors",
|
| 328 |
+
"model.layers.40.self_attn.k_proj.weight": "model-00014-of-00017.safetensors",
|
| 329 |
+
"model.layers.40.self_attn.o_proj.weight": "model-00014-of-00017.safetensors",
|
| 330 |
+
"model.layers.40.self_attn.q_proj.weight": "model-00014-of-00017.safetensors",
|
| 331 |
+
"model.layers.40.self_attn.v_proj.weight": "model-00014-of-00017.safetensors",
|
| 332 |
+
"model.layers.41.input_layernorm.weight": "model-00014-of-00017.safetensors",
|
| 333 |
+
"model.layers.41.mlp.down_proj.weight": "model-00014-of-00017.safetensors",
|
| 334 |
+
"model.layers.41.mlp.gate_proj.weight": "model-00014-of-00017.safetensors",
|
| 335 |
+
"model.layers.41.mlp.up_proj.weight": "model-00014-of-00017.safetensors",
|
| 336 |
+
"model.layers.41.post_attention_layernorm.weight": "model-00014-of-00017.safetensors",
|
| 337 |
+
"model.layers.41.self_attn.k_proj.weight": "model-00014-of-00017.safetensors",
|
| 338 |
+
"model.layers.41.self_attn.o_proj.weight": "model-00014-of-00017.safetensors",
|
| 339 |
+
"model.layers.41.self_attn.q_proj.weight": "model-00014-of-00017.safetensors",
|
| 340 |
+
"model.layers.41.self_attn.v_proj.weight": "model-00014-of-00017.safetensors",
|
| 341 |
+
"model.layers.42.input_layernorm.weight": "model-00015-of-00017.safetensors",
|
| 342 |
+
"model.layers.42.mlp.down_proj.weight": "model-00015-of-00017.safetensors",
|
| 343 |
+
"model.layers.42.mlp.gate_proj.weight": "model-00014-of-00017.safetensors",
|
| 344 |
+
"model.layers.42.mlp.up_proj.weight": "model-00014-of-00017.safetensors",
|
| 345 |
+
"model.layers.42.post_attention_layernorm.weight": "model-00015-of-00017.safetensors",
|
| 346 |
+
"model.layers.42.self_attn.k_proj.weight": "model-00014-of-00017.safetensors",
|
| 347 |
+
"model.layers.42.self_attn.o_proj.weight": "model-00014-of-00017.safetensors",
|
| 348 |
+
"model.layers.42.self_attn.q_proj.weight": "model-00014-of-00017.safetensors",
|
| 349 |
+
"model.layers.42.self_attn.v_proj.weight": "model-00014-of-00017.safetensors",
|
| 350 |
+
"model.layers.43.input_layernorm.weight": "model-00015-of-00017.safetensors",
|
| 351 |
+
"model.layers.43.mlp.down_proj.weight": "model-00015-of-00017.safetensors",
|
| 352 |
+
"model.layers.43.mlp.gate_proj.weight": "model-00015-of-00017.safetensors",
|
| 353 |
+
"model.layers.43.mlp.up_proj.weight": "model-00015-of-00017.safetensors",
|
| 354 |
+
"model.layers.43.post_attention_layernorm.weight": "model-00015-of-00017.safetensors",
|
| 355 |
+
"model.layers.43.self_attn.k_proj.weight": "model-00015-of-00017.safetensors",
|
| 356 |
+
"model.layers.43.self_attn.o_proj.weight": "model-00015-of-00017.safetensors",
|
| 357 |
+
"model.layers.43.self_attn.q_proj.weight": "model-00015-of-00017.safetensors",
|
| 358 |
+
"model.layers.43.self_attn.v_proj.weight": "model-00015-of-00017.safetensors",
|
| 359 |
+
"model.layers.44.input_layernorm.weight": "model-00015-of-00017.safetensors",
|
| 360 |
+
"model.layers.44.mlp.down_proj.weight": "model-00015-of-00017.safetensors",
|
| 361 |
+
"model.layers.44.mlp.gate_proj.weight": "model-00015-of-00017.safetensors",
|
| 362 |
+
"model.layers.44.mlp.up_proj.weight": "model-00015-of-00017.safetensors",
|
| 363 |
+
"model.layers.44.post_attention_layernorm.weight": "model-00015-of-00017.safetensors",
|
| 364 |
+
"model.layers.44.self_attn.k_proj.weight": "model-00015-of-00017.safetensors",
|
| 365 |
+
"model.layers.44.self_attn.o_proj.weight": "model-00015-of-00017.safetensors",
|
| 366 |
+
"model.layers.44.self_attn.q_proj.weight": "model-00015-of-00017.safetensors",
|
| 367 |
+
"model.layers.44.self_attn.v_proj.weight": "model-00015-of-00017.safetensors",
|
| 368 |
+
"model.layers.45.input_layernorm.weight": "model-00015-of-00017.safetensors",
|
| 369 |
+
"model.layers.45.mlp.down_proj.weight": "model-00015-of-00017.safetensors",
|
| 370 |
+
"model.layers.45.mlp.gate_proj.weight": "model-00015-of-00017.safetensors",
|
| 371 |
+
"model.layers.45.mlp.up_proj.weight": "model-00015-of-00017.safetensors",
|
| 372 |
+
"model.layers.45.post_attention_layernorm.weight": "model-00015-of-00017.safetensors",
|
| 373 |
+
"model.layers.45.self_attn.k_proj.weight": "model-00015-of-00017.safetensors",
|
| 374 |
+
"model.layers.45.self_attn.o_proj.weight": "model-00015-of-00017.safetensors",
|
| 375 |
+
"model.layers.45.self_attn.q_proj.weight": "model-00015-of-00017.safetensors",
|
| 376 |
+
"model.layers.45.self_attn.v_proj.weight": "model-00015-of-00017.safetensors",
|
| 377 |
+
"model.layers.46.input_layernorm.weight": "model-00016-of-00017.safetensors",
|
| 378 |
+
"model.layers.46.mlp.down_proj.weight": "model-00016-of-00017.safetensors",
|
| 379 |
+
"model.layers.46.mlp.gate_proj.weight": "model-00016-of-00017.safetensors",
|
| 380 |
+
"model.layers.46.mlp.up_proj.weight": "model-00016-of-00017.safetensors",
|
| 381 |
+
"model.layers.46.post_attention_layernorm.weight": "model-00016-of-00017.safetensors",
|
| 382 |
+
"model.layers.46.self_attn.k_proj.weight": "model-00016-of-00017.safetensors",
|
| 383 |
+
"model.layers.46.self_attn.o_proj.weight": "model-00016-of-00017.safetensors",
|
| 384 |
+
"model.layers.46.self_attn.q_proj.weight": "model-00016-of-00017.safetensors",
|
| 385 |
+
"model.layers.46.self_attn.v_proj.weight": "model-00016-of-00017.safetensors",
|
| 386 |
+
"model.layers.47.input_layernorm.weight": "model-00016-of-00017.safetensors",
|
| 387 |
+
"model.layers.47.mlp.down_proj.weight": "model-00016-of-00017.safetensors",
|
| 388 |
+
"model.layers.47.mlp.gate_proj.weight": "model-00016-of-00017.safetensors",
|
| 389 |
+
"model.layers.47.mlp.up_proj.weight": "model-00016-of-00017.safetensors",
|
| 390 |
+
"model.layers.47.post_attention_layernorm.weight": "model-00016-of-00017.safetensors",
|
| 391 |
+
"model.layers.47.self_attn.k_proj.weight": "model-00016-of-00017.safetensors",
|
| 392 |
+
"model.layers.47.self_attn.o_proj.weight": "model-00016-of-00017.safetensors",
|
| 393 |
+
"model.layers.47.self_attn.q_proj.weight": "model-00016-of-00017.safetensors",
|
| 394 |
+
"model.layers.47.self_attn.v_proj.weight": "model-00016-of-00017.safetensors",
|
| 395 |
+
"model.layers.5.input_layernorm.weight": "model-00003-of-00017.safetensors",
|
| 396 |
+
"model.layers.5.mlp.down_proj.weight": "model-00003-of-00017.safetensors",
|
| 397 |
+
"model.layers.5.mlp.gate_proj.weight": "model-00003-of-00017.safetensors",
|
| 398 |
+
"model.layers.5.mlp.up_proj.weight": "model-00003-of-00017.safetensors",
|
| 399 |
+
"model.layers.5.post_attention_layernorm.weight": "model-00003-of-00017.safetensors",
|
| 400 |
+
"model.layers.5.self_attn.k_proj.weight": "model-00003-of-00017.safetensors",
|
| 401 |
+
"model.layers.5.self_attn.o_proj.weight": "model-00003-of-00017.safetensors",
|
| 402 |
+
"model.layers.5.self_attn.q_proj.weight": "model-00003-of-00017.safetensors",
|
| 403 |
+
"model.layers.5.self_attn.v_proj.weight": "model-00003-of-00017.safetensors",
|
| 404 |
+
"model.layers.6.input_layernorm.weight": "model-00003-of-00017.safetensors",
|
| 405 |
+
"model.layers.6.mlp.down_proj.weight": "model-00003-of-00017.safetensors",
|
| 406 |
+
"model.layers.6.mlp.gate_proj.weight": "model-00003-of-00017.safetensors",
|
| 407 |
+
"model.layers.6.mlp.up_proj.weight": "model-00003-of-00017.safetensors",
|
| 408 |
+
"model.layers.6.post_attention_layernorm.weight": "model-00003-of-00017.safetensors",
|
| 409 |
+
"model.layers.6.self_attn.k_proj.weight": "model-00003-of-00017.safetensors",
|
| 410 |
+
"model.layers.6.self_attn.o_proj.weight": "model-00003-of-00017.safetensors",
|
| 411 |
+
"model.layers.6.self_attn.q_proj.weight": "model-00003-of-00017.safetensors",
|
| 412 |
+
"model.layers.6.self_attn.v_proj.weight": "model-00003-of-00017.safetensors",
|
| 413 |
+
"model.layers.7.input_layernorm.weight": "model-00004-of-00017.safetensors",
|
| 414 |
+
"model.layers.7.mlp.down_proj.weight": "model-00004-of-00017.safetensors",
|
| 415 |
+
"model.layers.7.mlp.gate_proj.weight": "model-00004-of-00017.safetensors",
|
| 416 |
+
"model.layers.7.mlp.up_proj.weight": "model-00004-of-00017.safetensors",
|
| 417 |
+
"model.layers.7.post_attention_layernorm.weight": "model-00004-of-00017.safetensors",
|
| 418 |
+
"model.layers.7.self_attn.k_proj.weight": "model-00004-of-00017.safetensors",
|
| 419 |
+
"model.layers.7.self_attn.o_proj.weight": "model-00004-of-00017.safetensors",
|
| 420 |
+
"model.layers.7.self_attn.q_proj.weight": "model-00004-of-00017.safetensors",
|
| 421 |
+
"model.layers.7.self_attn.v_proj.weight": "model-00004-of-00017.safetensors",
|
| 422 |
+
"model.layers.8.input_layernorm.weight": "model-00004-of-00017.safetensors",
|
| 423 |
+
"model.layers.8.mlp.down_proj.weight": "model-00004-of-00017.safetensors",
|
| 424 |
+
"model.layers.8.mlp.gate_proj.weight": "model-00004-of-00017.safetensors",
|
| 425 |
+
"model.layers.8.mlp.up_proj.weight": "model-00004-of-00017.safetensors",
|
| 426 |
+
"model.layers.8.post_attention_layernorm.weight": "model-00004-of-00017.safetensors",
|
| 427 |
+
"model.layers.8.self_attn.k_proj.weight": "model-00004-of-00017.safetensors",
|
| 428 |
+
"model.layers.8.self_attn.o_proj.weight": "model-00004-of-00017.safetensors",
|
| 429 |
+
"model.layers.8.self_attn.q_proj.weight": "model-00004-of-00017.safetensors",
|
| 430 |
+
"model.layers.8.self_attn.v_proj.weight": "model-00004-of-00017.safetensors",
|
| 431 |
+
"model.layers.9.input_layernorm.weight": "model-00004-of-00017.safetensors",
|
| 432 |
+
"model.layers.9.mlp.down_proj.weight": "model-00004-of-00017.safetensors",
|
| 433 |
+
"model.layers.9.mlp.gate_proj.weight": "model-00004-of-00017.safetensors",
|
| 434 |
+
"model.layers.9.mlp.up_proj.weight": "model-00004-of-00017.safetensors",
|
| 435 |
+
"model.layers.9.post_attention_layernorm.weight": "model-00004-of-00017.safetensors",
|
| 436 |
+
"model.layers.9.self_attn.k_proj.weight": "model-00004-of-00017.safetensors",
|
| 437 |
+
"model.layers.9.self_attn.o_proj.weight": "model-00004-of-00017.safetensors",
|
| 438 |
+
"model.layers.9.self_attn.q_proj.weight": "model-00004-of-00017.safetensors",
|
| 439 |
+
"model.layers.9.self_attn.v_proj.weight": "model-00004-of-00017.safetensors",
|
| 440 |
+
"model.norm.weight": "model-00016-of-00017.safetensors"
|
| 441 |
+
}
|
| 442 |
+
}
|
special_tokens_map.json
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"bos_token": {
|
| 3 |
+
"content": "<s>",
|
| 4 |
+
"lstrip": false,
|
| 5 |
+
"normalized": false,
|
| 6 |
+
"rstrip": false,
|
| 7 |
+
"single_word": false
|
| 8 |
+
},
|
| 9 |
+
"eos_token": {
|
| 10 |
+
"content": "<|im_end|>",
|
| 11 |
+
"lstrip": false,
|
| 12 |
+
"normalized": false,
|
| 13 |
+
"rstrip": false,
|
| 14 |
+
"single_word": false
|
| 15 |
+
},
|
| 16 |
+
"pad_token": {
|
| 17 |
+
"content": "<unk>",
|
| 18 |
+
"lstrip": false,
|
| 19 |
+
"normalized": false,
|
| 20 |
+
"rstrip": false,
|
| 21 |
+
"single_word": false
|
| 22 |
+
},
|
| 23 |
+
"unk_token": {
|
| 24 |
+
"content": "<unk>",
|
| 25 |
+
"lstrip": false,
|
| 26 |
+
"normalized": false,
|
| 27 |
+
"rstrip": false,
|
| 28 |
+
"single_word": false
|
| 29 |
+
}
|
| 30 |
+
}
|
tokenizer.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2e90b85b3e3b3ebfc6b9bafeb954b37f2435eed595738337e53f2a746d23d5a2
|
| 3 |
+
size 37007416
|
tokenizer.model
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:fa490e57cebce5cb1a0a5b1a5d3fa4de05aee53dc3a44791f1c3401db44d802d
|
| 3 |
+
size 4813274
|
tokenizer_config.json
ADDED
|
@@ -0,0 +1,1100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"add_bos_token": true,
|
| 3 |
+
"add_eos_token": false,
|
| 4 |
+
"add_prefix_space": true,
|
| 5 |
+
"added_tokens_decoder": {
|
| 6 |
+
"0": {
|
| 7 |
+
"content": "<unk>",
|
| 8 |
+
"lstrip": false,
|
| 9 |
+
"normalized": false,
|
| 10 |
+
"rstrip": false,
|
| 11 |
+
"single_word": false,
|
| 12 |
+
"special": true
|
| 13 |
+
},
|
| 14 |
+
"1": {
|
| 15 |
+
"content": "<s>",
|
| 16 |
+
"lstrip": false,
|
| 17 |
+
"normalized": false,
|
| 18 |
+
"rstrip": false,
|
| 19 |
+
"single_word": false,
|
| 20 |
+
"special": true
|
| 21 |
+
},
|
| 22 |
+
"2": {
|
| 23 |
+
"content": "</s>",
|
| 24 |
+
"lstrip": false,
|
| 25 |
+
"normalized": false,
|
| 26 |
+
"rstrip": false,
|
| 27 |
+
"single_word": false,
|
| 28 |
+
"special": true
|
| 29 |
+
},
|
| 30 |
+
"3": {
|
| 31 |
+
"content": "<pad>",
|
| 32 |
+
"lstrip": false,
|
| 33 |
+
"normalized": false,
|
| 34 |
+
"rstrip": false,
|
| 35 |
+
"single_word": false,
|
| 36 |
+
"special": true
|
| 37 |
+
},
|
| 38 |
+
"4": {
|
| 39 |
+
"content": "<|im_start|>",
|
| 40 |
+
"lstrip": false,
|
| 41 |
+
"normalized": false,
|
| 42 |
+
"rstrip": false,
|
| 43 |
+
"single_word": false,
|
| 44 |
+
"special": true
|
| 45 |
+
},
|
| 46 |
+
"5": {
|
| 47 |
+
"content": "<|im_end|>",
|
| 48 |
+
"lstrip": false,
|
| 49 |
+
"normalized": false,
|
| 50 |
+
"rstrip": false,
|
| 51 |
+
"single_word": false,
|
| 52 |
+
"special": true
|
| 53 |
+
},
|
| 54 |
+
"6": {
|
| 55 |
+
"content": "<|reserved_token_1|>",
|
| 56 |
+
"lstrip": false,
|
| 57 |
+
"normalized": false,
|
| 58 |
+
"rstrip": false,
|
| 59 |
+
"single_word": false,
|
| 60 |
+
"special": true
|
| 61 |
+
},
|
| 62 |
+
"7": {
|
| 63 |
+
"content": "<|reserved_token_2|>",
|
| 64 |
+
"lstrip": false,
|
| 65 |
+
"normalized": false,
|
| 66 |
+
"rstrip": false,
|
| 67 |
+
"single_word": false,
|
| 68 |
+
"special": true
|
| 69 |
+
},
|
| 70 |
+
"8": {
|
| 71 |
+
"content": "<|reserved_token_3|>",
|
| 72 |
+
"lstrip": false,
|
| 73 |
+
"normalized": false,
|
| 74 |
+
"rstrip": false,
|
| 75 |
+
"single_word": false,
|
| 76 |
+
"special": true
|
| 77 |
+
},
|
| 78 |
+
"9": {
|
| 79 |
+
"content": "<|reserved_token_4|>",
|
| 80 |
+
"lstrip": false,
|
| 81 |
+
"normalized": false,
|
| 82 |
+
"rstrip": false,
|
| 83 |
+
"single_word": false,
|
| 84 |
+
"special": true
|
| 85 |
+
},
|
| 86 |
+
"10": {
|
| 87 |
+
"content": "<|reserved_token_5|>",
|
| 88 |
+
"lstrip": false,
|
| 89 |
+
"normalized": false,
|
| 90 |
+
"rstrip": false,
|
| 91 |
+
"single_word": false,
|
| 92 |
+
"special": true
|
| 93 |
+
},
|
| 94 |
+
"11": {
|
| 95 |
+
"content": "<|reserved_token_6|>",
|
| 96 |
+
"lstrip": false,
|
| 97 |
+
"normalized": false,
|
| 98 |
+
"rstrip": false,
|
| 99 |
+
"single_word": false,
|
| 100 |
+
"special": true
|
| 101 |
+
},
|
| 102 |
+
"12": {
|
| 103 |
+
"content": "<|reserved_token_7|>",
|
| 104 |
+
"lstrip": false,
|
| 105 |
+
"normalized": false,
|
| 106 |
+
"rstrip": false,
|
| 107 |
+
"single_word": false,
|
| 108 |
+
"special": true
|
| 109 |
+
},
|
| 110 |
+
"13": {
|
| 111 |
+
"content": "<|reserved_token_8|>",
|
| 112 |
+
"lstrip": false,
|
| 113 |
+
"normalized": false,
|
| 114 |
+
"rstrip": false,
|
| 115 |
+
"single_word": false,
|
| 116 |
+
"special": true
|
| 117 |
+
},
|
| 118 |
+
"14": {
|
| 119 |
+
"content": "<|reserved_token_9|>",
|
| 120 |
+
"lstrip": false,
|
| 121 |
+
"normalized": false,
|
| 122 |
+
"rstrip": false,
|
| 123 |
+
"single_word": false,
|
| 124 |
+
"special": true
|
| 125 |
+
},
|
| 126 |
+
"15": {
|
| 127 |
+
"content": "<|reserved_token_10|>",
|
| 128 |
+
"lstrip": false,
|
| 129 |
+
"normalized": false,
|
| 130 |
+
"rstrip": false,
|
| 131 |
+
"single_word": false,
|
| 132 |
+
"special": true
|
| 133 |
+
},
|
| 134 |
+
"16": {
|
| 135 |
+
"content": "<|reserved_token_11|>",
|
| 136 |
+
"lstrip": false,
|
| 137 |
+
"normalized": false,
|
| 138 |
+
"rstrip": false,
|
| 139 |
+
"single_word": false,
|
| 140 |
+
"special": true
|
| 141 |
+
},
|
| 142 |
+
"17": {
|
| 143 |
+
"content": "<|reserved_token_12|>",
|
| 144 |
+
"lstrip": false,
|
| 145 |
+
"normalized": false,
|
| 146 |
+
"rstrip": false,
|
| 147 |
+
"single_word": false,
|
| 148 |
+
"special": true
|
| 149 |
+
},
|
| 150 |
+
"18": {
|
| 151 |
+
"content": "<|reserved_token_13|>",
|
| 152 |
+
"lstrip": false,
|
| 153 |
+
"normalized": false,
|
| 154 |
+
"rstrip": false,
|
| 155 |
+
"single_word": false,
|
| 156 |
+
"special": true
|
| 157 |
+
},
|
| 158 |
+
"19": {
|
| 159 |
+
"content": "<|reserved_token_14|>",
|
| 160 |
+
"lstrip": false,
|
| 161 |
+
"normalized": false,
|
| 162 |
+
"rstrip": false,
|
| 163 |
+
"single_word": false,
|
| 164 |
+
"special": true
|
| 165 |
+
},
|
| 166 |
+
"20": {
|
| 167 |
+
"content": "<|reserved_token_15|>",
|
| 168 |
+
"lstrip": false,
|
| 169 |
+
"normalized": false,
|
| 170 |
+
"rstrip": false,
|
| 171 |
+
"single_word": false,
|
| 172 |
+
"special": true
|
| 173 |
+
},
|
| 174 |
+
"21": {
|
| 175 |
+
"content": "<|reserved_token_16|>",
|
| 176 |
+
"lstrip": false,
|
| 177 |
+
"normalized": false,
|
| 178 |
+
"rstrip": false,
|
| 179 |
+
"single_word": false,
|
| 180 |
+
"special": true
|
| 181 |
+
},
|
| 182 |
+
"22": {
|
| 183 |
+
"content": "<|reserved_token_17|>",
|
| 184 |
+
"lstrip": false,
|
| 185 |
+
"normalized": false,
|
| 186 |
+
"rstrip": false,
|
| 187 |
+
"single_word": false,
|
| 188 |
+
"special": true
|
| 189 |
+
},
|
| 190 |
+
"23": {
|
| 191 |
+
"content": "<|reserved_token_18|>",
|
| 192 |
+
"lstrip": false,
|
| 193 |
+
"normalized": false,
|
| 194 |
+
"rstrip": false,
|
| 195 |
+
"single_word": false,
|
| 196 |
+
"special": true
|
| 197 |
+
},
|
| 198 |
+
"24": {
|
| 199 |
+
"content": "<|reserved_token_19|>",
|
| 200 |
+
"lstrip": false,
|
| 201 |
+
"normalized": false,
|
| 202 |
+
"rstrip": false,
|
| 203 |
+
"single_word": false,
|
| 204 |
+
"special": true
|
| 205 |
+
},
|
| 206 |
+
"25": {
|
| 207 |
+
"content": "<|reserved_token_20|>",
|
| 208 |
+
"lstrip": false,
|
| 209 |
+
"normalized": false,
|
| 210 |
+
"rstrip": false,
|
| 211 |
+
"single_word": false,
|
| 212 |
+
"special": true
|
| 213 |
+
},
|
| 214 |
+
"26": {
|
| 215 |
+
"content": "<|reserved_token_21|>",
|
| 216 |
+
"lstrip": false,
|
| 217 |
+
"normalized": false,
|
| 218 |
+
"rstrip": false,
|
| 219 |
+
"single_word": false,
|
| 220 |
+
"special": true
|
| 221 |
+
},
|
| 222 |
+
"27": {
|
| 223 |
+
"content": "<|reserved_token_22|>",
|
| 224 |
+
"lstrip": false,
|
| 225 |
+
"normalized": false,
|
| 226 |
+
"rstrip": false,
|
| 227 |
+
"single_word": false,
|
| 228 |
+
"special": true
|
| 229 |
+
},
|
| 230 |
+
"28": {
|
| 231 |
+
"content": "<|reserved_token_23|>",
|
| 232 |
+
"lstrip": false,
|
| 233 |
+
"normalized": false,
|
| 234 |
+
"rstrip": false,
|
| 235 |
+
"single_word": false,
|
| 236 |
+
"special": true
|
| 237 |
+
},
|
| 238 |
+
"29": {
|
| 239 |
+
"content": "<|reserved_token_24|>",
|
| 240 |
+
"lstrip": false,
|
| 241 |
+
"normalized": false,
|
| 242 |
+
"rstrip": false,
|
| 243 |
+
"single_word": false,
|
| 244 |
+
"special": true
|
| 245 |
+
},
|
| 246 |
+
"30": {
|
| 247 |
+
"content": "<|reserved_token_25|>",
|
| 248 |
+
"lstrip": false,
|
| 249 |
+
"normalized": false,
|
| 250 |
+
"rstrip": false,
|
| 251 |
+
"single_word": false,
|
| 252 |
+
"special": true
|
| 253 |
+
},
|
| 254 |
+
"31": {
|
| 255 |
+
"content": "<|reserved_token_26|>",
|
| 256 |
+
"lstrip": false,
|
| 257 |
+
"normalized": false,
|
| 258 |
+
"rstrip": false,
|
| 259 |
+
"single_word": false,
|
| 260 |
+
"special": true
|
| 261 |
+
},
|
| 262 |
+
"32": {
|
| 263 |
+
"content": "<|reserved_token_27|>",
|
| 264 |
+
"lstrip": false,
|
| 265 |
+
"normalized": false,
|
| 266 |
+
"rstrip": false,
|
| 267 |
+
"single_word": false,
|
| 268 |
+
"special": true
|
| 269 |
+
},
|
| 270 |
+
"33": {
|
| 271 |
+
"content": "<|reserved_token_28|>",
|
| 272 |
+
"lstrip": false,
|
| 273 |
+
"normalized": false,
|
| 274 |
+
"rstrip": false,
|
| 275 |
+
"single_word": false,
|
| 276 |
+
"special": true
|
| 277 |
+
},
|
| 278 |
+
"34": {
|
| 279 |
+
"content": "<|reserved_token_29|>",
|
| 280 |
+
"lstrip": false,
|
| 281 |
+
"normalized": false,
|
| 282 |
+
"rstrip": false,
|
| 283 |
+
"single_word": false,
|
| 284 |
+
"special": true
|
| 285 |
+
},
|
| 286 |
+
"35": {
|
| 287 |
+
"content": "<|reserved_token_30|>",
|
| 288 |
+
"lstrip": false,
|
| 289 |
+
"normalized": false,
|
| 290 |
+
"rstrip": false,
|
| 291 |
+
"single_word": false,
|
| 292 |
+
"special": true
|
| 293 |
+
},
|
| 294 |
+
"36": {
|
| 295 |
+
"content": "<|reserved_token_31|>",
|
| 296 |
+
"lstrip": false,
|
| 297 |
+
"normalized": false,
|
| 298 |
+
"rstrip": false,
|
| 299 |
+
"single_word": false,
|
| 300 |
+
"special": true
|
| 301 |
+
},
|
| 302 |
+
"37": {
|
| 303 |
+
"content": "<|reserved_token_32|>",
|
| 304 |
+
"lstrip": false,
|
| 305 |
+
"normalized": false,
|
| 306 |
+
"rstrip": false,
|
| 307 |
+
"single_word": false,
|
| 308 |
+
"special": true
|
| 309 |
+
},
|
| 310 |
+
"38": {
|
| 311 |
+
"content": "<|reserved_token_33|>",
|
| 312 |
+
"lstrip": false,
|
| 313 |
+
"normalized": false,
|
| 314 |
+
"rstrip": false,
|
| 315 |
+
"single_word": false,
|
| 316 |
+
"special": true
|
| 317 |
+
},
|
| 318 |
+
"39": {
|
| 319 |
+
"content": "<|reserved_token_34|>",
|
| 320 |
+
"lstrip": false,
|
| 321 |
+
"normalized": false,
|
| 322 |
+
"rstrip": false,
|
| 323 |
+
"single_word": false,
|
| 324 |
+
"special": true
|
| 325 |
+
},
|
| 326 |
+
"40": {
|
| 327 |
+
"content": "<|reserved_token_35|>",
|
| 328 |
+
"lstrip": false,
|
| 329 |
+
"normalized": false,
|
| 330 |
+
"rstrip": false,
|
| 331 |
+
"single_word": false,
|
| 332 |
+
"special": true
|
| 333 |
+
},
|
| 334 |
+
"41": {
|
| 335 |
+
"content": "<|reserved_token_36|>",
|
| 336 |
+
"lstrip": false,
|
| 337 |
+
"normalized": false,
|
| 338 |
+
"rstrip": false,
|
| 339 |
+
"single_word": false,
|
| 340 |
+
"special": true
|
| 341 |
+
},
|
| 342 |
+
"42": {
|
| 343 |
+
"content": "<|reserved_token_37|>",
|
| 344 |
+
"lstrip": false,
|
| 345 |
+
"normalized": false,
|
| 346 |
+
"rstrip": false,
|
| 347 |
+
"single_word": false,
|
| 348 |
+
"special": true
|
| 349 |
+
},
|
| 350 |
+
"43": {
|
| 351 |
+
"content": "<|reserved_token_38|>",
|
| 352 |
+
"lstrip": false,
|
| 353 |
+
"normalized": false,
|
| 354 |
+
"rstrip": false,
|
| 355 |
+
"single_word": false,
|
| 356 |
+
"special": true
|
| 357 |
+
},
|
| 358 |
+
"44": {
|
| 359 |
+
"content": "<|reserved_token_39|>",
|
| 360 |
+
"lstrip": false,
|
| 361 |
+
"normalized": false,
|
| 362 |
+
"rstrip": false,
|
| 363 |
+
"single_word": false,
|
| 364 |
+
"special": true
|
| 365 |
+
},
|
| 366 |
+
"45": {
|
| 367 |
+
"content": "<|reserved_token_40|>",
|
| 368 |
+
"lstrip": false,
|
| 369 |
+
"normalized": false,
|
| 370 |
+
"rstrip": false,
|
| 371 |
+
"single_word": false,
|
| 372 |
+
"special": true
|
| 373 |
+
},
|
| 374 |
+
"46": {
|
| 375 |
+
"content": "<|reserved_token_41|>",
|
| 376 |
+
"lstrip": false,
|
| 377 |
+
"normalized": false,
|
| 378 |
+
"rstrip": false,
|
| 379 |
+
"single_word": false,
|
| 380 |
+
"special": true
|
| 381 |
+
},
|
| 382 |
+
"47": {
|
| 383 |
+
"content": "<|reserved_token_42|>",
|
| 384 |
+
"lstrip": false,
|
| 385 |
+
"normalized": false,
|
| 386 |
+
"rstrip": false,
|
| 387 |
+
"single_word": false,
|
| 388 |
+
"special": true
|
| 389 |
+
},
|
| 390 |
+
"48": {
|
| 391 |
+
"content": "<|reserved_token_43|>",
|
| 392 |
+
"lstrip": false,
|
| 393 |
+
"normalized": false,
|
| 394 |
+
"rstrip": false,
|
| 395 |
+
"single_word": false,
|
| 396 |
+
"special": true
|
| 397 |
+
},
|
| 398 |
+
"49": {
|
| 399 |
+
"content": "<|reserved_token_44|>",
|
| 400 |
+
"lstrip": false,
|
| 401 |
+
"normalized": false,
|
| 402 |
+
"rstrip": false,
|
| 403 |
+
"single_word": false,
|
| 404 |
+
"special": true
|
| 405 |
+
},
|
| 406 |
+
"50": {
|
| 407 |
+
"content": "<|reserved_token_45|>",
|
| 408 |
+
"lstrip": false,
|
| 409 |
+
"normalized": false,
|
| 410 |
+
"rstrip": false,
|
| 411 |
+
"single_word": false,
|
| 412 |
+
"special": true
|
| 413 |
+
},
|
| 414 |
+
"51": {
|
| 415 |
+
"content": "<|reserved_token_46|>",
|
| 416 |
+
"lstrip": false,
|
| 417 |
+
"normalized": false,
|
| 418 |
+
"rstrip": false,
|
| 419 |
+
"single_word": false,
|
| 420 |
+
"special": true
|
| 421 |
+
},
|
| 422 |
+
"52": {
|
| 423 |
+
"content": "<|reserved_token_47|>",
|
| 424 |
+
"lstrip": false,
|
| 425 |
+
"normalized": false,
|
| 426 |
+
"rstrip": false,
|
| 427 |
+
"single_word": false,
|
| 428 |
+
"special": true
|
| 429 |
+
},
|
| 430 |
+
"53": {
|
| 431 |
+
"content": "<|reserved_token_48|>",
|
| 432 |
+
"lstrip": false,
|
| 433 |
+
"normalized": false,
|
| 434 |
+
"rstrip": false,
|
| 435 |
+
"single_word": false,
|
| 436 |
+
"special": true
|
| 437 |
+
},
|
| 438 |
+
"54": {
|
| 439 |
+
"content": "<|reserved_token_49|>",
|
| 440 |
+
"lstrip": false,
|
| 441 |
+
"normalized": false,
|
| 442 |
+
"rstrip": false,
|
| 443 |
+
"single_word": false,
|
| 444 |
+
"special": true
|
| 445 |
+
},
|
| 446 |
+
"55": {
|
| 447 |
+
"content": "<|reserved_token_50|>",
|
| 448 |
+
"lstrip": false,
|
| 449 |
+
"normalized": false,
|
| 450 |
+
"rstrip": false,
|
| 451 |
+
"single_word": false,
|
| 452 |
+
"special": true
|
| 453 |
+
},
|
| 454 |
+
"56": {
|
| 455 |
+
"content": "<|reserved_token_51|>",
|
| 456 |
+
"lstrip": false,
|
| 457 |
+
"normalized": false,
|
| 458 |
+
"rstrip": false,
|
| 459 |
+
"single_word": false,
|
| 460 |
+
"special": true
|
| 461 |
+
},
|
| 462 |
+
"57": {
|
| 463 |
+
"content": "<|reserved_token_52|>",
|
| 464 |
+
"lstrip": false,
|
| 465 |
+
"normalized": false,
|
| 466 |
+
"rstrip": false,
|
| 467 |
+
"single_word": false,
|
| 468 |
+
"special": true
|
| 469 |
+
},
|
| 470 |
+
"58": {
|
| 471 |
+
"content": "<|reserved_token_53|>",
|
| 472 |
+
"lstrip": false,
|
| 473 |
+
"normalized": false,
|
| 474 |
+
"rstrip": false,
|
| 475 |
+
"single_word": false,
|
| 476 |
+
"special": true
|
| 477 |
+
},
|
| 478 |
+
"59": {
|
| 479 |
+
"content": "<|reserved_token_54|>",
|
| 480 |
+
"lstrip": false,
|
| 481 |
+
"normalized": false,
|
| 482 |
+
"rstrip": false,
|
| 483 |
+
"single_word": false,
|
| 484 |
+
"special": true
|
| 485 |
+
},
|
| 486 |
+
"60": {
|
| 487 |
+
"content": "<|reserved_token_55|>",
|
| 488 |
+
"lstrip": false,
|
| 489 |
+
"normalized": false,
|
| 490 |
+
"rstrip": false,
|
| 491 |
+
"single_word": false,
|
| 492 |
+
"special": true
|
| 493 |
+
},
|
| 494 |
+
"61": {
|
| 495 |
+
"content": "<|reserved_token_56|>",
|
| 496 |
+
"lstrip": false,
|
| 497 |
+
"normalized": false,
|
| 498 |
+
"rstrip": false,
|
| 499 |
+
"single_word": false,
|
| 500 |
+
"special": true
|
| 501 |
+
},
|
| 502 |
+
"62": {
|
| 503 |
+
"content": "<|reserved_token_57|>",
|
| 504 |
+
"lstrip": false,
|
| 505 |
+
"normalized": false,
|
| 506 |
+
"rstrip": false,
|
| 507 |
+
"single_word": false,
|
| 508 |
+
"special": true
|
| 509 |
+
},
|
| 510 |
+
"63": {
|
| 511 |
+
"content": "<|reserved_token_58|>",
|
| 512 |
+
"lstrip": false,
|
| 513 |
+
"normalized": false,
|
| 514 |
+
"rstrip": false,
|
| 515 |
+
"single_word": false,
|
| 516 |
+
"special": true
|
| 517 |
+
},
|
| 518 |
+
"64": {
|
| 519 |
+
"content": "<|reserved_token_59|>",
|
| 520 |
+
"lstrip": false,
|
| 521 |
+
"normalized": false,
|
| 522 |
+
"rstrip": false,
|
| 523 |
+
"single_word": false,
|
| 524 |
+
"special": true
|
| 525 |
+
},
|
| 526 |
+
"65": {
|
| 527 |
+
"content": "<|reserved_token_60|>",
|
| 528 |
+
"lstrip": false,
|
| 529 |
+
"normalized": false,
|
| 530 |
+
"rstrip": false,
|
| 531 |
+
"single_word": false,
|
| 532 |
+
"special": true
|
| 533 |
+
},
|
| 534 |
+
"66": {
|
| 535 |
+
"content": "<|reserved_token_61|>",
|
| 536 |
+
"lstrip": false,
|
| 537 |
+
"normalized": false,
|
| 538 |
+
"rstrip": false,
|
| 539 |
+
"single_word": false,
|
| 540 |
+
"special": true
|
| 541 |
+
},
|
| 542 |
+
"67": {
|
| 543 |
+
"content": "<|reserved_token_62|>",
|
| 544 |
+
"lstrip": false,
|
| 545 |
+
"normalized": false,
|
| 546 |
+
"rstrip": false,
|
| 547 |
+
"single_word": false,
|
| 548 |
+
"special": true
|
| 549 |
+
},
|
| 550 |
+
"68": {
|
| 551 |
+
"content": "<|reserved_token_63|>",
|
| 552 |
+
"lstrip": false,
|
| 553 |
+
"normalized": false,
|
| 554 |
+
"rstrip": false,
|
| 555 |
+
"single_word": false,
|
| 556 |
+
"special": true
|
| 557 |
+
},
|
| 558 |
+
"69": {
|
| 559 |
+
"content": "<|reserved_token_64|>",
|
| 560 |
+
"lstrip": false,
|
| 561 |
+
"normalized": false,
|
| 562 |
+
"rstrip": false,
|
| 563 |
+
"single_word": false,
|
| 564 |
+
"special": true
|
| 565 |
+
},
|
| 566 |
+
"70": {
|
| 567 |
+
"content": "<|reserved_token_65|>",
|
| 568 |
+
"lstrip": false,
|
| 569 |
+
"normalized": false,
|
| 570 |
+
"rstrip": false,
|
| 571 |
+
"single_word": false,
|
| 572 |
+
"special": true
|
| 573 |
+
},
|
| 574 |
+
"71": {
|
| 575 |
+
"content": "<|reserved_token_66|>",
|
| 576 |
+
"lstrip": false,
|
| 577 |
+
"normalized": false,
|
| 578 |
+
"rstrip": false,
|
| 579 |
+
"single_word": false,
|
| 580 |
+
"special": true
|
| 581 |
+
},
|
| 582 |
+
"72": {
|
| 583 |
+
"content": "<|reserved_token_67|>",
|
| 584 |
+
"lstrip": false,
|
| 585 |
+
"normalized": false,
|
| 586 |
+
"rstrip": false,
|
| 587 |
+
"single_word": false,
|
| 588 |
+
"special": true
|
| 589 |
+
},
|
| 590 |
+
"73": {
|
| 591 |
+
"content": "<|reserved_token_68|>",
|
| 592 |
+
"lstrip": false,
|
| 593 |
+
"normalized": false,
|
| 594 |
+
"rstrip": false,
|
| 595 |
+
"single_word": false,
|
| 596 |
+
"special": true
|
| 597 |
+
},
|
| 598 |
+
"74": {
|
| 599 |
+
"content": "<|reserved_token_69|>",
|
| 600 |
+
"lstrip": false,
|
| 601 |
+
"normalized": false,
|
| 602 |
+
"rstrip": false,
|
| 603 |
+
"single_word": false,
|
| 604 |
+
"special": true
|
| 605 |
+
},
|
| 606 |
+
"75": {
|
| 607 |
+
"content": "<|reserved_token_70|>",
|
| 608 |
+
"lstrip": false,
|
| 609 |
+
"normalized": false,
|
| 610 |
+
"rstrip": false,
|
| 611 |
+
"single_word": false,
|
| 612 |
+
"special": true
|
| 613 |
+
},
|
| 614 |
+
"76": {
|
| 615 |
+
"content": "<|reserved_token_71|>",
|
| 616 |
+
"lstrip": false,
|
| 617 |
+
"normalized": false,
|
| 618 |
+
"rstrip": false,
|
| 619 |
+
"single_word": false,
|
| 620 |
+
"special": true
|
| 621 |
+
},
|
| 622 |
+
"77": {
|
| 623 |
+
"content": "<|reserved_token_72|>",
|
| 624 |
+
"lstrip": false,
|
| 625 |
+
"normalized": false,
|
| 626 |
+
"rstrip": false,
|
| 627 |
+
"single_word": false,
|
| 628 |
+
"special": true
|
| 629 |
+
},
|
| 630 |
+
"78": {
|
| 631 |
+
"content": "<|reserved_token_73|>",
|
| 632 |
+
"lstrip": false,
|
| 633 |
+
"normalized": false,
|
| 634 |
+
"rstrip": false,
|
| 635 |
+
"single_word": false,
|
| 636 |
+
"special": true
|
| 637 |
+
},
|
| 638 |
+
"79": {
|
| 639 |
+
"content": "<|reserved_token_74|>",
|
| 640 |
+
"lstrip": false,
|
| 641 |
+
"normalized": false,
|
| 642 |
+
"rstrip": false,
|
| 643 |
+
"single_word": false,
|
| 644 |
+
"special": true
|
| 645 |
+
},
|
| 646 |
+
"80": {
|
| 647 |
+
"content": "<|reserved_token_75|>",
|
| 648 |
+
"lstrip": false,
|
| 649 |
+
"normalized": false,
|
| 650 |
+
"rstrip": false,
|
| 651 |
+
"single_word": false,
|
| 652 |
+
"special": true
|
| 653 |
+
},
|
| 654 |
+
"81": {
|
| 655 |
+
"content": "<|reserved_token_76|>",
|
| 656 |
+
"lstrip": false,
|
| 657 |
+
"normalized": false,
|
| 658 |
+
"rstrip": false,
|
| 659 |
+
"single_word": false,
|
| 660 |
+
"special": true
|
| 661 |
+
},
|
| 662 |
+
"82": {
|
| 663 |
+
"content": "<|reserved_token_77|>",
|
| 664 |
+
"lstrip": false,
|
| 665 |
+
"normalized": false,
|
| 666 |
+
"rstrip": false,
|
| 667 |
+
"single_word": false,
|
| 668 |
+
"special": true
|
| 669 |
+
},
|
| 670 |
+
"83": {
|
| 671 |
+
"content": "<|reserved_token_78|>",
|
| 672 |
+
"lstrip": false,
|
| 673 |
+
"normalized": false,
|
| 674 |
+
"rstrip": false,
|
| 675 |
+
"single_word": false,
|
| 676 |
+
"special": true
|
| 677 |
+
},
|
| 678 |
+
"84": {
|
| 679 |
+
"content": "<|reserved_token_79|>",
|
| 680 |
+
"lstrip": false,
|
| 681 |
+
"normalized": false,
|
| 682 |
+
"rstrip": false,
|
| 683 |
+
"single_word": false,
|
| 684 |
+
"special": true
|
| 685 |
+
},
|
| 686 |
+
"85": {
|
| 687 |
+
"content": "<|reserved_token_80|>",
|
| 688 |
+
"lstrip": false,
|
| 689 |
+
"normalized": false,
|
| 690 |
+
"rstrip": false,
|
| 691 |
+
"single_word": false,
|
| 692 |
+
"special": true
|
| 693 |
+
},
|
| 694 |
+
"86": {
|
| 695 |
+
"content": "<|reserved_token_81|>",
|
| 696 |
+
"lstrip": false,
|
| 697 |
+
"normalized": false,
|
| 698 |
+
"rstrip": false,
|
| 699 |
+
"single_word": false,
|
| 700 |
+
"special": true
|
| 701 |
+
},
|
| 702 |
+
"87": {
|
| 703 |
+
"content": "<|reserved_token_82|>",
|
| 704 |
+
"lstrip": false,
|
| 705 |
+
"normalized": false,
|
| 706 |
+
"rstrip": false,
|
| 707 |
+
"single_word": false,
|
| 708 |
+
"special": true
|
| 709 |
+
},
|
| 710 |
+
"88": {
|
| 711 |
+
"content": "<|reserved_token_83|>",
|
| 712 |
+
"lstrip": false,
|
| 713 |
+
"normalized": false,
|
| 714 |
+
"rstrip": false,
|
| 715 |
+
"single_word": false,
|
| 716 |
+
"special": true
|
| 717 |
+
},
|
| 718 |
+
"89": {
|
| 719 |
+
"content": "<|reserved_token_84|>",
|
| 720 |
+
"lstrip": false,
|
| 721 |
+
"normalized": false,
|
| 722 |
+
"rstrip": false,
|
| 723 |
+
"single_word": false,
|
| 724 |
+
"special": true
|
| 725 |
+
},
|
| 726 |
+
"90": {
|
| 727 |
+
"content": "<|reserved_token_85|>",
|
| 728 |
+
"lstrip": false,
|
| 729 |
+
"normalized": false,
|
| 730 |
+
"rstrip": false,
|
| 731 |
+
"single_word": false,
|
| 732 |
+
"special": true
|
| 733 |
+
},
|
| 734 |
+
"91": {
|
| 735 |
+
"content": "<|reserved_token_86|>",
|
| 736 |
+
"lstrip": false,
|
| 737 |
+
"normalized": false,
|
| 738 |
+
"rstrip": false,
|
| 739 |
+
"single_word": false,
|
| 740 |
+
"special": true
|
| 741 |
+
},
|
| 742 |
+
"92": {
|
| 743 |
+
"content": "<|reserved_token_87|>",
|
| 744 |
+
"lstrip": false,
|
| 745 |
+
"normalized": false,
|
| 746 |
+
"rstrip": false,
|
| 747 |
+
"single_word": false,
|
| 748 |
+
"special": true
|
| 749 |
+
},
|
| 750 |
+
"93": {
|
| 751 |
+
"content": "<|reserved_token_88|>",
|
| 752 |
+
"lstrip": false,
|
| 753 |
+
"normalized": false,
|
| 754 |
+
"rstrip": false,
|
| 755 |
+
"single_word": false,
|
| 756 |
+
"special": true
|
| 757 |
+
},
|
| 758 |
+
"94": {
|
| 759 |
+
"content": "<|reserved_token_89|>",
|
| 760 |
+
"lstrip": false,
|
| 761 |
+
"normalized": false,
|
| 762 |
+
"rstrip": false,
|
| 763 |
+
"single_word": false,
|
| 764 |
+
"special": true
|
| 765 |
+
},
|
| 766 |
+
"95": {
|
| 767 |
+
"content": "<|reserved_token_90|>",
|
| 768 |
+
"lstrip": false,
|
| 769 |
+
"normalized": false,
|
| 770 |
+
"rstrip": false,
|
| 771 |
+
"single_word": false,
|
| 772 |
+
"special": true
|
| 773 |
+
},
|
| 774 |
+
"96": {
|
| 775 |
+
"content": "<|reserved_token_91|>",
|
| 776 |
+
"lstrip": false,
|
| 777 |
+
"normalized": false,
|
| 778 |
+
"rstrip": false,
|
| 779 |
+
"single_word": false,
|
| 780 |
+
"special": true
|
| 781 |
+
},
|
| 782 |
+
"97": {
|
| 783 |
+
"content": "<|reserved_token_92|>",
|
| 784 |
+
"lstrip": false,
|
| 785 |
+
"normalized": false,
|
| 786 |
+
"rstrip": false,
|
| 787 |
+
"single_word": false,
|
| 788 |
+
"special": true
|
| 789 |
+
},
|
| 790 |
+
"98": {
|
| 791 |
+
"content": "<|reserved_token_93|>",
|
| 792 |
+
"lstrip": false,
|
| 793 |
+
"normalized": false,
|
| 794 |
+
"rstrip": false,
|
| 795 |
+
"single_word": false,
|
| 796 |
+
"special": true
|
| 797 |
+
},
|
| 798 |
+
"99": {
|
| 799 |
+
"content": "<|reserved_token_94|>",
|
| 800 |
+
"lstrip": false,
|
| 801 |
+
"normalized": false,
|
| 802 |
+
"rstrip": false,
|
| 803 |
+
"single_word": false,
|
| 804 |
+
"special": true
|
| 805 |
+
},
|
| 806 |
+
"100": {
|
| 807 |
+
"content": "<|reserved_token_95|>",
|
| 808 |
+
"lstrip": false,
|
| 809 |
+
"normalized": false,
|
| 810 |
+
"rstrip": false,
|
| 811 |
+
"single_word": false,
|
| 812 |
+
"special": true
|
| 813 |
+
},
|
| 814 |
+
"101": {
|
| 815 |
+
"content": "<|reserved_token_96|>",
|
| 816 |
+
"lstrip": false,
|
| 817 |
+
"normalized": false,
|
| 818 |
+
"rstrip": false,
|
| 819 |
+
"single_word": false,
|
| 820 |
+
"special": true
|
| 821 |
+
},
|
| 822 |
+
"102": {
|
| 823 |
+
"content": "<|reserved_token_97|>",
|
| 824 |
+
"lstrip": false,
|
| 825 |
+
"normalized": false,
|
| 826 |
+
"rstrip": false,
|
| 827 |
+
"single_word": false,
|
| 828 |
+
"special": true
|
| 829 |
+
},
|
| 830 |
+
"103": {
|
| 831 |
+
"content": "<|reserved_token_98|>",
|
| 832 |
+
"lstrip": false,
|
| 833 |
+
"normalized": false,
|
| 834 |
+
"rstrip": false,
|
| 835 |
+
"single_word": false,
|
| 836 |
+
"special": true
|
| 837 |
+
},
|
| 838 |
+
"104": {
|
| 839 |
+
"content": "\\r",
|
| 840 |
+
"lstrip": false,
|
| 841 |
+
"normalized": false,
|
| 842 |
+
"rstrip": false,
|
| 843 |
+
"single_word": false,
|
| 844 |
+
"special": false
|
| 845 |
+
},
|
| 846 |
+
"105": {
|
| 847 |
+
"content": "ββ",
|
| 848 |
+
"lstrip": false,
|
| 849 |
+
"normalized": false,
|
| 850 |
+
"rstrip": false,
|
| 851 |
+
"single_word": false,
|
| 852 |
+
"special": false
|
| 853 |
+
},
|
| 854 |
+
"106": {
|
| 855 |
+
"content": "βββ",
|
| 856 |
+
"lstrip": false,
|
| 857 |
+
"normalized": false,
|
| 858 |
+
"rstrip": false,
|
| 859 |
+
"single_word": false,
|
| 860 |
+
"special": false
|
| 861 |
+
},
|
| 862 |
+
"107": {
|
| 863 |
+
"content": "ββββ",
|
| 864 |
+
"lstrip": false,
|
| 865 |
+
"normalized": false,
|
| 866 |
+
"rstrip": false,
|
| 867 |
+
"single_word": false,
|
| 868 |
+
"special": false
|
| 869 |
+
},
|
| 870 |
+
"108": {
|
| 871 |
+
"content": "βββββ",
|
| 872 |
+
"lstrip": false,
|
| 873 |
+
"normalized": false,
|
| 874 |
+
"rstrip": false,
|
| 875 |
+
"single_word": false,
|
| 876 |
+
"special": false
|
| 877 |
+
},
|
| 878 |
+
"109": {
|
| 879 |
+
"content": "ββββββ",
|
| 880 |
+
"lstrip": false,
|
| 881 |
+
"normalized": false,
|
| 882 |
+
"rstrip": false,
|
| 883 |
+
"single_word": false,
|
| 884 |
+
"special": false
|
| 885 |
+
},
|
| 886 |
+
"110": {
|
| 887 |
+
"content": "βββββββ",
|
| 888 |
+
"lstrip": false,
|
| 889 |
+
"normalized": false,
|
| 890 |
+
"rstrip": false,
|
| 891 |
+
"single_word": false,
|
| 892 |
+
"special": false
|
| 893 |
+
},
|
| 894 |
+
"111": {
|
| 895 |
+
"content": "ββββββββ",
|
| 896 |
+
"lstrip": false,
|
| 897 |
+
"normalized": false,
|
| 898 |
+
"rstrip": false,
|
| 899 |
+
"single_word": false,
|
| 900 |
+
"special": false
|
| 901 |
+
},
|
| 902 |
+
"112": {
|
| 903 |
+
"content": "βββββββββ",
|
| 904 |
+
"lstrip": false,
|
| 905 |
+
"normalized": false,
|
| 906 |
+
"rstrip": false,
|
| 907 |
+
"single_word": false,
|
| 908 |
+
"special": false
|
| 909 |
+
},
|
| 910 |
+
"113": {
|
| 911 |
+
"content": "ββββββββββ",
|
| 912 |
+
"lstrip": false,
|
| 913 |
+
"normalized": false,
|
| 914 |
+
"rstrip": false,
|
| 915 |
+
"single_word": false,
|
| 916 |
+
"special": false
|
| 917 |
+
},
|
| 918 |
+
"114": {
|
| 919 |
+
"content": "βββββββββββ",
|
| 920 |
+
"lstrip": false,
|
| 921 |
+
"normalized": false,
|
| 922 |
+
"rstrip": false,
|
| 923 |
+
"single_word": false,
|
| 924 |
+
"special": false
|
| 925 |
+
},
|
| 926 |
+
"115": {
|
| 927 |
+
"content": "ββββββββββββ",
|
| 928 |
+
"lstrip": false,
|
| 929 |
+
"normalized": false,
|
| 930 |
+
"rstrip": false,
|
| 931 |
+
"single_word": false,
|
| 932 |
+
"special": false
|
| 933 |
+
},
|
| 934 |
+
"116": {
|
| 935 |
+
"content": "βββββββββββββ",
|
| 936 |
+
"lstrip": false,
|
| 937 |
+
"normalized": false,
|
| 938 |
+
"rstrip": false,
|
| 939 |
+
"single_word": false,
|
| 940 |
+
"special": false
|
| 941 |
+
},
|
| 942 |
+
"117": {
|
| 943 |
+
"content": "ββββββββββββββ",
|
| 944 |
+
"lstrip": false,
|
| 945 |
+
"normalized": false,
|
| 946 |
+
"rstrip": false,
|
| 947 |
+
"single_word": false,
|
| 948 |
+
"special": false
|
| 949 |
+
},
|
| 950 |
+
"118": {
|
| 951 |
+
"content": "βββββββββββββββ",
|
| 952 |
+
"lstrip": false,
|
| 953 |
+
"normalized": false,
|
| 954 |
+
"rstrip": false,
|
| 955 |
+
"single_word": false,
|
| 956 |
+
"special": false
|
| 957 |
+
},
|
| 958 |
+
"119": {
|
| 959 |
+
"content": "ββββββββββββββββ",
|
| 960 |
+
"lstrip": false,
|
| 961 |
+
"normalized": false,
|
| 962 |
+
"rstrip": false,
|
| 963 |
+
"single_word": false,
|
| 964 |
+
"special": false
|
| 965 |
+
},
|
| 966 |
+
"120": {
|
| 967 |
+
"content": "βββββββββββββββββ",
|
| 968 |
+
"lstrip": false,
|
| 969 |
+
"normalized": false,
|
| 970 |
+
"rstrip": false,
|
| 971 |
+
"single_word": false,
|
| 972 |
+
"special": false
|
| 973 |
+
},
|
| 974 |
+
"121": {
|
| 975 |
+
"content": "ββββββββββββββββββ",
|
| 976 |
+
"lstrip": false,
|
| 977 |
+
"normalized": false,
|
| 978 |
+
"rstrip": false,
|
| 979 |
+
"single_word": false,
|
| 980 |
+
"special": false
|
| 981 |
+
},
|
| 982 |
+
"122": {
|
| 983 |
+
"content": "βββββββββββββββββββ",
|
| 984 |
+
"lstrip": false,
|
| 985 |
+
"normalized": false,
|
| 986 |
+
"rstrip": false,
|
| 987 |
+
"single_word": false,
|
| 988 |
+
"special": false
|
| 989 |
+
},
|
| 990 |
+
"123": {
|
| 991 |
+
"content": "ββββββββββββββββββββ",
|
| 992 |
+
"lstrip": false,
|
| 993 |
+
"normalized": false,
|
| 994 |
+
"rstrip": false,
|
| 995 |
+
"single_word": false,
|
| 996 |
+
"special": false
|
| 997 |
+
},
|
| 998 |
+
"124": {
|
| 999 |
+
"content": "βββββββββββββββββββββ",
|
| 1000 |
+
"lstrip": false,
|
| 1001 |
+
"normalized": false,
|
| 1002 |
+
"rstrip": false,
|
| 1003 |
+
"single_word": false,
|
| 1004 |
+
"special": false
|
| 1005 |
+
},
|
| 1006 |
+
"125": {
|
| 1007 |
+
"content": "ββββββββββββββββββββββ",
|
| 1008 |
+
"lstrip": false,
|
| 1009 |
+
"normalized": false,
|
| 1010 |
+
"rstrip": false,
|
| 1011 |
+
"single_word": false,
|
| 1012 |
+
"special": false
|
| 1013 |
+
},
|
| 1014 |
+
"126": {
|
| 1015 |
+
"content": "βββββββββββββββββββββββ",
|
| 1016 |
+
"lstrip": false,
|
| 1017 |
+
"normalized": false,
|
| 1018 |
+
"rstrip": false,
|
| 1019 |
+
"single_word": false,
|
| 1020 |
+
"special": false
|
| 1021 |
+
},
|
| 1022 |
+
"127": {
|
| 1023 |
+
"content": "ββββββββββββββββββββββββ",
|
| 1024 |
+
"lstrip": false,
|
| 1025 |
+
"normalized": false,
|
| 1026 |
+
"rstrip": false,
|
| 1027 |
+
"single_word": false,
|
| 1028 |
+
"special": false
|
| 1029 |
+
},
|
| 1030 |
+
"128": {
|
| 1031 |
+
"content": "\t\t",
|
| 1032 |
+
"lstrip": false,
|
| 1033 |
+
"normalized": false,
|
| 1034 |
+
"rstrip": false,
|
| 1035 |
+
"single_word": false,
|
| 1036 |
+
"special": false
|
| 1037 |
+
},
|
| 1038 |
+
"129": {
|
| 1039 |
+
"content": "\t\t\t",
|
| 1040 |
+
"lstrip": false,
|
| 1041 |
+
"normalized": false,
|
| 1042 |
+
"rstrip": false,
|
| 1043 |
+
"single_word": false,
|
| 1044 |
+
"special": false
|
| 1045 |
+
},
|
| 1046 |
+
"130": {
|
| 1047 |
+
"content": "\t\t\t\t",
|
| 1048 |
+
"lstrip": false,
|
| 1049 |
+
"normalized": false,
|
| 1050 |
+
"rstrip": false,
|
| 1051 |
+
"single_word": false,
|
| 1052 |
+
"special": false
|
| 1053 |
+
},
|
| 1054 |
+
"131": {
|
| 1055 |
+
"content": "\t\t\t\t\t",
|
| 1056 |
+
"lstrip": false,
|
| 1057 |
+
"normalized": false,
|
| 1058 |
+
"rstrip": false,
|
| 1059 |
+
"single_word": false,
|
| 1060 |
+
"special": false
|
| 1061 |
+
},
|
| 1062 |
+
"132": {
|
| 1063 |
+
"content": "\t\t\t\t\t\t",
|
| 1064 |
+
"lstrip": false,
|
| 1065 |
+
"normalized": false,
|
| 1066 |
+
"rstrip": false,
|
| 1067 |
+
"single_word": false,
|
| 1068 |
+
"special": false
|
| 1069 |
+
},
|
| 1070 |
+
"133": {
|
| 1071 |
+
"content": "\n\n",
|
| 1072 |
+
"lstrip": false,
|
| 1073 |
+
"normalized": false,
|
| 1074 |
+
"rstrip": false,
|
| 1075 |
+
"single_word": false,
|
| 1076 |
+
"special": false
|
| 1077 |
+
},
|
| 1078 |
+
"134": {
|
| 1079 |
+
"content": "\n\n\n",
|
| 1080 |
+
"lstrip": false,
|
| 1081 |
+
"normalized": false,
|
| 1082 |
+
"rstrip": false,
|
| 1083 |
+
"single_word": false,
|
| 1084 |
+
"special": false
|
| 1085 |
+
}
|
| 1086 |
+
},
|
| 1087 |
+
"bos_token": "<s>",
|
| 1088 |
+
"clean_up_tokenization_spaces": false,
|
| 1089 |
+
"eos_token": "<|im_end|>",
|
| 1090 |
+
"legacy": false,
|
| 1091 |
+
"local_files_only": true,
|
| 1092 |
+
"model_max_length": 163840,
|
| 1093 |
+
"pad_token": "<unk>",
|
| 1094 |
+
"padding_side": "right",
|
| 1095 |
+
"sp_model_kwargs": {},
|
| 1096 |
+
"spaces_between_special_tokens": false,
|
| 1097 |
+
"tokenizer_class": "LlamaTokenizer",
|
| 1098 |
+
"unk_token": "<unk>",
|
| 1099 |
+
"use_default_system_prompt": false
|
| 1100 |
+
}
|