Upload folder using huggingface_hub
This view is limited to 50 files because it contains too many changes.
- .gitattributes +9 -0
- sdlm_ckpt_final/README_1.md +154 -0
- sdlm_ckpt_final/assets/ablation_tau.png +3 -0
- sdlm_ckpt_final/assets/framework.png +3 -0
- sdlm_ckpt_final/assets/main_exp1.png +3 -0
- sdlm_ckpt_final/assets/main_exp2.png +3 -0
- sdlm_ckpt_final/assets/self_speculative_decoding.png +3 -0
- sdlm_ckpt_final/assets/three_framework.png +3 -0
- sdlm_ckpt_final/sdlm_32b_bs4/added_tokens.json +25 -0
- sdlm_ckpt_final/sdlm_32b_bs4/all_results.json +8 -0
- sdlm_ckpt_final/sdlm_32b_bs4/attn_mask_utils.py +292 -0
- sdlm_ckpt_final/sdlm_32b_bs4/config.json +36 -0
- sdlm_ckpt_final/sdlm_32b_bs4/configuration_sdlm.py +147 -0
- sdlm_ckpt_final/sdlm_32b_bs4/generation_config.json +10 -0
- sdlm_ckpt_final/sdlm_32b_bs4/merges.txt +0 -0
- sdlm_ckpt_final/sdlm_32b_bs4/model-00001-of-00014.safetensors +3 -0
- sdlm_ckpt_final/sdlm_32b_bs4/model-00002-of-00014.safetensors +3 -0
- sdlm_ckpt_final/sdlm_32b_bs4/model-00003-of-00014.safetensors +3 -0
- sdlm_ckpt_final/sdlm_32b_bs4/model-00004-of-00014.safetensors +3 -0
- sdlm_ckpt_final/sdlm_32b_bs4/model-00005-of-00014.safetensors +3 -0
- sdlm_ckpt_final/sdlm_32b_bs4/model-00006-of-00014.safetensors +3 -0
- sdlm_ckpt_final/sdlm_32b_bs4/model-00007-of-00014.safetensors +3 -0
- sdlm_ckpt_final/sdlm_32b_bs4/model-00008-of-00014.safetensors +3 -0
- sdlm_ckpt_final/sdlm_32b_bs4/model-00009-of-00014.safetensors +3 -0
- sdlm_ckpt_final/sdlm_32b_bs4/model-00010-of-00014.safetensors +3 -0
- sdlm_ckpt_final/sdlm_32b_bs4/model-00011-of-00014.safetensors +3 -0
- sdlm_ckpt_final/sdlm_32b_bs4/model-00012-of-00014.safetensors +3 -0
- sdlm_ckpt_final/sdlm_32b_bs4/model-00013-of-00014.safetensors +3 -0
- sdlm_ckpt_final/sdlm_32b_bs4/model-00014-of-00014.safetensors +3 -0
- sdlm_ckpt_final/sdlm_32b_bs4/model.safetensors.index.json +778 -0
- sdlm_ckpt_final/sdlm_32b_bs4/modeling_sdlm.py +1590 -0
- sdlm_ckpt_final/sdlm_32b_bs4/special_tokens_map.json +38 -0
- sdlm_ckpt_final/sdlm_32b_bs4/tokenizer_config.json +217 -0
- sdlm_ckpt_final/sdlm_32b_bs4/train_results.json +8 -0
- sdlm_ckpt_final/sdlm_32b_bs4/trainer_state.json +0 -0
- sdlm_ckpt_final/sdlm_32b_bs4/training_args.bin +3 -0
- sdlm_ckpt_final/sdlm_32b_bs4/training_log.txt +3 -0
- sdlm_ckpt_final/sdlm_32b_bs4/training_log_from_ckpt_4000.txt +0 -0
- sdlm_ckpt_final/sdlm_32b_bs4/training_log_from_ckpt_4000_2.txt +0 -0
- sdlm_ckpt_final/sdlm_32b_bs4/training_log_from_ckpt_5200.txt +0 -0
- sdlm_ckpt_final/sdlm_32b_bs4/vocab.json +0 -0
- sdlm_ckpt_final/sdlm_3b_bs4/added_tokens.json +25 -0
- sdlm_ckpt_final/sdlm_3b_bs4/all_results.json +8 -0
- sdlm_ckpt_final/sdlm_3b_bs4/attn_mask_utils.py +292 -0
- sdlm_ckpt_final/sdlm_3b_bs4/config.json +37 -0
- sdlm_ckpt_final/sdlm_3b_bs4/configuration_sdlm.py +147 -0
- sdlm_ckpt_final/sdlm_3b_bs4/generation_config.json +10 -0
- sdlm_ckpt_final/sdlm_3b_bs4/merges.txt +0 -0
- sdlm_ckpt_final/sdlm_3b_bs4/model-00001-of-00002.safetensors +3 -0
- sdlm_ckpt_final/sdlm_3b_bs4/model-00002-of-00002.safetensors +3 -0
.gitattributes
CHANGED
@@ -64,3 +64,12 @@ mask_block_v1114_bs4_3b_main/training_log.txt filter=lfs diff=lfs merge=lfs -text
 mask_block_v1114_bs8_3b_main/training_log.txt filter=lfs diff=lfs merge=lfs -text
 mask_block_v1114_bs3_3b_ablation11/training_log.txt filter=lfs diff=lfs merge=lfs -text
 mask_block_v1114_bs3_3b_ablation12/training_log.txt filter=lfs diff=lfs merge=lfs -text
+sdlm_ckpt_final/assets/ablation_tau.png filter=lfs diff=lfs merge=lfs -text
+sdlm_ckpt_final/assets/framework.png filter=lfs diff=lfs merge=lfs -text
+sdlm_ckpt_final/assets/main_exp1.png filter=lfs diff=lfs merge=lfs -text
+sdlm_ckpt_final/assets/main_exp2.png filter=lfs diff=lfs merge=lfs -text
+sdlm_ckpt_final/assets/self_speculative_decoding.png filter=lfs diff=lfs merge=lfs -text
+sdlm_ckpt_final/assets/three_framework.png filter=lfs diff=lfs merge=lfs -text
+sdlm_ckpt_final/sdlm_32b_bs4/training_log.txt filter=lfs diff=lfs merge=lfs -text
+sdlm_ckpt_final/sdlm_3b_bs4/training_log.txt filter=lfs diff=lfs merge=lfs -text
+sdlm_ckpt_final/sdlm_3b_bs8/training_log.txt filter=lfs diff=lfs merge=lfs -text
sdlm_ckpt_final/README_1.md
ADDED
@@ -0,0 +1,154 @@
---
license: apache-2.0
license_name: qwen
license_link: https://huggingface.co/Qwen/Qwen2.5-3B/blob/main/LICENSE
pipeline_tag: text-generation
library_name: transformers
base_model:
- Qwen/Qwen2.5-3B
base_model_relation: finetune
language:
- en
tags:
- sdlm
- diffusion language model
- custom_code
datasets:
- dyyyyyyyy/ScaleQuest-Math
- OpenCoder-LLM/opc-sft-stage2
- allenai/tulu-3-sft-mixture
- HuggingFaceTB/smoltalk2
- LipengCS/Table-GPT
- allenai/SciRIFF
---

# SDLM-3B-D4

[\[📂 GitHub\]](https://github.com/OpenGVLab/SDLM) [\[📜 Tech Report\]](https://huggingface.co/papers/xxx) [\[🤗 HuggingFace\]](https://huggingface.co/collections/OpenGVLab/sdlm-68ac82709d7c343ad36aa552)

## Introduction

We propose a <b>S</b>equential <b>D</b>iffusion <b>L</b>anguage <b>M</b>odel (<b>SDLM</b>) that cheaply elicits the parallel prediction capabilities of diffusion models. Specifically, SDLM reduces distribution shift by limiting the prediction range to a fixed block length and enforces the decoding order through longest-prefix decoding, thereby significantly improving prediction efficiency while preserving generation quality. Our method can be viewed as a further generalization of the autoregressive (AR) paradigm, so pre-trained AR weights can be migrated to the diffusion framework with only minimal instruction fine-tuning.

![framework](assets/framework.png)

## SDLM Family

The following table gives an overview of the SDLM series.

| Model Name | Base Model 🤗 | HF Link 🤗 |
| ----------- | ------------------------------------------------------------ | -------------------------------------------- |
| SDLM-3B-D4 | <a href="https://huggingface.co/Qwen/Qwen2.5-3B">Qwen2.5-3B</a> | https://huggingface.co/OpenGVLab/SDLM-3B-D4 |
| SDLM-3B-D8 | <a href="https://huggingface.co/Qwen/Qwen2.5-3B">Qwen2.5-3B</a> | https://huggingface.co/OpenGVLab/SDLM-3B-D8 |
| SDLM-32B-D4 | <a href="https://huggingface.co/Qwen/Qwen2.5-32B">Qwen2.5-32B</a> | https://huggingface.co/OpenGVLab/SDLM-32B-D4 |

## Model Architecture

We propose a sequential blockwise masked prediction method that reduces error accumulation in diffusion-based generation. The method leverages the observation that tokens at lower positional indices typically benefit from more reliable contextual information, resulting in lower deviation and improved accuracy.

* **(a) Training pipeline.** Reordered input enables a structured mask with a causal prefix (top-left), a visible cross-block prefix (bottom-left), and intra-block bidirectional attention (bottom-right).
* **(b) Sampling pipeline.** Confidence-based dynamic block decoding with KV-cache reuse. At each step, a block of B tokens is predicted with B-1 padding masks; the longest high-confidence prefix is selected as the dynamic output (see the sketch below), and cached KV states keep decoding efficient.

![three_framework](assets/three_framework.png)
## Performance

### Long-Form Benchmarks

SDLM delivers strong performance with significantly faster decoding. It runs roughly 2x faster than comparable autoregressive models while matching their accuracy, and achieves up to a 5x speedup over other diffusion language models, as evidenced by results on the MATH-500 benchmark.

![main_exp1](assets/main_exp1.png)

### General Multiple-Choice Benchmarks

![main_exp2](assets/main_exp2.png)

### Block Size & Self-Speculative Decoding

![self_speculative_decoding](assets/self_speculative_decoding.png)

## Trade-off Between Performance and Speed

By adjusting the confidence threshold τ, SDLM-3B (B=4) and SDLM-3B (B=8) offer a controllable trade-off between speed and performance. SpeedUp denotes the average number of tokens output per forward pass; a toy reading of the metric is sketched after the figure.

![ablation_tau](assets/ablation_tau.png)
## Inference

1. Install Dependencies

Key package versions:

```
transformers==4.37.2
torch>=2.5.0
```

2. Download the model generation script [sdlm_inference.py](https://github.com/OpenGVLab/SDLM/blob/main/sdlm_inference.py) to your working directory.

3. We provide example code to run `SDLM-3B-D4` using `transformers`.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from sdlm_inference import SDLM_generate

if __name__ == "__main__":
    ckpt_hf = 'OpenGVLab/SDLM-3B-D4'

    model = AutoModelForCausalLM.from_pretrained(
        ckpt_hf,
        attn_implementation="eager",
        trust_remote_code=True
    ).to(dtype=torch.float16)
    tokenizer = AutoTokenizer.from_pretrained(ckpt_hf)

    prompt = 'Write a Fibonacci function in Python.'
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": prompt}
    ]
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )

    model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

    response, history = SDLM_generate(
        model,
        tokenizer,
        model_inputs,
        max_gen_len=1024,
        temperature=0,
        threshold=0.5,
        n_future_tokens=4,
        alg='prob_conf',  # prob_conf | entropy_conf | self_speculative
        save_history=True,
        use_cache=True
    )

    print('response: ', response[0])

    print('======= history')
    for item in history:
        print('cur total token ', item[1])
        print(item[0][0])
        print('--------')
```
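To explore the speed/quality trade-off from the earlier section with this checkpoint, one option is to sweep `threshold` around the same call. This is a hedged sketch that reuses `model`, `tokenizer`, and `model_inputs` from the example above; it assumes `history` holds one entry per forward pass with the running token count in `item[1]`, as printed above.

```python
# Sweep the confidence threshold and compare decoding effort (illustrative sketch).
for tau in (0.3, 0.5, 0.7, 0.9):
    response, history = SDLM_generate(
        model,
        tokenizer,
        model_inputs,
        max_gen_len=1024,
        temperature=0,
        threshold=tau,
        n_future_tokens=4,
        alg='prob_conf',
        save_history=True,
        use_cache=True
    )
    total_tokens = history[-1][1]  # running token count after the last recorded step
    print(f"tau={tau}: {len(history)} forward passes, "
          f"~{total_tokens / len(history):.2f} tokens per pass")
```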
## Citation

If you find this project useful in your research, please consider citing:

```BibTeX
@article{SDLM,
  title={Sequential Diffusion Language Models},
  author={},
  journal={arXiv preprint arXiv:2025.xxxxx},
  year={2025}
}
```
sdlm_ckpt_final/assets/ablation_tau.png
ADDED
Git LFS Details
sdlm_ckpt_final/assets/framework.png
ADDED
Git LFS Details
sdlm_ckpt_final/assets/main_exp1.png
ADDED
Git LFS Details
sdlm_ckpt_final/assets/main_exp2.png
ADDED
Git LFS Details
sdlm_ckpt_final/assets/self_speculative_decoding.png
ADDED
Git LFS Details
sdlm_ckpt_final/assets/three_framework.png
ADDED
Git LFS Details
sdlm_ckpt_final/sdlm_32b_bs4/added_tokens.json
ADDED
@@ -0,0 +1,25 @@
{
  "</tool_call>": 151658,
  "<text_mask>": 151665,
  "<tool_call>": 151657,
  "<|box_end|>": 151649,
  "<|box_start|>": 151648,
  "<|endoftext|>": 151643,
  "<|file_sep|>": 151664,
  "<|fim_middle|>": 151660,
  "<|fim_pad|>": 151662,
  "<|fim_prefix|>": 151659,
  "<|fim_suffix|>": 151661,
  "<|im_end|>": 151645,
  "<|im_start|>": 151644,
  "<|image_pad|>": 151655,
  "<|object_ref_end|>": 151647,
  "<|object_ref_start|>": 151646,
  "<|quad_end|>": 151651,
  "<|quad_start|>": 151650,
  "<|repo_name|>": 151663,
  "<|video_pad|>": 151656,
  "<|vision_end|>": 151653,
  "<|vision_pad|>": 151654,
  "<|vision_start|>": 151652
}
sdlm_ckpt_final/sdlm_32b_bs4/all_results.json
ADDED
@@ -0,0 +1,8 @@
{
  "epoch": 1.0,
  "train_loss": 0.38482022780720976,
  "train_runtime": 198218.2379,
  "train_samples": 3506817,
  "train_samples_per_second": 17.692,
  "train_steps_per_second": 0.038
}
sdlm_ckpt_final/sdlm_32b_bs4/attn_mask_utils.py
ADDED
@@ -0,0 +1,292 @@
import torch
import copy

def find_prefix_seq_length_by_pe(
    pe: torch.Tensor
) -> torch.Tensor:
    """
    Find the sequence length where the position encoding drops (indicating the prefix boundary).
    Args:
        pe: Position encoding tensor of shape [batch_size, seq_len].
            Contains position indices for each token in the sequence.
    Returns:
        torch.Tensor: A tensor of shape [B] containing:
            - The index where the position encoding drops for each sequence
            - -1 if no drop occurs in the sequence
    """
    batch_size, seq_len = pe.shape
    prev = pe[:, :-1]
    curr = pe[:, 1:]
    drop_mask = curr < prev  # [batch_size, seq_len-1]

    seq_len = torch.full((batch_size,), -1, dtype=torch.long)

    for b in range(batch_size):
        drop_pos = torch.nonzero(drop_mask[b], as_tuple=False)
        if drop_pos.numel() > 0:
            i = drop_pos[0].item() + 1  # Take first drop position (+1 because we compared shifted sequences)
            seq_len[b] = i

    return seq_len


def update_causal_mask_with_pad_non_visible_2d(
    input_ids: torch.Tensor,
    attn_mask_2d: torch.Tensor,
    text_mask_token_id: int = 151666,
    block_size: int = 4,
    causal_attn: bool = False
) -> torch.Tensor:
    """
    Updates a 2D attention mask for the whole sequence from input_ids and text_mask_token_id.

    Args:
        input_ids: Input token IDs (unused in current implementation)
        attn_mask_2d: 2D attention mask matrix of shape [seq_len, seq_len] where:
            - 0.0 indicates allowed attention
            - -inf indicates masked attention
        text_mask_token_id: ID representing masked tokens
        block_size: Size of the diffusion window
        causal_attn: If True, maintains strict causal masking throughout

    Returns:
        Modified attention mask with updated visibility patterns
    """
    seq_len = input_ids.shape[0]
    device = input_ids.device

    # Identify masked tokens and their preceding positions
    input_mask = input_ids.eq(text_mask_token_id)
    input_before_mask = torch.zeros_like(input_mask)
    input_before_mask[:-1] = input_mask[1:]
    mask_cols = (input_mask | input_before_mask)
    non_mask = ~mask_cols

    rows = torch.arange(seq_len, device=device)[:, None]  # (seq_len, 1)
    cols = torch.arange(seq_len, device=device)  # (seq_len,)

    indices = torch.arange(seq_len, device=device)
    prev_non_mask = (indices * non_mask).cummax(dim=0).values

    max_value = torch.iinfo(indices.dtype).max
    mask_indices = torch.where(non_mask, indices, torch.full_like(indices, max_value))
    reversed_mask_indices = torch.flip(mask_indices, dims=[0])
    reversed_cummin = reversed_mask_indices.cummin(dim=0).values
    next_non_mask = torch.flip(reversed_cummin, dims=[0])

    # ================= Part 1: Make positions after masks invisible =================
    infra_mask = (
        (cols > prev_non_mask) &
        (rows >= next_non_mask[None, :]) &
        mask_cols[None, :]
    )
    attn_mask_2d.masked_fill_(infra_mask, -float('inf'))

    # ================= Part 2: Allow visibility to previous positions (if not causal) =================
    if not causal_attn:
        visible_mask = (
            (rows > prev_non_mask[None, :]) &
            (rows < cols) &
            mask_cols[None, :]
        )
        attn_mask_2d.masked_fill_(visible_mask, 0.0)

    return attn_mask_2d


def update_causal_mask_for_one_gen_window_2d(
    input_ids: torch.Tensor,
    attn_mask_2d: torch.Tensor,
    block_size: int = 4,
    use_cache: bool = True,
    causal_attn: bool = False
) -> torch.Tensor:
    """
    Updates a 2D attention mask for a diffusion window in transformer inference.

    Args:
        input_ids: Input token IDs (unused in current implementation)
        attn_mask_2d: 2D attention mask matrix of shape [seq_len, seq_len] where:
            - 0.0 indicates allowed attention
            - -inf indicates masked attention
        block_size: Size of the diffusion window
        use_cache: Whether key-value cache is being used
        causal_attn: If True, maintains strict causal masking throughout

    Returns:
        Modified attention mask with updated visibility patterns
    """
    if not causal_attn:
        # Make the diffusion window (last block_size tokens) fully visible to itself
        # This allows bidirectional attention within the diffusion window
        attn_mask_2d[-block_size:, -block_size:] = 0.0
    if use_cache:
        # Mask the last token from previous round to prevent recomputation and maintain generation consistency.
        attn_mask_2d[-block_size:, -block_size-1] = -float('inf')

    return attn_mask_2d


def create_block_diff_mask_by_pe_1d(
    b: int,
    h: int,
    q_idx: torch.Tensor,
    kv_idx: torch.Tensor,
    block_size: int,
    x0_len_list: torch.Tensor,
    position_ids_list: torch.Tensor,
    causal_attn: bool = False,
) -> torch.Tensor:
    """Computes the attention mask for a single query-key position in Flex Attention.

    Args:
        b (int): Batch index (0 <= b < batch_size).
        h (int): Head index (unused in current implementation, reserved for future multi-head support).
        q_idx (torch.Tensor): Query position index (scalar or 0D tensor).
        kv_idx (torch.Tensor): Key/Value position index (scalar or 0D tensor).
        block_size (int): Size of processing blocks for non-`x0` tokens.
        x0_len_list (torch.Tensor): Tensor of shape [batch_size] with `x0` segment lengths.
        position_ids_list (torch.Tensor): Tensor of shape [batch_size, seq_len] with position IDs.
        causal_attn (bool, optional): Enforces causal masking in mutual blocks if True. Defaults to False.

    Returns:
        torch.Tensor: Boolean indicating whether attention is allowed (True = allowed).
    """
    x0_len = x0_len_list[b]
    position_ids = position_ids_list[b]

    x0_flag_q = (q_idx < x0_len)
    x0_flag_kv = (kv_idx < x0_len)

    # top-left causal
    block_causal = (
        x0_flag_q &
        x0_flag_kv &
        (q_idx >= kv_idx)
    )

    q_ith_block = (q_idx - x0_len) // block_size
    kv_ith_block = (kv_idx - x0_len) // block_size

    # bottom-right
    block_mutual = (
        (~x0_flag_q & ~x0_flag_kv) &
        (q_ith_block == kv_ith_block) &
        (q_idx >= kv_idx if causal_attn else 1)
    )

    # bottom-left
    prefix_len = position_ids[x0_len + q_ith_block * block_size]  # prefix length corresponding to q_idx's block
    block_prefix = (
        (~x0_flag_q & x0_flag_kv) &
        (kv_idx < prefix_len)
    )

    mask_val = (block_causal | block_mutual | block_prefix)
    return mask_val.to(torch.bool)


def create_block_diff_mask_by_pe_4d(
    block_size: int,
    x0_len_list: torch.Tensor,
    position_ids: torch.Tensor,
    causal_attn: bool = False
) -> tuple[torch.Tensor, torch.Tensor]:
    """Generates a 4D attention mask for block-diffusion attention patterns.

    The mask consists of three regions:
    1. Causal block (top-left): Standard causal attention for `x0` tokens.
    2. Mutual block (bottom-right): Non-causal attention within the same block for non-`x0` tokens.
    3. Prefix block (bottom-left): Non-`x0` tokens can attend to a prefix of `x0` tokens.

    Args:
        block_size (int): Size of processing blocks for non-`x0` tokens.
        x0_len_list (torch.Tensor): Tensor of shape [B] containing lengths of `x0` segments per batch.
        position_ids (torch.Tensor): Tensor of shape [B, seq_len] containing position IDs.
        causal_attn (bool, optional): If True, enforces causal masking in mutual blocks. Defaults to False.

    Returns:
        tuple[torch.Tensor, torch.Tensor]:
            - A float mask of shape [batch_size, 1, seq_len, seq_len] with `-inf` for masked (non-visible) positions.
            - A boolean mask of shape [batch_size, 1, seq_len, seq_len] indicating allowed attention positions.
    """
    batch_size, seq_len = position_ids.shape
    device = position_ids.device

    # Create position indices [batch_size, seq_len, seq_len]
    q_idx = torch.arange(seq_len, device=device).view(1, seq_len, 1)  # [1, seq_len, 1]
    kv_idx = torch.arange(seq_len, device=device).view(1, 1, seq_len)  # [1, 1, seq_len]

    # Broadcast to [B, seq_len, seq_len]
    x0_len = x0_len_list.view(batch_size, 1, 1)  # [batch_size, 1, 1]
    x0_flag_q = q_idx < x0_len  # [batch_size, seq_len, seq_len]
    x0_flag_kv = kv_idx < x0_len

    # Block indices calculation [batch_size, seq_len, seq_len]
    q_block_idx = (q_idx - x0_len) // block_size
    kv_block_idx = (kv_idx - x0_len) // block_size

    # Causal block (top-left)
    block_causal = x0_flag_q & x0_flag_kv & (q_idx >= kv_idx)

    # Mutual block (bottom-right)
    mutual_condition = (q_idx >= kv_idx) if causal_attn else torch.ones_like(q_idx, dtype=torch.bool)
    block_mutual = (~x0_flag_q & ~x0_flag_kv &
                    (q_block_idx == kv_block_idx) &
                    mutual_condition)

    # Prefix block (bottom-left)
    q_blk = torch.div(q_idx - x0_len, block_size, rounding_mode='floor')
    q_blk_start = (x0_len_list.view(batch_size, 1) + q_blk[:, :, 0] * block_size).clamp(min=0, max=seq_len-1)  # (batch_size, L)
    prefix_len = position_ids.gather(1, q_blk_start)
    prefix_len = prefix_len.unsqueeze(2)
    block_prefix = (~x0_flag_q & x0_flag_kv) & (kv_idx < prefix_len)

    # FIXME Padding Mask
    # padding_mask = (position_ids.view(batch_size, 1, seq_len) != -1) & (position_ids.view(batch_size, seq_len, -1) != -1)

    # Combine masks
    final_mask = (block_causal | block_mutual | block_prefix)  # bool
    # & padding_mask
    customized_mask = torch.full_like(final_mask, float('-inf'), dtype=torch.bfloat16)
    customized_mask.masked_fill_(final_mask, 0.0)  # 0.0 or -inf

    # Add head dimension [batch_size, 1, seq_len, seq_len]
    return customized_mask.unsqueeze(1).to(device=device), final_mask.unsqueeze(1).to(device=device)


def find_pred_pos_from_input_ids(
    input_ids: torch.LongTensor = None,
    text_mask_token_id: int = 151666,
) -> torch.Tensor:
    """Compute the relative prediction positions for masked tokens in a sequence.

    For non-masked positions, the output is 0. For masked positions, the value increments
    by 1 for each consecutive mask token, indicating how many steps ahead the prediction is.

    Args:
        input_ids (torch.LongTensor): Input token IDs of shape [batch_size, seq_len].
        text_mask_token_id (int, optional): Token ID representing masked positions. Defaults to 151666.

    Returns:
        torch.Tensor: A tensor of shape [batch_size, seq_len] where:
            - 0 indicates a non-masked token.
            - n > 0 indicates the nth consecutive masked token (e.g., 1 = first mask, 2 = second mask, etc.).
    """
    batch_size, seq_len = input_ids.shape
    device = input_ids.device

    is_mask = (input_ids == text_mask_token_id)

    base_mask = torch.zeros((batch_size, seq_len), dtype=torch.int8, device=device)

    for b in range(batch_size):
        for ix in range(1, seq_len):
            if is_mask[b][ix]:
                # Increment counter if current token is masked
                base_mask[b][ix] = base_mask[b][ix-1] + 1

    return base_mask
sdlm_ckpt_final/sdlm_32b_bs4/config.json
ADDED
@@ -0,0 +1,36 @@
{
  "architectures": [
    "SDLMQwen2ForCausalLM"
  ],
  "auto_map": {
    "AutoConfig": "configuration_sdlm.SDLMQwen2Config",
    "AutoModelForCausalLM": "modeling_sdlm.SDLMQwen2ForCausalLM"
  },
  "attention_dropout": 0.0,
  "attn_implementation": "eager",
  "block_size": 4,
  "bos_token_id": 151643,
  "casual_attn": false,
  "eos_token_id": 151643,
  "hidden_act": "silu",
  "hidden_size": 5120,
  "initializer_range": 0.02,
  "intermediate_size": 27648,
  "max_position_embeddings": 131072,
  "max_window_layers": 64,
  "model_type": "qwen2",
  "num_attention_heads": 40,
  "num_hidden_layers": 64,
  "num_key_value_heads": 8,
  "rms_norm_eps": 1e-05,
  "rope_theta": 1000000.0,
  "sliding_window": 131072,
  "text_mask_token": "<text_mask>",
  "text_mask_token_id": 151665,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.37.2",
  "use_cache": true,
  "use_sliding_window": false,
  "vocab_size": 151666
}
sdlm_ckpt_final/sdlm_32b_bs4/configuration_sdlm.py
ADDED
@@ -0,0 +1,147 @@
# coding=utf-8
# Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Qwen2 model configuration"""

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging


logger = logging.get_logger(__name__)

QWEN2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Qwen/Qwen2-7B-beta": "https://huggingface.co/Qwen/Qwen2-7B-beta/resolve/main/config.json",
}


class SDLMQwen2Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Qwen2Model`]. It is used to instantiate a
    Qwen2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of
    Qwen2-7B-beta [Qwen/Qwen2-7B-beta](https://huggingface.co/Qwen/Qwen2-7B-beta).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 151936):
            Vocabulary size of the Qwen2 model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`Qwen2Model`]
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 22016):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_key_value_heads (`int`, *optional*, defaults to 32):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details checkout [this
            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `32`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 32768):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether the model's input and output word embeddings should be tied.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        use_sliding_window (`bool`, *optional*, defaults to `False`):
            Whether to use sliding window attention.
        sliding_window (`int`, *optional*, defaults to 4096):
            Sliding window attention (SWA) window size. If not specified, will default to `4096`.
        max_window_layers (`int`, *optional*, defaults to 28):
            The number of layers that use SWA (Sliding Window Attention). The bottom layers use SWA while the top use full attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.

    ```python
    >>> from transformers import Qwen2Model, Qwen2Config

    >>> # Initializing a Qwen2 style configuration
    >>> configuration = Qwen2Config()

    >>> # Initializing a model from the Qwen2-7B style configuration
    >>> model = Qwen2Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "qwen2"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=151936,
        hidden_size=4096,
        intermediate_size=22016,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=32,
        hidden_act="silu",
        max_position_embeddings=32768,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        use_sliding_window=False,
        sliding_window=4096,
        max_window_layers=28,
        attention_dropout=0.0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.use_sliding_window = use_sliding_window
        self.sliding_window = sliding_window
        self.max_window_layers = max_window_layers

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.attention_dropout = attention_dropout
        if kwargs.get('attn_implementation', None) is None:
            self.attn_implementation = kwargs['attn_implementation'] = 'flash_attention_2'
        else:
            self.attn_implementation = kwargs['attn_implementation']

        super().__init__(
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
sdlm_ckpt_final/sdlm_32b_bs4/generation_config.json
ADDED
@@ -0,0 +1,10 @@
{
  "attn_implementation": "eager",
  "bos_token_id": 151643,
  "eos_token_id": [
    151643,
    151645
  ],
  "max_new_tokens": 4096,
  "transformers_version": "4.37.2"
}
sdlm_ckpt_final/sdlm_32b_bs4/merges.txt
ADDED
The diff for this file is too large to render.
sdlm_ckpt_final/sdlm_32b_bs4/model-00001-of-00014.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6203d4cd1307ea95c6efe9d706c3647b031054c028ee843ec104b05d0b59fa5b
size 4887655472
sdlm_ckpt_final/sdlm_32b_bs4/model-00002-of-00014.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:15bd7e77967929e1c42baabb4b753a50880b7423504ca812cc9eec39a7cfa05f
size 4876059352
sdlm_ckpt_final/sdlm_32b_bs4/model-00003-of-00014.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:074f64daf0862ce8d07dbcbe79d0c15d3ac19a5ffc3dae40e0c7783ae32bd588
size 4876059384
sdlm_ckpt_final/sdlm_32b_bs4/model-00004-of-00014.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4acec5d4faf16aa580fd44486a156bc29afe14400b326b29920ed824ee91e764
size 4876059416
sdlm_ckpt_final/sdlm_32b_bs4/model-00005-of-00014.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d6b2b7079f2e2ee87254c30360142cd09b3b66f132ca2ff40013fff842c519de
size 4876059416
sdlm_ckpt_final/sdlm_32b_bs4/model-00006-of-00014.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:79e864b5e19166effb377eabcb127bd07d28202d025b2863f61bd4077082e916
size 4876059416
sdlm_ckpt_final/sdlm_32b_bs4/model-00007-of-00014.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b7f46fa41d568b8b13adfd6134c016a288fd1b5bdf707810ec65ac185debf04c
size 4876059416
sdlm_ckpt_final/sdlm_32b_bs4/model-00008-of-00014.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:23e55a14d4d3b4d958f69a7671a843ad5847bee2794bbaffccf2afa88890cda7
size 4876059416
sdlm_ckpt_final/sdlm_32b_bs4/model-00009-of-00014.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1791e8934af40ecdf939cf2897509a444d81c99b0fff02fbce83abeeae6f985f
size 4876059416
sdlm_ckpt_final/sdlm_32b_bs4/model-00010-of-00014.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1c350821d8268f0036101e06c8ee610a048c2eec57c8445e077caa40c13cbbc1
size 4876059416
sdlm_ckpt_final/sdlm_32b_bs4/model-00011-of-00014.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dd70361d47f9f6fc2dcce8a06f17b47b834990f724738e73546c4025c2f6d1c9
size 4876059416
sdlm_ckpt_final/sdlm_32b_bs4/model-00012-of-00014.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a3387d16083f29e811949a5b93075ceee984e7b1a30eda9fb74bdfcad45bb8be
size 4876059416
sdlm_ckpt_final/sdlm_32b_bs4/model-00013-of-00014.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b3df67f062e7b1ebfe9cf2f70374d09303961cb54238466a990d259d48dac111
size 4876059416
sdlm_ckpt_final/sdlm_32b_bs4/model-00014-of-00014.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d67985bf3b046bd1fdf2516ab9edfcb5ef8a689b8258ac380eda0fdf523e3a77
size 2119322280
sdlm_ckpt_final/sdlm_32b_bs4/model.safetensors.index.json
ADDED
@@ -0,0 +1,778 @@
{
  "metadata": {
    "total_size": 65519601664
  },
  "weight_map": {
    "lm_head.weight": "model-00014-of-00014.safetensors",
    "model.embed_tokens.weight": "model-00001-of-00014.safetensors",
    "model.layers.0.input_layernorm.weight": "model-00001-of-00014.safetensors",
    "model.layers.0.mlp.down_proj.weight": "model-00001-of-00014.safetensors",
    "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00014.safetensors",
    "model.layers.0.mlp.up_proj.weight": "model-00001-of-00014.safetensors",
    "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00014.safetensors",
    "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00014.safetensors",
    "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00014.safetensors",
    "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00014.safetensors",
    "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00014.safetensors",
    "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00014.safetensors",
    "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00014.safetensors",
    "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00014.safetensors",
    "model.layers.1.input_layernorm.weight": "model-00001-of-00014.safetensors",
    "model.layers.1.mlp.down_proj.weight": "model-00001-of-00014.safetensors",
    "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00014.safetensors",
    "model.layers.1.mlp.up_proj.weight": "model-00001-of-00014.safetensors",
    "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00014.safetensors",
    "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00014.safetensors",
    "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00014.safetensors",
    "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00014.safetensors",
    "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00014.safetensors",
    "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00014.safetensors",
    "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00014.safetensors",
    "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00014.safetensors",
    "model.layers.10.input_layernorm.weight": "model-00003-of-00014.safetensors",
    "model.layers.10.mlp.down_proj.weight": "model-00003-of-00014.safetensors",
    "model.layers.10.mlp.gate_proj.weight": "model-00003-of-00014.safetensors",
    "model.layers.10.mlp.up_proj.weight": "model-00003-of-00014.safetensors",
    "model.layers.10.post_attention_layernorm.weight": "model-00003-of-00014.safetensors",
    "model.layers.10.self_attn.k_proj.bias": "model-00003-of-00014.safetensors",
    "model.layers.10.self_attn.k_proj.weight": "model-00003-of-00014.safetensors",
    "model.layers.10.self_attn.o_proj.weight": "model-00003-of-00014.safetensors",
    "model.layers.10.self_attn.q_proj.bias": "model-00003-of-00014.safetensors",
    "model.layers.10.self_attn.q_proj.weight": "model-00003-of-00014.safetensors",
    "model.layers.10.self_attn.v_proj.bias": "model-00003-of-00014.safetensors",
    "model.layers.10.self_attn.v_proj.weight": "model-00003-of-00014.safetensors",
    "model.layers.11.input_layernorm.weight": "model-00003-of-00014.safetensors",
    "model.layers.11.mlp.down_proj.weight": "model-00003-of-00014.safetensors",
    "model.layers.11.mlp.gate_proj.weight": "model-00003-of-00014.safetensors",
    "model.layers.11.mlp.up_proj.weight": "model-00003-of-00014.safetensors",
    "model.layers.11.post_attention_layernorm.weight": "model-00003-of-00014.safetensors",
    "model.layers.11.self_attn.k_proj.bias": "model-00003-of-00014.safetensors",
    "model.layers.11.self_attn.k_proj.weight": "model-00003-of-00014.safetensors",
    "model.layers.11.self_attn.o_proj.weight": "model-00003-of-00014.safetensors",
    "model.layers.11.self_attn.q_proj.bias": "model-00003-of-00014.safetensors",
    "model.layers.11.self_attn.q_proj.weight": "model-00003-of-00014.safetensors",
    "model.layers.11.self_attn.v_proj.bias": "model-00003-of-00014.safetensors",
    "model.layers.11.self_attn.v_proj.weight": "model-00003-of-00014.safetensors",
    "model.layers.12.input_layernorm.weight": "model-00003-of-00014.safetensors",
    "model.layers.12.mlp.down_proj.weight": "model-00003-of-00014.safetensors",
    "model.layers.12.mlp.gate_proj.weight": "model-00003-of-00014.safetensors",
    "model.layers.12.mlp.up_proj.weight": "model-00003-of-00014.safetensors",
    "model.layers.12.post_attention_layernorm.weight": "model-00003-of-00014.safetensors",
    "model.layers.12.self_attn.k_proj.bias": "model-00003-of-00014.safetensors",
    "model.layers.12.self_attn.k_proj.weight": "model-00003-of-00014.safetensors",
    "model.layers.12.self_attn.o_proj.weight": "model-00003-of-00014.safetensors",
    "model.layers.12.self_attn.q_proj.bias": "model-00003-of-00014.safetensors",
    "model.layers.12.self_attn.q_proj.weight": "model-00003-of-00014.safetensors",
    "model.layers.12.self_attn.v_proj.bias": "model-00003-of-00014.safetensors",
    "model.layers.12.self_attn.v_proj.weight": "model-00003-of-00014.safetensors",
    "model.layers.13.input_layernorm.weight": "model-00004-of-00014.safetensors",
    "model.layers.13.mlp.down_proj.weight": "model-00004-of-00014.safetensors",
    "model.layers.13.mlp.gate_proj.weight": "model-00003-of-00014.safetensors",
    "model.layers.13.mlp.up_proj.weight": "model-00004-of-00014.safetensors",
    "model.layers.13.post_attention_layernorm.weight": "model-00004-of-00014.safetensors",
    "model.layers.13.self_attn.k_proj.bias": "model-00003-of-00014.safetensors",
    "model.layers.13.self_attn.k_proj.weight": "model-00003-of-00014.safetensors",
    "model.layers.13.self_attn.o_proj.weight": "model-00003-of-00014.safetensors",
    "model.layers.13.self_attn.q_proj.bias": "model-00003-of-00014.safetensors",
    "model.layers.13.self_attn.q_proj.weight": "model-00003-of-00014.safetensors",
    "model.layers.13.self_attn.v_proj.bias": "model-00003-of-00014.safetensors",
    "model.layers.13.self_attn.v_proj.weight": "model-00003-of-00014.safetensors",
    "model.layers.14.input_layernorm.weight": "model-00004-of-00014.safetensors",
    "model.layers.14.mlp.down_proj.weight": "model-00004-of-00014.safetensors",
    "model.layers.14.mlp.gate_proj.weight": "model-00004-of-00014.safetensors",
    "model.layers.14.mlp.up_proj.weight": "model-00004-of-00014.safetensors",
    "model.layers.14.post_attention_layernorm.weight": "model-00004-of-00014.safetensors",
    "model.layers.14.self_attn.k_proj.bias": "model-00004-of-00014.safetensors",
    "model.layers.14.self_attn.k_proj.weight": "model-00004-of-00014.safetensors",
    "model.layers.14.self_attn.o_proj.weight": "model-00004-of-00014.safetensors",
    "model.layers.14.self_attn.q_proj.bias": "model-00004-of-00014.safetensors",
    "model.layers.14.self_attn.q_proj.weight": "model-00004-of-00014.safetensors",
    "model.layers.14.self_attn.v_proj.bias": "model-00004-of-00014.safetensors",
    "model.layers.14.self_attn.v_proj.weight": "model-00004-of-00014.safetensors",
    "model.layers.15.input_layernorm.weight": "model-00004-of-00014.safetensors",
    "model.layers.15.mlp.down_proj.weight": "model-00004-of-00014.safetensors",
    "model.layers.15.mlp.gate_proj.weight": "model-00004-of-00014.safetensors",
    "model.layers.15.mlp.up_proj.weight": "model-00004-of-00014.safetensors",
    "model.layers.15.post_attention_layernorm.weight": "model-00004-of-00014.safetensors",
    "model.layers.15.self_attn.k_proj.bias": "model-00004-of-00014.safetensors",
    "model.layers.15.self_attn.k_proj.weight": "model-00004-of-00014.safetensors",
    "model.layers.15.self_attn.o_proj.weight": "model-00004-of-00014.safetensors",
    "model.layers.15.self_attn.q_proj.bias": "model-00004-of-00014.safetensors",
    "model.layers.15.self_attn.q_proj.weight": "model-00004-of-00014.safetensors",
    "model.layers.15.self_attn.v_proj.bias": "model-00004-of-00014.safetensors",
    "model.layers.15.self_attn.v_proj.weight": "model-00004-of-00014.safetensors",
    "model.layers.16.input_layernorm.weight": "model-00004-of-00014.safetensors",
    "model.layers.16.mlp.down_proj.weight": "model-00004-of-00014.safetensors",
    "model.layers.16.mlp.gate_proj.weight": "model-00004-of-00014.safetensors",
    "model.layers.16.mlp.up_proj.weight": "model-00004-of-00014.safetensors",
    "model.layers.16.post_attention_layernorm.weight": "model-00004-of-00014.safetensors",
    "model.layers.16.self_attn.k_proj.bias": "model-00004-of-00014.safetensors",
    "model.layers.16.self_attn.k_proj.weight": "model-00004-of-00014.safetensors",
    "model.layers.16.self_attn.o_proj.weight": "model-00004-of-00014.safetensors",
    "model.layers.16.self_attn.q_proj.bias": "model-00004-of-00014.safetensors",
    "model.layers.16.self_attn.q_proj.weight": "model-00004-of-00014.safetensors",
    "model.layers.16.self_attn.v_proj.bias": "model-00004-of-00014.safetensors",
    "model.layers.16.self_attn.v_proj.weight": "model-00004-of-00014.safetensors",
    "model.layers.17.input_layernorm.weight": "model-00004-of-00014.safetensors",
    "model.layers.17.mlp.down_proj.weight": "model-00004-of-00014.safetensors",
    "model.layers.17.mlp.gate_proj.weight": "model-00004-of-00014.safetensors",
    "model.layers.17.mlp.up_proj.weight": "model-00004-of-00014.safetensors",
    "model.layers.17.post_attention_layernorm.weight": "model-00004-of-00014.safetensors",
    "model.layers.17.self_attn.k_proj.bias": "model-00004-of-00014.safetensors",
    "model.layers.17.self_attn.k_proj.weight": "model-00004-of-00014.safetensors",
    "model.layers.17.self_attn.o_proj.weight": "model-00004-of-00014.safetensors",
    "model.layers.17.self_attn.q_proj.bias": "model-00004-of-00014.safetensors",
    "model.layers.17.self_attn.q_proj.weight": "model-00004-of-00014.safetensors",
    "model.layers.17.self_attn.v_proj.bias": "model-00004-of-00014.safetensors",
    "model.layers.17.self_attn.v_proj.weight": "model-00004-of-00014.safetensors",
    "model.layers.18.input_layernorm.weight": "model-00005-of-00014.safetensors",
    "model.layers.18.mlp.down_proj.weight": "model-00005-of-00014.safetensors",
    "model.layers.18.mlp.gate_proj.weight": "model-00004-of-00014.safetensors",
    "model.layers.18.mlp.up_proj.weight": "model-00005-of-00014.safetensors",
    "model.layers.18.post_attention_layernorm.weight": "model-00005-of-00014.safetensors",
    "model.layers.18.self_attn.k_proj.bias": "model-00004-of-00014.safetensors",
"model.layers.18.self_attn.k_proj.bias": "model-00004-of-00014.safetensors",
|
| 134 |
+
"model.layers.18.self_attn.k_proj.weight": "model-00004-of-00014.safetensors",
|
| 135 |
+
"model.layers.18.self_attn.o_proj.weight": "model-00004-of-00014.safetensors",
|
| 136 |
+
"model.layers.18.self_attn.q_proj.bias": "model-00004-of-00014.safetensors",
|
| 137 |
+
"model.layers.18.self_attn.q_proj.weight": "model-00004-of-00014.safetensors",
|
| 138 |
+
"model.layers.18.self_attn.v_proj.bias": "model-00004-of-00014.safetensors",
|
| 139 |
+
"model.layers.18.self_attn.v_proj.weight": "model-00004-of-00014.safetensors",
|
| 140 |
+
"model.layers.19.input_layernorm.weight": "model-00005-of-00014.safetensors",
|
| 141 |
+
"model.layers.19.mlp.down_proj.weight": "model-00005-of-00014.safetensors",
|
| 142 |
+
"model.layers.19.mlp.gate_proj.weight": "model-00005-of-00014.safetensors",
|
| 143 |
+
"model.layers.19.mlp.up_proj.weight": "model-00005-of-00014.safetensors",
|
| 144 |
+
"model.layers.19.post_attention_layernorm.weight": "model-00005-of-00014.safetensors",
|
| 145 |
+
"model.layers.19.self_attn.k_proj.bias": "model-00005-of-00014.safetensors",
|
| 146 |
+
"model.layers.19.self_attn.k_proj.weight": "model-00005-of-00014.safetensors",
|
| 147 |
+
"model.layers.19.self_attn.o_proj.weight": "model-00005-of-00014.safetensors",
|
| 148 |
+
"model.layers.19.self_attn.q_proj.bias": "model-00005-of-00014.safetensors",
|
| 149 |
+
"model.layers.19.self_attn.q_proj.weight": "model-00005-of-00014.safetensors",
|
| 150 |
+
"model.layers.19.self_attn.v_proj.bias": "model-00005-of-00014.safetensors",
|
| 151 |
+
"model.layers.19.self_attn.v_proj.weight": "model-00005-of-00014.safetensors",
|
| 152 |
+
"model.layers.2.input_layernorm.weight": "model-00001-of-00014.safetensors",
|
| 153 |
+
"model.layers.2.mlp.down_proj.weight": "model-00001-of-00014.safetensors",
|
| 154 |
+
"model.layers.2.mlp.gate_proj.weight": "model-00001-of-00014.safetensors",
|
| 155 |
+
"model.layers.2.mlp.up_proj.weight": "model-00001-of-00014.safetensors",
|
| 156 |
+
"model.layers.2.post_attention_layernorm.weight": "model-00001-of-00014.safetensors",
|
| 157 |
+
"model.layers.2.self_attn.k_proj.bias": "model-00001-of-00014.safetensors",
|
| 158 |
+
"model.layers.2.self_attn.k_proj.weight": "model-00001-of-00014.safetensors",
|
| 159 |
+
"model.layers.2.self_attn.o_proj.weight": "model-00001-of-00014.safetensors",
|
| 160 |
+
"model.layers.2.self_attn.q_proj.bias": "model-00001-of-00014.safetensors",
|
| 161 |
+
"model.layers.2.self_attn.q_proj.weight": "model-00001-of-00014.safetensors",
|
| 162 |
+
"model.layers.2.self_attn.v_proj.bias": "model-00001-of-00014.safetensors",
|
| 163 |
+
"model.layers.2.self_attn.v_proj.weight": "model-00001-of-00014.safetensors",
|
| 164 |
+
"model.layers.20.input_layernorm.weight": "model-00005-of-00014.safetensors",
|
| 165 |
+
"model.layers.20.mlp.down_proj.weight": "model-00005-of-00014.safetensors",
|
| 166 |
+
"model.layers.20.mlp.gate_proj.weight": "model-00005-of-00014.safetensors",
|
| 167 |
+
"model.layers.20.mlp.up_proj.weight": "model-00005-of-00014.safetensors",
|
| 168 |
+
"model.layers.20.post_attention_layernorm.weight": "model-00005-of-00014.safetensors",
|
| 169 |
+
"model.layers.20.self_attn.k_proj.bias": "model-00005-of-00014.safetensors",
|
| 170 |
+
"model.layers.20.self_attn.k_proj.weight": "model-00005-of-00014.safetensors",
|
| 171 |
+
"model.layers.20.self_attn.o_proj.weight": "model-00005-of-00014.safetensors",
|
| 172 |
+
"model.layers.20.self_attn.q_proj.bias": "model-00005-of-00014.safetensors",
|
| 173 |
+
"model.layers.20.self_attn.q_proj.weight": "model-00005-of-00014.safetensors",
|
| 174 |
+
"model.layers.20.self_attn.v_proj.bias": "model-00005-of-00014.safetensors",
|
| 175 |
+
"model.layers.20.self_attn.v_proj.weight": "model-00005-of-00014.safetensors",
|
| 176 |
+
"model.layers.21.input_layernorm.weight": "model-00005-of-00014.safetensors",
|
| 177 |
+
"model.layers.21.mlp.down_proj.weight": "model-00005-of-00014.safetensors",
|
| 178 |
+
"model.layers.21.mlp.gate_proj.weight": "model-00005-of-00014.safetensors",
|
| 179 |
+
"model.layers.21.mlp.up_proj.weight": "model-00005-of-00014.safetensors",
|
| 180 |
+
"model.layers.21.post_attention_layernorm.weight": "model-00005-of-00014.safetensors",
|
| 181 |
+
"model.layers.21.self_attn.k_proj.bias": "model-00005-of-00014.safetensors",
|
| 182 |
+
"model.layers.21.self_attn.k_proj.weight": "model-00005-of-00014.safetensors",
|
| 183 |
+
"model.layers.21.self_attn.o_proj.weight": "model-00005-of-00014.safetensors",
|
| 184 |
+
"model.layers.21.self_attn.q_proj.bias": "model-00005-of-00014.safetensors",
|
| 185 |
+
"model.layers.21.self_attn.q_proj.weight": "model-00005-of-00014.safetensors",
|
| 186 |
+
"model.layers.21.self_attn.v_proj.bias": "model-00005-of-00014.safetensors",
|
| 187 |
+
"model.layers.21.self_attn.v_proj.weight": "model-00005-of-00014.safetensors",
|
| 188 |
+
"model.layers.22.input_layernorm.weight": "model-00005-of-00014.safetensors",
|
| 189 |
+
"model.layers.22.mlp.down_proj.weight": "model-00005-of-00014.safetensors",
|
| 190 |
+
"model.layers.22.mlp.gate_proj.weight": "model-00005-of-00014.safetensors",
|
| 191 |
+
"model.layers.22.mlp.up_proj.weight": "model-00005-of-00014.safetensors",
|
| 192 |
+
"model.layers.22.post_attention_layernorm.weight": "model-00005-of-00014.safetensors",
|
| 193 |
+
"model.layers.22.self_attn.k_proj.bias": "model-00005-of-00014.safetensors",
|
| 194 |
+
"model.layers.22.self_attn.k_proj.weight": "model-00005-of-00014.safetensors",
|
| 195 |
+
"model.layers.22.self_attn.o_proj.weight": "model-00005-of-00014.safetensors",
|
| 196 |
+
"model.layers.22.self_attn.q_proj.bias": "model-00005-of-00014.safetensors",
|
| 197 |
+
"model.layers.22.self_attn.q_proj.weight": "model-00005-of-00014.safetensors",
|
| 198 |
+
"model.layers.22.self_attn.v_proj.bias": "model-00005-of-00014.safetensors",
|
| 199 |
+
"model.layers.22.self_attn.v_proj.weight": "model-00005-of-00014.safetensors",
|
| 200 |
+
"model.layers.23.input_layernorm.weight": "model-00006-of-00014.safetensors",
|
| 201 |
+
"model.layers.23.mlp.down_proj.weight": "model-00006-of-00014.safetensors",
|
| 202 |
+
"model.layers.23.mlp.gate_proj.weight": "model-00005-of-00014.safetensors",
|
| 203 |
+
"model.layers.23.mlp.up_proj.weight": "model-00006-of-00014.safetensors",
|
| 204 |
+
"model.layers.23.post_attention_layernorm.weight": "model-00006-of-00014.safetensors",
|
| 205 |
+
"model.layers.23.self_attn.k_proj.bias": "model-00005-of-00014.safetensors",
|
| 206 |
+
"model.layers.23.self_attn.k_proj.weight": "model-00005-of-00014.safetensors",
|
| 207 |
+
"model.layers.23.self_attn.o_proj.weight": "model-00005-of-00014.safetensors",
|
| 208 |
+
"model.layers.23.self_attn.q_proj.bias": "model-00005-of-00014.safetensors",
|
| 209 |
+
"model.layers.23.self_attn.q_proj.weight": "model-00005-of-00014.safetensors",
|
| 210 |
+
"model.layers.23.self_attn.v_proj.bias": "model-00005-of-00014.safetensors",
|
| 211 |
+
"model.layers.23.self_attn.v_proj.weight": "model-00005-of-00014.safetensors",
|
| 212 |
+
"model.layers.24.input_layernorm.weight": "model-00006-of-00014.safetensors",
|
| 213 |
+
"model.layers.24.mlp.down_proj.weight": "model-00006-of-00014.safetensors",
|
| 214 |
+
"model.layers.24.mlp.gate_proj.weight": "model-00006-of-00014.safetensors",
|
| 215 |
+
"model.layers.24.mlp.up_proj.weight": "model-00006-of-00014.safetensors",
|
| 216 |
+
"model.layers.24.post_attention_layernorm.weight": "model-00006-of-00014.safetensors",
|
| 217 |
+
"model.layers.24.self_attn.k_proj.bias": "model-00006-of-00014.safetensors",
|
| 218 |
+
"model.layers.24.self_attn.k_proj.weight": "model-00006-of-00014.safetensors",
|
| 219 |
+
"model.layers.24.self_attn.o_proj.weight": "model-00006-of-00014.safetensors",
|
| 220 |
+
"model.layers.24.self_attn.q_proj.bias": "model-00006-of-00014.safetensors",
|
| 221 |
+
"model.layers.24.self_attn.q_proj.weight": "model-00006-of-00014.safetensors",
|
| 222 |
+
"model.layers.24.self_attn.v_proj.bias": "model-00006-of-00014.safetensors",
|
| 223 |
+
"model.layers.24.self_attn.v_proj.weight": "model-00006-of-00014.safetensors",
|
| 224 |
+
"model.layers.25.input_layernorm.weight": "model-00006-of-00014.safetensors",
|
| 225 |
+
"model.layers.25.mlp.down_proj.weight": "model-00006-of-00014.safetensors",
|
| 226 |
+
"model.layers.25.mlp.gate_proj.weight": "model-00006-of-00014.safetensors",
|
| 227 |
+
"model.layers.25.mlp.up_proj.weight": "model-00006-of-00014.safetensors",
|
| 228 |
+
"model.layers.25.post_attention_layernorm.weight": "model-00006-of-00014.safetensors",
|
| 229 |
+
"model.layers.25.self_attn.k_proj.bias": "model-00006-of-00014.safetensors",
|
| 230 |
+
"model.layers.25.self_attn.k_proj.weight": "model-00006-of-00014.safetensors",
|
| 231 |
+
"model.layers.25.self_attn.o_proj.weight": "model-00006-of-00014.safetensors",
|
| 232 |
+
"model.layers.25.self_attn.q_proj.bias": "model-00006-of-00014.safetensors",
|
| 233 |
+
"model.layers.25.self_attn.q_proj.weight": "model-00006-of-00014.safetensors",
|
| 234 |
+
"model.layers.25.self_attn.v_proj.bias": "model-00006-of-00014.safetensors",
|
| 235 |
+
"model.layers.25.self_attn.v_proj.weight": "model-00006-of-00014.safetensors",
|
| 236 |
+
"model.layers.26.input_layernorm.weight": "model-00006-of-00014.safetensors",
|
| 237 |
+
"model.layers.26.mlp.down_proj.weight": "model-00006-of-00014.safetensors",
|
| 238 |
+
"model.layers.26.mlp.gate_proj.weight": "model-00006-of-00014.safetensors",
|
| 239 |
+
"model.layers.26.mlp.up_proj.weight": "model-00006-of-00014.safetensors",
|
| 240 |
+
"model.layers.26.post_attention_layernorm.weight": "model-00006-of-00014.safetensors",
|
| 241 |
+
"model.layers.26.self_attn.k_proj.bias": "model-00006-of-00014.safetensors",
|
| 242 |
+
"model.layers.26.self_attn.k_proj.weight": "model-00006-of-00014.safetensors",
|
| 243 |
+
"model.layers.26.self_attn.o_proj.weight": "model-00006-of-00014.safetensors",
|
| 244 |
+
"model.layers.26.self_attn.q_proj.bias": "model-00006-of-00014.safetensors",
|
| 245 |
+
"model.layers.26.self_attn.q_proj.weight": "model-00006-of-00014.safetensors",
|
| 246 |
+
"model.layers.26.self_attn.v_proj.bias": "model-00006-of-00014.safetensors",
|
| 247 |
+
"model.layers.26.self_attn.v_proj.weight": "model-00006-of-00014.safetensors",
|
| 248 |
+
"model.layers.27.input_layernorm.weight": "model-00006-of-00014.safetensors",
|
| 249 |
+
"model.layers.27.mlp.down_proj.weight": "model-00006-of-00014.safetensors",
|
| 250 |
+
"model.layers.27.mlp.gate_proj.weight": "model-00006-of-00014.safetensors",
|
| 251 |
+
"model.layers.27.mlp.up_proj.weight": "model-00006-of-00014.safetensors",
|
| 252 |
+
"model.layers.27.post_attention_layernorm.weight": "model-00006-of-00014.safetensors",
|
| 253 |
+
"model.layers.27.self_attn.k_proj.bias": "model-00006-of-00014.safetensors",
|
| 254 |
+
"model.layers.27.self_attn.k_proj.weight": "model-00006-of-00014.safetensors",
|
| 255 |
+
"model.layers.27.self_attn.o_proj.weight": "model-00006-of-00014.safetensors",
|
| 256 |
+
"model.layers.27.self_attn.q_proj.bias": "model-00006-of-00014.safetensors",
|
| 257 |
+
"model.layers.27.self_attn.q_proj.weight": "model-00006-of-00014.safetensors",
|
| 258 |
+
"model.layers.27.self_attn.v_proj.bias": "model-00006-of-00014.safetensors",
|
| 259 |
+
"model.layers.27.self_attn.v_proj.weight": "model-00006-of-00014.safetensors",
|
| 260 |
+
"model.layers.28.input_layernorm.weight": "model-00007-of-00014.safetensors",
|
| 261 |
+
"model.layers.28.mlp.down_proj.weight": "model-00007-of-00014.safetensors",
|
| 262 |
+
"model.layers.28.mlp.gate_proj.weight": "model-00006-of-00014.safetensors",
|
| 263 |
+
"model.layers.28.mlp.up_proj.weight": "model-00007-of-00014.safetensors",
|
| 264 |
+
"model.layers.28.post_attention_layernorm.weight": "model-00007-of-00014.safetensors",
|
| 265 |
+
"model.layers.28.self_attn.k_proj.bias": "model-00006-of-00014.safetensors",
|
| 266 |
+
"model.layers.28.self_attn.k_proj.weight": "model-00006-of-00014.safetensors",
|
| 267 |
+
"model.layers.28.self_attn.o_proj.weight": "model-00006-of-00014.safetensors",
|
| 268 |
+
"model.layers.28.self_attn.q_proj.bias": "model-00006-of-00014.safetensors",
|
| 269 |
+
"model.layers.28.self_attn.q_proj.weight": "model-00006-of-00014.safetensors",
|
| 270 |
+
"model.layers.28.self_attn.v_proj.bias": "model-00006-of-00014.safetensors",
|
| 271 |
+
"model.layers.28.self_attn.v_proj.weight": "model-00006-of-00014.safetensors",
|
| 272 |
+
"model.layers.29.input_layernorm.weight": "model-00007-of-00014.safetensors",
|
| 273 |
+
"model.layers.29.mlp.down_proj.weight": "model-00007-of-00014.safetensors",
|
| 274 |
+
"model.layers.29.mlp.gate_proj.weight": "model-00007-of-00014.safetensors",
|
| 275 |
+
"model.layers.29.mlp.up_proj.weight": "model-00007-of-00014.safetensors",
|
| 276 |
+
"model.layers.29.post_attention_layernorm.weight": "model-00007-of-00014.safetensors",
|
| 277 |
+
"model.layers.29.self_attn.k_proj.bias": "model-00007-of-00014.safetensors",
|
| 278 |
+
"model.layers.29.self_attn.k_proj.weight": "model-00007-of-00014.safetensors",
|
| 279 |
+
"model.layers.29.self_attn.o_proj.weight": "model-00007-of-00014.safetensors",
|
| 280 |
+
"model.layers.29.self_attn.q_proj.bias": "model-00007-of-00014.safetensors",
|
| 281 |
+
"model.layers.29.self_attn.q_proj.weight": "model-00007-of-00014.safetensors",
|
| 282 |
+
"model.layers.29.self_attn.v_proj.bias": "model-00007-of-00014.safetensors",
|
| 283 |
+
"model.layers.29.self_attn.v_proj.weight": "model-00007-of-00014.safetensors",
|
| 284 |
+
"model.layers.3.input_layernorm.weight": "model-00002-of-00014.safetensors",
|
| 285 |
+
"model.layers.3.mlp.down_proj.weight": "model-00002-of-00014.safetensors",
|
| 286 |
+
"model.layers.3.mlp.gate_proj.weight": "model-00001-of-00014.safetensors",
|
| 287 |
+
"model.layers.3.mlp.up_proj.weight": "model-00002-of-00014.safetensors",
|
| 288 |
+
"model.layers.3.post_attention_layernorm.weight": "model-00002-of-00014.safetensors",
|
| 289 |
+
"model.layers.3.self_attn.k_proj.bias": "model-00001-of-00014.safetensors",
|
| 290 |
+
"model.layers.3.self_attn.k_proj.weight": "model-00001-of-00014.safetensors",
|
| 291 |
+
"model.layers.3.self_attn.o_proj.weight": "model-00001-of-00014.safetensors",
|
| 292 |
+
"model.layers.3.self_attn.q_proj.bias": "model-00001-of-00014.safetensors",
|
| 293 |
+
"model.layers.3.self_attn.q_proj.weight": "model-00001-of-00014.safetensors",
|
| 294 |
+
"model.layers.3.self_attn.v_proj.bias": "model-00001-of-00014.safetensors",
|
| 295 |
+
"model.layers.3.self_attn.v_proj.weight": "model-00001-of-00014.safetensors",
|
| 296 |
+
"model.layers.30.input_layernorm.weight": "model-00007-of-00014.safetensors",
|
| 297 |
+
"model.layers.30.mlp.down_proj.weight": "model-00007-of-00014.safetensors",
|
| 298 |
+
"model.layers.30.mlp.gate_proj.weight": "model-00007-of-00014.safetensors",
|
| 299 |
+
"model.layers.30.mlp.up_proj.weight": "model-00007-of-00014.safetensors",
|
| 300 |
+
"model.layers.30.post_attention_layernorm.weight": "model-00007-of-00014.safetensors",
|
| 301 |
+
"model.layers.30.self_attn.k_proj.bias": "model-00007-of-00014.safetensors",
|
| 302 |
+
"model.layers.30.self_attn.k_proj.weight": "model-00007-of-00014.safetensors",
|
| 303 |
+
"model.layers.30.self_attn.o_proj.weight": "model-00007-of-00014.safetensors",
|
| 304 |
+
"model.layers.30.self_attn.q_proj.bias": "model-00007-of-00014.safetensors",
|
| 305 |
+
"model.layers.30.self_attn.q_proj.weight": "model-00007-of-00014.safetensors",
|
| 306 |
+
"model.layers.30.self_attn.v_proj.bias": "model-00007-of-00014.safetensors",
|
| 307 |
+
"model.layers.30.self_attn.v_proj.weight": "model-00007-of-00014.safetensors",
|
| 308 |
+
"model.layers.31.input_layernorm.weight": "model-00007-of-00014.safetensors",
|
| 309 |
+
"model.layers.31.mlp.down_proj.weight": "model-00007-of-00014.safetensors",
|
| 310 |
+
"model.layers.31.mlp.gate_proj.weight": "model-00007-of-00014.safetensors",
|
| 311 |
+
"model.layers.31.mlp.up_proj.weight": "model-00007-of-00014.safetensors",
|
| 312 |
+
"model.layers.31.post_attention_layernorm.weight": "model-00007-of-00014.safetensors",
|
| 313 |
+
"model.layers.31.self_attn.k_proj.bias": "model-00007-of-00014.safetensors",
|
| 314 |
+
"model.layers.31.self_attn.k_proj.weight": "model-00007-of-00014.safetensors",
|
| 315 |
+
"model.layers.31.self_attn.o_proj.weight": "model-00007-of-00014.safetensors",
|
| 316 |
+
"model.layers.31.self_attn.q_proj.bias": "model-00007-of-00014.safetensors",
|
| 317 |
+
"model.layers.31.self_attn.q_proj.weight": "model-00007-of-00014.safetensors",
|
| 318 |
+
"model.layers.31.self_attn.v_proj.bias": "model-00007-of-00014.safetensors",
|
| 319 |
+
"model.layers.31.self_attn.v_proj.weight": "model-00007-of-00014.safetensors",
|
| 320 |
+
"model.layers.32.input_layernorm.weight": "model-00007-of-00014.safetensors",
|
| 321 |
+
"model.layers.32.mlp.down_proj.weight": "model-00007-of-00014.safetensors",
|
| 322 |
+
"model.layers.32.mlp.gate_proj.weight": "model-00007-of-00014.safetensors",
|
| 323 |
+
"model.layers.32.mlp.up_proj.weight": "model-00007-of-00014.safetensors",
|
| 324 |
+
"model.layers.32.post_attention_layernorm.weight": "model-00007-of-00014.safetensors",
|
| 325 |
+
"model.layers.32.self_attn.k_proj.bias": "model-00007-of-00014.safetensors",
|
| 326 |
+
"model.layers.32.self_attn.k_proj.weight": "model-00007-of-00014.safetensors",
|
| 327 |
+
"model.layers.32.self_attn.o_proj.weight": "model-00007-of-00014.safetensors",
|
| 328 |
+
"model.layers.32.self_attn.q_proj.bias": "model-00007-of-00014.safetensors",
|
| 329 |
+
"model.layers.32.self_attn.q_proj.weight": "model-00007-of-00014.safetensors",
|
| 330 |
+
"model.layers.32.self_attn.v_proj.bias": "model-00007-of-00014.safetensors",
|
| 331 |
+
"model.layers.32.self_attn.v_proj.weight": "model-00007-of-00014.safetensors",
|
| 332 |
+
"model.layers.33.input_layernorm.weight": "model-00008-of-00014.safetensors",
|
| 333 |
+
"model.layers.33.mlp.down_proj.weight": "model-00008-of-00014.safetensors",
|
| 334 |
+
"model.layers.33.mlp.gate_proj.weight": "model-00007-of-00014.safetensors",
|
| 335 |
+
"model.layers.33.mlp.up_proj.weight": "model-00008-of-00014.safetensors",
|
| 336 |
+
"model.layers.33.post_attention_layernorm.weight": "model-00008-of-00014.safetensors",
|
| 337 |
+
"model.layers.33.self_attn.k_proj.bias": "model-00007-of-00014.safetensors",
|
| 338 |
+
"model.layers.33.self_attn.k_proj.weight": "model-00007-of-00014.safetensors",
|
| 339 |
+
"model.layers.33.self_attn.o_proj.weight": "model-00007-of-00014.safetensors",
|
| 340 |
+
"model.layers.33.self_attn.q_proj.bias": "model-00007-of-00014.safetensors",
|
| 341 |
+
"model.layers.33.self_attn.q_proj.weight": "model-00007-of-00014.safetensors",
|
| 342 |
+
"model.layers.33.self_attn.v_proj.bias": "model-00007-of-00014.safetensors",
|
| 343 |
+
"model.layers.33.self_attn.v_proj.weight": "model-00007-of-00014.safetensors",
|
| 344 |
+
"model.layers.34.input_layernorm.weight": "model-00008-of-00014.safetensors",
|
| 345 |
+
"model.layers.34.mlp.down_proj.weight": "model-00008-of-00014.safetensors",
|
| 346 |
+
"model.layers.34.mlp.gate_proj.weight": "model-00008-of-00014.safetensors",
|
| 347 |
+
"model.layers.34.mlp.up_proj.weight": "model-00008-of-00014.safetensors",
|
| 348 |
+
"model.layers.34.post_attention_layernorm.weight": "model-00008-of-00014.safetensors",
|
| 349 |
+
"model.layers.34.self_attn.k_proj.bias": "model-00008-of-00014.safetensors",
|
| 350 |
+
"model.layers.34.self_attn.k_proj.weight": "model-00008-of-00014.safetensors",
|
| 351 |
+
"model.layers.34.self_attn.o_proj.weight": "model-00008-of-00014.safetensors",
|
| 352 |
+
"model.layers.34.self_attn.q_proj.bias": "model-00008-of-00014.safetensors",
|
| 353 |
+
"model.layers.34.self_attn.q_proj.weight": "model-00008-of-00014.safetensors",
|
| 354 |
+
"model.layers.34.self_attn.v_proj.bias": "model-00008-of-00014.safetensors",
|
| 355 |
+
"model.layers.34.self_attn.v_proj.weight": "model-00008-of-00014.safetensors",
|
| 356 |
+
"model.layers.35.input_layernorm.weight": "model-00008-of-00014.safetensors",
|
| 357 |
+
"model.layers.35.mlp.down_proj.weight": "model-00008-of-00014.safetensors",
|
| 358 |
+
"model.layers.35.mlp.gate_proj.weight": "model-00008-of-00014.safetensors",
|
| 359 |
+
"model.layers.35.mlp.up_proj.weight": "model-00008-of-00014.safetensors",
|
| 360 |
+
"model.layers.35.post_attention_layernorm.weight": "model-00008-of-00014.safetensors",
|
| 361 |
+
"model.layers.35.self_attn.k_proj.bias": "model-00008-of-00014.safetensors",
|
| 362 |
+
"model.layers.35.self_attn.k_proj.weight": "model-00008-of-00014.safetensors",
|
| 363 |
+
"model.layers.35.self_attn.o_proj.weight": "model-00008-of-00014.safetensors",
|
| 364 |
+
"model.layers.35.self_attn.q_proj.bias": "model-00008-of-00014.safetensors",
|
| 365 |
+
"model.layers.35.self_attn.q_proj.weight": "model-00008-of-00014.safetensors",
|
| 366 |
+
"model.layers.35.self_attn.v_proj.bias": "model-00008-of-00014.safetensors",
|
| 367 |
+
"model.layers.35.self_attn.v_proj.weight": "model-00008-of-00014.safetensors",
|
| 368 |
+
"model.layers.36.input_layernorm.weight": "model-00008-of-00014.safetensors",
|
| 369 |
+
"model.layers.36.mlp.down_proj.weight": "model-00008-of-00014.safetensors",
|
| 370 |
+
"model.layers.36.mlp.gate_proj.weight": "model-00008-of-00014.safetensors",
|
| 371 |
+
"model.layers.36.mlp.up_proj.weight": "model-00008-of-00014.safetensors",
|
| 372 |
+
"model.layers.36.post_attention_layernorm.weight": "model-00008-of-00014.safetensors",
|
| 373 |
+
"model.layers.36.self_attn.k_proj.bias": "model-00008-of-00014.safetensors",
|
| 374 |
+
"model.layers.36.self_attn.k_proj.weight": "model-00008-of-00014.safetensors",
|
| 375 |
+
"model.layers.36.self_attn.o_proj.weight": "model-00008-of-00014.safetensors",
|
| 376 |
+
"model.layers.36.self_attn.q_proj.bias": "model-00008-of-00014.safetensors",
|
| 377 |
+
"model.layers.36.self_attn.q_proj.weight": "model-00008-of-00014.safetensors",
|
| 378 |
+
"model.layers.36.self_attn.v_proj.bias": "model-00008-of-00014.safetensors",
|
| 379 |
+
"model.layers.36.self_attn.v_proj.weight": "model-00008-of-00014.safetensors",
|
| 380 |
+
"model.layers.37.input_layernorm.weight": "model-00008-of-00014.safetensors",
|
| 381 |
+
"model.layers.37.mlp.down_proj.weight": "model-00008-of-00014.safetensors",
|
| 382 |
+
"model.layers.37.mlp.gate_proj.weight": "model-00008-of-00014.safetensors",
|
| 383 |
+
"model.layers.37.mlp.up_proj.weight": "model-00008-of-00014.safetensors",
|
| 384 |
+
"model.layers.37.post_attention_layernorm.weight": "model-00008-of-00014.safetensors",
|
| 385 |
+
"model.layers.37.self_attn.k_proj.bias": "model-00008-of-00014.safetensors",
|
| 386 |
+
"model.layers.37.self_attn.k_proj.weight": "model-00008-of-00014.safetensors",
|
| 387 |
+
"model.layers.37.self_attn.o_proj.weight": "model-00008-of-00014.safetensors",
|
| 388 |
+
"model.layers.37.self_attn.q_proj.bias": "model-00008-of-00014.safetensors",
|
| 389 |
+
"model.layers.37.self_attn.q_proj.weight": "model-00008-of-00014.safetensors",
|
| 390 |
+
"model.layers.37.self_attn.v_proj.bias": "model-00008-of-00014.safetensors",
|
| 391 |
+
"model.layers.37.self_attn.v_proj.weight": "model-00008-of-00014.safetensors",
|
| 392 |
+
"model.layers.38.input_layernorm.weight": "model-00009-of-00014.safetensors",
|
| 393 |
+
"model.layers.38.mlp.down_proj.weight": "model-00009-of-00014.safetensors",
|
| 394 |
+
"model.layers.38.mlp.gate_proj.weight": "model-00008-of-00014.safetensors",
|
| 395 |
+
"model.layers.38.mlp.up_proj.weight": "model-00009-of-00014.safetensors",
|
| 396 |
+
"model.layers.38.post_attention_layernorm.weight": "model-00009-of-00014.safetensors",
|
| 397 |
+
"model.layers.38.self_attn.k_proj.bias": "model-00008-of-00014.safetensors",
|
| 398 |
+
"model.layers.38.self_attn.k_proj.weight": "model-00008-of-00014.safetensors",
|
| 399 |
+
"model.layers.38.self_attn.o_proj.weight": "model-00008-of-00014.safetensors",
|
| 400 |
+
"model.layers.38.self_attn.q_proj.bias": "model-00008-of-00014.safetensors",
|
| 401 |
+
"model.layers.38.self_attn.q_proj.weight": "model-00008-of-00014.safetensors",
|
| 402 |
+
"model.layers.38.self_attn.v_proj.bias": "model-00008-of-00014.safetensors",
|
| 403 |
+
"model.layers.38.self_attn.v_proj.weight": "model-00008-of-00014.safetensors",
|
| 404 |
+
"model.layers.39.input_layernorm.weight": "model-00009-of-00014.safetensors",
|
| 405 |
+
"model.layers.39.mlp.down_proj.weight": "model-00009-of-00014.safetensors",
|
| 406 |
+
"model.layers.39.mlp.gate_proj.weight": "model-00009-of-00014.safetensors",
|
| 407 |
+
"model.layers.39.mlp.up_proj.weight": "model-00009-of-00014.safetensors",
|
| 408 |
+
"model.layers.39.post_attention_layernorm.weight": "model-00009-of-00014.safetensors",
|
| 409 |
+
"model.layers.39.self_attn.k_proj.bias": "model-00009-of-00014.safetensors",
|
| 410 |
+
"model.layers.39.self_attn.k_proj.weight": "model-00009-of-00014.safetensors",
|
| 411 |
+
"model.layers.39.self_attn.o_proj.weight": "model-00009-of-00014.safetensors",
|
| 412 |
+
"model.layers.39.self_attn.q_proj.bias": "model-00009-of-00014.safetensors",
|
| 413 |
+
"model.layers.39.self_attn.q_proj.weight": "model-00009-of-00014.safetensors",
|
| 414 |
+
"model.layers.39.self_attn.v_proj.bias": "model-00009-of-00014.safetensors",
|
| 415 |
+
"model.layers.39.self_attn.v_proj.weight": "model-00009-of-00014.safetensors",
|
| 416 |
+
"model.layers.4.input_layernorm.weight": "model-00002-of-00014.safetensors",
|
| 417 |
+
"model.layers.4.mlp.down_proj.weight": "model-00002-of-00014.safetensors",
|
| 418 |
+
"model.layers.4.mlp.gate_proj.weight": "model-00002-of-00014.safetensors",
|
| 419 |
+
"model.layers.4.mlp.up_proj.weight": "model-00002-of-00014.safetensors",
|
| 420 |
+
"model.layers.4.post_attention_layernorm.weight": "model-00002-of-00014.safetensors",
|
| 421 |
+
"model.layers.4.self_attn.k_proj.bias": "model-00002-of-00014.safetensors",
|
| 422 |
+
"model.layers.4.self_attn.k_proj.weight": "model-00002-of-00014.safetensors",
|
| 423 |
+
"model.layers.4.self_attn.o_proj.weight": "model-00002-of-00014.safetensors",
|
| 424 |
+
"model.layers.4.self_attn.q_proj.bias": "model-00002-of-00014.safetensors",
|
| 425 |
+
"model.layers.4.self_attn.q_proj.weight": "model-00002-of-00014.safetensors",
|
| 426 |
+
"model.layers.4.self_attn.v_proj.bias": "model-00002-of-00014.safetensors",
|
| 427 |
+
"model.layers.4.self_attn.v_proj.weight": "model-00002-of-00014.safetensors",
|
| 428 |
+
"model.layers.40.input_layernorm.weight": "model-00009-of-00014.safetensors",
|
| 429 |
+
"model.layers.40.mlp.down_proj.weight": "model-00009-of-00014.safetensors",
|
| 430 |
+
"model.layers.40.mlp.gate_proj.weight": "model-00009-of-00014.safetensors",
|
| 431 |
+
"model.layers.40.mlp.up_proj.weight": "model-00009-of-00014.safetensors",
|
| 432 |
+
"model.layers.40.post_attention_layernorm.weight": "model-00009-of-00014.safetensors",
|
| 433 |
+
"model.layers.40.self_attn.k_proj.bias": "model-00009-of-00014.safetensors",
|
| 434 |
+
"model.layers.40.self_attn.k_proj.weight": "model-00009-of-00014.safetensors",
|
| 435 |
+
"model.layers.40.self_attn.o_proj.weight": "model-00009-of-00014.safetensors",
|
| 436 |
+
"model.layers.40.self_attn.q_proj.bias": "model-00009-of-00014.safetensors",
|
| 437 |
+
"model.layers.40.self_attn.q_proj.weight": "model-00009-of-00014.safetensors",
|
| 438 |
+
"model.layers.40.self_attn.v_proj.bias": "model-00009-of-00014.safetensors",
|
| 439 |
+
"model.layers.40.self_attn.v_proj.weight": "model-00009-of-00014.safetensors",
|
| 440 |
+
"model.layers.41.input_layernorm.weight": "model-00009-of-00014.safetensors",
|
| 441 |
+
"model.layers.41.mlp.down_proj.weight": "model-00009-of-00014.safetensors",
|
| 442 |
+
"model.layers.41.mlp.gate_proj.weight": "model-00009-of-00014.safetensors",
|
| 443 |
+
"model.layers.41.mlp.up_proj.weight": "model-00009-of-00014.safetensors",
|
| 444 |
+
"model.layers.41.post_attention_layernorm.weight": "model-00009-of-00014.safetensors",
|
| 445 |
+
"model.layers.41.self_attn.k_proj.bias": "model-00009-of-00014.safetensors",
|
| 446 |
+
"model.layers.41.self_attn.k_proj.weight": "model-00009-of-00014.safetensors",
|
| 447 |
+
"model.layers.41.self_attn.o_proj.weight": "model-00009-of-00014.safetensors",
|
| 448 |
+
"model.layers.41.self_attn.q_proj.bias": "model-00009-of-00014.safetensors",
|
| 449 |
+
"model.layers.41.self_attn.q_proj.weight": "model-00009-of-00014.safetensors",
|
| 450 |
+
"model.layers.41.self_attn.v_proj.bias": "model-00009-of-00014.safetensors",
|
| 451 |
+
"model.layers.41.self_attn.v_proj.weight": "model-00009-of-00014.safetensors",
|
| 452 |
+
"model.layers.42.input_layernorm.weight": "model-00009-of-00014.safetensors",
|
| 453 |
+
"model.layers.42.mlp.down_proj.weight": "model-00009-of-00014.safetensors",
|
| 454 |
+
"model.layers.42.mlp.gate_proj.weight": "model-00009-of-00014.safetensors",
|
| 455 |
+
"model.layers.42.mlp.up_proj.weight": "model-00009-of-00014.safetensors",
|
| 456 |
+
"model.layers.42.post_attention_layernorm.weight": "model-00009-of-00014.safetensors",
|
| 457 |
+
"model.layers.42.self_attn.k_proj.bias": "model-00009-of-00014.safetensors",
|
| 458 |
+
"model.layers.42.self_attn.k_proj.weight": "model-00009-of-00014.safetensors",
|
| 459 |
+
"model.layers.42.self_attn.o_proj.weight": "model-00009-of-00014.safetensors",
|
| 460 |
+
"model.layers.42.self_attn.q_proj.bias": "model-00009-of-00014.safetensors",
|
| 461 |
+
"model.layers.42.self_attn.q_proj.weight": "model-00009-of-00014.safetensors",
|
| 462 |
+
"model.layers.42.self_attn.v_proj.bias": "model-00009-of-00014.safetensors",
|
| 463 |
+
"model.layers.42.self_attn.v_proj.weight": "model-00009-of-00014.safetensors",
|
| 464 |
+
"model.layers.43.input_layernorm.weight": "model-00010-of-00014.safetensors",
|
| 465 |
+
"model.layers.43.mlp.down_proj.weight": "model-00010-of-00014.safetensors",
|
| 466 |
+
"model.layers.43.mlp.gate_proj.weight": "model-00009-of-00014.safetensors",
|
| 467 |
+
"model.layers.43.mlp.up_proj.weight": "model-00010-of-00014.safetensors",
|
| 468 |
+
"model.layers.43.post_attention_layernorm.weight": "model-00010-of-00014.safetensors",
|
| 469 |
+
"model.layers.43.self_attn.k_proj.bias": "model-00009-of-00014.safetensors",
|
| 470 |
+
"model.layers.43.self_attn.k_proj.weight": "model-00009-of-00014.safetensors",
|
| 471 |
+
"model.layers.43.self_attn.o_proj.weight": "model-00009-of-00014.safetensors",
|
| 472 |
+
"model.layers.43.self_attn.q_proj.bias": "model-00009-of-00014.safetensors",
|
| 473 |
+
"model.layers.43.self_attn.q_proj.weight": "model-00009-of-00014.safetensors",
|
| 474 |
+
"model.layers.43.self_attn.v_proj.bias": "model-00009-of-00014.safetensors",
|
| 475 |
+
"model.layers.43.self_attn.v_proj.weight": "model-00009-of-00014.safetensors",
|
| 476 |
+
"model.layers.44.input_layernorm.weight": "model-00010-of-00014.safetensors",
|
| 477 |
+
"model.layers.44.mlp.down_proj.weight": "model-00010-of-00014.safetensors",
|
| 478 |
+
"model.layers.44.mlp.gate_proj.weight": "model-00010-of-00014.safetensors",
|
| 479 |
+
"model.layers.44.mlp.up_proj.weight": "model-00010-of-00014.safetensors",
|
| 480 |
+
"model.layers.44.post_attention_layernorm.weight": "model-00010-of-00014.safetensors",
|
| 481 |
+
"model.layers.44.self_attn.k_proj.bias": "model-00010-of-00014.safetensors",
|
| 482 |
+
"model.layers.44.self_attn.k_proj.weight": "model-00010-of-00014.safetensors",
|
| 483 |
+
"model.layers.44.self_attn.o_proj.weight": "model-00010-of-00014.safetensors",
|
| 484 |
+
"model.layers.44.self_attn.q_proj.bias": "model-00010-of-00014.safetensors",
|
| 485 |
+
"model.layers.44.self_attn.q_proj.weight": "model-00010-of-00014.safetensors",
|
| 486 |
+
"model.layers.44.self_attn.v_proj.bias": "model-00010-of-00014.safetensors",
|
| 487 |
+
"model.layers.44.self_attn.v_proj.weight": "model-00010-of-00014.safetensors",
|
| 488 |
+
"model.layers.45.input_layernorm.weight": "model-00010-of-00014.safetensors",
|
| 489 |
+
"model.layers.45.mlp.down_proj.weight": "model-00010-of-00014.safetensors",
|
| 490 |
+
"model.layers.45.mlp.gate_proj.weight": "model-00010-of-00014.safetensors",
|
| 491 |
+
"model.layers.45.mlp.up_proj.weight": "model-00010-of-00014.safetensors",
|
| 492 |
+
"model.layers.45.post_attention_layernorm.weight": "model-00010-of-00014.safetensors",
|
| 493 |
+
"model.layers.45.self_attn.k_proj.bias": "model-00010-of-00014.safetensors",
|
| 494 |
+
"model.layers.45.self_attn.k_proj.weight": "model-00010-of-00014.safetensors",
|
| 495 |
+
"model.layers.45.self_attn.o_proj.weight": "model-00010-of-00014.safetensors",
|
| 496 |
+
"model.layers.45.self_attn.q_proj.bias": "model-00010-of-00014.safetensors",
|
| 497 |
+
"model.layers.45.self_attn.q_proj.weight": "model-00010-of-00014.safetensors",
|
| 498 |
+
"model.layers.45.self_attn.v_proj.bias": "model-00010-of-00014.safetensors",
|
| 499 |
+
"model.layers.45.self_attn.v_proj.weight": "model-00010-of-00014.safetensors",
|
| 500 |
+
"model.layers.46.input_layernorm.weight": "model-00010-of-00014.safetensors",
|
| 501 |
+
"model.layers.46.mlp.down_proj.weight": "model-00010-of-00014.safetensors",
|
| 502 |
+
"model.layers.46.mlp.gate_proj.weight": "model-00010-of-00014.safetensors",
|
| 503 |
+
"model.layers.46.mlp.up_proj.weight": "model-00010-of-00014.safetensors",
|
| 504 |
+
"model.layers.46.post_attention_layernorm.weight": "model-00010-of-00014.safetensors",
|
| 505 |
+
"model.layers.46.self_attn.k_proj.bias": "model-00010-of-00014.safetensors",
|
| 506 |
+
"model.layers.46.self_attn.k_proj.weight": "model-00010-of-00014.safetensors",
|
| 507 |
+
"model.layers.46.self_attn.o_proj.weight": "model-00010-of-00014.safetensors",
|
| 508 |
+
"model.layers.46.self_attn.q_proj.bias": "model-00010-of-00014.safetensors",
|
| 509 |
+
"model.layers.46.self_attn.q_proj.weight": "model-00010-of-00014.safetensors",
|
| 510 |
+
"model.layers.46.self_attn.v_proj.bias": "model-00010-of-00014.safetensors",
|
| 511 |
+
"model.layers.46.self_attn.v_proj.weight": "model-00010-of-00014.safetensors",
|
| 512 |
+
"model.layers.47.input_layernorm.weight": "model-00010-of-00014.safetensors",
|
| 513 |
+
"model.layers.47.mlp.down_proj.weight": "model-00010-of-00014.safetensors",
|
| 514 |
+
"model.layers.47.mlp.gate_proj.weight": "model-00010-of-00014.safetensors",
|
| 515 |
+
"model.layers.47.mlp.up_proj.weight": "model-00010-of-00014.safetensors",
|
| 516 |
+
"model.layers.47.post_attention_layernorm.weight": "model-00010-of-00014.safetensors",
|
| 517 |
+
"model.layers.47.self_attn.k_proj.bias": "model-00010-of-00014.safetensors",
|
| 518 |
+
"model.layers.47.self_attn.k_proj.weight": "model-00010-of-00014.safetensors",
|
| 519 |
+
"model.layers.47.self_attn.o_proj.weight": "model-00010-of-00014.safetensors",
|
| 520 |
+
"model.layers.47.self_attn.q_proj.bias": "model-00010-of-00014.safetensors",
|
| 521 |
+
"model.layers.47.self_attn.q_proj.weight": "model-00010-of-00014.safetensors",
|
| 522 |
+
"model.layers.47.self_attn.v_proj.bias": "model-00010-of-00014.safetensors",
|
| 523 |
+
"model.layers.47.self_attn.v_proj.weight": "model-00010-of-00014.safetensors",
|
| 524 |
+
"model.layers.48.input_layernorm.weight": "model-00011-of-00014.safetensors",
|
| 525 |
+
"model.layers.48.mlp.down_proj.weight": "model-00011-of-00014.safetensors",
|
| 526 |
+
"model.layers.48.mlp.gate_proj.weight": "model-00010-of-00014.safetensors",
|
| 527 |
+
"model.layers.48.mlp.up_proj.weight": "model-00011-of-00014.safetensors",
|
| 528 |
+
"model.layers.48.post_attention_layernorm.weight": "model-00011-of-00014.safetensors",
|
| 529 |
+
"model.layers.48.self_attn.k_proj.bias": "model-00010-of-00014.safetensors",
|
| 530 |
+
"model.layers.48.self_attn.k_proj.weight": "model-00010-of-00014.safetensors",
|
| 531 |
+
"model.layers.48.self_attn.o_proj.weight": "model-00010-of-00014.safetensors",
|
| 532 |
+
"model.layers.48.self_attn.q_proj.bias": "model-00010-of-00014.safetensors",
|
| 533 |
+
"model.layers.48.self_attn.q_proj.weight": "model-00010-of-00014.safetensors",
|
| 534 |
+
"model.layers.48.self_attn.v_proj.bias": "model-00010-of-00014.safetensors",
|
| 535 |
+
"model.layers.48.self_attn.v_proj.weight": "model-00010-of-00014.safetensors",
|
| 536 |
+
"model.layers.49.input_layernorm.weight": "model-00011-of-00014.safetensors",
|
| 537 |
+
"model.layers.49.mlp.down_proj.weight": "model-00011-of-00014.safetensors",
|
| 538 |
+
"model.layers.49.mlp.gate_proj.weight": "model-00011-of-00014.safetensors",
|
| 539 |
+
"model.layers.49.mlp.up_proj.weight": "model-00011-of-00014.safetensors",
|
| 540 |
+
"model.layers.49.post_attention_layernorm.weight": "model-00011-of-00014.safetensors",
|
| 541 |
+
"model.layers.49.self_attn.k_proj.bias": "model-00011-of-00014.safetensors",
|
| 542 |
+
"model.layers.49.self_attn.k_proj.weight": "model-00011-of-00014.safetensors",
|
| 543 |
+
"model.layers.49.self_attn.o_proj.weight": "model-00011-of-00014.safetensors",
|
| 544 |
+
"model.layers.49.self_attn.q_proj.bias": "model-00011-of-00014.safetensors",
|
| 545 |
+
"model.layers.49.self_attn.q_proj.weight": "model-00011-of-00014.safetensors",
|
| 546 |
+
"model.layers.49.self_attn.v_proj.bias": "model-00011-of-00014.safetensors",
|
| 547 |
+
"model.layers.49.self_attn.v_proj.weight": "model-00011-of-00014.safetensors",
|
| 548 |
+
"model.layers.5.input_layernorm.weight": "model-00002-of-00014.safetensors",
|
| 549 |
+
"model.layers.5.mlp.down_proj.weight": "model-00002-of-00014.safetensors",
|
| 550 |
+
"model.layers.5.mlp.gate_proj.weight": "model-00002-of-00014.safetensors",
|
| 551 |
+
"model.layers.5.mlp.up_proj.weight": "model-00002-of-00014.safetensors",
|
| 552 |
+
"model.layers.5.post_attention_layernorm.weight": "model-00002-of-00014.safetensors",
|
| 553 |
+
"model.layers.5.self_attn.k_proj.bias": "model-00002-of-00014.safetensors",
|
| 554 |
+
"model.layers.5.self_attn.k_proj.weight": "model-00002-of-00014.safetensors",
|
| 555 |
+
"model.layers.5.self_attn.o_proj.weight": "model-00002-of-00014.safetensors",
|
| 556 |
+
"model.layers.5.self_attn.q_proj.bias": "model-00002-of-00014.safetensors",
|
| 557 |
+
"model.layers.5.self_attn.q_proj.weight": "model-00002-of-00014.safetensors",
|
| 558 |
+
"model.layers.5.self_attn.v_proj.bias": "model-00002-of-00014.safetensors",
|
| 559 |
+
"model.layers.5.self_attn.v_proj.weight": "model-00002-of-00014.safetensors",
|
| 560 |
+
"model.layers.50.input_layernorm.weight": "model-00011-of-00014.safetensors",
|
| 561 |
+
"model.layers.50.mlp.down_proj.weight": "model-00011-of-00014.safetensors",
|
| 562 |
+
"model.layers.50.mlp.gate_proj.weight": "model-00011-of-00014.safetensors",
|
| 563 |
+
"model.layers.50.mlp.up_proj.weight": "model-00011-of-00014.safetensors",
|
| 564 |
+
"model.layers.50.post_attention_layernorm.weight": "model-00011-of-00014.safetensors",
|
| 565 |
+
"model.layers.50.self_attn.k_proj.bias": "model-00011-of-00014.safetensors",
|
| 566 |
+
"model.layers.50.self_attn.k_proj.weight": "model-00011-of-00014.safetensors",
|
| 567 |
+
"model.layers.50.self_attn.o_proj.weight": "model-00011-of-00014.safetensors",
|
| 568 |
+
"model.layers.50.self_attn.q_proj.bias": "model-00011-of-00014.safetensors",
|
| 569 |
+
"model.layers.50.self_attn.q_proj.weight": "model-00011-of-00014.safetensors",
|
| 570 |
+
"model.layers.50.self_attn.v_proj.bias": "model-00011-of-00014.safetensors",
|
| 571 |
+
"model.layers.50.self_attn.v_proj.weight": "model-00011-of-00014.safetensors",
|
| 572 |
+
"model.layers.51.input_layernorm.weight": "model-00011-of-00014.safetensors",
|
| 573 |
+
"model.layers.51.mlp.down_proj.weight": "model-00011-of-00014.safetensors",
|
| 574 |
+
"model.layers.51.mlp.gate_proj.weight": "model-00011-of-00014.safetensors",
|
| 575 |
+
"model.layers.51.mlp.up_proj.weight": "model-00011-of-00014.safetensors",
|
| 576 |
+
"model.layers.51.post_attention_layernorm.weight": "model-00011-of-00014.safetensors",
|
| 577 |
+
"model.layers.51.self_attn.k_proj.bias": "model-00011-of-00014.safetensors",
|
| 578 |
+
"model.layers.51.self_attn.k_proj.weight": "model-00011-of-00014.safetensors",
|
| 579 |
+
"model.layers.51.self_attn.o_proj.weight": "model-00011-of-00014.safetensors",
|
| 580 |
+
"model.layers.51.self_attn.q_proj.bias": "model-00011-of-00014.safetensors",
|
| 581 |
+
"model.layers.51.self_attn.q_proj.weight": "model-00011-of-00014.safetensors",
|
| 582 |
+
"model.layers.51.self_attn.v_proj.bias": "model-00011-of-00014.safetensors",
|
| 583 |
+
"model.layers.51.self_attn.v_proj.weight": "model-00011-of-00014.safetensors",
|
| 584 |
+
"model.layers.52.input_layernorm.weight": "model-00011-of-00014.safetensors",
|
| 585 |
+
"model.layers.52.mlp.down_proj.weight": "model-00011-of-00014.safetensors",
|
| 586 |
+
"model.layers.52.mlp.gate_proj.weight": "model-00011-of-00014.safetensors",
|
| 587 |
+
"model.layers.52.mlp.up_proj.weight": "model-00011-of-00014.safetensors",
|
| 588 |
+
"model.layers.52.post_attention_layernorm.weight": "model-00011-of-00014.safetensors",
|
| 589 |
+
"model.layers.52.self_attn.k_proj.bias": "model-00011-of-00014.safetensors",
|
| 590 |
+
"model.layers.52.self_attn.k_proj.weight": "model-00011-of-00014.safetensors",
|
| 591 |
+
"model.layers.52.self_attn.o_proj.weight": "model-00011-of-00014.safetensors",
|
| 592 |
+
"model.layers.52.self_attn.q_proj.bias": "model-00011-of-00014.safetensors",
|
| 593 |
+
"model.layers.52.self_attn.q_proj.weight": "model-00011-of-00014.safetensors",
|
| 594 |
+
"model.layers.52.self_attn.v_proj.bias": "model-00011-of-00014.safetensors",
|
| 595 |
+
"model.layers.52.self_attn.v_proj.weight": "model-00011-of-00014.safetensors",
|
| 596 |
+
"model.layers.53.input_layernorm.weight": "model-00012-of-00014.safetensors",
|
| 597 |
+
"model.layers.53.mlp.down_proj.weight": "model-00012-of-00014.safetensors",
|
| 598 |
+
"model.layers.53.mlp.gate_proj.weight": "model-00011-of-00014.safetensors",
|
| 599 |
+
"model.layers.53.mlp.up_proj.weight": "model-00012-of-00014.safetensors",
|
| 600 |
+
"model.layers.53.post_attention_layernorm.weight": "model-00012-of-00014.safetensors",
|
| 601 |
+
"model.layers.53.self_attn.k_proj.bias": "model-00011-of-00014.safetensors",
|
| 602 |
+
"model.layers.53.self_attn.k_proj.weight": "model-00011-of-00014.safetensors",
|
| 603 |
+
"model.layers.53.self_attn.o_proj.weight": "model-00011-of-00014.safetensors",
|
| 604 |
+
"model.layers.53.self_attn.q_proj.bias": "model-00011-of-00014.safetensors",
|
| 605 |
+
"model.layers.53.self_attn.q_proj.weight": "model-00011-of-00014.safetensors",
|
| 606 |
+
"model.layers.53.self_attn.v_proj.bias": "model-00011-of-00014.safetensors",
|
| 607 |
+
"model.layers.53.self_attn.v_proj.weight": "model-00011-of-00014.safetensors",
|
| 608 |
+
"model.layers.54.input_layernorm.weight": "model-00012-of-00014.safetensors",
|
| 609 |
+
"model.layers.54.mlp.down_proj.weight": "model-00012-of-00014.safetensors",
|
| 610 |
+
"model.layers.54.mlp.gate_proj.weight": "model-00012-of-00014.safetensors",
|
| 611 |
+
"model.layers.54.mlp.up_proj.weight": "model-00012-of-00014.safetensors",
|
| 612 |
+
"model.layers.54.post_attention_layernorm.weight": "model-00012-of-00014.safetensors",
|
| 613 |
+
"model.layers.54.self_attn.k_proj.bias": "model-00012-of-00014.safetensors",
|
| 614 |
+
"model.layers.54.self_attn.k_proj.weight": "model-00012-of-00014.safetensors",
|
| 615 |
+
"model.layers.54.self_attn.o_proj.weight": "model-00012-of-00014.safetensors",
|
| 616 |
+
"model.layers.54.self_attn.q_proj.bias": "model-00012-of-00014.safetensors",
|
| 617 |
+
"model.layers.54.self_attn.q_proj.weight": "model-00012-of-00014.safetensors",
|
| 618 |
+
"model.layers.54.self_attn.v_proj.bias": "model-00012-of-00014.safetensors",
|
| 619 |
+
"model.layers.54.self_attn.v_proj.weight": "model-00012-of-00014.safetensors",
|
| 620 |
+
"model.layers.55.input_layernorm.weight": "model-00012-of-00014.safetensors",
|
| 621 |
+
"model.layers.55.mlp.down_proj.weight": "model-00012-of-00014.safetensors",
|
| 622 |
+
"model.layers.55.mlp.gate_proj.weight": "model-00012-of-00014.safetensors",
|
| 623 |
+
"model.layers.55.mlp.up_proj.weight": "model-00012-of-00014.safetensors",
|
| 624 |
+
"model.layers.55.post_attention_layernorm.weight": "model-00012-of-00014.safetensors",
|
| 625 |
+
"model.layers.55.self_attn.k_proj.bias": "model-00012-of-00014.safetensors",
|
| 626 |
+
"model.layers.55.self_attn.k_proj.weight": "model-00012-of-00014.safetensors",
|
| 627 |
+
"model.layers.55.self_attn.o_proj.weight": "model-00012-of-00014.safetensors",
|
| 628 |
+
"model.layers.55.self_attn.q_proj.bias": "model-00012-of-00014.safetensors",
|
| 629 |
+
"model.layers.55.self_attn.q_proj.weight": "model-00012-of-00014.safetensors",
|
| 630 |
+
"model.layers.55.self_attn.v_proj.bias": "model-00012-of-00014.safetensors",
|
| 631 |
+
"model.layers.55.self_attn.v_proj.weight": "model-00012-of-00014.safetensors",
|
| 632 |
+
"model.layers.56.input_layernorm.weight": "model-00012-of-00014.safetensors",
|
| 633 |
+
"model.layers.56.mlp.down_proj.weight": "model-00012-of-00014.safetensors",
|
| 634 |
+
"model.layers.56.mlp.gate_proj.weight": "model-00012-of-00014.safetensors",
|
| 635 |
+
"model.layers.56.mlp.up_proj.weight": "model-00012-of-00014.safetensors",
|
| 636 |
+
"model.layers.56.post_attention_layernorm.weight": "model-00012-of-00014.safetensors",
|
| 637 |
+
"model.layers.56.self_attn.k_proj.bias": "model-00012-of-00014.safetensors",
|
| 638 |
+
"model.layers.56.self_attn.k_proj.weight": "model-00012-of-00014.safetensors",
|
| 639 |
+
"model.layers.56.self_attn.o_proj.weight": "model-00012-of-00014.safetensors",
|
| 640 |
+
"model.layers.56.self_attn.q_proj.bias": "model-00012-of-00014.safetensors",
|
| 641 |
+
"model.layers.56.self_attn.q_proj.weight": "model-00012-of-00014.safetensors",
|
| 642 |
+
"model.layers.56.self_attn.v_proj.bias": "model-00012-of-00014.safetensors",
|
| 643 |
+
"model.layers.56.self_attn.v_proj.weight": "model-00012-of-00014.safetensors",
|
| 644 |
+
"model.layers.57.input_layernorm.weight": "model-00012-of-00014.safetensors",
|
| 645 |
+
"model.layers.57.mlp.down_proj.weight": "model-00012-of-00014.safetensors",
|
| 646 |
+
"model.layers.57.mlp.gate_proj.weight": "model-00012-of-00014.safetensors",
|
| 647 |
+
"model.layers.57.mlp.up_proj.weight": "model-00012-of-00014.safetensors",
|
| 648 |
+
"model.layers.57.post_attention_layernorm.weight": "model-00012-of-00014.safetensors",
|
| 649 |
+
"model.layers.57.self_attn.k_proj.bias": "model-00012-of-00014.safetensors",
|
| 650 |
+
"model.layers.57.self_attn.k_proj.weight": "model-00012-of-00014.safetensors",
|
| 651 |
+
"model.layers.57.self_attn.o_proj.weight": "model-00012-of-00014.safetensors",
|
| 652 |
+
"model.layers.57.self_attn.q_proj.bias": "model-00012-of-00014.safetensors",
|
| 653 |
+
"model.layers.57.self_attn.q_proj.weight": "model-00012-of-00014.safetensors",
|
| 654 |
+
"model.layers.57.self_attn.v_proj.bias": "model-00012-of-00014.safetensors",
|
| 655 |
+
"model.layers.57.self_attn.v_proj.weight": "model-00012-of-00014.safetensors",
|
| 656 |
+
"model.layers.58.input_layernorm.weight": "model-00013-of-00014.safetensors",
|
| 657 |
+
"model.layers.58.mlp.down_proj.weight": "model-00013-of-00014.safetensors",
|
| 658 |
+
"model.layers.58.mlp.gate_proj.weight": "model-00012-of-00014.safetensors",
|
| 659 |
+
"model.layers.58.mlp.up_proj.weight": "model-00013-of-00014.safetensors",
|
| 660 |
+
"model.layers.58.post_attention_layernorm.weight": "model-00013-of-00014.safetensors",
|
| 661 |
+
"model.layers.58.self_attn.k_proj.bias": "model-00012-of-00014.safetensors",
|
| 662 |
+
"model.layers.58.self_attn.k_proj.weight": "model-00012-of-00014.safetensors",
|
| 663 |
+
"model.layers.58.self_attn.o_proj.weight": "model-00012-of-00014.safetensors",
|
| 664 |
+
"model.layers.58.self_attn.q_proj.bias": "model-00012-of-00014.safetensors",
|
| 665 |
+
"model.layers.58.self_attn.q_proj.weight": "model-00012-of-00014.safetensors",
|
| 666 |
+
"model.layers.58.self_attn.v_proj.bias": "model-00012-of-00014.safetensors",
|
| 667 |
+
"model.layers.58.self_attn.v_proj.weight": "model-00012-of-00014.safetensors",
|
| 668 |
+
"model.layers.59.input_layernorm.weight": "model-00013-of-00014.safetensors",
|
| 669 |
+
"model.layers.59.mlp.down_proj.weight": "model-00013-of-00014.safetensors",
|
| 670 |
+
"model.layers.59.mlp.gate_proj.weight": "model-00013-of-00014.safetensors",
|
| 671 |
+
"model.layers.59.mlp.up_proj.weight": "model-00013-of-00014.safetensors",
|
| 672 |
+
"model.layers.59.post_attention_layernorm.weight": "model-00013-of-00014.safetensors",
|
| 673 |
+
"model.layers.59.self_attn.k_proj.bias": "model-00013-of-00014.safetensors",
|
| 674 |
+
"model.layers.59.self_attn.k_proj.weight": "model-00013-of-00014.safetensors",
|
| 675 |
+
"model.layers.59.self_attn.o_proj.weight": "model-00013-of-00014.safetensors",
|
| 676 |
+
"model.layers.59.self_attn.q_proj.bias": "model-00013-of-00014.safetensors",
|
| 677 |
+
"model.layers.59.self_attn.q_proj.weight": "model-00013-of-00014.safetensors",
|
| 678 |
+
"model.layers.59.self_attn.v_proj.bias": "model-00013-of-00014.safetensors",
|
| 679 |
+
"model.layers.59.self_attn.v_proj.weight": "model-00013-of-00014.safetensors",
|
| 680 |
+
"model.layers.6.input_layernorm.weight": "model-00002-of-00014.safetensors",
|
| 681 |
+
"model.layers.6.mlp.down_proj.weight": "model-00002-of-00014.safetensors",
|
| 682 |
+
"model.layers.6.mlp.gate_proj.weight": "model-00002-of-00014.safetensors",
|
| 683 |
+
"model.layers.6.mlp.up_proj.weight": "model-00002-of-00014.safetensors",
|
| 684 |
+
"model.layers.6.post_attention_layernorm.weight": "model-00002-of-00014.safetensors",
|
| 685 |
+
"model.layers.6.self_attn.k_proj.bias": "model-00002-of-00014.safetensors",
|
| 686 |
+
"model.layers.6.self_attn.k_proj.weight": "model-00002-of-00014.safetensors",
|
| 687 |
+
"model.layers.6.self_attn.o_proj.weight": "model-00002-of-00014.safetensors",
|
| 688 |
+
"model.layers.6.self_attn.q_proj.bias": "model-00002-of-00014.safetensors",
|
| 689 |
+
"model.layers.6.self_attn.q_proj.weight": "model-00002-of-00014.safetensors",
|
| 690 |
+
"model.layers.6.self_attn.v_proj.bias": "model-00002-of-00014.safetensors",
|
| 691 |
+
"model.layers.6.self_attn.v_proj.weight": "model-00002-of-00014.safetensors",
|
| 692 |
+
"model.layers.60.input_layernorm.weight": "model-00013-of-00014.safetensors",
|
| 693 |
+
"model.layers.60.mlp.down_proj.weight": "model-00013-of-00014.safetensors",
|
| 694 |
+
"model.layers.60.mlp.gate_proj.weight": "model-00013-of-00014.safetensors",
|
| 695 |
+
"model.layers.60.mlp.up_proj.weight": "model-00013-of-00014.safetensors",
|
| 696 |
+
"model.layers.60.post_attention_layernorm.weight": "model-00013-of-00014.safetensors",
|
| 697 |
+
"model.layers.60.self_attn.k_proj.bias": "model-00013-of-00014.safetensors",
|
| 698 |
+
"model.layers.60.self_attn.k_proj.weight": "model-00013-of-00014.safetensors",
|
| 699 |
+
"model.layers.60.self_attn.o_proj.weight": "model-00013-of-00014.safetensors",
|
| 700 |
+
"model.layers.60.self_attn.q_proj.bias": "model-00013-of-00014.safetensors",
|
| 701 |
+
"model.layers.60.self_attn.q_proj.weight": "model-00013-of-00014.safetensors",
|
| 702 |
+
"model.layers.60.self_attn.v_proj.bias": "model-00013-of-00014.safetensors",
|
| 703 |
+
"model.layers.60.self_attn.v_proj.weight": "model-00013-of-00014.safetensors",
|
| 704 |
+
"model.layers.61.input_layernorm.weight": "model-00013-of-00014.safetensors",
|
| 705 |
+
"model.layers.61.mlp.down_proj.weight": "model-00013-of-00014.safetensors",
|
| 706 |
+
"model.layers.61.mlp.gate_proj.weight": "model-00013-of-00014.safetensors",
|
| 707 |
+
"model.layers.61.mlp.up_proj.weight": "model-00013-of-00014.safetensors",
|
| 708 |
+
"model.layers.61.post_attention_layernorm.weight": "model-00013-of-00014.safetensors",
|
| 709 |
+
"model.layers.61.self_attn.k_proj.bias": "model-00013-of-00014.safetensors",
|
| 710 |
+
"model.layers.61.self_attn.k_proj.weight": "model-00013-of-00014.safetensors",
|
| 711 |
+
"model.layers.61.self_attn.o_proj.weight": "model-00013-of-00014.safetensors",
|
| 712 |
+
"model.layers.61.self_attn.q_proj.bias": "model-00013-of-00014.safetensors",
|
| 713 |
+
"model.layers.61.self_attn.q_proj.weight": "model-00013-of-00014.safetensors",
|
| 714 |
+
"model.layers.61.self_attn.v_proj.bias": "model-00013-of-00014.safetensors",
|
| 715 |
+
"model.layers.61.self_attn.v_proj.weight": "model-00013-of-00014.safetensors",
|
| 716 |
+
"model.layers.62.input_layernorm.weight": "model-00013-of-00014.safetensors",
|
| 717 |
+
"model.layers.62.mlp.down_proj.weight": "model-00013-of-00014.safetensors",
|
| 718 |
+
"model.layers.62.mlp.gate_proj.weight": "model-00013-of-00014.safetensors",
|
| 719 |
+
"model.layers.62.mlp.up_proj.weight": "model-00013-of-00014.safetensors",
|
| 720 |
+
"model.layers.62.post_attention_layernorm.weight": "model-00013-of-00014.safetensors",
|
| 721 |
+
"model.layers.62.self_attn.k_proj.bias": "model-00013-of-00014.safetensors",
|
| 722 |
+
"model.layers.62.self_attn.k_proj.weight": "model-00013-of-00014.safetensors",
|
| 723 |
+
"model.layers.62.self_attn.o_proj.weight": "model-00013-of-00014.safetensors",
|
| 724 |
+
"model.layers.62.self_attn.q_proj.bias": "model-00013-of-00014.safetensors",
|
| 725 |
+
"model.layers.62.self_attn.q_proj.weight": "model-00013-of-00014.safetensors",
|
| 726 |
+
"model.layers.62.self_attn.v_proj.bias": "model-00013-of-00014.safetensors",
|
| 727 |
+
"model.layers.62.self_attn.v_proj.weight": "model-00013-of-00014.safetensors",
|
| 728 |
+
"model.layers.63.input_layernorm.weight": "model-00014-of-00014.safetensors",
|
| 729 |
+
"model.layers.63.mlp.down_proj.weight": "model-00014-of-00014.safetensors",
|
| 730 |
+
"model.layers.63.mlp.gate_proj.weight": "model-00013-of-00014.safetensors",
|
| 731 |
+
"model.layers.63.mlp.up_proj.weight": "model-00014-of-00014.safetensors",
|
| 732 |
+
"model.layers.63.post_attention_layernorm.weight": "model-00014-of-00014.safetensors",
|
| 733 |
+
"model.layers.63.self_attn.k_proj.bias": "model-00013-of-00014.safetensors",
|
| 734 |
+
"model.layers.63.self_attn.k_proj.weight": "model-00013-of-00014.safetensors",
|
| 735 |
+
"model.layers.63.self_attn.o_proj.weight": "model-00013-of-00014.safetensors",
|
| 736 |
+
"model.layers.63.self_attn.q_proj.bias": "model-00013-of-00014.safetensors",
|
| 737 |
+
"model.layers.63.self_attn.q_proj.weight": "model-00013-of-00014.safetensors",
|
| 738 |
+
"model.layers.63.self_attn.v_proj.bias": "model-00013-of-00014.safetensors",
|
| 739 |
+
"model.layers.63.self_attn.v_proj.weight": "model-00013-of-00014.safetensors",
|
| 740 |
+
"model.layers.7.input_layernorm.weight": "model-00002-of-00014.safetensors",
|
| 741 |
+
"model.layers.7.mlp.down_proj.weight": "model-00002-of-00014.safetensors",
|
| 742 |
+
"model.layers.7.mlp.gate_proj.weight": "model-00002-of-00014.safetensors",
|
| 743 |
+
"model.layers.7.mlp.up_proj.weight": "model-00002-of-00014.safetensors",
|
| 744 |
+
"model.layers.7.post_attention_layernorm.weight": "model-00002-of-00014.safetensors",
|
| 745 |
+
"model.layers.7.self_attn.k_proj.bias": "model-00002-of-00014.safetensors",
|
| 746 |
+
"model.layers.7.self_attn.k_proj.weight": "model-00002-of-00014.safetensors",
|
| 747 |
+
"model.layers.7.self_attn.o_proj.weight": "model-00002-of-00014.safetensors",
|
| 748 |
+
"model.layers.7.self_attn.q_proj.bias": "model-00002-of-00014.safetensors",
|
| 749 |
+
"model.layers.7.self_attn.q_proj.weight": "model-00002-of-00014.safetensors",
|
| 750 |
+
"model.layers.7.self_attn.v_proj.bias": "model-00002-of-00014.safetensors",
|
| 751 |
+
"model.layers.7.self_attn.v_proj.weight": "model-00002-of-00014.safetensors",
|
| 752 |
+
"model.layers.8.input_layernorm.weight": "model-00003-of-00014.safetensors",
|
| 753 |
+
"model.layers.8.mlp.down_proj.weight": "model-00003-of-00014.safetensors",
|
| 754 |
+
"model.layers.8.mlp.gate_proj.weight": "model-00002-of-00014.safetensors",
|
| 755 |
+
"model.layers.8.mlp.up_proj.weight": "model-00003-of-00014.safetensors",
|
| 756 |
+
"model.layers.8.post_attention_layernorm.weight": "model-00003-of-00014.safetensors",
|
| 757 |
+
"model.layers.8.self_attn.k_proj.bias": "model-00002-of-00014.safetensors",
|
| 758 |
+
"model.layers.8.self_attn.k_proj.weight": "model-00002-of-00014.safetensors",
|
| 759 |
+
"model.layers.8.self_attn.o_proj.weight": "model-00002-of-00014.safetensors",
|
| 760 |
+
"model.layers.8.self_attn.q_proj.bias": "model-00002-of-00014.safetensors",
|
| 761 |
+
"model.layers.8.self_attn.q_proj.weight": "model-00002-of-00014.safetensors",
|
| 762 |
+
"model.layers.8.self_attn.v_proj.bias": "model-00002-of-00014.safetensors",
|
| 763 |
+
"model.layers.8.self_attn.v_proj.weight": "model-00002-of-00014.safetensors",
|
| 764 |
+
"model.layers.9.input_layernorm.weight": "model-00003-of-00014.safetensors",
|
| 765 |
+
"model.layers.9.mlp.down_proj.weight": "model-00003-of-00014.safetensors",
|
| 766 |
+
"model.layers.9.mlp.gate_proj.weight": "model-00003-of-00014.safetensors",
|
| 767 |
+
"model.layers.9.mlp.up_proj.weight": "model-00003-of-00014.safetensors",
|
| 768 |
+
"model.layers.9.post_attention_layernorm.weight": "model-00003-of-00014.safetensors",
|
| 769 |
+
"model.layers.9.self_attn.k_proj.bias": "model-00003-of-00014.safetensors",
|
| 770 |
+
"model.layers.9.self_attn.k_proj.weight": "model-00003-of-00014.safetensors",
|
| 771 |
+
"model.layers.9.self_attn.o_proj.weight": "model-00003-of-00014.safetensors",
|
| 772 |
+
"model.layers.9.self_attn.q_proj.bias": "model-00003-of-00014.safetensors",
|
| 773 |
+
"model.layers.9.self_attn.q_proj.weight": "model-00003-of-00014.safetensors",
|
| 774 |
+
"model.layers.9.self_attn.v_proj.bias": "model-00003-of-00014.safetensors",
|
| 775 |
+
"model.layers.9.self_attn.v_proj.weight": "model-00003-of-00014.safetensors",
|
| 776 |
+
"model.norm.weight": "model-00014-of-00014.safetensors"
|
| 777 |
+
}
|
| 778 |
+
}
|
sdlm_ckpt_final/sdlm_32b_bs4/modeling_sdlm.py
ADDED
@@ -0,0 +1,1590 @@
# coding=utf-8
# Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Qwen2 model."""
import inspect
import math
import copy
import warnings
from functools import partial
from typing import List, Optional, Tuple, Union

import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from transformers.activations import ACT2FN
from transformers.cache_utils import Cache, DynamicCache
from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import (
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    is_flash_attn_2_available,
    is_flash_attn_greater_or_equal_2_10,
    logging,
    replace_return_docstrings,
)
from .configuration_sdlm import SDLMQwen2Config

if is_flash_attn_2_available():
    from flash_attn import flash_attn_func, flash_attn_varlen_func
    from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input  # noqa

    _flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters)


logger = logging.get_logger(__name__)

# Flex Attention Supported
try:
    from torch.nn.attention.flex_attention import flex_attention, create_block_mask
    FLEX_ATTN_AVAILABLE = True
    torch._dynamo.config.suppress_errors = False
    torch._dynamo.config.verbose = True
    torch._dynamo.config.dynamic_shapes = True

except:
    FLEX_ATTN_AVAILABLE = False


_CHECKPOINT_FOR_DOC = "Qwen/Qwen2-7B-beta"
_CONFIG_FOR_DOC = "SDLMQwen2Config"

QWEN2_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "Qwen/Qwen2-7B-beta",
    # See all Qwen2 models at https://huggingface.co/models?filter=qwen2
]

import pandas as pd
from .attn_mask_utils import (
    find_prefix_seq_length_by_pe,
    update_causal_mask_with_pad_non_visible_2d,
    update_causal_mask_for_one_gen_window_2d,
    create_block_diff_mask_by_pe_1d,
    create_block_diff_mask_by_pe_4d,
    find_pred_pos_from_input_ids
)

# Copied from transformers.models.llama.modeling_llama._get_unpad_data
def _get_unpad_data(attention_mask):
    seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
    max_seqlen_in_batch = seqlens_in_batch.max().item()
    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0))
    return (
        indices,
        cu_seqlens,
        max_seqlen_in_batch,
    )

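# NOTE (illustrative sketch added by the editor, not part of the original checkpoint code):
# for a padding mask such as
#     attention_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]])
# `_get_unpad_data` returns the flattened indices of the 5 non-pad tokens,
# cumulative sequence lengths cu_seqlens = [0, 3, 5], and max_seqlen_in_batch = 3,
# which is the varlen layout that `flash_attn_varlen_func` expects.
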
# Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Qwen2
class Qwen2RMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        Qwen2RMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)


# Copied from transformers.models.llama.modeling_llama.LlamaRotaryEmbedding with Llama->Qwen2
class Qwen2RotaryEmbedding(nn.Module):
    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
        super().__init__()

        self.dim = dim
        self.max_position_embeddings = max_position_embeddings
        self.base = base
        inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
        self.register_buffer("inv_freq", inv_freq, persistent=False)

        # Build here to make `torch.jit.trace` work.
        self._set_cos_sin_cache(
            seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
        )

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        self.max_seq_len_cached = seq_len
        t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)

        freqs = torch.outer(t, self.inv_freq)
        # Different from paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
        self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)

    def forward(self, x, seq_len=None):
        # x: [bs, num_attention_heads, seq_len, head_size]
        if seq_len > self.max_seq_len_cached:
            self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)

        return (
            self.cos_cached[:seq_len].to(dtype=x.dtype),
            self.sin_cached[:seq_len].to(dtype=x.dtype),
        )


# Copied from transformers.models.llama.modeling_llama.rotate_half
def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


# Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb
def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`):
            The position indices of the tokens corresponding to the query and key tensors. For example, this can be
            used to pass offsetted position ids when working with a KV-cache.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos[position_ids].unsqueeze(unsqueeze_dim)
    sin = sin[position_ids].unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed

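# NOTE (illustrative usage sketch added by the editor, not part of the original file):
# the rotary cache and the embedding application are typically combined as
#     rotary_emb = Qwen2RotaryEmbedding(dim=head_dim, max_position_embeddings=4096)
#     cos, sin = rotary_emb(value_states, seq_len=kv_seq_len)
#     q, k = apply_rotary_pos_emb(q, k, cos, sin, position_ids)
# where q/k are [batch, heads, seq_len, head_dim] and position_ids index into the
# cached cos/sin tables, so offset positions (e.g. with a KV cache) only require
# shifted position_ids rather than a recomputed cache.
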
# Copied from transformers.models.mistral.modeling_mistral.MistralMLP with Mistral->Qwen2
class Qwen2MLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))


# Copied from transformers.models.llama.modeling_llama.repeat_kv
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)

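# NOTE (illustrative shape example added by the editor, not part of the original file):
# with, say, 4 KV heads and 28 query heads (n_rep = 7), `repeat_kv` expands keys/values
# from [batch, 4, seq_len, head_dim] to [batch, 28, seq_len, head_dim], so grouped-query
# attention can then be computed with an ordinary per-head matmul.
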
class Qwen2Attention(nn.Module):
    """
    Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer
    and "Generating Long Sequences with Sparse Transformers".
    """

    def __init__(self, config: SDLMQwen2Config, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        if layer_idx is None:
            logger.warning_once(
                f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will "
                "lead to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
                "when creating this class."
            )

        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.num_key_value_heads = config.num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.max_position_embeddings = config.max_position_embeddings
        self.rope_theta = config.rope_theta
        self.is_causal = True
        self.attention_dropout = config.attention_dropout

        if (self.head_dim * self.num_heads) != self.hidden_size:
            raise ValueError(
                f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
                f" and `num_heads`: {self.num_heads})."
            )
        self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=True)
        self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
        self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
        self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)

        self.rotary_emb = Qwen2RotaryEmbedding(
            self.head_dim,
            max_position_embeddings=self.max_position_embeddings,
            base=self.rope_theta,
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        if "padding_mask" in kwargs:
            warnings.warn(
                "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
            )
        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            if self.layer_idx is None:
                raise ValueError(
                    f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
                    "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
                    "with a layer index."
                )
            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            cache_kwargs = {"sin": sin, "cos": cos}  # Specific to RoPE models
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # repeat k/v heads if n_kv_heads < n_heads
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)

        if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
                f" {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
                )

            attn_weights = attn_weights + attention_mask

        # upcast attention to fp32
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
        attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
        attn_output = torch.matmul(attn_weights, value_states)

        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)

        attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value

class Qwen2FlashAttention2(Qwen2Attention):
    """
    Qwen2 flash attention module, following Qwen2 attention module. This module inherits from `Qwen2Attention`
    as the weights of the module stays untouched. The only required change would be on the forward pass
    where it needs to correctly call the public API of flash attention and deal with padding tokens
    in case the input contains any of them. Additionally, for sliding window attention, we apply SWA only to the bottom
    config.max_window_layers layers.
    """

    # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
        # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
        # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        **kwargs,
    ):
        if "padding_mask" in kwargs:
            warnings.warn(
                "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
            )

            # overwrite attention_mask with padding_mask
            attention_mask = kwargs.pop("padding_mask")
        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            if self.layer_idx is None:
                raise ValueError(
                    f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
                    "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
                    "with a layer index."
                )
            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)

        # Because the input can be padded, the absolute sequence length depends on the max position id.
        rotary_seq_len = max(kv_seq_len, position_ids[:, -1].max().item()) + 1
        cos, sin = self.rotary_emb(value_states, seq_len=rotary_seq_len)

        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        use_sliding_windows = (
            _flash_supports_window_size
            and getattr(self.config, "sliding_window", None) is not None
            and kv_seq_len > self.config.sliding_window
            and self.config.use_sliding_window
        )

        if not _flash_supports_window_size:
            logger.warning_once(
                "The current flash attention version does not support sliding window attention, for a more memory efficient implementation"
                " make sure to upgrade flash-attn library."
            )

        if past_key_value is not None:
            # Activate slicing cache only if the config has a value `sliding_windows` attribute
            cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0
            if (
                getattr(self.config, "sliding_window", None) is not None
                and kv_seq_len > self.config.sliding_window
                and cache_has_contents
            ):
                slicing_tokens = 1 - self.config.sliding_window

                past_key = past_key_value[self.layer_idx][0]
                past_value = past_key_value[self.layer_idx][1]

                past_key = past_key[:, :, slicing_tokens:, :].contiguous()
                past_value = past_value[:, :, slicing_tokens:, :].contiguous()

                if past_key.shape[-2] != self.config.sliding_window - 1:
                    raise ValueError(
                        f"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got"
                        f" {past_key.shape}"
                    )

                if attention_mask is not None:
                    attention_mask = attention_mask[:, slicing_tokens:]
                    attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1)

            cache_kwargs = {"sin": sin, "cos": cos}  # Specific to RoPE models
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # repeat k/v heads if n_kv_heads < n_heads
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)
        dropout_rate = 0.0 if not self.training else self.attention_dropout

        # In PEFT, usually we cast the layer norms in float32 for training stability reasons
        # therefore the input hidden states gets silently casted in float32. Hence, we need
        # cast them back in float16 just to be sure everything works as expected.
        input_dtype = query_states.dtype
        if input_dtype == torch.float32:
            if torch.is_autocast_enabled():
                target_dtype = torch.get_autocast_gpu_dtype()
            # Handle the case where the model is quantized
            elif hasattr(self.config, "_pre_quantization_dtype"):
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = self.q_proj.weight.dtype

            logger.warning_once(
                f"The input hidden states seems to be silently casted in float32, this might be related to"
                f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
                f" {target_dtype}."
            )

            query_states = query_states.to(target_dtype)
            key_states = key_states.to(target_dtype)
            value_states = value_states.to(target_dtype)

        # Reashape to the expected shape for Flash Attention
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        attn_output = self._flash_attention_forward(
            query_states,
            key_states,
            value_states,
            attention_mask,
            q_len,
            dropout=dropout_rate,
            use_sliding_windows=use_sliding_windows,
        )

        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
        attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value

    def _flash_attention_forward(
        self,
        query_states,
        key_states,
        value_states,
        attention_mask,
        query_length,
        dropout=0.0,
        softmax_scale=None,
        use_sliding_windows=False,
    ):
        """
        Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
        first unpad the input, then computes the attention scores and pad the final attention scores.

        Args:
            query_states (`torch.Tensor`):
                Input query states to be passed to Flash Attention API
            key_states (`torch.Tensor`):
                Input key states to be passed to Flash Attention API
            value_states (`torch.Tensor`):
                Input value states to be passed to Flash Attention API
            attention_mask (`torch.Tensor`):
                The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
                position of padding tokens and 1 for the position of non-padding tokens.
            dropout (`int`, *optional*):
                Attention dropout
            softmax_scale (`float`, *optional*):
                The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
            use_sliding_windows (`bool`, *optional*):
                Whether to activate sliding window attention.
        """
        if not self._flash_attn_uses_top_left_mask:
            causal = self.is_causal
        else:
            # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
            causal = self.is_causal and query_length != 1

        # Decide whether to use SWA or not by layer index.
        if use_sliding_windows and self.layer_idx >= self.config.max_window_layers:
            use_sliding_windows = False

        # Contains at least one padding token in the sequence
        if attention_mask is not None:
            batch_size = query_states.shape[0]
            query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
                query_states, key_states, value_states, attention_mask, query_length
            )

            cu_seqlens_q, cu_seqlens_k = cu_seq_lens
            max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens

            if not use_sliding_windows:
                attn_output_unpad = flash_attn_varlen_func(
                    query_states,
                    key_states,
                    value_states,
                    cu_seqlens_q=cu_seqlens_q,
                    cu_seqlens_k=cu_seqlens_k,
                    max_seqlen_q=max_seqlen_in_batch_q,
                    max_seqlen_k=max_seqlen_in_batch_k,
                    dropout_p=dropout,
                    softmax_scale=softmax_scale,
                    causal=causal,
                )
            else:
                attn_output_unpad = flash_attn_varlen_func(
                    query_states,
                    key_states,
                    value_states,
                    cu_seqlens_q=cu_seqlens_q,
                    cu_seqlens_k=cu_seqlens_k,
                    max_seqlen_q=max_seqlen_in_batch_q,
                    max_seqlen_k=max_seqlen_in_batch_k,
                    dropout_p=dropout,
                    softmax_scale=softmax_scale,
                    causal=causal,
                    window_size=(self.config.sliding_window, self.config.sliding_window),
                )

            attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
        else:
            if not use_sliding_windows:
                attn_output = flash_attn_func(
                    query_states,
                    key_states,
                    value_states,
                    dropout,
                    softmax_scale=softmax_scale,
                    causal=causal,
                )
            else:
                attn_output = flash_attn_func(
                    query_states,
                    key_states,
                    value_states,
                    dropout,
                    softmax_scale=softmax_scale,
                    causal=causal,
                    window_size=(self.config.sliding_window, self.config.sliding_window),
                )

        return attn_output

    # Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2._upad_input
    def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
        batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape

        # On the first iteration we need to properly re-create the padding mask
        # by slicing it on the proper place
        if kv_seq_len != attention_mask.shape[-1]:
            attention_mask_num_tokens = attention_mask.shape[-1]
            attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :]

        indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)

        key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
        value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)

        if query_length == kv_seq_len:
            query_layer = index_first_axis(
                query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k
            )
            cu_seqlens_q = cu_seqlens_k
            max_seqlen_in_batch_q = max_seqlen_in_batch_k
            indices_q = indices_k
        elif query_length == 1:
            max_seqlen_in_batch_q = 1
            cu_seqlens_q = torch.arange(
                batch_size + 1, dtype=torch.int32, device=query_layer.device
            )  # There is a memcpy here, that is very bad.
            indices_q = cu_seqlens_q[:-1]
            query_layer = query_layer.squeeze(1)
        else:
            # The -q_len: slice assumes left padding.
            attention_mask = attention_mask[:, -query_length:]
            query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)

        return (
            query_layer,
            key_layer,
            value_layer,
            indices_q,
            (cu_seqlens_q, cu_seqlens_k),
            (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
        )

# Copied from transformers.models.llama.modeling_llama.LlamaSdpaAttention with Llama->Qwen2
class Qwen2SdpaAttention(Qwen2Attention):
    """
    Qwen2 attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
    `Qwen2Attention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to
    SDPA API.
    """

    # Adapted from Qwen2Attention.forward
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        if output_attentions:
            # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
            logger.warning_once(
                "Qwen2Model is using Qwen2SdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
                'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
            )
            return super().forward(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_value=past_key_value,
                output_attentions=output_attentions,
                use_cache=use_cache,
            )

        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)

        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            cache_kwargs = {"sin": sin, "cos": cos}  # Specific to RoPE models
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
                )

        # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
        # Reference: https://github.com/pytorch/pytorch/issues/112577.
        if query_states.device.type == "cuda" and attention_mask is not None:
            query_states = query_states.contiguous()
            key_states = key_states.contiguous()
            value_states = value_states.contiguous()

        attn_output = torch.nn.functional.scaled_dot_product_attention(
            query_states,
            key_states,
            value_states,
            attn_mask=attention_mask,
            dropout_p=self.attention_dropout if self.training else 0.0,
            # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
            # is_causal=self.is_causal and attention_mask is None and q_len > 1,
            is_causal=False  # TODO
        )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)

        attn_output = self.o_proj(attn_output)

        return attn_output, None, past_key_value

class Qwen2SdpaAttentionGqa(Qwen2Attention):
    """
    Qwen2 attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
    `Qwen2Attention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to
    SDPA API.
    """

    # Adapted from Qwen2Attention.forward
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        if output_attentions:
            # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
            logger.warning_once(
                "Qwen2Model is using Qwen2SdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
                'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
            )
            return super().forward(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_value=past_key_value,
                output_attentions=output_attentions,
                use_cache=use_cache,
            )

        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)

        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            cache_kwargs = {"sin": sin, "cos": cos}  # Specific to RoPE models
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # key_states = repeat_kv(key_states, self.num_key_value_groups)
        # value_states = repeat_kv(value_states, self.num_key_value_groups)

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
                )

        # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
        # Reference: https://github.com/pytorch/pytorch/issues/112577.
        if query_states.device.type == "cuda" and attention_mask is not None:
            query_states = query_states.contiguous()
            key_states = key_states.contiguous()
            value_states = value_states.contiguous()

        with torch.backends.cuda.sdp_kernel(enable_flash=True,
                                            enable_math=True,
                                            enable_mem_efficient=False):

            attn_output = torch.nn.functional.scaled_dot_product_attention(
                query_states,
                key_states,
                value_states,
                attn_mask=attention_mask,
                enable_gqa=True,
                dropout_p=self.attention_dropout if self.training else 0.0,
                # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
                # is_causal=self.is_causal and attention_mask is None and q_len > 1,
                is_causal=False  # TODO
            )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)

        attn_output = self.o_proj(attn_output)

        return attn_output, None, past_key_value

| 831 |
+
# @torch.compile(fullgraph=True, mode="max-autotune-no-cudagraphs")
|
| 832 |
+
@torch.compile(fullgraph=False, dynamic=True)
|
| 833 |
+
def fused_flex_attention(q, k, v, mask=None):
|
| 834 |
+
return flex_attention(q, k, v, block_mask=mask)
|
| 835 |
+
|
| 836 |
+
|
| 837 |
+
class Qwen2FlexAttentionForTraining(Qwen2Attention):
|
| 838 |
+
def forward(
|
| 839 |
+
self,
|
| 840 |
+
hidden_states: torch.Tensor,
|
| 841 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 842 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 843 |
+
past_key_value: Optional[Cache] = None,
|
| 844 |
+
output_attentions: bool = False,
|
| 845 |
+
use_cache: bool = False,
|
| 846 |
+
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
|
| 847 |
+
if output_attentions:
|
| 848 |
+
# TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
|
| 849 |
+
logger.warning_once(
|
| 850 |
+
'output_attentions=True is not supported by the flex-attention path; falling back to the eager (manual) attention implementation.'
|
| 851 |
+
)
|
| 852 |
+
return super().forward(
|
| 853 |
+
hidden_states=hidden_states,
|
| 854 |
+
attention_mask=attention_mask,
|
| 855 |
+
position_ids=position_ids,
|
| 856 |
+
past_key_value=past_key_value,
|
| 857 |
+
output_attentions=output_attentions,
|
| 858 |
+
use_cache=use_cache,
|
| 859 |
+
)
|
| 860 |
+
|
| 861 |
+
bsz, q_len, _ = hidden_states.size()
|
| 862 |
+
|
| 863 |
+
query_states = self.q_proj(hidden_states)
|
| 864 |
+
key_states = self.k_proj(hidden_states)
|
| 865 |
+
value_states = self.v_proj(hidden_states)
|
| 866 |
+
|
| 867 |
+
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
|
| 868 |
+
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
| 869 |
+
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
| 870 |
+
|
| 871 |
+
kv_seq_len = key_states.shape[-2]
|
| 872 |
+
if past_key_value is not None:
|
| 873 |
+
if self.layer_idx is None:
|
| 874 |
+
raise ValueError(
|
| 875 |
+
f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
|
| 876 |
+
"for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
|
| 877 |
+
"with a layer index."
|
| 878 |
+
)
|
| 879 |
+
kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
|
| 880 |
+
cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
|
| 881 |
+
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
|
| 882 |
+
|
| 883 |
+
if past_key_value is not None:
|
| 884 |
+
cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
|
| 885 |
+
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
|
| 886 |
+
|
| 887 |
+
|
| 888 |
+
# repeat k/v heads if n_kv_heads < n_heads
|
| 889 |
+
key_states = repeat_kv(key_states, self.num_key_value_groups)
|
| 890 |
+
value_states = repeat_kv(value_states, self.num_key_value_groups)
|
| 891 |
+
|
| 892 |
+
# print(f'In flex attention\n'
|
| 893 |
+
# f'{query_states.shape=} {query_states.dtype=}\n'
|
| 894 |
+
# f'{key_states.shape=} {key_states.dtype=}\n'
|
| 895 |
+
# f'{value_states.shape=} {key_states.dtype=}\n'
|
| 896 |
+
# f'{attention_mask=}'
|
| 897 |
+
# )
|
| 898 |
+
|
| 899 |
+
attn_output = fused_flex_attention(
|
| 900 |
+
query_states,
|
| 901 |
+
key_states,
|
| 902 |
+
value_states,
|
| 903 |
+
mask=attention_mask
|
| 904 |
+
) # B, H_q, L, E_v
|
| 905 |
+
attn_output = attn_output.transpose(1, 2).contiguous() # B, L, H_q, E_V
|
| 906 |
+
attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) # B, L, H_dim
|
| 907 |
+
|
| 908 |
+
attn_output = self.o_proj(attn_output)
|
| 909 |
+
|
| 910 |
+
return attn_output, None, past_key_value
|
| 911 |
+
|
| 912 |
+
|
| 913 |
+
|
| 914 |
+
QWEN2_ATTENTION_CLASSES = {
|
| 915 |
+
"eager": Qwen2Attention,
|
| 916 |
+
# "flash_attention_2": Qwen2FlashAttention2,
|
| 917 |
+
"flash_attention_2": Qwen2FlexAttentionForTraining, # TODO replace flash attn to flex attn
|
| 918 |
+
"sdpa": Qwen2SdpaAttention,
|
| 919 |
+
}
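The dictionary above is what `config._attn_implementation` indexes into, so requesting `flash_attention_2` at load time actually selects the flex-attention training class. A hedged loading sketch (the checkpoint path is a placeholder, and it assumes the shipped `config.json` registers these classes for auto loading via `auto_map`):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt = "sdlm_ckpt_final/sdlm_3b_bs4"          # placeholder local path
tokenizer = AutoTokenizer.from_pretrained(ckpt, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    ckpt,
    attn_implementation="sdpa",               # or "eager" / "flash_attention_2" (routed to flex attention)
    trust_remote_code=True,
)
```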
|
| 920 |
+
|
| 921 |
+
|
| 922 |
+
class Qwen2DecoderLayer(nn.Module):
|
| 923 |
+
def __init__(self, config: SDLMQwen2Config, layer_idx: int):
|
| 924 |
+
super().__init__()
|
| 925 |
+
self.hidden_size = config.hidden_size
|
| 926 |
+
|
| 927 |
+
# if config.use_sliding_window and config._attn_implementation != "flash_attention_2":
|
| 928 |
+
# logger.warning_once(
|
| 929 |
+
# f"Sliding Window Attention is enabled but not implemented for `{config._attn_implementation}`; "
|
| 930 |
+
# "unexpected results may be encountered."
|
| 931 |
+
# )
|
| 932 |
+
if config._attn_implementation == 'flash_attention_2' and FLEX_ATTN_AVAILABLE is False:
|
| 933 |
+
logger.warning_once(
|
| 934 |
+
'FLEX_ATTN_AVAILABLE is False; falling back to the eager attention implementation.'
|
| 935 |
+
)
|
| 936 |
+
config._attn_implementation = 'eager'
|
| 937 |
+
|
| 938 |
+
self.self_attn = QWEN2_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx)
|
| 939 |
+
|
| 940 |
+
self.mlp = Qwen2MLP(config)
|
| 941 |
+
self.input_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
| 942 |
+
self.post_attention_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
| 943 |
+
|
| 944 |
+
def forward(
|
| 945 |
+
self,
|
| 946 |
+
hidden_states: torch.Tensor,
|
| 947 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 948 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 949 |
+
past_key_value: Optional[Tuple[torch.Tensor]] = None,
|
| 950 |
+
output_attentions: Optional[bool] = False,
|
| 951 |
+
use_cache: Optional[bool] = False,
|
| 952 |
+
**kwargs,
|
| 953 |
+
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
|
| 954 |
+
if "padding_mask" in kwargs:
|
| 955 |
+
warnings.warn(
|
| 956 |
+
"Passing `padding_mask` is deprecated and will be removed in v4.37. "
|
| 957 |
+
"Please make sure use `attention_mask` instead.`"
|
| 958 |
+
)
|
| 959 |
+
"""
|
| 960 |
+
Args:
|
| 961 |
+
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
|
| 962 |
+
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
|
| 963 |
+
`(batch, sequence_length)` where padding elements are indicated by 0.
|
| 964 |
+
output_attentions (`bool`, *optional*):
|
| 965 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
|
| 966 |
+
returned tensors for more detail.
|
| 967 |
+
use_cache (`bool`, *optional*):
|
| 968 |
+
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
|
| 969 |
+
(see `past_key_values`).
|
| 970 |
+
past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
|
| 971 |
+
"""
|
| 972 |
+
|
| 973 |
+
residual = hidden_states
|
| 974 |
+
|
| 975 |
+
hidden_states = self.input_layernorm(hidden_states)
|
| 976 |
+
|
| 977 |
+
# Self Attention
|
| 978 |
+
hidden_states, self_attn_weights, present_key_value = self.self_attn(
|
| 979 |
+
hidden_states=hidden_states,
|
| 980 |
+
attention_mask=attention_mask,
|
| 981 |
+
position_ids=position_ids,
|
| 982 |
+
past_key_value=past_key_value,
|
| 983 |
+
output_attentions=output_attentions,
|
| 984 |
+
use_cache=use_cache,
|
| 985 |
+
)
|
| 986 |
+
hidden_states = residual + hidden_states
|
| 987 |
+
|
| 988 |
+
# Fully Connected
|
| 989 |
+
residual = hidden_states
|
| 990 |
+
hidden_states = self.post_attention_layernorm(hidden_states)
|
| 991 |
+
hidden_states = self.mlp(hidden_states)
|
| 992 |
+
hidden_states = residual + hidden_states
|
| 993 |
+
|
| 994 |
+
outputs = (hidden_states,)
|
| 995 |
+
|
| 996 |
+
if output_attentions:
|
| 997 |
+
outputs += (self_attn_weights,)
|
| 998 |
+
|
| 999 |
+
if use_cache:
|
| 1000 |
+
outputs += (present_key_value,)
|
| 1001 |
+
|
| 1002 |
+
return outputs
|
| 1003 |
+
|
| 1004 |
+
|
| 1005 |
+
QWEN2_START_DOCSTRING = r"""
|
| 1006 |
+
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
|
| 1007 |
+
library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
|
| 1008 |
+
etc.)
|
| 1009 |
+
|
| 1010 |
+
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
|
| 1011 |
+
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
|
| 1012 |
+
and behavior.
|
| 1013 |
+
|
| 1014 |
+
Parameters:
|
| 1015 |
+
config ([`SDLMQwen2Config`]):
|
| 1016 |
+
Model configuration class with all the parameters of the model. Initializing with a config file does not
|
| 1017 |
+
load the weights associated with the model, only the configuration. Check out the
|
| 1018 |
+
[`~PreTrainedModel.from_pretrained`] method to load the model weights.
|
| 1019 |
+
"""
|
| 1020 |
+
|
| 1021 |
+
|
| 1022 |
+
@add_start_docstrings(
|
| 1023 |
+
"The bare Qwen2 Model outputting raw hidden-states without any specific head on top.",
|
| 1024 |
+
QWEN2_START_DOCSTRING,
|
| 1025 |
+
)
|
| 1026 |
+
class Qwen2PreTrainedModel(PreTrainedModel):
|
| 1027 |
+
config_class = SDLMQwen2Config
|
| 1028 |
+
base_model_prefix = "model"
|
| 1029 |
+
supports_gradient_checkpointing = True
|
| 1030 |
+
_no_split_modules = ["Qwen2DecoderLayer"]
|
| 1031 |
+
_skip_keys_device_placement = "past_key_values"
|
| 1032 |
+
_supports_flash_attn_2 = True
|
| 1033 |
+
_supports_sdpa = True
|
| 1034 |
+
_supports_cache_class = True
|
| 1035 |
+
|
| 1036 |
+
def _init_weights(self, module):
|
| 1037 |
+
std = self.config.initializer_range
|
| 1038 |
+
if isinstance(module, nn.Linear):
|
| 1039 |
+
module.weight.data.normal_(mean=0.0, std=std)
|
| 1040 |
+
if module.bias is not None:
|
| 1041 |
+
module.bias.data.zero_()
|
| 1042 |
+
elif isinstance(module, nn.Embedding):
|
| 1043 |
+
module.weight.data.normal_(mean=0.0, std=std)
|
| 1044 |
+
if module.padding_idx is not None:
|
| 1045 |
+
module.weight.data[module.padding_idx].zero_()
|
| 1046 |
+
|
| 1047 |
+
|
| 1048 |
+
QWEN2_INPUTS_DOCSTRING = r"""
|
| 1049 |
+
Args:
|
| 1050 |
+
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
|
| 1051 |
+
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
|
| 1052 |
+
it.
|
| 1053 |
+
|
| 1054 |
+
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
|
| 1055 |
+
[`PreTrainedTokenizer.__call__`] for details.
|
| 1056 |
+
|
| 1057 |
+
[What are input IDs?](../glossary#input-ids)
|
| 1058 |
+
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 1059 |
+
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
|
| 1060 |
+
|
| 1061 |
+
- 1 for tokens that are **not masked**,
|
| 1062 |
+
- 0 for tokens that are **masked**.
|
| 1063 |
+
|
| 1064 |
+
[What are attention masks?](../glossary#attention-mask)
|
| 1065 |
+
|
| 1066 |
+
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
|
| 1067 |
+
[`PreTrainedTokenizer.__call__`] for details.
|
| 1068 |
+
|
| 1069 |
+
If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
|
| 1070 |
+
`past_key_values`).
|
| 1071 |
+
|
| 1072 |
+
If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
|
| 1073 |
+
and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
|
| 1074 |
+
information on the default strategy.
|
| 1075 |
+
|
| 1076 |
+
- 1 indicates the head is **not masked**,
|
| 1077 |
+
- 0 indicates the head is **masked**.
|
| 1078 |
+
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 1079 |
+
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
|
| 1080 |
+
config.n_positions - 1]`.
|
| 1081 |
+
|
| 1082 |
+
[What are position IDs?](../glossary#position-ids)
|
| 1083 |
+
past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
|
| 1084 |
+
Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
|
| 1085 |
+
blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
|
| 1086 |
+
returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
|
| 1087 |
+
|
| 1088 |
+
Two formats are allowed:
|
| 1089 |
+
- a [`~cache_utils.Cache`] instance;
|
| 1090 |
+
- Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
|
| 1091 |
+
shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
|
| 1092 |
+
cache format.
|
| 1093 |
+
|
| 1094 |
+
The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
|
| 1095 |
+
legacy cache format will be returned.
|
| 1096 |
+
|
| 1097 |
+
If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
|
| 1098 |
+
have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
|
| 1099 |
+
of shape `(batch_size, sequence_length)`.
|
| 1100 |
+
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
|
| 1101 |
+
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
|
| 1102 |
+
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
|
| 1103 |
+
model's internal embedding lookup matrix.
|
| 1104 |
+
use_cache (`bool`, *optional*):
|
| 1105 |
+
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
|
| 1106 |
+
`past_key_values`).
|
| 1107 |
+
output_attentions (`bool`, *optional*):
|
| 1108 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
| 1109 |
+
tensors for more detail.
|
| 1110 |
+
output_hidden_states (`bool`, *optional*):
|
| 1111 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
| 1112 |
+
more detail.
|
| 1113 |
+
return_dict (`bool`, *optional*):
|
| 1114 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
| 1115 |
+
"""
|
| 1116 |
+
|
| 1117 |
+
|
| 1118 |
+
@add_start_docstrings(
|
| 1119 |
+
"The bare Qwen2 Model outputting raw hidden-states without any specific head on top.",
|
| 1120 |
+
QWEN2_START_DOCSTRING,
|
| 1121 |
+
)
|
| 1122 |
+
class Qwen2Model(Qwen2PreTrainedModel):
|
| 1123 |
+
"""
|
| 1124 |
+
Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Qwen2DecoderLayer`]
|
| 1125 |
+
|
| 1126 |
+
Args:
|
| 1127 |
+
config: SDLMQwen2Config
|
| 1128 |
+
"""
|
| 1129 |
+
|
| 1130 |
+
def __init__(self, config: SDLMQwen2Config):
|
| 1131 |
+
super().__init__(config)
|
| 1132 |
+
self.padding_idx = config.pad_token_id
|
| 1133 |
+
self.vocab_size = config.vocab_size
|
| 1134 |
+
|
| 1135 |
+
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
|
| 1136 |
+
self.layers = nn.ModuleList(
|
| 1137 |
+
[Qwen2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
|
| 1138 |
+
)
|
| 1139 |
+
self._attn_implementation = config._attn_implementation
|
| 1140 |
+
self.norm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
| 1141 |
+
|
| 1142 |
+
self.gradient_checkpointing = False
|
| 1143 |
+
# Initialize weights and apply final processing
|
| 1144 |
+
self.post_init()
|
| 1145 |
+
|
| 1146 |
+
|
| 1147 |
+
self.block_size = getattr(config, 'block_size', 4)
|
| 1148 |
+
self.causal_attn = getattr(config, 'causal_attn', False)
|
| 1149 |
+
self.text_mask_token_id = getattr(config, 'text_mask_token_id', 151666)
|
| 1150 |
+
|
| 1151 |
+
# print(f'{self.block_size=} {self.causal_attn=} {self.training=} {self.text_mask_token_id=}\n')
|
| 1152 |
+
|
| 1153 |
+
|
| 1154 |
+
def get_input_embeddings(self):
|
| 1155 |
+
return self.embed_tokens
|
| 1156 |
+
|
| 1157 |
+
def set_input_embeddings(self, value):
|
| 1158 |
+
self.embed_tokens = value
|
| 1159 |
+
|
| 1160 |
+
@add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
|
| 1161 |
+
def forward(
|
| 1162 |
+
self,
|
| 1163 |
+
input_ids: torch.LongTensor = None,
|
| 1164 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 1165 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 1166 |
+
past_key_values: Optional[List[torch.FloatTensor]] = None,
|
| 1167 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 1168 |
+
use_cache: Optional[bool] = None,
|
| 1169 |
+
output_attentions: Optional[bool] = None,
|
| 1170 |
+
output_hidden_states: Optional[bool] = None,
|
| 1171 |
+
return_dict: Optional[bool] = None,
|
| 1172 |
+
) -> Union[Tuple, BaseModelOutputWithPast]:
|
| 1173 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| 1174 |
+
output_hidden_states = (
|
| 1175 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 1176 |
+
)
|
| 1177 |
+
use_cache = use_cache if use_cache is not None else self.config.use_cache
|
| 1178 |
+
|
| 1179 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 1180 |
+
|
| 1181 |
+
# retrieve input_ids and inputs_embeds
|
| 1182 |
+
if input_ids is not None and inputs_embeds is not None:
|
| 1183 |
+
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
|
| 1184 |
+
elif input_ids is not None:
|
| 1185 |
+
batch_size, seq_length = input_ids.shape
|
| 1186 |
+
elif inputs_embeds is not None:
|
| 1187 |
+
batch_size, seq_length, _ = inputs_embeds.shape
|
| 1188 |
+
else:
|
| 1189 |
+
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
|
| 1190 |
+
|
| 1191 |
+
if self.gradient_checkpointing and self.training:
|
| 1192 |
+
if use_cache:
|
| 1193 |
+
logger.warning_once(
|
| 1194 |
+
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
|
| 1195 |
+
)
|
| 1196 |
+
use_cache = False
|
| 1197 |
+
|
| 1198 |
+
past_key_values_length = 0
|
| 1199 |
+
|
| 1200 |
+
if use_cache:
|
| 1201 |
+
use_legacy_cache = not isinstance(past_key_values, Cache)
|
| 1202 |
+
if use_legacy_cache:
|
| 1203 |
+
past_key_values = DynamicCache.from_legacy_cache(past_key_values)
|
| 1204 |
+
past_key_values_length = past_key_values.get_usable_length(seq_length)
|
| 1205 |
+
|
| 1206 |
+
if position_ids is None:
|
| 1207 |
+
device = input_ids.device if input_ids is not None else inputs_embeds.device
|
| 1208 |
+
position_ids = torch.arange(
|
| 1209 |
+
past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
|
| 1210 |
+
)
|
| 1211 |
+
position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
|
| 1212 |
+
else:
|
| 1213 |
+
position_ids = position_ids.view(-1, seq_length).long()
|
| 1214 |
+
|
| 1215 |
+
if inputs_embeds is None:
|
| 1216 |
+
inputs_embeds = self.embed_tokens(input_ids)
|
| 1217 |
+
|
| 1218 |
+
if attention_mask is not None and self._attn_implementation == "flash_attention_2" and use_cache:
|
| 1219 |
+
is_padding_right = attention_mask[:, -1].sum().item() != batch_size
|
| 1220 |
+
if is_padding_right:
|
| 1221 |
+
raise ValueError(
|
| 1222 |
+
"You are attempting to perform batched generation with padding_side='right'"
|
| 1223 |
+
" this may lead to unexpected behaviour for Flash Attention version of Qwen2. Make sure to "
|
| 1224 |
+
" call `tokenizer.padding_side = 'left'` before tokenizing the input. "
|
| 1225 |
+
)
|
| 1226 |
+
|
| 1227 |
+
device = input_ids.device if input_ids is not None else inputs_embeds.device
|
| 1228 |
+
x0_len = find_prefix_seq_length_by_pe(position_ids).to(device=device)
|
| 1229 |
+
|
| 1230 |
+
if self._attn_implementation == "sdpa" and not output_attentions:
|
| 1231 |
+
# output_attentions=True can not be supported when using SDPA, and we fall back on
|
| 1232 |
+
# the manual implementation that requires a 4D causal mask in all cases.
|
| 1233 |
+
# attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
|
| 1234 |
+
# attention_mask,
|
| 1235 |
+
# (batch_size, seq_length),
|
| 1236 |
+
# inputs_embeds,
|
| 1237 |
+
# past_key_values_length,
|
| 1238 |
+
# )
|
| 1239 |
+
|
| 1240 |
+
attention_mask, _ = create_block_diff_mask_by_pe_4d(
|
| 1241 |
+
block_size=self.block_size,
|
| 1242 |
+
x0_len_list=x0_len,
|
| 1243 |
+
position_ids=position_ids,
|
| 1244 |
+
causal_attn=self.causal_attn
|
| 1245 |
+
)
|
| 1246 |
+
|
| 1247 |
+
elif self._attn_implementation == "flash_attention_2":
|
| 1248 |
+
# # 2d mask is passed through the layers
|
| 1249 |
+
# attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
|
| 1250 |
+
|
| 1251 |
+
# TODO Update to Flex Attention.
|
| 1252 |
+
block_diff_mask_func = partial(
|
| 1253 |
+
create_block_diff_mask_by_pe_1d,
|
| 1254 |
+
block_size=self.block_size,
|
| 1255 |
+
x0_len_list=x0_len,
|
| 1256 |
+
position_ids_list=position_ids,
|
| 1257 |
+
causal_attn=self.causal_attn
|
| 1258 |
+
)
|
| 1259 |
+
|
| 1260 |
+
attention_mask = create_block_mask(
|
| 1261 |
+
block_diff_mask_func,
|
| 1262 |
+
B=None, H=None, Q_LEN=seq_length, KV_LEN=seq_length, device=device
|
| 1263 |
+
)
|
| 1264 |
+
|
| 1265 |
+
else:
|
| 1266 |
+
if not self.training:
|
| 1267 |
+
# for sampling, set attn = eager
|
| 1268 |
+
attention_mask = _prepare_4d_causal_attention_mask(
|
| 1269 |
+
attention_mask,
|
| 1270 |
+
(batch_size, seq_length),
|
| 1271 |
+
inputs_embeds,
|
| 1272 |
+
past_key_values_length,
|
| 1273 |
+
sliding_window=self.config.sliding_window,
|
| 1274 |
+
)
|
| 1275 |
+
|
| 1276 |
+
if use_cache:
|
| 1277 |
+
update_mask_func = partial(
|
| 1278 |
+
update_causal_mask_for_one_gen_window_2d,
|
| 1279 |
+
block_size=self.block_size,
|
| 1280 |
+
use_cache=use_cache,
|
| 1281 |
+
causal_attn=self.causal_attn
|
| 1282 |
+
)
|
| 1283 |
+
else:
|
| 1284 |
+
update_mask_func = partial(
|
| 1285 |
+
update_causal_mask_with_pad_non_visible_2d,
|
| 1286 |
+
block_size=self.block_size,
|
| 1287 |
+
text_mask_token_id=self.text_mask_token_id,
|
| 1288 |
+
causal_attn=self.causal_attn
|
| 1289 |
+
)
|
| 1290 |
+
|
| 1291 |
+
if attention_mask is not None and len(attention_mask.shape) == 4:
|
| 1292 |
+
new_attention_mask = []
|
| 1293 |
+
for b in range(attention_mask.shape[0]):
|
| 1294 |
+
new_attention_mask.append(
|
| 1295 |
+
update_mask_func(
|
| 1296 |
+
input_ids[b],
|
| 1297 |
+
attention_mask[b][0]
|
| 1298 |
+
).unsqueeze(0)
|
| 1299 |
+
)
|
| 1300 |
+
attention_mask = torch.stack(new_attention_mask, dim=0)
|
| 1301 |
+
|
| 1302 |
+
else:
|
| 1303 |
+
# for training
|
| 1304 |
+
attention_mask, _ = create_block_diff_mask_by_pe_4d(
|
| 1305 |
+
block_size=self.block_size,
|
| 1306 |
+
x0_len_list=x0_len,
|
| 1307 |
+
position_ids=position_ids,
|
| 1308 |
+
causal_attn=self.causal_attn
|
| 1309 |
+
)
|
| 1310 |
+
|
| 1311 |
+
hidden_states = inputs_embeds
|
| 1312 |
+
|
| 1313 |
+
# decoder layers
|
| 1314 |
+
all_hidden_states = () if output_hidden_states else None
|
| 1315 |
+
all_self_attns = () if output_attentions else None
|
| 1316 |
+
next_decoder_cache = None
|
| 1317 |
+
|
| 1318 |
+
for decoder_layer in self.layers:
|
| 1319 |
+
if output_hidden_states:
|
| 1320 |
+
all_hidden_states += (hidden_states,)
|
| 1321 |
+
|
| 1322 |
+
if self.gradient_checkpointing and self.training:
|
| 1323 |
+
layer_outputs = self._gradient_checkpointing_func(
|
| 1324 |
+
decoder_layer.__call__,
|
| 1325 |
+
hidden_states,
|
| 1326 |
+
attention_mask,
|
| 1327 |
+
position_ids,
|
| 1328 |
+
past_key_values,
|
| 1329 |
+
output_attentions,
|
| 1330 |
+
use_cache,
|
| 1331 |
+
)
|
| 1332 |
+
else:
|
| 1333 |
+
layer_outputs = decoder_layer(
|
| 1334 |
+
hidden_states,
|
| 1335 |
+
attention_mask=attention_mask,
|
| 1336 |
+
position_ids=position_ids,
|
| 1337 |
+
past_key_value=past_key_values,
|
| 1338 |
+
output_attentions=output_attentions,
|
| 1339 |
+
use_cache=use_cache,
|
| 1340 |
+
)
|
| 1341 |
+
|
| 1342 |
+
hidden_states = layer_outputs[0]
|
| 1343 |
+
|
| 1344 |
+
if use_cache:
|
| 1345 |
+
next_decoder_cache = layer_outputs[2 if output_attentions else 1]
|
| 1346 |
+
|
| 1347 |
+
if output_attentions:
|
| 1348 |
+
all_self_attns += (layer_outputs[1],)
|
| 1349 |
+
|
| 1350 |
+
hidden_states = self.norm(hidden_states)
|
| 1351 |
+
|
| 1352 |
+
# add hidden states from the last decoder layer
|
| 1353 |
+
if output_hidden_states:
|
| 1354 |
+
all_hidden_states += (hidden_states,)
|
| 1355 |
+
|
| 1356 |
+
next_cache = None
|
| 1357 |
+
if use_cache:
|
| 1358 |
+
next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache
|
| 1359 |
+
|
| 1360 |
+
if not return_dict:
|
| 1361 |
+
return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
|
| 1362 |
+
return BaseModelOutputWithPast(
|
| 1363 |
+
last_hidden_state=hidden_states,
|
| 1364 |
+
past_key_values=next_cache,
|
| 1365 |
+
hidden_states=all_hidden_states,
|
| 1366 |
+
attentions=all_self_attns,
|
| 1367 |
+
)
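To see the training-path mask plumbing above in isolation, here is a hedged sketch using the helpers from `attn_mask_utils.py` (shipped next to this modeling file); the position ids are toy values, not a real packed batch:

```python
import torch
from attn_mask_utils import find_prefix_seq_length_by_pe, create_block_diff_mask_by_pe_4d

block_size = 4
# 5 prompt tokens (positions 0-4), then two 4-token generation blocks whose
# position ids restart, signalling how much prefix each block conditions on.
position_ids = torch.tensor([[0, 1, 2, 3, 4, 2, 3, 4, 5, 2, 3, 4, 5]])

x0_len = find_prefix_seq_length_by_pe(position_ids)      # tensor([5])
mask_4d, _ = create_block_diff_mask_by_pe_4d(
    block_size=block_size,
    x0_len_list=x0_len,
    position_ids=position_ids,
    causal_attn=False,
)
# mask_4d is a [1, 1, 13, 13] float mask: causal over the prompt, bidirectional
# inside each generation block, prefix-only visibility from blocks back to the prompt.
```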
|
| 1368 |
+
|
| 1369 |
+
|
| 1370 |
+
class SDLMQwen2ForCausalLM(Qwen2PreTrainedModel):
|
| 1371 |
+
_tied_weights_keys = ["lm_head.weight"]
|
| 1372 |
+
|
| 1373 |
+
def __init__(self, config):
|
| 1374 |
+
super().__init__(config)
|
| 1375 |
+
self.model = Qwen2Model(config)
|
| 1376 |
+
self.vocab_size = config.vocab_size
|
| 1377 |
+
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
|
| 1378 |
+
|
| 1379 |
+
self.text_mask_token_id = getattr(config, 'text_mask_token_id', 151666)
|
| 1380 |
+
|
| 1381 |
+
# Initialize weights and apply final processing
|
| 1382 |
+
self.post_init()
|
| 1383 |
+
|
| 1384 |
+
|
| 1385 |
+
def get_input_embeddings(self):
|
| 1386 |
+
return self.model.embed_tokens
|
| 1387 |
+
|
| 1388 |
+
def set_input_embeddings(self, value):
|
| 1389 |
+
self.model.embed_tokens = value
|
| 1390 |
+
|
| 1391 |
+
def get_output_embeddings(self):
|
| 1392 |
+
return self.lm_head
|
| 1393 |
+
|
| 1394 |
+
def set_output_embeddings(self, new_embeddings):
|
| 1395 |
+
self.lm_head = new_embeddings
|
| 1396 |
+
|
| 1397 |
+
def set_decoder(self, decoder):
|
| 1398 |
+
self.model = decoder
|
| 1399 |
+
|
| 1400 |
+
def get_decoder(self):
|
| 1401 |
+
return self.model
|
| 1402 |
+
|
| 1403 |
+
@add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
|
| 1404 |
+
@replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
|
| 1405 |
+
def forward(
|
| 1406 |
+
self,
|
| 1407 |
+
input_ids: torch.LongTensor = None,
|
| 1408 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 1409 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 1410 |
+
past_key_values: Optional[List[torch.FloatTensor]] = None,
|
| 1411 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 1412 |
+
labels: Optional[torch.LongTensor] = None,
|
| 1413 |
+
use_cache: Optional[bool] = None,
|
| 1414 |
+
output_attentions: Optional[bool] = None,
|
| 1415 |
+
output_hidden_states: Optional[bool] = None,
|
| 1416 |
+
return_dict: Optional[bool] = None,
|
| 1417 |
+
) -> Union[Tuple, CausalLMOutputWithPast]:
|
| 1418 |
+
r"""
|
| 1419 |
+
Args:
|
| 1420 |
+
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 1421 |
+
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
|
| 1422 |
+
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
|
| 1423 |
+
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
|
| 1424 |
+
|
| 1425 |
+
Returns:
|
| 1426 |
+
|
| 1427 |
+
Example:
|
| 1428 |
+
|
| 1429 |
+
```python
|
| 1430 |
+
>>> from transformers import AutoTokenizer, Qwen2ForCausalLM
|
| 1431 |
+
|
| 1432 |
+
>>> model = Qwen2ForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
|
| 1433 |
+
>>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
|
| 1434 |
+
|
| 1435 |
+
>>> prompt = "Hey, are you conscious? Can you talk to me?"
|
| 1436 |
+
>>> inputs = tokenizer(prompt, return_tensors="pt")
|
| 1437 |
+
|
| 1438 |
+
>>> # Generate
|
| 1439 |
+
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
|
| 1440 |
+
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
|
| 1441 |
+
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
|
| 1442 |
+
```"""
|
| 1443 |
+
|
| 1444 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| 1445 |
+
output_hidden_states = (
|
| 1446 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 1447 |
+
)
|
| 1448 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 1449 |
+
|
| 1450 |
+
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
|
| 1451 |
+
outputs = self.model(
|
| 1452 |
+
input_ids=input_ids,
|
| 1453 |
+
attention_mask=attention_mask,
|
| 1454 |
+
position_ids=position_ids,
|
| 1455 |
+
past_key_values=past_key_values,
|
| 1456 |
+
inputs_embeds=inputs_embeds,
|
| 1457 |
+
use_cache=use_cache,
|
| 1458 |
+
output_attentions=output_attentions,
|
| 1459 |
+
output_hidden_states=output_hidden_states,
|
| 1460 |
+
return_dict=return_dict,
|
| 1461 |
+
)
|
| 1462 |
+
|
| 1463 |
+
hidden_states = outputs[0]
|
| 1464 |
+
logits = self.lm_head(hidden_states)
|
| 1465 |
+
logits = logits.float()
|
| 1466 |
+
|
| 1467 |
+
loss = None
|
| 1468 |
+
if labels is not None:
|
| 1469 |
+
|
| 1470 |
+
# Shift so that tokens < n predict n
|
| 1471 |
+
shift_logits = logits[..., :-1, :].contiguous()
|
| 1472 |
+
shift_labels = labels[..., 1:].contiguous()
|
| 1473 |
+
|
| 1474 |
+
# Flatten the tokens
|
| 1475 |
+
loss_fct = CrossEntropyLoss()
|
| 1476 |
+
shift_logits = shift_logits.view(-1, self.config.vocab_size)
|
| 1477 |
+
|
| 1478 |
+
shift_labels = shift_labels.view(-1)
|
| 1479 |
+
# Enable model parallelism
|
| 1480 |
+
shift_labels = shift_labels.to(shift_logits.device)
|
| 1481 |
+
loss = loss_fct(shift_logits, shift_labels)
|
| 1482 |
+
|
| 1483 |
+
# for log, not needed
|
| 1484 |
+
pos_masks = find_pred_pos_from_input_ids(input_ids, text_mask_token_id=self.text_mask_token_id)
|
| 1485 |
+
shift_input_ids = input_ids[..., :-1].contiguous()
|
| 1486 |
+
shift_pos_masks = pos_masks[:, :-1]
|
| 1487 |
+
|
| 1488 |
+
shift_input_ids = shift_input_ids.view(-1)
|
| 1489 |
+
max_n_future_tokens = min(4, self.model.block_size)
|
| 1490 |
+
|
| 1491 |
+
pos_loss_list = torch.zeros(max_n_future_tokens, device=shift_logits.device)
|
| 1492 |
+
|
| 1493 |
+
shift_pos_masks = shift_pos_masks.reshape(-1)
|
| 1494 |
+
|
| 1495 |
+
for ix in range(max_n_future_tokens):
|
| 1496 |
+
seg_loss = F.cross_entropy(
|
| 1497 |
+
shift_logits[shift_pos_masks==ix],
|
| 1498 |
+
shift_labels[shift_pos_masks==ix],
|
| 1499 |
+
reduction='mean'
|
| 1500 |
+
)
|
| 1501 |
+
|
| 1502 |
+
pos_loss_list[ix] = seg_loss
|
| 1503 |
+
|
| 1504 |
+
|
| 1505 |
+
if not return_dict:
|
| 1506 |
+
output = (logits,) + outputs[1:]
|
| 1507 |
+
return (loss,) + output if loss is not None else output
|
| 1508 |
+
|
| 1509 |
+
if self.training:
|
| 1510 |
+
return CausalLMOutputWithPast(
|
| 1511 |
+
loss=loss,
|
| 1512 |
+
logits=logits,
|
| 1513 |
+
past_key_values=outputs.past_key_values,
|
| 1514 |
+
hidden_states=outputs.hidden_states,
|
| 1515 |
+
attentions=outputs.attentions,
|
| 1516 |
+
), pos_loss_list
|
| 1517 |
+
|
| 1518 |
+
return CausalLMOutputWithPast(
|
| 1519 |
+
loss=loss,
|
| 1520 |
+
logits=logits,
|
| 1521 |
+
past_key_values=outputs.past_key_values,
|
| 1522 |
+
hidden_states=outputs.hidden_states,
|
| 1523 |
+
attentions=outputs.attentions,
|
| 1524 |
+
)
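Note the asymmetric return above: in training mode the forward yields a `(CausalLMOutputWithPast, pos_loss_list)` tuple, where `pos_loss_list[i]` is the cross-entropy restricted to the i-th offset inside each diffusion block. A minimal consumption sketch, assuming a hand-rolled loop (the extra tuple element would confuse a stock HF `Trainer`); `model`, `batch`, and `optimizer` are assumed to exist:

```python
model.train()
output, pos_loss_list = model(
    input_ids=batch["input_ids"],
    attention_mask=batch["attention_mask"],
    position_ids=batch["position_ids"],
    labels=batch["labels"],
)
output.loss.backward()
optimizer.step()
optimizer.zero_grad()

# Per-offset losses within each block_size window; used for logging only.
for i, l in enumerate(pos_loss_list.tolist()):
    print(f"in-block offset {i}: loss {l:.4f}")
```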
|
| 1525 |
+
|
| 1526 |
+
def prepare_inputs_for_generation(
|
| 1527 |
+
self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
|
| 1528 |
+
):
|
| 1529 |
+
# Omit tokens covered by past_key_values
|
| 1530 |
+
if past_key_values is not None:
|
| 1531 |
+
if isinstance(past_key_values, Cache):
|
| 1532 |
+
cache_length = past_key_values.get_seq_length()
|
| 1533 |
+
past_length = past_key_values.seen_tokens
|
| 1534 |
+
max_cache_length = past_key_values.get_max_length()
|
| 1535 |
+
else:
|
| 1536 |
+
cache_length = past_length = past_key_values[0][0].shape[2]
|
| 1537 |
+
max_cache_length = None
|
| 1538 |
+
|
| 1539 |
+
# Keep only the unprocessed tokens:
|
| 1540 |
+
# 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
|
| 1541 |
+
# some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
|
| 1542 |
+
# input)
|
| 1543 |
+
if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
|
| 1544 |
+
input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
|
| 1545 |
+
# 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
|
| 1546 |
+
# input_ids based on the past_length.
|
| 1547 |
+
elif past_length < input_ids.shape[1]:
|
| 1548 |
+
input_ids = input_ids[:, past_length:]
|
| 1549 |
+
# 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
|
| 1550 |
+
|
| 1551 |
+
# If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
|
| 1552 |
+
if (
|
| 1553 |
+
max_cache_length is not None
|
| 1554 |
+
and attention_mask is not None
|
| 1555 |
+
and cache_length + input_ids.shape[1] > max_cache_length
|
| 1556 |
+
):
|
| 1557 |
+
attention_mask = attention_mask[:, -max_cache_length:]
|
| 1558 |
+
|
| 1559 |
+
position_ids = kwargs.get("position_ids", None)
|
| 1560 |
+
if attention_mask is not None and position_ids is None:
|
| 1561 |
+
# create position_ids on the fly for batch generation
|
| 1562 |
+
position_ids = attention_mask.long().cumsum(-1) - 1
|
| 1563 |
+
position_ids.masked_fill_(attention_mask == 0, 1)
|
| 1564 |
+
if past_key_values:
|
| 1565 |
+
position_ids = position_ids[:, -input_ids.shape[1] :]
|
| 1566 |
+
|
| 1567 |
+
# if `inputs_embeds` are passed, we only want to use them in the 1st generation step
|
| 1568 |
+
if inputs_embeds is not None and past_key_values is None:
|
| 1569 |
+
model_inputs = {"inputs_embeds": inputs_embeds}
|
| 1570 |
+
else:
|
| 1571 |
+
model_inputs = {"input_ids": input_ids}
|
| 1572 |
+
|
| 1573 |
+
model_inputs.update(
|
| 1574 |
+
{
|
| 1575 |
+
"position_ids": position_ids,
|
| 1576 |
+
"past_key_values": past_key_values,
|
| 1577 |
+
"use_cache": kwargs.get("use_cache"),
|
| 1578 |
+
"attention_mask": attention_mask,
|
| 1579 |
+
}
|
| 1580 |
+
)
|
| 1581 |
+
return model_inputs
|
| 1582 |
+
|
| 1583 |
+
@staticmethod
|
| 1584 |
+
def _reorder_cache(past_key_values, beam_idx):
|
| 1585 |
+
reordered_past = ()
|
| 1586 |
+
for layer_past in past_key_values:
|
| 1587 |
+
reordered_past += (
|
| 1588 |
+
tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
|
| 1589 |
+
)
|
| 1590 |
+
return reordered_past
|
sdlm_ckpt_final/sdlm_32b_bs4/special_tokens_map.json
ADDED
|
@@ -0,0 +1,38 @@
|
|
| 1 |
+
{
|
| 2 |
+
"additional_special_tokens": [
|
| 3 |
+
"<|im_start|>",
|
| 4 |
+
"<|im_end|>",
|
| 5 |
+
"<|object_ref_start|>",
|
| 6 |
+
"<|object_ref_end|>",
|
| 7 |
+
"<|box_start|>",
|
| 8 |
+
"<|box_end|>",
|
| 9 |
+
"<|quad_start|>",
|
| 10 |
+
"<|quad_end|>",
|
| 11 |
+
"<|vision_start|>",
|
| 12 |
+
"<|vision_end|>",
|
| 13 |
+
"<|vision_pad|>",
|
| 14 |
+
"<|image_pad|>",
|
| 15 |
+
"<|video_pad|>",
|
| 16 |
+
{
|
| 17 |
+
"content": "<text_mask>",
|
| 18 |
+
"lstrip": false,
|
| 19 |
+
"normalized": false,
|
| 20 |
+
"rstrip": false,
|
| 21 |
+
"single_word": false
|
| 22 |
+
}
|
| 23 |
+
],
|
| 24 |
+
"eos_token": {
|
| 25 |
+
"content": "<|endoftext|>",
|
| 26 |
+
"lstrip": false,
|
| 27 |
+
"normalized": false,
|
| 28 |
+
"rstrip": false,
|
| 29 |
+
"single_word": false
|
| 30 |
+
},
|
| 31 |
+
"pad_token": {
|
| 32 |
+
"content": "<|endoftext|>",
|
| 33 |
+
"lstrip": false,
|
| 34 |
+
"normalized": false,
|
| 35 |
+
"rstrip": false,
|
| 36 |
+
"single_word": false
|
| 37 |
+
}
|
| 38 |
+
}
|
sdlm_ckpt_final/sdlm_32b_bs4/tokenizer_config.json
ADDED
|
@@ -0,0 +1,217 @@
|
|
| 1 |
+
{
|
| 2 |
+
"add_bos_token": false,
|
| 3 |
+
"add_eos_token": true,
|
| 4 |
+
"add_prefix_space": false,
|
| 5 |
+
"added_tokens_decoder": {
|
| 6 |
+
"151643": {
|
| 7 |
+
"content": "<|endoftext|>",
|
| 8 |
+
"lstrip": false,
|
| 9 |
+
"normalized": false,
|
| 10 |
+
"rstrip": false,
|
| 11 |
+
"single_word": false,
|
| 12 |
+
"special": true
|
| 13 |
+
},
|
| 14 |
+
"151644": {
|
| 15 |
+
"content": "<|im_start|>",
|
| 16 |
+
"lstrip": false,
|
| 17 |
+
"normalized": false,
|
| 18 |
+
"rstrip": false,
|
| 19 |
+
"single_word": false,
|
| 20 |
+
"special": true
|
| 21 |
+
},
|
| 22 |
+
"151645": {
|
| 23 |
+
"content": "<|im_end|>",
|
| 24 |
+
"lstrip": false,
|
| 25 |
+
"normalized": false,
|
| 26 |
+
"rstrip": false,
|
| 27 |
+
"single_word": false,
|
| 28 |
+
"special": true
|
| 29 |
+
},
|
| 30 |
+
"151646": {
|
| 31 |
+
"content": "<|object_ref_start|>",
|
| 32 |
+
"lstrip": false,
|
| 33 |
+
"normalized": false,
|
| 34 |
+
"rstrip": false,
|
| 35 |
+
"single_word": false,
|
| 36 |
+
"special": true
|
| 37 |
+
},
|
| 38 |
+
"151647": {
|
| 39 |
+
"content": "<|object_ref_end|>",
|
| 40 |
+
"lstrip": false,
|
| 41 |
+
"normalized": false,
|
| 42 |
+
"rstrip": false,
|
| 43 |
+
"single_word": false,
|
| 44 |
+
"special": true
|
| 45 |
+
},
|
| 46 |
+
"151648": {
|
| 47 |
+
"content": "<|box_start|>",
|
| 48 |
+
"lstrip": false,
|
| 49 |
+
"normalized": false,
|
| 50 |
+
"rstrip": false,
|
| 51 |
+
"single_word": false,
|
| 52 |
+
"special": true
|
| 53 |
+
},
|
| 54 |
+
"151649": {
|
| 55 |
+
"content": "<|box_end|>",
|
| 56 |
+
"lstrip": false,
|
| 57 |
+
"normalized": false,
|
| 58 |
+
"rstrip": false,
|
| 59 |
+
"single_word": false,
|
| 60 |
+
"special": true
|
| 61 |
+
},
|
| 62 |
+
"151650": {
|
| 63 |
+
"content": "<|quad_start|>",
|
| 64 |
+
"lstrip": false,
|
| 65 |
+
"normalized": false,
|
| 66 |
+
"rstrip": false,
|
| 67 |
+
"single_word": false,
|
| 68 |
+
"special": true
|
| 69 |
+
},
|
| 70 |
+
"151651": {
|
| 71 |
+
"content": "<|quad_end|>",
|
| 72 |
+
"lstrip": false,
|
| 73 |
+
"normalized": false,
|
| 74 |
+
"rstrip": false,
|
| 75 |
+
"single_word": false,
|
| 76 |
+
"special": true
|
| 77 |
+
},
|
| 78 |
+
"151652": {
|
| 79 |
+
"content": "<|vision_start|>",
|
| 80 |
+
"lstrip": false,
|
| 81 |
+
"normalized": false,
|
| 82 |
+
"rstrip": false,
|
| 83 |
+
"single_word": false,
|
| 84 |
+
"special": true
|
| 85 |
+
},
|
| 86 |
+
"151653": {
|
| 87 |
+
"content": "<|vision_end|>",
|
| 88 |
+
"lstrip": false,
|
| 89 |
+
"normalized": false,
|
| 90 |
+
"rstrip": false,
|
| 91 |
+
"single_word": false,
|
| 92 |
+
"special": true
|
| 93 |
+
},
|
| 94 |
+
"151654": {
|
| 95 |
+
"content": "<|vision_pad|>",
|
| 96 |
+
"lstrip": false,
|
| 97 |
+
"normalized": false,
|
| 98 |
+
"rstrip": false,
|
| 99 |
+
"single_word": false,
|
| 100 |
+
"special": true
|
| 101 |
+
},
|
| 102 |
+
"151655": {
|
| 103 |
+
"content": "<|image_pad|>",
|
| 104 |
+
"lstrip": false,
|
| 105 |
+
"normalized": false,
|
| 106 |
+
"rstrip": false,
|
| 107 |
+
"single_word": false,
|
| 108 |
+
"special": true
|
| 109 |
+
},
|
| 110 |
+
"151656": {
|
| 111 |
+
"content": "<|video_pad|>",
|
| 112 |
+
"lstrip": false,
|
| 113 |
+
"normalized": false,
|
| 114 |
+
"rstrip": false,
|
| 115 |
+
"single_word": false,
|
| 116 |
+
"special": true
|
| 117 |
+
},
|
| 118 |
+
"151657": {
|
| 119 |
+
"content": "<tool_call>",
|
| 120 |
+
"lstrip": false,
|
| 121 |
+
"normalized": false,
|
| 122 |
+
"rstrip": false,
|
| 123 |
+
"single_word": false,
|
| 124 |
+
"special": false
|
| 125 |
+
},
|
| 126 |
+
"151658": {
|
| 127 |
+
"content": "</tool_call>",
|
| 128 |
+
"lstrip": false,
|
| 129 |
+
"normalized": false,
|
| 130 |
+
"rstrip": false,
|
| 131 |
+
"single_word": false,
|
| 132 |
+
"special": false
|
| 133 |
+
},
|
| 134 |
+
"151659": {
|
| 135 |
+
"content": "<|fim_prefix|>",
|
| 136 |
+
"lstrip": false,
|
| 137 |
+
"normalized": false,
|
| 138 |
+
"rstrip": false,
|
| 139 |
+
"single_word": false,
|
| 140 |
+
"special": false
|
| 141 |
+
},
|
| 142 |
+
"151660": {
|
| 143 |
+
"content": "<|fim_middle|>",
|
| 144 |
+
"lstrip": false,
|
| 145 |
+
"normalized": false,
|
| 146 |
+
"rstrip": false,
|
| 147 |
+
"single_word": false,
|
| 148 |
+
"special": false
|
| 149 |
+
},
|
| 150 |
+
"151661": {
|
| 151 |
+
"content": "<|fim_suffix|>",
|
| 152 |
+
"lstrip": false,
|
| 153 |
+
"normalized": false,
|
| 154 |
+
"rstrip": false,
|
| 155 |
+
"single_word": false,
|
| 156 |
+
"special": false
|
| 157 |
+
},
|
| 158 |
+
"151662": {
|
| 159 |
+
"content": "<|fim_pad|>",
|
| 160 |
+
"lstrip": false,
|
| 161 |
+
"normalized": false,
|
| 162 |
+
"rstrip": false,
|
| 163 |
+
"single_word": false,
|
| 164 |
+
"special": false
|
| 165 |
+
},
|
| 166 |
+
"151663": {
|
| 167 |
+
"content": "<|repo_name|>",
|
| 168 |
+
"lstrip": false,
|
| 169 |
+
"normalized": false,
|
| 170 |
+
"rstrip": false,
|
| 171 |
+
"single_word": false,
|
| 172 |
+
"special": false
|
| 173 |
+
},
|
| 174 |
+
"151664": {
|
| 175 |
+
"content": "<|file_sep|>",
|
| 176 |
+
"lstrip": false,
|
| 177 |
+
"normalized": false,
|
| 178 |
+
"rstrip": false,
|
| 179 |
+
"single_word": false,
|
| 180 |
+
"special": false
|
| 181 |
+
},
|
| 182 |
+
"151665": {
|
| 183 |
+
"content": "<text_mask>",
|
| 184 |
+
"lstrip": false,
|
| 185 |
+
"normalized": false,
|
| 186 |
+
"rstrip": false,
|
| 187 |
+
"single_word": false,
|
| 188 |
+
"special": true
|
| 189 |
+
}
|
| 190 |
+
},
|
| 191 |
+
"additional_special_tokens": [
|
| 192 |
+
"<|im_start|>",
|
| 193 |
+
"<|im_end|>",
|
| 194 |
+
"<|object_ref_start|>",
|
| 195 |
+
"<|object_ref_end|>",
|
| 196 |
+
"<|box_start|>",
|
| 197 |
+
"<|box_end|>",
|
| 198 |
+
"<|quad_start|>",
|
| 199 |
+
"<|quad_end|>",
|
| 200 |
+
"<|vision_start|>",
|
| 201 |
+
"<|vision_end|>",
|
| 202 |
+
"<|vision_pad|>",
|
| 203 |
+
"<|image_pad|>",
|
| 204 |
+
"<|video_pad|>",
|
| 205 |
+
"<text_mask>"
|
| 206 |
+
],
|
| 207 |
+
"bos_token": null,
|
| 208 |
+
"chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
|
| 209 |
+
"clean_up_tokenization_spaces": false,
|
| 210 |
+
"eos_token": "<|endoftext|>",
|
| 211 |
+
"errors": "replace",
|
| 212 |
+
"model_max_length": 5632,
|
| 213 |
+
"pad_token": "<|endoftext|>",
|
| 214 |
+
"split_special_tokens": false,
|
| 215 |
+
"tokenizer_class": "Qwen2Tokenizer",
|
| 216 |
+
"unk_token": null
|
| 217 |
+
}
|
sdlm_ckpt_final/sdlm_32b_bs4/train_results.json
ADDED
|
@@ -0,0 +1,8 @@
|
|
| 1 |
+
{
|
| 2 |
+
"epoch": 1.0,
|
| 3 |
+
"train_loss": 0.38482022780720976,
|
| 4 |
+
"train_runtime": 198218.2379,
|
| 5 |
+
"train_samples": 3506817,
|
| 6 |
+
"train_samples_per_second": 17.692,
|
| 7 |
+
"train_steps_per_second": 0.038
|
| 8 |
+
}
|
sdlm_ckpt_final/sdlm_32b_bs4/trainer_state.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
sdlm_ckpt_final/sdlm_32b_bs4/training_args.bin
ADDED
|
@@ -0,0 +1,3 @@
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9b66a973729e935a24f4f6422cd86f48318a88b69a969aafc13068e5bf4ad756
|
| 3 |
+
size 7441
|
sdlm_ckpt_final/sdlm_32b_bs4/training_log.txt
ADDED
|
@@ -0,0 +1,3 @@
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b195e4e67c2a837e8727e676fa7f4997fe282d52f73df2cfa015157b2d5072f4
|
| 3 |
+
size 20324565
|
sdlm_ckpt_final/sdlm_32b_bs4/training_log_from_ckpt_4000.txt
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
sdlm_ckpt_final/sdlm_32b_bs4/training_log_from_ckpt_4000_2.txt
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
sdlm_ckpt_final/sdlm_32b_bs4/training_log_from_ckpt_5200.txt
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
sdlm_ckpt_final/sdlm_32b_bs4/vocab.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
sdlm_ckpt_final/sdlm_3b_bs4/added_tokens.json
ADDED
|
@@ -0,0 +1,25 @@
|
|
| 1 |
+
{
|
| 2 |
+
"</tool_call>": 151658,
|
| 3 |
+
"<text_mask>": 151665,
|
| 4 |
+
"<tool_call>": 151657,
|
| 5 |
+
"<|box_end|>": 151649,
|
| 6 |
+
"<|box_start|>": 151648,
|
| 7 |
+
"<|endoftext|>": 151643,
|
| 8 |
+
"<|file_sep|>": 151664,
|
| 9 |
+
"<|fim_middle|>": 151660,
|
| 10 |
+
"<|fim_pad|>": 151662,
|
| 11 |
+
"<|fim_prefix|>": 151659,
|
| 12 |
+
"<|fim_suffix|>": 151661,
|
| 13 |
+
"<|im_end|>": 151645,
|
| 14 |
+
"<|im_start|>": 151644,
|
| 15 |
+
"<|image_pad|>": 151655,
|
| 16 |
+
"<|object_ref_end|>": 151647,
|
| 17 |
+
"<|object_ref_start|>": 151646,
|
| 18 |
+
"<|quad_end|>": 151651,
|
| 19 |
+
"<|quad_start|>": 151650,
|
| 20 |
+
"<|repo_name|>": 151663,
|
| 21 |
+
"<|video_pad|>": 151656,
|
| 22 |
+
"<|vision_end|>": 151653,
|
| 23 |
+
"<|vision_pad|>": 151654,
|
| 24 |
+
"<|vision_start|>": 151652
|
| 25 |
+
}
|
sdlm_ckpt_final/sdlm_3b_bs4/all_results.json
ADDED
|
@@ -0,0 +1,8 @@
|
|
| 1 |
+
{
|
| 2 |
+
"epoch": 1.0,
|
| 3 |
+
"train_loss": 1.2385003984309486,
|
| 4 |
+
"train_runtime": 57086.6155,
|
| 5 |
+
"train_samples": 3506817,
|
| 6 |
+
"train_samples_per_second": 61.43,
|
| 7 |
+
"train_steps_per_second": 0.24
|
| 8 |
+
}
|
sdlm_ckpt_final/sdlm_3b_bs4/attn_mask_utils.py
ADDED
|
@@ -0,0 +1,292 @@
|
|
| 1 |
+
import torch
|
| 2 |
+
import copy
|
| 3 |
+
|
| 4 |
+
def find_prefix_seq_length_by_pe(
|
| 5 |
+
pe: torch.Tensor
|
| 6 |
+
) -> torch.Tensor:
|
| 7 |
+
"""
|
| 8 |
+
Find the sequence length where position encoding drops (indicating prefix boundary).
|
| 9 |
+
Args:
|
| 10 |
+
pe: Position encoding tensor of shape [batch_size, seq_len].
|
| 11 |
+
Contains position indices for each token in the sequence.
|
| 12 |
+
Returns:
|
| 13 |
+
torch.Tensor: A tensor of shape [B] containing:
|
| 14 |
+
- The index where position encoding drops for each sequence
|
| 15 |
+
- -1 if no drop occurs in the sequence
|
| 16 |
+
"""
|
| 17 |
+
batch_size, seq_len = pe.shape
|
| 18 |
+
prev = pe[:, :-1]
|
| 19 |
+
curr = pe[:, 1:]
|
| 20 |
+
drop_mask = curr < prev # [batch_size, seq_len-1]
|
| 21 |
+
|
| 22 |
+
seq_len = torch.full((batch_size,), -1, dtype=torch.long)  # result buffer; -1 means no position-id restart (note: shadows the seq_len unpacked above)
|
| 23 |
+
|
| 24 |
+
for b in range(batch_size):
|
| 25 |
+
drop_pos = torch.nonzero(drop_mask[b], as_tuple=False)
|
| 26 |
+
if drop_pos.numel() > 0:
|
| 27 |
+
i = drop_pos[0].item() + 1 # Take first drop position (+1 because we compared shifted sequences)
|
| 28 |
+
seq_len[b] = i
|
| 29 |
+
|
| 30 |
+
return seq_len
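A small worked example of the boundary detection above (toy position ids): the first index whose position id is smaller than its predecessor marks where the prompt `x0` ends.

```python
import torch

# Row 0: 5-token prompt, then a generation block whose positions restart at 2.
# Row 1: monotonically increasing positions, i.e. no generation blocks.
pe = torch.tensor([[0, 1, 2, 3, 4, 2, 3, 4, 5],
                   [0, 1, 2, 3, 4, 5, 6, 7, 8]])

print(find_prefix_seq_length_by_pe(pe))  # tensor([ 5, -1]); -1 means no restart found
```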
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def update_causal_mask_with_pad_non_visible_2d(
|
| 35 |
+
input_ids: torch.Tensor,
|
| 36 |
+
attn_mask_2d: torch.Tensor,
|
| 37 |
+
text_mask_token_id: int = 151666,
|
| 38 |
+
block_size: int = 4,
|
| 39 |
+
causal_attn: bool = False
|
| 40 |
+
) -> torch.Tensor:
|
| 41 |
+
"""
|
| 42 |
+
Updates a 2D attention mask over the whole sequence, derived from input_ids and text_mask_token_id.
|
| 43 |
+
|
| 44 |
+
Args:
|
| 45 |
+
input_ids: Input token IDs of shape [seq_len]; used to locate <text_mask> tokens and the positions just before them
|
| 46 |
+
attn_mask_2d: 2D attention mask matrix of shape [seq_len, seq_len] where:
|
| 47 |
+
- 0.0 indicates allowed attention
|
| 48 |
+
- -inf indicates masked attention
|
| 49 |
+
text_mask_token_id: ID representing masked tokens
|
| 50 |
+
block_size: Size of the diffusion window
|
| 51 |
+
causal_attn: If True, maintains strict causal masking throughout
|
| 52 |
+
|
| 53 |
+
Returns:
|
| 54 |
+
Modified attention mask with updated visibility patterns
|
| 55 |
+
"""
|
| 56 |
+
seq_len = input_ids.shape[0]
|
| 57 |
+
device = input_ids.device
|
| 58 |
+
|
| 59 |
+
# Identify masked tokens and their preceding positions
|
| 60 |
+
input_mask = input_ids.eq(text_mask_token_id)
|
| 61 |
+
input_before_mask = torch.zeros_like(input_mask)
|
| 62 |
+
input_before_mask[:-1] = input_mask[1:]
|
| 63 |
+
mask_cols = (input_mask | input_before_mask)
|
| 64 |
+
non_mask = ~mask_cols
|
| 65 |
+
|
| 66 |
+
rows = torch.arange(seq_len, device=device)[:, None] # (seq_len, 1)
|
| 67 |
+
cols = torch.arange(seq_len, device=device) # (seq_len,)
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
indices = torch.arange(seq_len, device=device)
|
| 71 |
+
prev_non_mask = (indices * non_mask).cummax(dim=0).values
|
| 72 |
+
|
| 73 |
+
max_value = torch.iinfo(indices.dtype).max
|
| 74 |
+
mask_indices = torch.where(non_mask, indices, torch.full_like(indices, max_value))
|
| 75 |
+
reversed_mask_indices = torch.flip(mask_indices, dims=[0])
|
| 76 |
+
reversed_cummin = reversed_mask_indices.cummin(dim=0).values
|
| 77 |
+
next_non_mask = torch.flip(reversed_cummin, dims=[0])
|
| 78 |
+
|
| 79 |
+
# ================= Part 1: Make positions after masks invisible =================
|
| 80 |
+
infra_mask = (
|
| 81 |
+
(cols > prev_non_mask) &
|
| 82 |
+
(rows >= next_non_mask[None, :]) &
|
| 83 |
+
mask_cols[None, :]
|
| 84 |
+
)
|
| 85 |
+
attn_mask_2d.masked_fill_(infra_mask, -float('inf'))
|
| 86 |
+
|
| 87 |
+
# ================= Part 2: Allow visibility to previous positions (if not causal) =================
|
| 88 |
+
if not causal_attn:
|
| 89 |
+
visible_mask = (
|
| 90 |
+
(rows > prev_non_mask[None, :]) &
|
| 91 |
+
(rows < cols) &
|
| 92 |
+
mask_cols[None, :]
|
| 93 |
+
)
|
| 94 |
+
attn_mask_2d.masked_fill_(visible_mask, 0.0)
|
| 95 |
+
|
| 96 |
+
return attn_mask_2d
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
def update_causal_mask_for_one_gen_window_2d(
|
| 100 |
+
input_ids: torch.Tensor,
|
| 101 |
+
attn_mask_2d: torch.Tensor,
|
| 102 |
+
block_size: int = 4,
|
| 103 |
+
use_cache: bool = True,
|
| 104 |
+
causal_attn: bool = False
|
| 105 |
+
) -> torch.Tensor:
|
| 106 |
+
"""
|
| 107 |
+
Updates a 2D attention mask for a diffusion window in transformer inference.
|
| 108 |
+
|
| 109 |
+
Args:
|
| 110 |
+
input_ids: Input token IDs (unused in current implementation)
|
| 111 |
+
attn_mask_2d: 2D attention mask matrix of shape [seq_len, seq_len] where:
|
| 112 |
+
- 0.0 indicates allowed attention
|
| 113 |
+
- -inf indicates masked attention
|
| 114 |
+
block_size: Size of the diffusion window
|
| 115 |
+
use_cache: Whether key-value cache is being used
|
| 116 |
+
causal_attn: If True, maintains strict causal masking throughout
|
| 117 |
+
|
| 118 |
+
Returns:
|
| 119 |
+
Modified attention mask with updated visibility patterns
|
| 120 |
+
"""
|
| 121 |
+
|
| 122 |
+
if not causal_attn:
|
| 123 |
+
# Make the diffusion window (last block_size tokens) fully visible to itself
|
| 124 |
+
# This allows bidirectional attention within the diffusion window
|
| 125 |
+
attn_mask_2d[-block_size:, -block_size:] = 0.0
|
| 126 |
+
if use_cache:
|
| 127 |
+
# Mask the last token from previous round to prevent recomputation and maintain generation consistency.
|
| 128 |
+
attn_mask_2d[-block_size:, -block_size-1] = -float('inf')
|
| 129 |
+
|
| 130 |
+
return attn_mask_2d
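A minimal sketch of what the update above does during cached decoding (toy sizes, `block_size=2`, a 6x6 window view); 0.0 means visible, `-inf` means masked:

```python
import torch

seq_len, block_size = 6, 2
neg_inf = -float("inf")

# Start from a standard causal mask (lower triangle visible).
mask = torch.full((seq_len, seq_len), neg_inf)
mask.masked_fill_(torch.tril(torch.ones(seq_len, seq_len, dtype=torch.bool)), 0.0)

dummy_ids = torch.zeros(seq_len, dtype=torch.long)  # input_ids is unused by this helper
mask = update_causal_mask_for_one_gen_window_2d(
    dummy_ids, mask, block_size=block_size, use_cache=True, causal_attn=False
)

# Rows 4-5 (the diffusion window) now attend bidirectionally to columns 4-5,
# while column 3 (the last token of the previous round) is re-masked for those rows.
```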
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
def create_block_diff_mask_by_pe_1d(
    b: int,
    h: int,
    q_idx: torch.Tensor,
    kv_idx: torch.Tensor,
    block_size: int,
    x0_len_list: torch.Tensor,
    position_ids_list: torch.Tensor,
    causal_attn: bool = False,
) -> torch.Tensor:
    """Computes the attention mask for a single query-key position in Flex Attention.

    Args:
        b (int): Batch index (0 <= b < batch_size).
        h (int): Head index (unused in the current implementation, reserved for future multi-head support).
        q_idx (torch.Tensor): Query position index (scalar or 0-D tensor).
        kv_idx (torch.Tensor): Key/value position index (scalar or 0-D tensor).
        block_size (int): Size of processing blocks for non-`x0` tokens.
        x0_len_list (torch.Tensor): Tensor of shape [batch_size] with `x0` segment lengths.
        position_ids_list (torch.Tensor): Tensor of shape [batch_size, seq_len] with position IDs.
        causal_attn (bool, optional): Enforces causal masking in mutual blocks if True. Defaults to False.

    Returns:
        torch.Tensor: Boolean indicating whether attention is allowed (True = allowed).
    """
    x0_len = x0_len_list[b]
    position_ids = position_ids_list[b]

    x0_flag_q = (q_idx < x0_len)
    x0_flag_kv = (kv_idx < x0_len)

    # Top-left: causal attention within the x0 (prefix) segment
    block_causal = (
        x0_flag_q &
        x0_flag_kv &
        (q_idx >= kv_idx)
    )

    q_ith_block = (q_idx - x0_len) // block_size
    kv_ith_block = (kv_idx - x0_len) // block_size

    # Bottom-right: mutual attention within the same block
    block_mutual = (
        (~x0_flag_q & ~x0_flag_kv) &
        (q_ith_block == kv_ith_block) &
        (q_idx >= kv_idx if causal_attn else True)
    )

    # Bottom-left: block tokens attend to the x0 prefix corresponding to q_idx's block
    prefix_len = position_ids[x0_len + q_ith_block * block_size]
    block_prefix = (
        (~x0_flag_q & x0_flag_kv) &
        (kv_idx < prefix_len)
    )

    mask_val = (block_causal | block_mutual | block_prefix)
    return mask_val.to(torch.bool)

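# --- Illustrative usage sketch (added in this write-up; not part of the original checkpoint file).
# A minimal example of plugging the function above into PyTorch's flex_attention as a mask_mod,
# assuming PyTorch >= 2.5; all shapes and lengths are made-up values.
def _demo_flex_attention_block_mask():
    from functools import partial
    from torch.nn.attention.flex_attention import create_block_mask, flex_attention

    device = "cuda" if torch.cuda.is_available() else "cpu"
    B, H, L, D = 2, 4, 64, 32
    x0_len_list = torch.tensor([16, 24], device=device)              # per-sample prompt (x0) lengths
    position_ids_list = torch.arange(L, device=device).repeat(B, 1)  # plain positions for illustration

    # Bind the extra arguments so the callable matches the (b, h, q_idx, kv_idx) signature.
    mask_mod = partial(
        create_block_diff_mask_by_pe_1d,
        block_size=4,
        x0_len_list=x0_len_list,
        position_ids_list=position_ids_list,
    )
    block_mask = create_block_mask(mask_mod, B=B, H=None, Q_LEN=L, KV_LEN=L, device=device)

    q = torch.randn(B, H, L, D, device=device)
    k = torch.randn(B, H, L, D, device=device)
    v = torch.randn(B, H, L, D, device=device)
    return flex_attention(q, k, v, block_mask=block_mask)
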
def create_block_diff_mask_by_pe_4d(
    block_size: int,
    x0_len_list: torch.Tensor,
    position_ids: torch.Tensor,
    causal_attn: bool = False
) -> tuple[torch.Tensor, torch.Tensor]:
    """Generates a 4D attention mask for block-diffusion attention patterns.

    The mask consists of three regions:
    1. Causal block (top-left): standard causal attention for `x0` tokens.
    2. Mutual block (bottom-right): non-causal attention within the same block for non-`x0` tokens.
    3. Prefix block (bottom-left): non-`x0` tokens can attend to a prefix of the `x0` tokens.

    Args:
        block_size (int): Size of processing blocks for non-`x0` tokens.
        x0_len_list (torch.Tensor): Tensor of shape [B] containing lengths of `x0` segments per batch.
        position_ids (torch.Tensor): Tensor of shape [B, seq_len] containing position IDs.
        causal_attn (bool, optional): If True, enforces causal masking in mutual blocks. Defaults to False.

    Returns:
        tuple[torch.Tensor, torch.Tensor]:
            - A float mask of shape [batch_size, 1, seq_len, seq_len] with `-inf` at masked (non-visible) positions.
            - A boolean mask of shape [batch_size, 1, seq_len, seq_len] indicating allowed attention positions.
    """
    batch_size, seq_len = position_ids.shape
    device = position_ids.device

    # Create position indices [batch_size, seq_len, seq_len]
    q_idx = torch.arange(seq_len, device=device).view(1, seq_len, 1)   # [1, seq_len, 1]
    kv_idx = torch.arange(seq_len, device=device).view(1, 1, seq_len)  # [1, 1, seq_len]

    # Broadcast to [batch_size, seq_len, seq_len]
    x0_len = x0_len_list.view(batch_size, 1, 1)  # [batch_size, 1, 1]
    x0_flag_q = q_idx < x0_len                   # [batch_size, seq_len, seq_len]
    x0_flag_kv = kv_idx < x0_len

    # Block indices [batch_size, seq_len, seq_len]
    q_block_idx = (q_idx - x0_len) // block_size
    kv_block_idx = (kv_idx - x0_len) // block_size

    # Causal block (top-left)
    block_causal = x0_flag_q & x0_flag_kv & (q_idx >= kv_idx)

    # Mutual block (bottom-right)
    mutual_condition = (q_idx >= kv_idx) if causal_attn else torch.ones_like(q_idx, dtype=torch.bool)
    block_mutual = (~x0_flag_q & ~x0_flag_kv &
                    (q_block_idx == kv_block_idx) &
                    mutual_condition)

    # Prefix block (bottom-left)
    q_blk = torch.div(q_idx - x0_len, block_size, rounding_mode='floor')
    q_blk_start = (x0_len_list.view(batch_size, 1) + q_blk[:, :, 0] * block_size).clamp(min=0, max=seq_len - 1)  # (batch_size, seq_len)
    prefix_len = position_ids.gather(1, q_blk_start)
    prefix_len = prefix_len.unsqueeze(2)
    block_prefix = (~x0_flag_q & x0_flag_kv) & (kv_idx < prefix_len)

    # FIXME: padding mask
    # padding_mask = (position_ids.view(batch_size, 1, seq_len) != -1) & (position_ids.view(batch_size, seq_len, -1) != -1)

    # Combine masks
    final_mask = (block_causal | block_mutual | block_prefix)  # bool
    # & padding_mask
    customized_mask = torch.full_like(final_mask, float('-inf'), dtype=torch.bfloat16)
    customized_mask.masked_fill_(final_mask, 0.0)  # 0.0 where visible, -inf where masked

    # Add head dimension [batch_size, 1, seq_len, seq_len]
    return customized_mask.unsqueeze(1).to(device=device), final_mask.unsqueeze(1).to(device=device)

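# --- Illustrative usage sketch (added in this write-up; not part of the original checkpoint file).
# Hypothetical shapes: 2 samples, 24 tokens, prompt lengths 8 and 12, diffusion blocks of 4.
def _demo_block_diff_mask_4d():
    B, L, block_size = 2, 24, 4
    x0_len_list = torch.tensor([8, 12])
    position_ids = torch.arange(L).repeat(B, 1)  # SDLM may remap these for mask tokens
    additive_mask, bool_mask = create_block_diff_mask_by_pe_4d(
        block_size=block_size, x0_len_list=x0_len_list, position_ids=position_ids
    )
    # additive_mask: bfloat16, shape [2, 1, 24, 24], 0.0 where visible and -inf where hidden;
    # bool_mask: same shape, True where attention is allowed.
    return additive_mask, bool_mask
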
def find_pred_pos_from_input_ids(
    input_ids: torch.LongTensor = None,
    text_mask_token_id: int = 151666,
) -> torch.Tensor:
    """Compute the relative prediction positions for masked tokens in a sequence.

    For non-masked positions, the output is 0. For masked positions, the value increments
    by 1 for each consecutive mask token, indicating how many steps ahead the prediction is.

    Args:
        input_ids (torch.LongTensor): Input token IDs of shape [batch_size, seq_len].
        text_mask_token_id (int, optional): Token ID representing masked positions. Defaults to 151666.

    Returns:
        torch.Tensor: A tensor of shape [batch_size, seq_len] where:
            - 0 indicates a non-masked token.
            - n > 0 indicates the nth consecutive masked token (e.g., 1 = first mask, 2 = second mask, etc.).
    """
    batch_size, seq_len = input_ids.shape
    device = input_ids.device

    is_mask = (input_ids == text_mask_token_id)

    base_mask = torch.zeros((batch_size, seq_len), dtype=torch.int8, device=device)

    for b in range(batch_size):
        for ix in range(1, seq_len):
            if is_mask[b][ix]:
                # Increment the counter while the current token is masked
                base_mask[b][ix] = base_mask[b][ix - 1] + 1

    return base_mask
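For illustration, a tiny input/output example of `find_pred_pos_from_input_ids`, using an assumed mask id of 9 in place of the real `<text_mask>` id:

import torch

ids = torch.tensor([[5, 9, 9, 9, 7, 9, 9]])
find_pred_pos_from_input_ids(ids, text_mask_token_id=9)
# -> tensor([[0, 1, 2, 3, 0, 1, 2]], dtype=torch.int8)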
sdlm_ckpt_final/sdlm_3b_bs4/config.json
ADDED
@@ -0,0 +1,37 @@
{
  "architectures": [
    "SDLMQwen2ForCausalLM"
  ],
  "auto_map": {
    "AutoConfig": "configuration_sdlm.SDLMQwen2Config",
    "AutoModelForCausalLM": "modeling_sdlm.SDLMQwen2ForCausalLM"
  },
  "attention_dropout": 0.0,
  "attn_implementation": "eager",
  "block_size": 4,
  "bos_token_id": 151643,
  "casual_attn": false,
  "eos_token_id": 151643,
  "hidden_act": "silu",
  "hidden_size": 2048,
  "initializer_range": 0.02,
  "intermediate_size": 11008,
  "max_position_embeddings": 32768,
  "max_window_layers": 36,
  "model_type": "qwen2",
  "num_attention_heads": 16,
  "num_hidden_layers": 36,
  "num_key_value_heads": 2,
  "rms_norm_eps": 1e-06,
  "rope_theta": 1000000.0,
  "sliding_window": 32768,
  "text_mask_token": "<text_mask>",
  "text_mask_token_id": 151665,
  "tie_word_embeddings": true,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.37.2",
  "use_cache": true,
  "use_mrope": false,
  "use_sliding_window": false,
  "vocab_size": 151666
}
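Because `auto_map` points at the custom `configuration_sdlm` / `modeling_sdlm` modules shipped next to this file, loading the checkpoint requires `trust_remote_code=True`. A minimal loading sketch (the local path is a placeholder):

from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

ckpt = "sdlm_ckpt_final/sdlm_3b_bs4"  # placeholder local path
tokenizer = AutoTokenizer.from_pretrained(ckpt, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    ckpt,
    torch_dtype=torch.bfloat16,   # matches "torch_dtype" above
    trust_remote_code=True,       # needed for the custom auto_map classes
)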
sdlm_ckpt_final/sdlm_3b_bs4/configuration_sdlm.py
ADDED
@@ -0,0 +1,147 @@
# coding=utf-8
# Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Qwen2 model configuration"""

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging


logger = logging.get_logger(__name__)

QWEN2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Qwen/Qwen2-7B-beta": "https://huggingface.co/Qwen/Qwen2-7B-beta/resolve/main/config.json",
}


class SDLMQwen2Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Qwen2Model`]. It is used to instantiate a
    Qwen2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of
    Qwen2-7B-beta [Qwen/Qwen2-7B-beta](https://huggingface.co/Qwen/Qwen2-7B-beta).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 151936):
            Vocabulary size of the Qwen2 model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`Qwen2Model`]
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 22016):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_key_value_heads (`int`, *optional*, defaults to 32):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
            `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `32`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 32768):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether the model's input and output word embeddings should be tied.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        use_sliding_window (`bool`, *optional*, defaults to `False`):
            Whether to use sliding window attention.
        sliding_window (`int`, *optional*, defaults to 4096):
            Sliding window attention (SWA) window size. If not specified, will default to `4096`.
        max_window_layers (`int`, *optional*, defaults to 28):
            The number of layers that use SWA (Sliding Window Attention). The bottom layers use SWA while the top use full attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.

    ```python
    >>> from transformers import Qwen2Model, Qwen2Config

    >>> # Initializing a Qwen2 style configuration
    >>> configuration = Qwen2Config()

    >>> # Initializing a model from the Qwen2-7B style configuration
    >>> model = Qwen2Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "qwen2"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=151936,
        hidden_size=4096,
        intermediate_size=22016,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=32,
        hidden_act="silu",
        max_position_embeddings=32768,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        use_sliding_window=False,
        sliding_window=4096,
        max_window_layers=28,
        attention_dropout=0.0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.use_sliding_window = use_sliding_window
        self.sliding_window = sliding_window
        self.max_window_layers = max_window_layers

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.attention_dropout = attention_dropout
        if kwargs.get('attn_implementation', None) is None:
            self.attn_implementation = kwargs['attn_implementation'] = 'flash_attention_2'
        else:
            self.attn_implementation = kwargs['attn_implementation']

        super().__init__(
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
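For reference, a hedged sketch of building this config directly in Python. The SDLM-specific keys (`block_size`, `casual_attn`, `text_mask_token`, `text_mask_token_id`) are not named parameters of `__init__`, so this assumes they are stored on the config via `**kwargs` by `PretrainedConfig`; the values simply mirror the 3B config.json above:

from configuration_sdlm import SDLMQwen2Config

config = SDLMQwen2Config(
    vocab_size=151666,
    hidden_size=2048,
    intermediate_size=11008,
    num_hidden_layers=36,
    num_attention_heads=16,
    num_key_value_heads=2,
    tie_word_embeddings=True,
    rope_theta=1000000.0,
    attn_implementation="eager",
    # SDLM-specific fields, forwarded through **kwargs:
    block_size=4,
    casual_attn=False,
    text_mask_token="<text_mask>",
    text_mask_token_id=151665,
)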
sdlm_ckpt_final/sdlm_3b_bs4/generation_config.json
ADDED
@@ -0,0 +1,10 @@
{
  "attn_implementation": "eager",
  "bos_token_id": 151643,
  "eos_token_id": [
    151643,
    151645
  ],
  "max_new_tokens": 4096,
  "transformers_version": "4.37.2"
}
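Continuing the loading sketch above, and assuming the custom SDLM class keeps the standard `generate` entry point, these defaults (two EOS ids, `max_new_tokens` of 4096) would be picked up automatically; the prompt below is a placeholder:

prompt = "Write a short poem about autumn."  # placeholder prompt
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output_ids = model.generate(**inputs, max_new_tokens=256)  # overrides the 4096 default
print(tokenizer.decode(output_ids[0, inputs["input_ids"].shape[1]:], skip_special_tokens=True))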
sdlm_ckpt_final/sdlm_3b_bs4/merges.txt
ADDED
The diff for this file is too large to render. See raw diff.
sdlm_ckpt_final/sdlm_3b_bs4/model-00001-of-00002.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:200449dfca9b05753170676361263f19072087724bd7c064df86cf3dcbd025a7
size 4956454384

sdlm_ckpt_final/sdlm_3b_bs4/model-00002-of-00002.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3bbcf44b214a63dca958a35df24f051f9732714bef3764afaf6fd7b84c3e1696
size 1835590832