init
This view is limited to 50 files because it contains too many changes.
- .gitignore +3 -0
- README.md +99 -0
- README_CN.md +100 -0
- cuda/wkv5_cuda.cu +202 -0
- cuda/wkv5_op.cpp +22 -0
- cuda/wkv6_cuda.cu +242 -0
- cuda/wkv6_op.cpp +22 -0
- cuda/wkv6infctx_cuda.cu +311 -0
- cuda/wkv6infctx_op.cpp +22 -0
- cuda/wkv6state_cuda.cu +311 -0
- cuda/wkv6state_op.cpp +22 -0
- demo/demo-predict.sh +39 -0
- demo/demo-state-tuning.sh +38 -0
- fla/__init__.py +50 -0
- fla/layers/__init__.py +25 -0
- fla/layers/abc.py +195 -0
- fla/layers/based.py +126 -0
- fla/layers/delta_net.py +254 -0
- fla/layers/gated_abc.py +234 -0
- fla/layers/gla.py +268 -0
- fla/layers/hgrn.py +165 -0
- fla/layers/hgrn2.py +186 -0
- fla/layers/linear_attn.py +156 -0
- fla/layers/multiscale_retention.py +271 -0
- fla/layers/rebased.py +137 -0
- fla/layers/rwkv6.py +264 -0
- fla/layers/simple_gla.py +143 -0
- fla/models/__init__.py +29 -0
- fla/models/abc/__init__.py +13 -0
- fla/models/abc/configuration_abc.py +74 -0
- fla/models/abc/modeling_abc.py +394 -0
- fla/models/delta_net/__init__.py +14 -0
- fla/models/delta_net/configuration_delta_net.py +77 -0
- fla/models/delta_net/modeling_delta_net.py +405 -0
- fla/models/gla/__init__.py +13 -0
- fla/models/gla/configuration_gla.py +80 -0
- fla/models/gla/modeling_gla.py +403 -0
- fla/models/hgrn/__init__.py +13 -0
- fla/models/hgrn/configuration_hgrn.py +66 -0
- fla/models/hgrn/modeling_hgrn.py +407 -0
- fla/models/hgrn2/__init__.py +13 -0
- fla/models/hgrn2/configuration_hgrn2.py +66 -0
- fla/models/hgrn2/modeling_hgrn2.py +407 -0
- fla/models/linear_attn/__init__.py +14 -0
- fla/models/linear_attn/configuration_linear_attn.py +70 -0
- fla/models/linear_attn/modeling_linear_attn.py +424 -0
- fla/models/mamba/__init__.py +14 -0
- fla/models/mamba/configuration_mamba.py +156 -0
- fla/models/mamba/modeling_mamba.py +605 -0
- fla/models/retnet/__init__.py +13 -0
.gitignore
ADDED
@@ -0,0 +1,3 @@
__pycache__/
wandb/
src/__pycache__/
README.md
ADDED
@@ -0,0 +1,99 @@
## Speech tasks with frozen RWKV language models

- [中文说明](README_CN.md)
- [English](README.md)

This repo is an exploratory experiment to enable frozen pretrained RWKV language models to accept speech input. LLMs trained on text data are generally not directly applicable to speech recognition tasks, and there are many solutions (such as adapters + pretrained audio encoders, or neural audio codecs) for bridging the gap between text and speech. We follow the idea of [SLAM_ASR](https://arxiv.org/abs/2402.08846) and use the RWKV language model as the LLM; instead of writing a prompt template, we directly finetune the initial state of the RWKV model. With a 3B RWKV model we achieve 4.6% WER on the LibriSpeech 960h clean test set (6.9% on the other test set).

The code in this repo is developed on [RWKV-PEFT](https://github.com/JL-er/RWKV-PEFT), and the current implementation of the speech encoder and adapter is based on [SLAM_ASR](https://arxiv.org/abs/2402.08846#).

### Roadmap

We want to explore compute-efficient and high-performance ways to extend text-based RWKV into multimodal models. In the audio and speech modality, these are the tasks we are attempting:

- [x] ASR in a single language
- [x] ASR in many languages
- [x] Speech translation
- [x] Voice-input question answering (like GPT-4o)
- [ ] Other audio tasks
- [ ] Multi-turn conversation

### Environment

The following commands will create a new conda environment and install the required packages:

```bash
conda create -n rwkv python=3.10
conda activate rwkv
pip install -r requirements.txt
```

### Training

1. Download the RWKV-6-World model files from one of the following links. We used the 3B model in our experiments, i.e. RWKV-x060-World-3B-v2.1-20240417-ctx4096.pth.

- [Hugging Face](https://huggingface.co/BlinkDL/rwkv-6-world/tree/main)
- [Hf Mirror (CN)](https://hf-mirror.com/BlinkDL/rwkv-6-world/tree/main)
- [Modelscope](https://modelscope.cn/models/Blink_DL/rwkv-6-world/files)

2. Open ```demo/demo-state-tuning.sh```. Set ```OP=train``` for training and ```load_model=path/to/your/model/```. Modify ```n_layer``` and ```n_embd``` according to the table below:

| Model | n_layer | n_embd |
| --------- | ---- | ---- |
| 1.6B | 24 | 2048 |
| 3B | 32 | 2560 |
| 7B | 32 | 4096 |
| 14B | 61 | 4096 |

Other parameters for training:

| parameter | description |
| --------- | ---- |
| micro_bsz | batch size per device |
| epoch_steps | number of steps in one epoch; set this to (dataset size / real batch size) |
| device | number of GPUs used for training |

The default setting trains a 3B RWKV model on the LibriSpeech 960h dataset with 4 devices and a batch size of 4 per device (real batch size = 16). For example, LibriSpeech 960h has roughly 280k training utterances, so epoch_steps ≈ 280000 / 16 = 17500.

3. The script will overwrite the .pth files in ```output/```, so move any .pth model files you still need from this directory to another location before training.
4. Run ```sh demo/demo-state-tuning.sh``` to start the training process.

The training process looks like this:

- It first loads the provided RWKV model and a speech encoder model from Hugging Face. An adapter and an initial state for the RWKV model are initialized randomly.
- The (symbolically) simplified formula for this model is:

```
RWKV( [InitialState], [Adapter](SpeechEncoder(audio))) -> "The weather is good. <s>"
```

Modules and variables in `[ ]` are trained; everything else is frozen. A minimal sketch of this wiring is shown below.
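To make the wiring concrete, here is a minimal PyTorch sketch of the setup. It is an illustration only: the module names (`speech_encoder`, `rwkv`, `adapter`), the state shape, and the `state=` keyword are assumptions for the sketch, not the actual classes or interfaces in this repo.

```python
import torch
import torch.nn as nn

class SpeechRWKV(nn.Module):
    """Sketch: frozen speech encoder + frozen RWKV; only the adapter and
    the RWKV initial state receive gradients."""
    def __init__(self, speech_encoder, rwkv, enc_dim, n_embd, state_shape):
        super().__init__()
        self.speech_encoder = speech_encoder      # e.g. WavLM Large, frozen
        self.rwkv = rwkv                          # pretrained RWKV-6, frozen
        self.adapter = nn.Sequential(             # trained
            nn.Linear(enc_dim, n_embd), nn.GELU(), nn.Linear(n_embd, n_embd))
        self.initial_state = nn.Parameter(torch.zeros(state_shape))  # trained
        for p in self.speech_encoder.parameters():
            p.requires_grad = False
        for p in self.rwkv.parameters():
            p.requires_grad = False

    def forward(self, audio):
        feats = self.speech_encoder(audio)        # (B, T_audio, enc_dim)
        embeds = self.adapter(feats)              # (B, T_audio, n_embd)
        # RWKV consumes the adapted audio embeddings starting from the tuned
        # initial state and is trained to emit the transcript tokens.
        return self.rwkv(embeds, state=self.initial_state)
```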
There is also code to enable other PEFT training of the whole model. Note that not all of these methods are fully adapted to speech-modality training yet, and we are still actively working on this.

### Evaluation

Follow the instructions in Training, but set ```OP=eval``` in ```demo/demo-state-tuning.sh```. The trained model in ```output/``` will be used to compute WER on the clean and other test sets of LibriSpeech.

### Audio File Prediction

Open ```demo/demo-predict.sh``` and modify ```file_path=path/to/your/audio/file```. Run ```sh demo/demo-predict.sh``` to load the trained weights in ```output/``` and predict the content of the input audio file.

### Pretrained weights

Download the pretrained weights from the following links:

ASR: https://huggingface.co/JerryAGENDD/RWKV-ASR/tree/main/ASR

SpeechTranslate: https://huggingface.co/JerryAGENDD/RWKV-ASR/tree/main/ST

SpeechQA: https://huggingface.co/JerryAGENDD/RWKV-ASR/tree/main/SpeechQA

The pretrained weights contain the necessary parameters for the adapter and the RWKV initial state. They were trained using WavLM Large as the speech encoder and RWKV-3B as the language model (the script's default configuration). Place the weights in the ```output/``` directory for the script to load them.

### Speech Chat with RWKV

A script for real-time speech conversation with RWKV:

https://github.com/AGENDD/RWKV-SpeechChat

You can use the trained weights to interact with RWKV in real time.
README_CN.md
ADDED
@@ -0,0 +1,100 @@
## Speech recognition with pretrained RWKV language models

- [中文说明](README_CN.md)
- [English](README.md)

This repo is an exploratory experiment that enables pretrained RWKV language models to accept speech input. LLMs trained on text data are generally not directly applicable to speech recognition tasks, and many solutions (such as adapters + pretrained audio encoders, or neural audio codecs) exist to bridge the gap between text and speech. We follow the idea of [SLAM_ASR](https://arxiv.org/abs/2402.08846), using the RWKV language model as the LLM; instead of writing a prompt template, we directly finetune the initial state of the RWKV model. On the LibriSpeech 960h clean test set we achieve 4.6% WER with a 3B RWKV model (6.9% on the other test set).

The code in this repo is developed on [RWKV-PEFT](https://github.com/JL-er/RWKV-PEFT). The current speech encoder and adapter implementation is based on [SLAM_ASR](https://arxiv.org/abs/2402.08846#).

### Roadmap

We want to explore compute-efficient, high-performance ways to extend text-based RWKV into multimodal models. In the audio and speech modality, we are attempting the following tasks:

- [x] Single-language ASR
- [x] Multilingual ASR
- [x] Speech translation
- [x] Voice-input question answering (like GPT-4o)
- [ ] Other audio tasks
- [ ] Multi-turn conversation

### Environment

The following commands create a new conda environment and install the required packages:

```bash
conda create -n rwkv python=3.10
conda activate rwkv
pip install -r requirements.txt
```

### Training

1. Download the RWKV-6-World model files from one of the links below. We used the 3B model in our experiments, i.e. RWKV-x060-World-3B-v2.1-20240417-ctx4096.pth.

- [Hugging Face](https://huggingface.co/BlinkDL/rwkv-6-world/tree/main)
- [Hf Mirror (CN)](https://hf-mirror.com/BlinkDL/rwkv-6-world/tree/main)
- [Modelscope](https://modelscope.cn/models/Blink_DL/rwkv-6-world/files)

2. Open ```demo/demo-state-tuning.sh```. Set ```OP=train``` for training and set ```load_model=path/to/your/model/``` to your model path. Modify ```n_layer``` and ```n_embd``` according to the table below:

| Model | n_layer | n_embd |
| --------- | ---- | ---- |
| 1.6B | 24 | 2048 |
| 3B | 32 | 2560 |
| 7B | 32 | 4096 |
| 14B | 61 | 4096 |

Other training parameters:

| parameter | description |
| --------- | ---- |
| micro_bsz | batch size per device |
| epoch_steps | number of steps per epoch; set this to (dataset size / real batch size) |
| device | number of GPUs used for training |

The default setting trains a 3B RWKV model on 4 devices with a batch size of 4 per device (real batch size = 16).

3. The script will overwrite the .pth files in ```output/```. Make sure to move any .pth model files you still need to another directory before training!
4. Run ```sh demo/demo-state-tuning.sh``` to start the training process.

The training process is as follows:

- It first loads the RWKV model and a speech encoder model downloaded from Hugging Face. An adapter and the RWKV model's initial state are initialized randomly.
- The (symbolically) simplified formula of the model is:

```
RWKV( [InitialState], [Adapter](SpeechEncoder(audio))) -> "The weather is good. <s>"
```

The parts wrapped in `[ ]` are trained; the other parameters are frozen.

There is also code to enable other PEFT training of the whole model. For now, not all of these methods are fully adapted to speech-modality training; we are still actively developing this.

### Evaluation

Follow the training steps, but set `OP=eval` in `demo/demo-state-tuning.sh`. The model saved in `output/` will be used for evaluation; the script computes WER on the LibriSpeech 960h clean and other test sets.

### Audio File Prediction

Open ```demo/demo-predict.sh``` and set ```file_path``` to the path of the input audio. Run ```sh demo/demo-predict.sh``` to load the trained weights from ```output/``` and predict the audio content.

### Pretrained weights

Download the pretrained weights from the following links:

Speech recognition: https://huggingface.co/JerryAGENDD/RWKV-ASR/tree/main/ASR

Speech translation: https://huggingface.co/JerryAGENDD/RWKV-ASR/tree/main/ST

Speech QA: https://huggingface.co/JerryAGENDD/RWKV-ASR/tree/main/SpeechQA

The pretrained weights contain the necessary parameters for the adapter and the RWKV initial state. They were trained with WavLM Large as the speech encoder and RWKV-3B as the language model (the script's default configuration). Place the weights in the ```output/``` directory so the script can load them.

### Speech Chat with RWKV

A script for real-time speech conversation with RWKV:

https://github.com/AGENDD/RWKV-SpeechChat

You can use the trained weights to interact with RWKV in real time.
cuda/wkv5_cuda.cu
ADDED
@@ -0,0 +1,202 @@
#include <stdio.h>
#include <assert.h>
#include "ATen/ATen.h"
typedef at::BFloat16 bf16;

template <typename F>
__global__ void kernel_forward(const int B, const int T, const int C, const int H,
                               const F *__restrict__ const _r, const F *__restrict__ const _k, const F *__restrict__ const _v, const float *__restrict__ _w, const F *__restrict__ _u,
                               F *__restrict__ const _y)
{
    const int b = blockIdx.x / H;
    const int h = blockIdx.x % H;
    const int i = threadIdx.x;
    _w += h*_N_;
    _u += h*_N_;

    __shared__ float r[_N_], k[_N_], u[_N_], w[_N_];
    float state[_N_] = {0};

    __syncthreads();
    w[i] = _w[i];
    u[i] = float(_u[i]);
    __syncthreads();

    for (int t = b*T*C + h*_N_ + i; t < (b+1)*T*C + h*_N_ + i; t += C)
    {
        __syncthreads();
        r[i] = float(_r[t]);
        k[i] = float(_k[t]);
        __syncthreads();

        const float v = float(_v[t]);
        float y = 0;

        #pragma unroll
        for (int j = 0; j < _N_; j+=4)
        {
            const float4& r_ = (float4&)(r[j]);
            const float4& k_ = (float4&)(k[j]);
            const float4& w_ = (float4&)(w[j]);
            const float4& u_ = (float4&)(u[j]);
            float4& s = (float4&)(state[j]);
            float4 x;

            x.x = k_.x * v;
            x.y = k_.y * v;
            x.z = k_.z * v;
            x.w = k_.w * v;

            y += r_.x * (u_.x * x.x + s.x);
            y += r_.y * (u_.y * x.y + s.y);
            y += r_.z * (u_.z * x.z + s.z);
            y += r_.w * (u_.w * x.w + s.w);

            s.x = s.x * w_.x + x.x;
            s.y = s.y * w_.y + x.y;
            s.z = s.z * w_.z + x.z;
            s.w = s.w * w_.w + x.w;
        }
        _y[t] = F(y);
    }
}

template <typename F>
__global__ void kernel_backward(const int B, const int T, const int C, const int H,
                                const F *__restrict__ const _r, const F *__restrict__ const _k, const F *__restrict__ const _v, const float *__restrict__ _w, const float *__restrict__ __w, const F *__restrict__ _u, const F *__restrict__ const _gy,
                                F *__restrict__ const _gr, F *__restrict__ const _gk, F *__restrict__ const _gv, F *__restrict__ const _gw, F *__restrict__ const _gu)
{
    const int b = blockIdx.x / H;
    const int h = blockIdx.x % H;
    const int i = threadIdx.x;
    _w += h*_N_;
    _u += h*_N_;
    __w += h*_N_;

    __shared__ float w_[_N_], u_[_N_];
    __shared__ float r[_N_], k[_N_], v[_N_], gy[_N_];
    __syncthreads();
    w_[i] = _w[i];
    u_[i] = float(_u[i]);
    __syncthreads();

    const float w = w_[i];
    const float ww = __w[i];
    const float u = u_[i];

    float state[_N_] = {0}, saaaa[_N_] = {0}, sbbbb[_N_] = {0}, scccc[_N_] = {0}, sdddd[_N_] = {0};

    float gw = 0, gu = 0;
    const int t000 = b*T*C + h*_N_ + i;
    const int t111 = (b+1)*T*C + h*_N_ + i;
    const int t222 = t111 - 2*C;

    for (int t = t000; t < t111; t += C)
    {
        __syncthreads();
        v[i] = float(_v[t]);
        gy[i] = float(_gy[t]);
        __syncthreads();

        const float k = float(_k[t]);
        float gr = 0, gu_ = 0;

        #pragma unroll
        for (int j = 0; j < _N_; j++)
        {
            float& s = state[j];
            float x = k * v[j];

            gr += (u * x + s) * gy[j];
            gu_ += x * gy[j];
            s = s * w + x;
        }
        _gr[t] = F(gr);
        gu += float(_r[t]) * gu_;
    }
    _gu[b*C + h*_N_ + i] = F(gu);

    for (int t = t000; t < t222; t += C)
    {
        __syncthreads();
        v[i] = float(_v[t]);
        gy[i] = float(_gy[t + 2*C]);
        __syncthreads();

        const float k = float(_k[t]);
        float gw_ = 0;

        #pragma unroll
        for (int j = 0; j < _N_; j++)
        {
            float& s = saaaa[j];
            float& s2 = sbbbb[j];
            float x = k * v[j];

            float tmp = w * (x + s);
            s = tmp;
            s2 = tmp + w * s2;
            gw_ += s2 * gy[j];
        }
        gw += float(_r[t + 2*C]) * gw_;
    }
    _gw[b*C + h*_N_ + i] = F(ww * gw);

    for (int t = t111 - C; t >= t000; t -= C)
    {
        __syncthreads();
        v[i] = float(_v[t]);
        gy[i] = float(_gy[t]);
        __syncthreads();

        const float rr = float(_r[t]);
        float gk = 0;

        #pragma unroll
        for (int j = 0; j < _N_; j++)
        {
            float& s = scccc[j];
            float x = rr * gy[j];

            gk += (u * x + s) * v[j];
            s = x + s * w;
        }
        _gk[t] = F(gk);
    }

    for (int t = t111 - C; t >= t000; t -= C)
    {
        __syncthreads();
        r[i] = float(_r[t]);
        k[i] = float(_k[t]);
        __syncthreads();

        const float gyy = float(_gy[t]);
        float gv = 0;

        #pragma unroll
        for (int j = 0; j < _N_; j++)
        {
            float& s = sdddd[j];
            float x = gyy * r[j];

            gv += (u_[j] * x + s) * k[j];
            s = x + s * w_[j];
        }
        _gv[t] = F(gv);
    }
}

void cuda_forward(int B, int T, int C, int H, bf16 *r, bf16 *k, bf16 *v, float *w, bf16 *u, bf16 *y)
{
    assert(H*_N_ == C);
    assert(_N_%4 == 0);
    kernel_forward<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, r, k, v, w, u, y);
}

void cuda_backward(int B, int T, int C, int H, bf16 *r, bf16 *k, bf16 *v, float *w, float *ww, bf16 *u, bf16 *gy, bf16 *gr, bf16 *gk, bf16 *gv, bf16 *gw, bf16 *gu)
{
    assert(H*_N_ == C);
    assert(_N_%4 == 0);
    kernel_backward<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, r, k, v, w, ww, u, gy, gr, gk, gv, gw, gu);
}
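For readers decoding the forward kernel above: per head, thread i holds row i of an N×N state matrix S (N is the compile-time head size `_N_`), updated as S[i][j] ← S[i][j]·w[j] + k[j]·v[i], with output y[i] = Σ_j r[j]·(u[j]·k[j]·v[i] + S[i][j]). A minimal NumPy sketch of the same recurrence follows; it is a readability aid written for this rewrite, not code from the repo.

```python
import numpy as np

def wkv5_reference(r, k, v, w, u):
    """One head of the wkv5 forward kernel, step by step.
    r, k, v: (T, N); w, u: (N,) with w the per-channel decay in (0, 1)."""
    T, N = r.shape
    S = np.zeros((N, N))                         # state: thread i holds row S[i, :]
    y = np.zeros((T, N))
    for t in range(T):
        x = np.outer(v[t], k[t])                 # x[i, j] = k[j] * v[i]
        y[t] = (r[t] * (u * x + S)).sum(axis=1)  # y[i] = sum_j r[j]*(u[j]*x[i,j] + S[i,j])
        S = S * w + x                            # per-channel decay on column j
    return y
```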
cuda/wkv5_op.cpp
ADDED
@@ -0,0 +1,22 @@
#include <torch/extension.h>
#include "ATen/ATen.h"
typedef at::BFloat16 bf16;

void cuda_forward(int B, int T, int C, int H, bf16 *r, bf16 *k, bf16 *v, float *w, bf16 *u, bf16 *y);
void cuda_backward(int B, int T, int C, int H, bf16 *r, bf16 *k, bf16 *v, float *w, float *ww, bf16 *u, bf16 *gy, bf16 *gr, bf16 *gk, bf16 *gv, bf16 *gw, bf16 *gu);

void forward(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &u, torch::Tensor &y) {
    cuda_forward(B, T, C, H, r.data_ptr<bf16>(), k.data_ptr<bf16>(), v.data_ptr<bf16>(), w.data_ptr<float>(), u.data_ptr<bf16>(), y.data_ptr<bf16>());
}
void backward(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &ww, torch::Tensor &u, torch::Tensor &gy, torch::Tensor &gr, torch::Tensor &gk, torch::Tensor &gv, torch::Tensor &gw, torch::Tensor &gu) {
    cuda_backward(B, T, C, H, r.data_ptr<bf16>(), k.data_ptr<bf16>(), v.data_ptr<bf16>(), w.data_ptr<float>(), ww.data_ptr<float>(), u.data_ptr<bf16>(), gy.data_ptr<bf16>(), gr.data_ptr<bf16>(), gk.data_ptr<bf16>(), gv.data_ptr<bf16>(), gw.data_ptr<bf16>(), gu.data_ptr<bf16>());
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &forward, "wkv5 forward");
    m.def("backward", &backward, "wkv5 backward");
}

TORCH_LIBRARY(wkv5, m) {
    m.def("forward", forward);
    m.def("backward", backward);
}
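The binding above is a standard PyTorch C++/CUDA extension. As a rough sketch of how it can be JIT-compiled and called (the head size, flags, and tensor preparation are assumptions for illustration; the repo's own training code handles the actual build), note that the `-D_N_` flag must equal the head size so that C = H · _N_:

```python
import torch
from torch.utils.cpp_extension import load

HEAD_SIZE = 64  # assumed head size; must match the -D_N_ compile flag

wkv5 = load(
    name="wkv5",
    sources=["cuda/wkv5_op.cpp", "cuda/wkv5_cuda.cu"],
    extra_cuda_cflags=["-O3", "--use_fast_math", f"-D_N_={HEAD_SIZE}"],
)

B, T, H = 1, 16, 2
C = H * HEAD_SIZE
dev = "cuda"
r = torch.randn(B, T, C, device=dev, dtype=torch.bfloat16).contiguous()
k = torch.randn_like(r)
v = torch.randn_like(r)
u = torch.randn(H, HEAD_SIZE, device=dev, dtype=torch.bfloat16).contiguous()
# the kernel uses w directly as a multiplicative decay, so it should already
# be mapped into (0, 1), e.g. via exp(-exp(log_w))
log_w = torch.randn(H, HEAD_SIZE, device=dev, dtype=torch.float32)
w = torch.exp(-torch.exp(log_w)).contiguous()
y = torch.empty(B, T, C, device=dev, dtype=torch.bfloat16)

wkv5.forward(B, T, C, H, r, k, v, w, u, y)  # writes the output into y
```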
cuda/wkv6_cuda.cu
ADDED
@@ -0,0 +1,242 @@
#include <stdio.h>
#include <assert.h>
#include "ATen/ATen.h"
typedef at::BFloat16 bf16;

template <typename F>
__global__ void kernel_forward(const int B, const int T, const int C, const int H,
                               const F *__restrict__ const _r, const F *__restrict__ const _k, const F *__restrict__ const _v, const float *__restrict__ _w, const F *__restrict__ _u,
                               F *__restrict__ const _y)
{
    const int b = blockIdx.x / H;
    const int h = blockIdx.x % H;
    const int i = threadIdx.x;
    _u += h*_N_;

    __shared__ float r[_N_], k[_N_], u[_N_], w[_N_];
    float state[_N_] = {0};

    __syncthreads();
    u[i] = float(_u[i]);
    __syncthreads();

    for (int t = b*T*C + h*_N_ + i; t < (b+1)*T*C + h*_N_ + i; t += C)
    {
        __syncthreads();
        w[i] = exp(_w[t]);
        r[i] = float(_r[t]);
        k[i] = float(_k[t]);
        __syncthreads();

        const float v = float(_v[t]);
        float y = 0;

        #pragma unroll
        for (int j = 0; j < _N_; j+=4)
        {
            const float4& r_ = (float4&)(r[j]);
            const float4& k_ = (float4&)(k[j]);
            const float4& w_ = (float4&)(w[j]);
            const float4& u_ = (float4&)(u[j]);
            float4& s = (float4&)(state[j]);
            float4 x;

            x.x = k_.x * v;
            x.y = k_.y * v;
            x.z = k_.z * v;
            x.w = k_.w * v;

            y += r_.x * (u_.x * x.x + s.x);
            y += r_.y * (u_.y * x.y + s.y);
            y += r_.z * (u_.z * x.z + s.z);
            y += r_.w * (u_.w * x.w + s.w);

            s.x = s.x * w_.x + x.x;
            s.y = s.y * w_.y + x.y;
            s.z = s.z * w_.z + x.z;
            s.w = s.w * w_.w + x.w;
        }
        _y[t] = F(y);
    }
}

template <typename F>
__global__ void kernel_backward_111(const int B, const int T, const int C, const int H,
                                    const F *__restrict__ const _r, const F *__restrict__ const _k, const F *__restrict__ const _v, const float *__restrict__ _w, const F *__restrict__ _u, const F *__restrict__ const _gy,
                                    F *__restrict__ const _gr, F *__restrict__ const _gk, F *__restrict__ const _gv, F *__restrict__ const _gu)
{
    const int b = blockIdx.x / H;
    const int h = blockIdx.x % H;
    const int i = threadIdx.x;
    _u += h*_N_;

    __shared__ float u_[_N_];
    __shared__ float r[_N_], k[_N_], v[_N_], w_[_N_], gy[_N_];
    __syncthreads();
    u_[i] = float(_u[i]);
    __syncthreads();

    const float u = u_[i];

    float state[_N_] = {0}, scccc[_N_] = {0}, sdddd[_N_] = {0};

    const int t_0 = b*T*C + h*_N_ + i;
    const int t_T_1 = t_0 + (T-1)*C;
    const int t_T = t_0 + T*C;

    float gu = 0;
    for (int t = t_0; t < t_T; t += C)
    {
        __syncthreads();
        v[i] = float(_v[t]);
        gy[i] = float(_gy[t]);
        __syncthreads();

        const float k = float(_k[t]);
        const float w = exp(_w[t]);
        float gr = 0, gu_ = 0;

        #pragma unroll
        for (int j = 0; j < _N_; j++)
        {
            float& s = state[j];
            float x = k * v[j];

            gr += (u * x + s) * gy[j];
            gu_ += x * gy[j];
            s = s * w + x;
        }
        _gr[t] = F(gr);
        gu += float(_r[t]) * gu_;
    }
    _gu[b*C + h*_N_ + i] = F(gu);

    for (int t = t_T_1; t >= t_0; t -= C)
    {
        __syncthreads();
        v[i] = float(_v[t]);
        gy[i] = float(_gy[t]);
        __syncthreads();

        const float rr = float(_r[t]);
        const float w = exp(_w[t]);
        float gk = 0;

        #pragma unroll
        for (int j = 0; j < _N_; j++)
        {
            float& s = scccc[j];
            float x = rr * gy[j];

            gk += (u * x + s) * v[j];
            s = x + s * w;
        }
        _gk[t] = F(gk);
    }

    for (int t = t_T_1; t >= t_0; t -= C)
    {
        __syncthreads();
        r[i] = float(_r[t]);
        k[i] = float(_k[t]);
        w_[i] = exp(_w[t]);
        __syncthreads();

        const float gyy = float(_gy[t]);
        float gv = 0;

        #pragma unroll
        for (int j = 0; j < _N_; j++)
        {
            float& s = sdddd[j];
            float x = gyy * r[j];

            gv += (u_[j] * x + s) * k[j];
            s = x + s * w_[j];
        }
        _gv[t] = F(gv);
    }
}

template <typename F>
__global__ void kernel_backward_222(const int B, const int T, const int C, const int H,
                                    const F *__restrict__ const _r, const F *__restrict__ const _k, const F *__restrict__ const _v, const float *__restrict__ _w, const F *__restrict__ _u, const F *__restrict__ const _gy,
                                    F *__restrict__ const _gw)
{
    const int b = blockIdx.x / H;
    const int h = blockIdx.x % H;
    const int i = threadIdx.x;

    __shared__ float v[_N_], gy[_N_];
    float saaaa[_N_] = {0}, sbbbb[_T_-2] = {0}, scccc[_N_] = {0};

    const int t_0 = b*T*C + h*_N_ + i;
    const int t_1 = t_0 + C;
    const int t_2 = t_0 + 2*C;
    const int t_T_1 = t_0 + (T-1)*C;

    for (int t = t_T_1; t > t_1; t -= C)
    {
        __syncthreads();
        gy[i] = float(_gy[t]);
        v[i] = float(_v[t-2*C]);
        __syncthreads();

        const float r = float(_r[t]);
        const float w = exp(_w[t-C]);
        float sum = 0.0f;

        #pragma unroll
        for (int j = 0; j < _N_; j++)
        {
            float& s = saaaa[j];
            float x = r * gy[j];
            s = (s + x) * w;
            sum += s * v[j];
        }
        sbbbb[(t-t_2)/C] = sum * float(_k[t-2*C]);
    }

    float sss = sbbbb[0];
    _gw[t_0] = 0;
    _gw[t_1] = F(sss * _w[t_1]);

    for (int t = t_2; t < t_T_1; t += C)
    {
        __syncthreads();
        gy[i] = float(_gy[t]);
        v[i] = float(_v[t-2*C]);
        __syncthreads();

        const float w = exp(_w[t-C]);
        const float k = float(_k[t-2*C]);
        float sum = 0.0f;

        #pragma unroll
        for (int j = 0; j < _N_; j++)
        {
            float& s = scccc[j];
            float x = k * v[j];
            s = (s + x) * w;
            sum += s * gy[j];
        }
        sss += sbbbb[(t-t_1)/C] - (sum * float(_r[t]));
        _gw[t] = F(sss * _w[t]);
    }
    _gw[t_T_1] = 0;
}

void cuda_forward(int B, int T, int C, int H, bf16 *r, bf16 *k, bf16 *v, float *w, bf16 *u, bf16 *y)
{
    assert(H*_N_ == C);
    assert(_N_%4 == 0);
    kernel_forward<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, r, k, v, w, u, y);
}

void cuda_backward(int B, int T, int C, int H, bf16 *r, bf16 *k, bf16 *v, float *w, bf16 *u, bf16 *gy, bf16 *gr, bf16 *gk, bf16 *gv, bf16 *gw, bf16 *gu)
{
    assert(H*_N_ == C);
    assert(_N_%4 == 0);
    kernel_backward_111<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, r, k, v, w, u, gy, gr, gk, gv, gu);
    kernel_backward_222<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, r, k, v, w, u, gy, gw);
}
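Compared with wkv5, the decay here is data-dependent: the forward kernel reads a fresh decay for every timestep (`w[i] = exp(_w[t])`) instead of one per head. A minimal NumPy sketch of that change, under the same conventions as the wkv5 reference above (again an illustration written for this rewrite, not repo code):

```python
import numpy as np

def wkv6_reference(r, k, v, w, u):
    """Per-head WKV6 recurrence; r, k, v, w: (T, N), u: (N,). w holds the
    already-exponentiated per-step decay, matching exp(_w[t]) in the kernel."""
    T, N = r.shape
    S = np.zeros((N, N))                         # S[i, j]
    y = np.zeros((T, N))
    for t in range(T):
        x = np.outer(v[t], k[t])                 # x[i, j] = k[j] * v[i]
        y[t] = (r[t] * (u * x + S)).sum(axis=1)
        S = S * w[t] + x                         # decay now varies with t
    return y
```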
cuda/wkv6_op.cpp
ADDED
@@ -0,0 +1,22 @@
#include <torch/extension.h>
#include "ATen/ATen.h"
typedef at::BFloat16 bf16;

void cuda_forward(int B, int T, int C, int H, bf16 *r, bf16 *k, bf16 *v, float *w, bf16 *u, bf16 *y);
void cuda_backward(int B, int T, int C, int H, bf16 *r, bf16 *k, bf16 *v, float *w, bf16 *u, bf16 *gy, bf16 *gr, bf16 *gk, bf16 *gv, bf16 *gw, bf16 *gu);

void forward(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &u, torch::Tensor &y) {
    cuda_forward(B, T, C, H, r.data_ptr<bf16>(), k.data_ptr<bf16>(), v.data_ptr<bf16>(), w.data_ptr<float>(), u.data_ptr<bf16>(), y.data_ptr<bf16>());
}
void backward(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &u, torch::Tensor &gy, torch::Tensor &gr, torch::Tensor &gk, torch::Tensor &gv, torch::Tensor &gw, torch::Tensor &gu) {
    cuda_backward(B, T, C, H, r.data_ptr<bf16>(), k.data_ptr<bf16>(), v.data_ptr<bf16>(), w.data_ptr<float>(), u.data_ptr<bf16>(), gy.data_ptr<bf16>(), gr.data_ptr<bf16>(), gk.data_ptr<bf16>(), gv.data_ptr<bf16>(), gw.data_ptr<bf16>(), gu.data_ptr<bf16>());
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &forward, "wkv6 forward");
    m.def("backward", &backward, "wkv6 backward");
}

TORCH_LIBRARY(wkv6, m) {
    m.def("forward", forward);
    m.def("backward", backward);
}
cuda/wkv6infctx_cuda.cu
ADDED
@@ -0,0 +1,311 @@
#include <stdio.h>
#include <assert.h>
#include "ATen/ATen.h"
typedef at::BFloat16 bf16;

template <typename F>
__global__ void kernel_forward(const int B, const int T, const int C, const int H,
                               const F *__restrict__ const _r, const F *__restrict__ const _k, const F *__restrict__ const _v, const F *__restrict__ _w, const F *__restrict__ _u, F *__restrict__ _s,
                               F *__restrict__ const _y)
{
    const int b = blockIdx.x / H;
    const int h = blockIdx.x % H;
    const int i = threadIdx.x;
    _u += h*_N_;
    _s += b*C*_N_ + h*_N_*_N_ + i*_N_;

    __shared__ float r[_N_], k[_N_], u[_N_], w[_N_];
    float state[_N_];

    __syncthreads();
    u[i] = float(_u[i]);
    __syncthreads();
    for (int j = 0; j < _N_; j++) {
        state[j] = float(_s[j]);
    }

    for (int t = b*T*C + h*_N_ + i; t < (b+1)*T*C + h*_N_ + i; t += C)
    {
        __syncthreads();
        w[i] = __expf(-__expf(float(_w[t])));
        r[i] = float(_r[t]);
        k[i] = float(_k[t]);
        __syncthreads();

        const float v = float(_v[t]);
        float y = 0;

        #pragma unroll
        for (int j = 0; j < _N_; j+=4)
        {
            const float4& r_ = (float4&)(r[j]);
            const float4& k_ = (float4&)(k[j]);
            const float4& w_ = (float4&)(w[j]);
            const float4& u_ = (float4&)(u[j]);
            float4& s = (float4&)(state[j]);
            float4 x;

            x.x = k_.x * v;
            x.y = k_.y * v;
            x.z = k_.z * v;
            x.w = k_.w * v;

            y += r_.x * (u_.x * x.x + s.x);
            y += r_.y * (u_.y * x.y + s.y);
            y += r_.z * (u_.z * x.z + s.z);
            y += r_.w * (u_.w * x.w + s.w);

            s.x = s.x * w_.x + x.x;
            s.y = s.y * w_.y + x.y;
            s.z = s.z * w_.z + x.z;
            s.w = s.w * w_.w + x.w;
        }
        _y[t] = F(y);
    }
    #pragma unroll
    for (int j = 0; j < _N_; j++)
        _s[j] = F(state[j]);
}

template <typename F>
__global__ void kernel_backward_111(const int B, const int T, const int C, const int H,
                                    const F *__restrict__ const _r, const F *__restrict__ const _k, const F *__restrict__ const _v, const F *__restrict__ _w, const F *__restrict__ _u, const F *__restrict__ _s, const F *__restrict__ const _gy,
                                    F *__restrict__ const _gr, F *__restrict__ const _gk, F *__restrict__ const _gv, F *__restrict__ const _gu, F *__restrict__ const _gs)
{
    const int b = blockIdx.x / H;
    const int h = blockIdx.x % H;
    const int i = threadIdx.x;
    _u += h*_N_;
    _s += b*C + h*_N_ + i;

    __shared__ float u_[_N_];
    __shared__ float r[_N_], k[_N_], v[_N_], w_[_N_], gy[_N_];
    __syncthreads();
    u_[i] = float(_u[i]);
    __syncthreads();

    const float u = u_[i];

    float state[_N_], scccc[_N_] = {0}, sdddd[_N_] = {0}, sssss[_N_] = {0}, swwww[_N_];
    for (int j = 0; j < _N_; j++) {
        state[j] = float(_s[j*_N_]);
        swwww[j] = 1.0;
    }

    const int t_0 = b*T*C + h*_N_ + i;
    const int t_T_1 = t_0 + (T-1)*C;
    const int t_T = t_0 + T*C;

    float gu = 0;
    for (int t = t_0; t < t_T; t += C)
    {
        __syncthreads();
        v[i] = float(_v[t]);
        gy[i] = float(_gy[t]);
        __syncthreads();

        const float k = float(_k[t]);
        const float w = __expf(-__expf(float(_w[t])));
        float gr = 0, gu_ = 0;

        #pragma unroll
        for (int j = 0; j < _N_; j++)
        {
            float& s = state[j];
            float x = k * v[j];

            gr += (u * x + s) * gy[j];
            gu_ += x * gy[j];
            s = s * w + x;
        }
        _gr[t] = F(gr);
        gu += float(_r[t]) * gu_;
    }
    _gu[b*C + h*_N_ + i] = F(gu);

    for (int t = t_T_1; t >= t_0; t -= C)
    {
        __syncthreads();
        v[i] = float(_v[t]);
        gy[i] = float(_gy[t]);
        __syncthreads();

        const float rr = float(_r[t]);
        const float w = __expf(-__expf(float(_w[t])));
        float gk = 0;

        #pragma unroll
        for (int j = 0; j < _N_; j++)
        {
            float& s = scccc[j];
            float x = rr * gy[j];

            gk += (u * x + s) * v[j];
            s = x + s * w;
        }
        _gk[t] = F(gk);
    }

    for (int t = t_T_1; t >= t_0; t -= C)
    {
        __syncthreads();
        r[i] = float(_r[t]);
        k[i] = float(_k[t]);
        w_[i] = __expf(-__expf(float(_w[t])));
        __syncthreads();

        const float gyy = float(_gy[t]);
        float gv = 0;

        #pragma unroll
        for (int j = 0; j < _N_; j++)
        {
            float& s = sdddd[j];
            float x = gyy * r[j];

            gv += (u_[j] * x + s) * k[j];
            s = x + s * w_[j];
        }
        _gv[t] = F(gv);
    }

    for (int t = t_0; t < t_T; t += C)
    {
        __syncthreads();
        r[i] = float(_r[t]);
        w_[i] = __expf(-__expf(float(_w[t])));
        __syncthreads();

        const float gyy = float(_gy[t]);

        #pragma unroll
        for (int j = 0; j < _N_; j++)
        {
            float& w = swwww[j];
            sssss[j] += gyy * w * r[j];
            w *= w_[j];
        }
    }
    for (int j = 0; j < _N_; j++)
        _gs[b*H*_N_*_N_ + h*_N_*_N_ + i*_N_ + j] = F(sssss[j]);
}

template <typename F>
__global__ void kernel_backward_222(const int B, const int T, const int C, const int H,
                                    const F *__restrict__ const _r, const F *__restrict__ const _k, const F *__restrict__ const _v, const F *__restrict__ _w, const F *__restrict__ _u, const F *__restrict__ _s, const F *__restrict__ const _gy,
                                    F *__restrict__ const _gw)
{
    const int b = blockIdx.x / H;
    const int h = blockIdx.x % H;
    const int i = threadIdx.x;
    _s += b*C + h*_N_ + i;

    __shared__ float v[_N_], gy[_N_];
    float state[_N_], saaaa[_N_] = {0}, sbbbb[_T_-1] = {0}, scccc[_N_] = {0};
    for (int j = 0; j < _N_; j++) {
        state[j] = float(_s[j*_N_]);
    }

    const int t_0 = b*T*C + h*_N_ + i;
    const int t_1 = t_0 + C;
    const int t_2 = t_0 + 2*C;
    const int t_T_1 = t_0 + (T-1)*C;

    for (int t = t_T_1; t > t_1; t -= C)
    {
        __syncthreads();
        gy[i] = float(_gy[t]);
        v[i] = float(_v[t-2*C]);
        __syncthreads();

        const float r = float(_r[t]);
        const float w = __expf(-__expf(float(_w[t-C])));
        float sum = 0.0f;

        #pragma unroll
        for (int j = 0; j < _N_; j++)
        {
            float& s = saaaa[j];
            s = (s + r * gy[j]) * w;
            sum += s * v[j];
        }
        sbbbb[(t-t_1)/C] = sum * float(_k[t-2*C]);
    }
    {
        __syncthreads();
        gy[i] = float(_gy[t_1]);
        __syncthreads();

        const float r = float(_r[t_1]);
        const float w = __expf(-__expf(float(_w[t_0])));
        float sum = 0.0f;

        #pragma unroll
        for (int j = 0; j < _N_; j++)
        {
            float& s = saaaa[j];
            s = (s + r * gy[j]) * w;
            sum += s * state[j];
        }
        sbbbb[0] = sum;
    }

    float sss = sbbbb[0];
    _gw[t_0] = F(sss * -__expf(float(_w[t_0])));

    {
        __syncthreads();
        gy[i] = float(_gy[t_1]);
        __syncthreads();

        const float w = __expf(-__expf(float(_w[t_0])));
        float sum = 0.0f;

        #pragma unroll
        for (int j = 0; j < _N_; j++)
        {
            float& s = scccc[j];
            s = (s + state[j]) * w;
            sum += s * gy[j];
        }
        sss += sbbbb[1] - (sum * float(_r[t_1]));
        _gw[t_1] = F(sss * -__expf(float(_w[t_1])));
    }
    for (int t = t_2; t < t_T_1; t += C)
    {
        __syncthreads();
        gy[i] = float(_gy[t]);
        v[i] = float(_v[t-2*C]);
        __syncthreads();

        const float w = __expf(-__expf(float(_w[t-C])));
        const float k = float(_k[t-2*C]);
        float sum = 0.0f;

        #pragma unroll
        for (int j = 0; j < _N_; j++)
        {
            float& s = scccc[j];
            s = (s + k * v[j]) * w;
            sum += s * gy[j];
        }
        sss += sbbbb[(t-t_0)/C] - (sum * float(_r[t]));
        _gw[t] = F(sss * -__expf(float(_w[t])));
    }
    _gw[t_T_1] = 0;
}

void cuda_forward(int B, int T, int C, int H, bf16 *r, bf16 *k, bf16 *v, bf16 *w, bf16 *u, bf16 *z, bf16 *y)
{
    assert(H*_N_ == C);
    assert(_N_%4 == 0);
    kernel_forward<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, r, k, v, w, u, z, y);
}

void cuda_backward(int B, int T, int C, int H, bf16 *r, bf16 *k, bf16 *v, bf16 *w, bf16 *u, bf16 *z, bf16 *gy, bf16 *gr, bf16 *gk, bf16 *gv, bf16 *gw, bf16 *gu, bf16 *gs)
{
    assert(H*_N_ == C);
    assert(_N_%4 == 0);
    kernel_backward_111<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, r, k, v, w, u, z, gy, gr, gk, gv, gu, gs);
    kernel_backward_222<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, r, k, v, w, u, z, gy, gw);
}
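What distinguishes this infctx variant is visible at the top and bottom of its forward kernel: the state is loaded from `_s` instead of starting at zero, and written back to `_s` after the last timestep, so a long sequence can be processed chunk by chunk with the state carried across calls. A NumPy sketch of that contract (per head, written for this rewrite; `w` is assumed already mapped to the (0, 1) decay, matching `__expf(-__expf(_w[t]))`):

```python
import numpy as np

def wkv6_chunk_reference(r, k, v, w, u, s0):
    """One chunk: starts from state s0 (N, N), returns outputs and final state."""
    T, N = r.shape
    S = s0.copy()
    y = np.zeros((T, N))
    for t in range(T):
        x = np.outer(v[t], k[t])
        y[t] = (r[t] * (u * x + S)).sum(axis=1)
        S = S * w[t] + x
    return y, S

# Feeding chunks back-to-back reproduces a single long pass:
#   S = np.zeros((N, N))
#   for r_c, k_c, v_c, w_c in chunks:
#       y_c, S = wkv6_chunk_reference(r_c, k_c, v_c, w_c, u, S)
```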
cuda/wkv6infctx_op.cpp
ADDED
@@ -0,0 +1,22 @@
#include <torch/extension.h>
#include "ATen/ATen.h"
typedef at::BFloat16 bf16;

void cuda_forward(int B, int T, int C, int H, bf16 *r, bf16 *k, bf16 *v, bf16 *w, bf16 *u, bf16 *s, bf16 *y);
void cuda_backward(int B, int T, int C, int H, bf16 *r, bf16 *k, bf16 *v, bf16 *w, bf16 *u, bf16 *s, bf16 *gy, bf16 *gr, bf16 *gk, bf16 *gv, bf16 *gw, bf16 *gu, bf16 *gs);

void forward(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &u, torch::Tensor &s, torch::Tensor &y) {
    cuda_forward(B, T, C, H, r.data_ptr<bf16>(), k.data_ptr<bf16>(), v.data_ptr<bf16>(), w.data_ptr<bf16>(), u.data_ptr<bf16>(), s.data_ptr<bf16>(), y.data_ptr<bf16>());
}
void backward(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &u, torch::Tensor &s, torch::Tensor &gy, torch::Tensor &gr, torch::Tensor &gk, torch::Tensor &gv, torch::Tensor &gw, torch::Tensor &gu, torch::Tensor &gs) {
    cuda_backward(B, T, C, H, r.data_ptr<bf16>(), k.data_ptr<bf16>(), v.data_ptr<bf16>(), w.data_ptr<bf16>(), u.data_ptr<bf16>(), s.data_ptr<bf16>(), gy.data_ptr<bf16>(), gr.data_ptr<bf16>(), gk.data_ptr<bf16>(), gv.data_ptr<bf16>(), gw.data_ptr<bf16>(), gu.data_ptr<bf16>(), gs.data_ptr<bf16>());
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &forward, "wkv6state forward");
    m.def("backward", &backward, "wkv6state backward");
}

TORCH_LIBRARY(wkv6state, m) {
    m.def("forward", forward);
    m.def("backward", backward);
}
cuda/wkv6state_cuda.cu
ADDED
@@ -0,0 +1,311 @@
#include <stdio.h>
#include <assert.h>
#include "ATen/ATen.h"
typedef at::BFloat16 bf16;

template <typename F>
__global__ void kernel_forward(const int B, const int T, const int C, const int H,
                               const F *__restrict__ const _r, const F *__restrict__ const _k, const F *__restrict__ const _v, const F *__restrict__ _w, const F *__restrict__ _u, const F *__restrict__ _s,
                               F *__restrict__ const _y)
{
    const int b = blockIdx.x / H;
    const int h = blockIdx.x % H;
    const int i = threadIdx.x;
    _u += h*_N_;
    _s += h*_N_*_N_ + i*_N_;

    __shared__ float r[_N_], k[_N_], u[_N_], w[_N_];
    float state[_N_];

    __syncthreads();
    u[i] = float(_u[i]);
    __syncthreads();
    for (int j = 0; j < _N_; j++) {
        state[j] = float(_s[j]);
    }

    for (int t = b*T*C + h*_N_ + i; t < (b+1)*T*C + h*_N_ + i; t += C)
    {
        __syncthreads();
        w[i] = __expf(-__expf(float(_w[t])));
        r[i] = float(_r[t]);
        k[i] = float(_k[t]);
        __syncthreads();

        const float v = float(_v[t]);
        float y = 0;

        #pragma unroll
        for (int j = 0; j < _N_; j+=4)
        {
            const float4& r_ = (float4&)(r[j]);
            const float4& k_ = (float4&)(k[j]);
            const float4& w_ = (float4&)(w[j]);
            const float4& u_ = (float4&)(u[j]);
            float4& s = (float4&)(state[j]);
            float4 x;

            x.x = k_.x * v;
            x.y = k_.y * v;
            x.z = k_.z * v;
            x.w = k_.w * v;

            y += r_.x * (u_.x * x.x + s.x);
            y += r_.y * (u_.y * x.y + s.y);
            y += r_.z * (u_.z * x.z + s.z);
            y += r_.w * (u_.w * x.w + s.w);

            s.x = s.x * w_.x + x.x;
            s.y = s.y * w_.y + x.y;
            s.z = s.z * w_.z + x.z;
            s.w = s.w * w_.w + x.w;
        }
        _y[t] = F(y);
    }
    // #pragma unroll
    // for (int j = 0; j < _N_; j++)
    //     _s[j] = F(state[j]);
}

template <typename F>
__global__ void kernel_backward_111(const int B, const int T, const int C, const int H,
                                    const F *__restrict__ const _r, const F *__restrict__ const _k, const F *__restrict__ const _v, const F *__restrict__ _w, const F *__restrict__ _u, const F *__restrict__ _s, const F *__restrict__ const _gy,
                                    F *__restrict__ const _gr, F *__restrict__ const _gk, F *__restrict__ const _gv, F *__restrict__ const _gu, F *__restrict__ const _gs)
{
    const int b = blockIdx.x / H;
    const int h = blockIdx.x % H;
    const int i = threadIdx.x;
    _u += h*_N_;
    _s += b*C*_N_ + h*_N_*_N_ + i*_N_;

    __shared__ float u_[_N_];
    __shared__ float r[_N_], k[_N_], v[_N_], w_[_N_], gy[_N_];
    __syncthreads();
    u_[i] = float(_u[i]);
    __syncthreads();

    const float u = u_[i];

    float state[_N_], scccc[_N_] = {0}, sdddd[_N_] = {0}, sssss[_N_] = {0}, swwww[_N_];
    for (int j = 0; j < _N_; j++) {
        state[j] = float(_s[j*_N_]);
        swwww[j] = 1.0;
    }

    const int t_0 = b*T*C + h*_N_ + i;
    const int t_T_1 = t_0 + (T-1)*C;
    const int t_T = t_0 + T*C;

    float gu = 0;
    for (int t = t_0; t < t_T; t += C)
    {
        __syncthreads();
        v[i] = float(_v[t]);
        gy[i] = float(_gy[t]);
        __syncthreads();

        const float k = float(_k[t]);
        const float w = __expf(-__expf(float(_w[t])));
        float gr = 0, gu_ = 0;

        #pragma unroll
        for (int j = 0; j < _N_; j++)
        {
            float& s = state[j];
            float x = k * v[j];

            gr += (u * x + s) * gy[j];
            gu_ += x * gy[j];
            s = s * w + x;
        }
        _gr[t] = F(gr);
        gu += float(_r[t]) * gu_;
    }
    _gu[b*C + h*_N_ + i] = F(gu);

    for (int t = t_T_1; t >= t_0; t -= C)
    {
        __syncthreads();
        v[i] = float(_v[t]);
        gy[i] = float(_gy[t]);
        __syncthreads();

        const float rr = float(_r[t]);
        const float w = __expf(-__expf(float(_w[t])));
        float gk = 0;

        #pragma unroll
        for (int j = 0; j < _N_; j++)
        {
            float& s = scccc[j];
            float x = rr * gy[j];

            gk += (u * x + s) * v[j];
            s = x + s * w;
        }
        _gk[t] = F(gk);
    }

    for (int t = t_T_1; t >= t_0; t -= C)
    {
        __syncthreads();
        r[i] = float(_r[t]);
        k[i] = float(_k[t]);
        w_[i] = __expf(-__expf(float(_w[t])));
        __syncthreads();

        const float gyy = float(_gy[t]);
        float gv = 0;

        #pragma unroll
        for (int j = 0; j < _N_; j++)
        {
            float& s = sdddd[j];
            float x = gyy * r[j];

            gv += (u_[j] * x + s) * k[j];
            s = x + s * w_[j];
        }
        _gv[t] = F(gv);
    }

    for (int t = t_0; t < t_T; t += C)
    {
        __syncthreads();
        r[i] = float(_r[t]);
        w_[i] = __expf(-__expf(float(_w[t])));
        __syncthreads();

        const float gyy = float(_gy[t]);

        #pragma unroll
        for (int j = 0; j < _N_; j++)
        {
            float& w = swwww[j];
            sssss[j] += gyy * w * r[j];
            w *= w_[j];
        }
    }
    for (int j = 0; j < _N_; j++)
        _gs[b*H*_N_*_N_ + h*_N_*_N_ + i*_N_ + j] = F(sssss[j]);
}

template <typename F>
__global__ void kernel_backward_222(const int B, const int T, const int C, const int H,
                                    const F *__restrict__ const _r, const F *__restrict__ const _k, const F *__restrict__ const _v, const F *__restrict__ _w, const F *__restrict__ _u, const F *__restrict__ _s, const F *__restrict__ const _gy,
                                    F *__restrict__ const _gw)
{
    const int b = blockIdx.x / H;
    const int h = blockIdx.x % H;
    const int i = threadIdx.x;
    _s += h*_N_*_N_ + i;

    __shared__ float v[_N_], gy[_N_];
    float state[_N_], saaaa[_N_] = {0}, sbbbb[_T_-1] = {0}, scccc[_N_] = {0};
    for (int j = 0; j < _N_; j++) {
        state[j] = float(_s[j*_N_]);
    }

    const int t_0 = b*T*C + h*_N_ + i;
    const int t_1 = t_0 + C;
    const int t_2 = t_0 + 2*C;
    const int t_T_1 = t_0 + (T-1)*C;

    for (int t = t_T_1; t > t_1; t -= C)
    {
        __syncthreads();
        gy[i] = float(_gy[t]);
        v[i] = float(_v[t-2*C]);
        __syncthreads();

        const float r = float(_r[t]);
        const float w = __expf(-__expf(float(_w[t-C])));
        float sum = 0.0f;

        #pragma unroll
        for (int j = 0; j < _N_; j++)
        {
            float& s = saaaa[j];
            s = (s + r * gy[j]) * w;
            sum += s * v[j];
        }
        sbbbb[(t-t_1)/C] = sum * float(_k[t-2*C]);
    }
    {
        __syncthreads();
        gy[i] = float(_gy[t_1]);
        __syncthreads();

        const float r = float(_r[t_1]);
        const float w = __expf(-__expf(float(_w[t_0])));
        float sum = 0.0f;

        #pragma unroll
        for (int j = 0; j < _N_; j++)
        {
            float& s = saaaa[j];
            s = (s + r * gy[j]) * w;
            sum += s * state[j];
        }
        sbbbb[0] = sum;
    }

    float sss = sbbbb[0];
    _gw[t_0] = F(sss * -__expf(float(_w[t_0])));

    {
        __syncthreads();
        gy[i] = float(_gy[t_1]);
        __syncthreads();

        const float w = __expf(-__expf(float(_w[t_0])));
        float sum = 0.0f;

        #pragma unroll
        for (int j = 0; j < _N_; j++)
        {
            float& s = scccc[j];
            s = (s + state[j]) * w;
            sum += s * gy[j];
        }
        sss += sbbbb[1] - (sum * float(_r[t_1]));
        _gw[t_1] = F(sss * -__expf(float(_w[t_1])));
    }
    for (int t = t_2; t < t_T_1; t += C)
    {
        __syncthreads();
        gy[i] = float(_gy[t]);
        v[i] = float(_v[t-2*C]);
        __syncthreads();

        const float w = __expf(-__expf(float(_w[t-C])));
        const float k = float(_k[t-2*C]);
        float sum = 0.0f;

        #pragma unroll
        for (int j = 0; j < _N_; j++)
        {
            float& s = scccc[j];
            s = (s + k * v[j]) * w;
            sum += s * gy[j];
        }
        sss += sbbbb[(t-t_0)/C] - (sum * float(_r[t]));
        _gw[t] = F(sss * -__expf(float(_w[t])));
|
| 294 |
+
}
|
| 295 |
+
_gw[t_T_1] = 0;
|
| 296 |
+
}
|
| 297 |
+
|
| 298 |
+
void cuda_forward(int B, int T, int C, int H, bf16 *r, bf16 *k, bf16 *v, bf16 *w, bf16 *u, bf16 *z, bf16 *y)
|
| 299 |
+
{
|
| 300 |
+
assert(H*_N_ == C);
|
| 301 |
+
assert(_N_%4 == 0);
|
| 302 |
+
kernel_forward<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, r, k, v, w, u, z, y);
|
| 303 |
+
}
|
| 304 |
+
|
| 305 |
+
void cuda_backward(int B, int T, int C, int H, bf16 *r, bf16 *k, bf16 *v, bf16 *w, bf16 *u, bf16 *z, bf16 *gy, bf16 *gr, bf16 *gk, bf16 *gv, bf16 *gw, bf16 *gu, bf16 *gs)
|
| 306 |
+
{
|
| 307 |
+
assert(H*_N_ == C);
|
| 308 |
+
assert(_N_%4 == 0);
|
| 309 |
+
kernel_backward_111<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, r, k, v, w, u, z, gy, gr, gk, gv, gu, gs);
|
| 310 |
+
kernel_backward_222<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, r, k, v, w, u, z, gy, gw);
|
| 311 |
+
}
|
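For orientation, the recurrence that `cuda_forward` evaluates and that the two backward kernels above differentiate can be written as a naive PyTorch reference. This is a sketch under assumed `[B, H, T, N]` per-head layouts and float32 math; the kernels themselves run on flattened bf16 buffers with their own memory layout:

```python
import torch

def wkv6state_reference(r, k, v, w, u, s0):
    # r, k, v, w: [B, H, T, N]; u: [H, N]; s0: [B, H, N, N] trainable initial state.
    # w carries the raw decay logits; the kernels apply exp(-exp(w)) per step.
    B, H, T, N = r.shape
    S = s0.clone()
    y = torch.empty_like(v)
    for t in range(T):
        kv = torch.einsum('bhi,bhj->bhij', k[:, :, t], v[:, :, t])  # rank-1 update k v^T
        # the per-channel bonus u boosts the current token before it enters the state
        y[:, :, t] = torch.einsum('bhi,bhij->bhj', r[:, :, t], u[None, :, :, None] * kv + S)
        S = torch.exp(-torch.exp(w[:, :, t]))[..., None] * S + kv  # decay along the key dim, then add
    return y, S
```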
cuda/wkv6state_op.cpp
ADDED
@@ -0,0 +1,22 @@
#include <torch/extension.h>
#include "ATen/ATen.h"
typedef at::BFloat16 bf16;

void cuda_forward(int B, int T, int C, int H, bf16 *r, bf16 *k, bf16 *v, bf16 *w, bf16 *u, bf16 *s, bf16 *y);
void cuda_backward(int B, int T, int C, int H, bf16 *r, bf16 *k, bf16 *v, bf16 *w, bf16 *u, bf16 *s, bf16 *gy, bf16 *gr, bf16 *gk, bf16 *gv, bf16 *gw, bf16 *gu, bf16 *gs);

void forward(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &u, torch::Tensor &s, torch::Tensor &y) {
    cuda_forward(B, T, C, H, r.data_ptr<bf16>(), k.data_ptr<bf16>(), v.data_ptr<bf16>(), w.data_ptr<bf16>(), u.data_ptr<bf16>(), s.data_ptr<bf16>(), y.data_ptr<bf16>());
}
void backward(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &u, torch::Tensor &s, torch::Tensor &gy, torch::Tensor &gr, torch::Tensor &gk, torch::Tensor &gv, torch::Tensor &gw, torch::Tensor &gu, torch::Tensor &gs) {
    cuda_backward(B, T, C, H, r.data_ptr<bf16>(), k.data_ptr<bf16>(), v.data_ptr<bf16>(), w.data_ptr<bf16>(), u.data_ptr<bf16>(), s.data_ptr<bf16>(), gy.data_ptr<bf16>(), gr.data_ptr<bf16>(), gk.data_ptr<bf16>(), gv.data_ptr<bf16>(), gw.data_ptr<bf16>(), gu.data_ptr<bf16>(), gs.data_ptr<bf16>());
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &forward, "wkv6state forward");
    m.def("backward", &backward, "wkv6state backward");
}

TORCH_LIBRARY(wkv6state, m) {
    m.def("forward", forward);
    m.def("backward", backward);
}
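The binding above exposes in-place `forward`/`backward` ops over contiguous bf16 tensors. A minimal sketch of building and calling it with `torch.utils.cpp_extension.load` — the `-D_N_` (head size) and `-D_T_` (max sequence length) values and the output-buffer shapes are assumptions here, since the actual build flags live in the training code rather than in this diff:

```python
import torch
from torch.utils.cpp_extension import load

# JIT-compile the extension; _N_ and _T_ must match the tensors passed in.
wkv6state = load(
    name="wkv6state",
    sources=["cuda/wkv6state_op.cpp", "cuda/wkv6state_cuda.cu"],
    extra_cuda_cflags=["-O3", "--use_fast_math", "-D_N_=64", "-D_T_=1024"],
    verbose=True,
)

B, H, N, T = 1, 2, 64, 1024
C = H * N
kw = dict(device="cuda", dtype=torch.bfloat16)
r, k, v, w = (torch.randn(B, T, C, **kw).contiguous() for _ in range(4))
u = torch.randn(H, N, **kw).contiguous()
s = torch.zeros(B, H, N, N, **kw).contiguous()  # trainable initial state
y = torch.empty(B, T, C, **kw)                  # written in place by the op
wkv6state.forward(B, T, C, H, r, k, v, w, u, s, y)
```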
demo/demo-predict.sh
ADDED
@@ -0,0 +1,39 @@

# 3B
load_model='RWKV-x060-World-3B-v2.1-20240417-ctx4096.pth'
# 7B
# load_model='RWKV-x060-World-7B-v2.1-20240507-ctx4096.pth'

# model output dir
proj_dir='output'

# 3B
n_layer=32
n_embd=2560

# 7B
# n_layer=32
# n_embd=4096

micro_bsz=4
epoch_steps=18089
ctx_len=1024
device=4
epoch_save=1

file_path="path/to/your/audio/file"
OP="predict"

QUANT='nf4'
export HF_ENDPOINT=https://hf-mirror.com
python train.py --load_model $load_model --devices $device --file_path $file_path \
--proj_dir $proj_dir \
--data_type binidx --vocab_size 65536 \
--ctx_len $ctx_len --epoch_steps $epoch_steps --epoch_count 1000 --epoch_begin 0 --epoch_save $epoch_save --micro_bsz $micro_bsz \
--n_layer $n_layer --n_embd $n_embd \
--pre_ffn 0 --head_qk 0 --lr_init 1e-4 --lr_final 1e-4 --warmup_steps 100 --beta1 0.9 --beta2 0.99 --adam_eps 1e-8 \
--accelerator gpu --strategy deepspeed_stage_1 --grad_cp 1 --op $OP \
--precision bf16 \
--my_testing "x060" \
--train_type "state" --dataload pad
# --quant $QUANT
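With `load_model` pointing at a downloaded RWKV checkpoint and `file_path` at a local audio file, running `sh demo/demo-predict.sh` launches `train.py` in inference mode via `--op predict`; uncommenting the last line additionally quantizes the frozen backbone to NF4.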
demo/demo-state-tuning.sh
ADDED
@@ -0,0 +1,38 @@

# 3B
load_model='RWKV-x060-World-3B-v2.1-20240417-ctx4096.pth'
# 7B
# load_model='RWKV-x060-World-7B-v2.1-20240507-ctx4096.pth'

# model output dir
proj_dir='output'

# 3B
n_layer=32
n_embd=2560

# 7B
# n_layer=32
# n_embd=4096

micro_bsz=4
epoch_steps=18089
ctx_len=1024
device=4
epoch_save=1

OP="train"

QUANT='nf4'

python train.py --load_model $load_model --devices $device \
--proj_dir $proj_dir \
--data_type binidx --vocab_size 65536 \
--ctx_len $ctx_len --epoch_steps $epoch_steps --epoch_count 1000 --epoch_begin 0 --epoch_save $epoch_save --micro_bsz $micro_bsz \
--n_layer $n_layer --n_embd $n_embd \
--pre_ffn 0 --head_qk 0 --lr_init 1e-4 --lr_final 1e-4 --warmup_steps 100 --beta1 0.9 --beta2 0.99 --adam_eps 1e-8 \
--accelerator gpu --strategy deepspeed_stage_1 --grad_cp 1 --op $OP \
--precision bf16 \
--my_testing "x060" \
--train_type "state" --dataload pad
# --quant $QUANT
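The state-tuning script is the same invocation with `OP="train"` and without `--file_path`, so `train.py` optimizes the RWKV initial state (`--train_type "state"`) instead of running prediction.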
fla/__init__.py
ADDED
@@ -0,0 +1,50 @@
# -*- coding: utf-8 -*-

from fla.layers import (ABCAttention, BasedLinearAttention, DeltaNet,
                        GatedLinearAttention, HGRN2Attention, LinearAttention,
                        MultiScaleRetention, ReBasedLinearAttention)
from fla.models import (ABCForCausalLM, ABCModel, DeltaNetForCausalLM,
                        DeltaNetModel, GLAForCausalLM, GLAModel,
                        HGRN2ForCausalLM, HGRN2Model, HGRNForCausalLM,
                        HGRNModel, LinearAttentionForCausalLM,
                        LinearAttentionModel, RetNetForCausalLM, RetNetModel,
                        RWKV6ForCausalLM, RWKV6Model, TransformerForCausalLM,
                        TransformerModel)
from fla.ops import (chunk_gla, chunk_retention, fused_chunk_based,
                     fused_chunk_gla, fused_chunk_retention)

__all__ = [
    'ABCAttention',
    'BasedLinearAttention',
    'DeltaNet',
    'HGRN2Attention',
    'GatedLinearAttention',
    'LinearAttention',
    'MultiScaleRetention',
    'ReBasedLinearAttention',
    'ABCForCausalLM',
    'ABCModel',
    'DeltaNetForCausalLM',
    'DeltaNetModel',
    'HGRNForCausalLM',
    'HGRNModel',
    'HGRN2ForCausalLM',
    'HGRN2Model',
    'GLAForCausalLM',
    'GLAModel',
    'LinearAttentionForCausalLM',
    'LinearAttentionModel',
    'RetNetForCausalLM',
    'RetNetModel',
    'RWKV6ForCausalLM',
    'RWKV6Model',
    'TransformerForCausalLM',
    'TransformerModel',
    'chunk_gla',
    'chunk_retention',
    'fused_chunk_based',
    'fused_chunk_gla',
    'fused_chunk_retention'
]

__version__ = '0.1'
fla/layers/__init__.py
ADDED
@@ -0,0 +1,25 @@
# -*- coding: utf-8 -*-

from .abc import ABCAttention
from .based import BasedLinearAttention
from .delta_net import DeltaNet
from .gla import GatedLinearAttention
from .hgrn import HGRNAttention
from .hgrn2 import HGRN2Attention
from .linear_attn import LinearAttention
from .multiscale_retention import MultiScaleRetention
from .rebased import ReBasedLinearAttention
from .rwkv6 import RWKV6Attention

__all__ = [
    'ABCAttention',
    'BasedLinearAttention',
    'DeltaNet',
    'GatedLinearAttention',
    'HGRNAttention',
    'HGRN2Attention',
    'LinearAttention',
    'MultiScaleRetention',
    'ReBasedLinearAttention',
    'RWKV6Attention'
]
fla/layers/abc.py
ADDED
@@ -0,0 +1,195 @@
# -*- coding: utf-8 -*-

from __future__ import annotations

import warnings
from typing import Optional, Tuple

import torch
import torch.nn as nn
from einops import rearrange
from transformers.cache_utils import Cache

from fla.modules import (FusedRMSNormSwishGate, RMSNorm, RotaryEmbedding,
                         ShortConvolution)
from fla.modules.activations import swiglu, swish
from fla.modules.convolution import proj_then_conv1d
from fla.ops.abc.chunk import chunk_abc


class ABCAttention(nn.Module):

    def __init__(
        self,
        hidden_size: int = 1024,
        expand_k: float = 0.5,
        expand_v: float = 1.0,
        num_heads: int = 4,
        use_short_conv: bool = False,
        conv_size: int = 4,
        conv_bias: bool = False,
        share_conv_kernel: bool = True,
        num_slots: Optional[int] = None,
        elementwise_affine: Optional[bool] = True,
        norm_eps: float = 1e-5,
        gate_low_rank_dim: int = 16,
        gate_logit_normalizer: int = 16,
        use_input_gate: bool = False,
        use_output_gate: bool = True,
        use_rope: bool = False,
        use_norm: bool = True,
        clamp_min: Optional[float] = -32,
        clamp_max: Optional[float] = 32,
        layer_idx: Optional[int] = None,
        **kwargs
    ) -> ABCAttention:
        super().__init__()

        self.hidden_size = hidden_size
        self.expand_k = expand_k
        self.expand_v = expand_v
        self.num_heads = num_heads
        self.key_dim = int(self.hidden_size * self.expand_k)
        self.value_dim = int(self.hidden_size * self.expand_v)
        self.head_k_dim = self.key_dim // self.num_heads
        self.head_v_dim = self.value_dim // self.num_heads

        self.use_short_conv = use_short_conv
        self.conv_size = conv_size
        self.conv_bias = conv_bias
        self.share_conv_kernel = share_conv_kernel

        self.gate_low_rank_dim = gate_low_rank_dim
        self.gate_logit_normalizer = gate_logit_normalizer

        self.use_input_gate = use_input_gate
        self.use_output_gate = use_output_gate
        self.use_rope = use_rope
        self.use_norm = use_norm

        if num_slots is None:
            num_slots = self.head_k_dim
        self.num_slots = num_slots

        self.norm_eps = norm_eps

        self.clamp_min = clamp_min
        self.clamp_max = clamp_max
        self.layer_idx = layer_idx

        if layer_idx is None:
            warnings.warn(
                f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will "
                "lead to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
                "when creating this class."
            )

        self.q_proj = nn.Linear(self.hidden_size, self.key_dim, bias=False)
        self.k_proj = nn.Linear(self.hidden_size, self.key_dim, bias=False)
        self.v_proj = nn.Linear(self.hidden_size, self.value_dim, bias=False)

        if use_output_gate:
            self.g_proj = nn.Linear(self.hidden_size, self.value_dim, bias=False)
        self.s_proj = nn.Linear(self.hidden_size, self.num_heads * self.num_slots, bias=False)
        self.o_proj = nn.Linear(self.value_dim, self.hidden_size, bias=False)

        if use_short_conv:
            self.conv_size = conv_size
            if share_conv_kernel:
                self.h_conv1d = ShortConvolution(hidden_size, conv_size, activation='silu')
            else:
                self.q_conv1d = ShortConvolution(self.key_dim, conv_size, activation='silu')
                self.k_conv1d = ShortConvolution(self.key_dim, conv_size, activation='silu')
                self.v_conv1d = ShortConvolution(self.value_dim, conv_size, activation='silu')

        if self.use_norm:
            if self.use_output_gate:
                self.g_norm = FusedRMSNormSwishGate(self.head_v_dim, elementwise_affine, norm_eps)
            else:
                self.g_norm = RMSNorm(self.head_v_dim, elementwise_affine, norm_eps)

        if self.use_rope:
            self.rotary = RotaryEmbedding(self.head_k_dim)

        self.apply(self._initialize_weights)

    def _initialize_weights(self, module: nn.Module):
        if getattr(module, "_is_hf_initialized", False):
            return
        if isinstance(module, nn.Linear):
            nn.init.xavier_uniform_(module.weight, gain=2 ** -2.5)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        module._is_hf_initialized = True

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Cache] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
        **kwargs
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Cache]]:

        if self.use_short_conv:
            if self.share_conv_kernel:
                hidden_states = self.h_conv1d(hidden_states)
                q = self.q_proj(hidden_states)
                k = self.k_proj(hidden_states)
                v = self.v_proj(hidden_states)
            else:
                q = proj_then_conv1d(hidden_states, self.q_proj.weight, self.q_conv1d.weight, self.q_conv1d.bias)
                k = proj_then_conv1d(hidden_states, self.k_proj.weight, self.k_conv1d.weight, self.k_conv1d.bias)
                v = proj_then_conv1d(hidden_states, self.v_proj.weight, self.v_conv1d.weight, self.v_conv1d.bias)
        else:
            q = self.q_proj(hidden_states)
            k = self.k_proj(hidden_states)
            v = self.v_proj(hidden_states)

        if self.use_input_gate:
            q, k, v = map(lambda x: swish(x), (q, k, v))

        if self.use_rope:
            q = rearrange(q, '... (h d) -> ... h d', h=self.num_heads)
            k = rearrange(k, '... (h d) -> ... h d', h=self.num_heads)
            seqlen_offset = 0
            if past_key_values is not None:
                seqlen_offset = past_key_values.get_seq_length(self.layer_idx)
            q, k = self.rotary(q, k, seqlen_offset)
            q = rearrange(q, 'b n h d -> b h n d', h=self.num_heads)
            k = rearrange(k, 'b n h d -> b h n d', h=self.num_heads)
        else:
            q = rearrange(q, 'b n (h d) -> b h n d', h=self.num_heads)
            k = rearrange(k, 'b n (h d) -> b h n d', h=self.num_heads)
        v = rearrange(v, 'b n (h d) -> b h n d', h=self.num_heads)

        # [batch_size, n_heads, seq_len, num_slots]
        s = rearrange(self.s_proj(hidden_states), 'b t (h m) -> b h t m', h=self.num_heads)
        s = s.clamp_(self.clamp_min, self.clamp_max)

        last_state = past_key_values[self.layer_idx] if use_cache else None
        o, last_state = chunk_abc(q, k, v, s, initial_state=last_state, output_final_state=use_cache)
        if past_key_values is not None and last_state is not None:
            past_key_values.update(last_state, self.layer_idx, q.shape[2])

        o = rearrange(o, 'b h t d -> b t h d')
        if self.use_norm and not self.use_output_gate:
            o = self.g_norm(o)
        elif self.use_output_gate:
            g = rearrange(self.g_proj(hidden_states), 'b t (h d) -> b t h d', h=self.num_heads)
            o = self.g_norm(o, g) if self.use_norm else swiglu(g, o)
        o = rearrange(o, 'b t h d -> b t (h d)')
        o = self.o_proj(o)

        return o, None, past_key_values

    def init_state(self, batch_size: int) -> Tuple[torch.Tensor]:
        param = next(self.parameters())
        state = tuple()
        if self.use_short_conv:
            state += (param.new_zeros(batch_size, self.hidden_size, self.conv_size),)
        state += (param.new_zeros(batch_size, self.num_heads, self.head_k_dim, self.num_slots),
                  param.new_zeros(batch_size, self.num_heads, self.num_slots, self.head_v_dim))
        return state

    def state_size(self, sequence_length: int = 2048):
        return self.num_heads * self.key_dim * self.head_v_dim
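A quick smoke test of the layer above (a sketch with the default hyperparameters; it assumes a CUDA device, since the fla kernels target the GPU):

```python
import torch
from fla.layers.abc import ABCAttention

layer = ABCAttention(hidden_size=1024, num_heads=4, layer_idx=0).cuda()
x = torch.randn(2, 128, 1024, device="cuda")
o, _, _ = layer(x)  # forward returns (output, attentions, past_key_values)
print(o.shape)      # torch.Size([2, 128, 1024])
```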
fla/layers/based.py
ADDED
@@ -0,0 +1,126 @@
# -*- coding: utf-8 -*-

"""
Linear attention in Based.
https://github.com/HazyResearch/zoology/blob/main/zoology/mixers/based.py
"""

import torch
import torch.nn as nn
from einops import rearrange

from fla.modules.feature_map import TaylorFeatureMap
from fla.ops.based import parallel_based
from fla.ops.linear_attn import chunk_linear_attn, fused_chunk_linear_attn


class BasedLinearAttention(nn.Module):
    def __init__(
        self,
        hidden_size: int,
        l_max: int = 2048,
        feature_dim: int = 16,
        num_key_value_heads: int = 12,
        num_heads: int = 12,
        feature_name: str = "taylor_exp",
        eps: float = 1e-12,
        causal: bool = True,
        mode: str = "parallel",
    ):
        super().__init__()
        self.hidden_size = hidden_size
        self.l_max = l_max
        self.mode = mode
        assert self.mode in ["fused_chunk", "parallel", 'chunk']

        # linear attention
        self.feature_name = feature_name
        self.feature_dim = feature_dim
        self.num_key_value_heads = num_key_value_heads
        self.num_heads = num_heads
        self.head_dim = self.hidden_size // self.num_key_value_heads
        self.causal = causal

        self.q_proj = nn.Linear(self.hidden_size, self.feature_dim * self.num_heads, bias=False)
        self.k_proj = nn.Linear(self.hidden_size, self.feature_dim * self.num_heads, bias=False)
        self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
        self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
        self.dropout = nn.Identity()
        self.feature_map = TaylorFeatureMap(feature_dim)
        self.eps = eps

        self.apply(self._initialize_weights)

    def _initialize_weights(self, module: nn.Module):
        if getattr(module, "_is_hf_initialized", False):
            return
        if isinstance(module, nn.Linear):
            nn.init.xavier_uniform_(module.weight, gain=2 ** -2.5)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        module._is_hf_initialized = True

    def forward(self, hidden_states: torch.Tensor, **kwargs):
        mode = self.mode
        q, k, v = self.q_proj(hidden_states), self.k_proj(hidden_states), self.v_proj(hidden_states)
        q, k, v = map(lambda x: rearrange(x, "b l (h d) -> b h l d", h=self.num_heads), [q, k, v])
        if mode == "fused_chunk":
            q, k = self.feature_map(q), self.feature_map(k)
            o = fused_chunk_linear_attn(q, k, v, normalize=True, scale=1)
        elif mode == 'chunk':
            q, k = self.feature_map(q), self.feature_map(k)
            o = chunk_linear_attn(q, k, v, normalize=True, scale=1)
        elif mode == 'parallel':
            assert q.shape[-1] <= 128
            o = parallel_based(q, k, v, True, True)
        o = rearrange(o, "b h l d -> b l (h d)")
        o = self.o_proj(o)
        o = self.dropout(o)
        return o

    # https://github.com/HazyResearch/zoology/blob/main/zoology/mixers/based.py#L119
    def forward_reference(self, hidden_states: torch.Tensor, filters: torch.Tensor = None, *args, **kwargs):
        """
        hidden_states (torch.Tensor): tensor of shape (b, l, d)
        returns (torch.Tensor): tensor of shape (b, l, d)
        """
        # hidden_states = hidden_states.transpose(1, 2)
        b, l, _ = hidden_states.size()
        q, k, v = self.q_proj(hidden_states), self.k_proj(hidden_states), self.v_proj(hidden_states)

        q = q.view(b, l, self.num_heads, self.feature_dim).transpose(1, 2)
        k = k.view(b, l, self.num_key_value_heads, self.feature_dim).transpose(1, 2)
        v = v.view(b, l, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        # Linear attention
        q, k = self.feature_map(q), self.feature_map(k)
        q, k, v = q.unsqueeze(-2), k.unsqueeze(-2), v.unsqueeze(-1)

        # Compute attention
        if self.causal:
            y = ((q * (k * v).cumsum(2)).sum(-1) / ((q * k.cumsum(2)).sum(-1) + self.eps))
        else:
            y = ((q * (k * v).sum(2, True)).sum(-1) / ((q * k.sum(2, True)).sum(-1) + self.eps))
        y = rearrange(y, 'b h l d -> b l (h d)')
        y = self.o_proj(y.to(hidden_states.dtype))
        y = self.dropout(y)
        return y.to(hidden_states.dtype)


if __name__ == '__main__':
    batch = 4
    seq_len = 1024
    hidden_size = 1024
    dtype = torch.float32
    x = torch.randn(batch, seq_len, hidden_size).to(dtype).cuda().requires_grad_(True)
    dy = torch.randn(batch, seq_len, hidden_size).to(dtype).cuda()
    model = BasedLinearAttention(hidden_size, mode='chunk').to(dtype).cuda()
    y = model(x)
    y.backward(dy, retain_graph=True)
    x_grad, x.grad = x.grad, None
    y2 = model.forward_reference(x)
    y2.backward(dy)
    assert y.allclose(y2, 0, 1e-4), breakpoint()
    assert x_grad.allclose(x.grad, 0, 1e-4), breakpoint()
    print("Pass")
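The `cumsum` trick in `forward_reference` is the standard causal linear-attention identity: with feature map $\phi$,

$$y_t = \frac{\phi(q_t)^\top \sum_{s \le t} \phi(k_s)\, v_s^\top}{\phi(q_t)^\top \sum_{s \le t} \phi(k_s) + \epsilon},$$

so running prefix sums over $\phi(k_s)$ and the outer products $\phi(k_s) v_s^\top$ replace the $T \times T$ attention matrix, which is what makes the chunked kernels above linear in sequence length.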
fla/layers/delta_net.py
ADDED
@@ -0,0 +1,254 @@
# -*- coding: utf-8 -*-

# Sect. 4.2 of Linear Transformers Are Secretly Fast Weight Programmers https://arxiv.org/abs/2102.11174


from __future__ import annotations

from typing import Optional, Tuple

import torch
import torch.nn as nn
from einops import rearrange
from transformers.cache_utils import Cache


from fla.modules import FusedRMSNormSwishGate, RMSNorm, ShortConvolution, LayerNorm
from fla.modules.rotary import RotaryEmbedding
from fla.ops.delta_rule import (fused_chunk_delta_rule,
                                fused_recurrent_linear_attn_delta_rule,
                                chunk_delta_rule)
from torch.nn import functional as F


def simple_norm(x):
    return (F.normalize(x, dim=-1) * x.shape[-1] ** 0.5).to(x)


# @torch.jit.script
def elu_p1(x):
    return (F.elu(x, 1., False) + 1.).to(x)


# @torch.jit.script
def sum_norm(x):
    return (x / x.sum(-1, keepdim=True)).to(x)


# @torch.jit.script
def elu_norm(x):
    dtype = x.dtype
    x = F.elu(x, 1., False) + 1.
    return (x / x.sum(-1, keepdim=True)).to(dtype)


# https://github.com/IDSIA/recurrent-fwp/blob/master/algorithmic/layers.py#L86C1-L146C1
class DeltaNet(nn.Module):
    def __init__(
        self,
        d_model: int = None,
        hidden_size: int = 1024,
        expand_k: float = 1.0,
        expand_v: float = 1.0,
        num_heads: int = 4,
        mode: str = 'fused_chunk',
        chunk_size: int = 16,
        use_beta: bool = True,
        use_gate: bool = True,
        use_rope: bool = False,
        use_output_norm: bool = True,
        use_elu: bool = False,
        use_short_conv: bool = True,
        conv_size: int = 4,
        conv_bias: bool = False,
        share_conv_kernel: bool = False,
        layer_idx: int = None,
        qk_activation: str = 'silu',
        qk_norm: Optional[str] = None,
        save_memory: bool = False,
        **kwargs
    ) -> DeltaNet:
        super().__init__()
        self.mode = mode
        self.qk_activation = qk_activation
        self.qk_norm = qk_norm
        assert self.qk_activation in ['silu', 'relu', 'elu', 'identity']
        assert self.qk_norm in [None, 'l2', 'sum']
        if d_model is not None:
            hidden_size = d_model
        self.hidden_size = hidden_size
        self.expand_k = expand_k
        self.expand_v = expand_v
        self.num_heads = num_heads
        self.chunk_size = chunk_size
        self.use_gate = use_gate
        self.use_output_norm = use_output_norm
        self.use_short_conv = use_short_conv
        self.conv_size = conv_size
        self.conv_bias = conv_bias
        self.share_conv_kernel = share_conv_kernel

        self.key_dim = int(hidden_size * expand_k)
        self.value_dim = int(hidden_size * expand_v)
        self.head_qk_dim = self.key_dim // num_heads
        self.head_v_dim = self.value_dim // num_heads
        self.layer_idx = layer_idx

        self.silu = torch.nn.SiLU()

        assert mode in ['chunk', 'fused_chunk', 'fused_recurrent'], f"Not supported mode `{mode}`."
        assert self.key_dim % num_heads == 0, f"key dim must be divisible by num_heads of {num_heads}"
        assert self.value_dim % num_heads == 0, f"value dim must be divisible by num_heads of {num_heads}"

        self.q_proj = nn.Linear(hidden_size, self.key_dim, bias=False)
        self.k_proj = nn.Linear(hidden_size, self.key_dim, bias=False)
        self.v_proj = nn.Linear(hidden_size, self.value_dim, bias=False)
        self.o_proj = nn.Linear(self.value_dim, hidden_size, bias=False)

        self.use_beta = use_beta
        self.use_elu = use_elu
        if self.use_beta:
            self.b_proj = nn.Linear(hidden_size, self.num_heads, bias=False)
        if use_short_conv:
            self.conv_size = conv_size
            if share_conv_kernel:
                self.h_conv1d = ShortConvolution(hidden_size, conv_size, activation=None)
            else:
                self.q_conv1d = ShortConvolution(self.key_dim, conv_size, activation='silu' if qk_activation == 'silu' else None)
                self.k_conv1d = ShortConvolution(self.key_dim, conv_size, activation='silu' if qk_activation == 'silu' else None)
                self.v_conv1d = ShortConvolution(self.value_dim, conv_size, activation='silu')
        if use_gate:
            self.g_proj = nn.Linear(hidden_size, self.value_dim, bias=False)
        if self.use_gate:
            self.norm = FusedRMSNormSwishGate(self.head_v_dim)
        else:
            self.norm = RMSNorm(self.head_v_dim)
        self.apply(self._initialize_weights)

    def _initialize_weights(self, module: nn.Module):
        if getattr(module, "_is_hf_initialized", False):
            return
        if isinstance(module, nn.Linear):
            nn.init.xavier_uniform_(module.weight, gain=2 ** -2.5)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        module._is_hf_initialized = True

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Cache] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
        **kwargs
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Cache]]:

        # change to inference mode.
        mode = 'fused_recurrent' if hidden_states.shape[1] < 64 else self.mode
        last_state = past_key_values[self.layer_idx] if use_cache else None

        if attention_mask is not None:
            if attention_mask.shape[-1] != hidden_states.shape[-2]:
                attention_mask = attention_mask[:, -1:]

        if self.use_short_conv:
            conv_state = last_state[0] if use_cache else None
            if self.share_conv_kernel:
                # conv state is updated inplace
                hidden_states = self.h_conv1d(hidden_states, attention_mask, conv_state)
                q = self.q_proj(hidden_states)
                k = self.k_proj(hidden_states)
                v = self.v_proj(hidden_states)
            else:
                conv_state_q = last_state[0] if use_cache else None
                conv_state_k = last_state[1] if use_cache else None
                conv_state_v = last_state[2] if use_cache else None
                k = self.k_proj(hidden_states)
                v = self.v_proj(hidden_states)
                q = self.q_proj(hidden_states)
                q = self.q_conv1d(q, attention_mask, conv_state_q)
                k = self.k_conv1d(k, attention_mask, conv_state_k)
                v = self.v_conv1d(v, attention_mask, conv_state_v)
        else:
            q = (self.q_proj(hidden_states))
            k = (self.k_proj(hidden_states))
            v = self.silu(self.v_proj(hidden_states))

        # dealing with left-padding
        if attention_mask is not None:
            v = v.mul_(attention_mask.unsqueeze(-1))

        q, k, v = map(lambda x: rearrange(x, 'b l (h d) -> b h l d', h=self.num_heads), (q, k, v))

        if self.qk_activation != 'silu':
            if self.qk_activation == 'relu':
                q, k = q.relu(), k.relu()
            elif self.qk_activation == 'elu':
                q, k = elu_p1(q), elu_p1(k)
            elif self.qk_activation == 'identity':
                pass
            else:
                raise NotImplementedError

        if self.qk_norm is not None:
            if self.qk_norm == 'l2':
                k = torch.nn.functional.normalize(k, dim=-1, p=2).to(v)  # auto mixed precision type transfer is annoying.
                q = torch.nn.functional.normalize(q, dim=-1, p=2).to(v)
            elif self.qk_norm == 'sum':
                q = sum_norm(q).to(v)
                k = sum_norm(k).to(v)

        if self.use_beta:
            beta = rearrange(self.b_proj(hidden_states), 'b l h -> b h l').sigmoid()
        else:
            beta = q.new_ones(q.shape[0], q.shape[1], q.shape[2])
        state = past_key_values[self.layer_idx][-1] if use_cache else None
        if mode == 'fused_recurrent':
            o, recurrent_state = fused_recurrent_linear_attn_delta_rule(q, k, v, beta, state, output_final_state=use_cache)
        elif mode == 'fused_chunk':
            assert self.chunk_size in [16, 32, 64]
            o, recurrent_state = fused_chunk_delta_rule(q, k, v, beta, self.chunk_size, state, output_final_state=use_cache)
        elif mode == 'chunk':
            assert self.chunk_size in [16, 32, 64]
            o, recurrent_state = chunk_delta_rule(q, k, v, beta, self.chunk_size, state, output_final_state=use_cache)
        else:
            raise NotImplementedError(f"Not supported mode `{mode}`.")

        if past_key_values is not None:
            if self.use_short_conv:
                if self.share_conv_kernel:
                    state = (conv_state, recurrent_state)
                else:
                    state = (conv_state_q, conv_state_k, conv_state_v, recurrent_state)
            else:
                state = (recurrent_state,)
            past_key_values.update(state, self.layer_idx)

        o = rearrange(o, 'b h l d -> b l h d')
        if self.use_gate:
            g = rearrange(self.g_proj(hidden_states), 'b l (h d) -> b l h d', h=self.num_heads)
            o = self.norm(o, g)
        else:
            o = self.norm(o)
        o = rearrange(o, 'b l h d -> b l (h d)')
        o = self.o_proj(o)

        return o, None, past_key_values

    def init_state(self, batch_size: int) -> Tuple[torch.Tensor]:
        param = next(self.parameters())
        state = tuple()
        if self.use_short_conv:
            if self.share_conv_kernel:
                state += (param.new_zeros(batch_size, self.hidden_size, self.conv_size),)
            else:
                # for q/k/v each
                state += (param.new_zeros(batch_size, self.key_dim, self.conv_size),
                          param.new_zeros(batch_size, self.key_dim, self.conv_size),
                          param.new_zeros(batch_size, self.value_dim, self.conv_size))
        state += (param.new_zeros(batch_size, self.num_heads, self.head_qk_dim, self.head_v_dim),)
        return state
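The delta rule the fused kernels implement (Sect. 4.2 of the paper linked above) updates the fast-weight state by first reading out the value currently associated with $k_t$ and then overwriting it, scaled by $\beta_t$. A naive per-step reference, as a sketch with assumed `[B, H, T, D]` layouts:

```python
import torch

def delta_rule_reference(q, k, v, beta):
    # q, k: [B, H, T, Dk]; v: [B, H, T, Dv]; beta: [B, H, T]
    B, H, T, Dk = q.shape
    S = q.new_zeros(B, H, Dk, v.shape[-1])  # fast-weight memory mapping keys to values
    outs = []
    for t in range(T):
        kt, vt, bt = k[:, :, t], v[:, :, t], beta[:, :, t, None]
        v_old = torch.einsum('bhd,bhde->bhe', kt, S)                  # value stored under k_t
        S = S + torch.einsum('bhd,bhe->bhde', kt, bt * (vt - v_old))  # S += beta k (v - v_old)^T
        outs.append(torch.einsum('bhd,bhde->bhe', q[:, :, t], S))
    return torch.stack(outs, dim=2)
```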
fla/layers/gated_abc.py
ADDED
@@ -0,0 +1,234 @@
# -*- coding: utf-8 -*-

from __future__ import annotations

import warnings
from typing import Optional, Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, repeat
from transformers.cache_utils import Cache

from fla.modules import (FusedRMSNormSwishGateLinear, RMSNormLinear,
                         RotaryEmbedding, ShortConvolution)
from fla.modules.activations import ACT2FN, swiglu_linear, swish
from fla.ops.abc.chunk_gate import chunk_gated_abc


class GatedABCAttention(nn.Module):

    def __init__(
        self,
        hidden_size: int = 1024,
        expand_k: float = 1.,
        expand_v: float = 1.,
        num_heads: int = 4,
        num_kv_heads: Optional[int] = None,
        use_short_conv: bool = False,
        conv_size: int = 4,
        conv_bias: bool = False,
        share_conv_kernel: bool = True,
        num_slots: Optional[int] = None,
        elementwise_affine: Optional[bool] = True,
        norm_eps: float = 1e-5,
        gate_low_rank_dim: Optional[int] = None,
        gate_logit_normalizer: int = 16,
        feature_map: str = 'swish',
        use_rope: bool = False,
        use_output_gate: bool = False,
        use_norm: bool = True,
        layer_idx: Optional[int] = None,
        **kwargs
    ) -> GatedABCAttention:
        super().__init__()

        self.hidden_size = hidden_size
        self.expand_k = expand_k
        self.expand_v = expand_v
        self.num_heads = num_heads
        self.num_kv_heads = num_heads if num_kv_heads is None else num_kv_heads
        self.num_kv_groups = self.num_heads // self.num_kv_heads
        self.key_dim = int(hidden_size * expand_k)
        self.value_dim = int(hidden_size * expand_v)
        self.key_dim_per_group = self.key_dim // self.num_kv_groups
        self.value_dim_per_group = self.value_dim // self.num_kv_groups
        self.head_k_dim = self.key_dim // self.num_heads
        self.head_v_dim = self.value_dim // self.num_heads

        self.use_short_conv = use_short_conv
        self.conv_size = conv_size
        self.conv_bias = conv_bias
        self.share_conv_kernel = share_conv_kernel

        if gate_low_rank_dim is None:
            gate_low_rank_dim = self.hidden_size // 16
        self.gate_low_rank_dim = gate_low_rank_dim
        self.gate_logit_normalizer = gate_logit_normalizer

        self.feature_map = feature_map
        self.use_rope = use_rope
        self.use_output_gate = use_output_gate
        self.use_norm = use_norm

        if num_slots is None:
            num_slots = self.head_k_dim
        self.num_slots = num_slots

        self.layer_idx = layer_idx

        if layer_idx is None:
            warnings.warn(
                f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will "
                "lead to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
                "when creating this class."
            )

        self.q_proj = nn.Linear(self.hidden_size, self.key_dim, bias=False)
        self.k_proj = nn.Linear(self.hidden_size, self.key_dim_per_group, bias=False)
        self.v_proj = nn.Linear(self.hidden_size, self.value_dim_per_group, bias=False)
        self.f_proj = nn.Linear(self.hidden_size, self.num_kv_heads * self.num_slots, bias=False)

        if use_output_gate:
            self.g_proj = nn.Linear(hidden_size, self.value_dim, bias=False)

        if use_short_conv:
            self.conv_size = conv_size
            if share_conv_kernel:
                self.h_conv1d = ShortConvolution(hidden_size, conv_size, activation='silu')
            else:
                self.q_conv1d = ShortConvolution(self.key_dim, conv_size, activation='silu')
                self.k_conv1d = ShortConvolution(self.key_dim_per_group, conv_size, activation='silu')
                self.v_conv1d = ShortConvolution(self.value_dim_per_group, conv_size, activation='silu')

        if self.use_norm:
            if self.use_output_gate:
                self.g_norm = FusedRMSNormSwishGateLinear(self.hidden_size, elementwise_affine, norm_eps)
            else:
                self.g_norm = RMSNormLinear(self.hidden_size, elementwise_affine, norm_eps)
        self.o_proj = nn.Linear(self.value_dim, self.hidden_size, bias=False)

        if self.use_rope:
            self.rotary = RotaryEmbedding(self.head_k_dim)

        self.apply(self._initialize_weights)

    def _initialize_weights(self, module: nn.Module):
        if getattr(module, "_is_hf_initialized", False):
            return
        if isinstance(module, nn.Linear):
            nn.init.xavier_uniform_(module.weight, gain=2 ** -2.5)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        module._is_hf_initialized = True

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Cache] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
        lower_bound: Optional[torch.Tensor] = None,
        **kwargs
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Cache]]:

        last_state = past_key_values[self.layer_idx] if use_cache else None
        if self.use_short_conv:
            conv_state = last_state[0] if use_cache else None
            if self.share_conv_kernel:
                # conv state is updated inplace
                hidden_states = self.h_conv1d(hidden_states, attention_mask, conv_state)
                q = self.q_proj(hidden_states)
                k = self.k_proj(hidden_states)
                v = self.v_proj(hidden_states)
            else:
                conv_state_q = last_state[0] if use_cache else None
                conv_state_k = last_state[1] if use_cache else None
                conv_state_v = last_state[2] if use_cache else None
                q = self.q_proj(hidden_states)
                k = self.k_proj(hidden_states)
                v = self.v_proj(hidden_states)
                q = self.q_conv1d(q, attention_mask, conv_state_q)
                k = self.k_conv1d(k, attention_mask, conv_state_k)
                v = self.v_conv1d(v, attention_mask, conv_state_v)
        else:
            q = self.q_proj(hidden_states)
            k = self.k_proj(hidden_states)
            v = self.v_proj(hidden_states)
        f = self.f_proj(hidden_states)

        if self.use_rope:
            q = rearrange(q, '... (h d) -> ... h d', h=self.num_heads)
            k = rearrange(k, '... (h d) -> ... h d', h=self.num_kv_heads)
            seqlen_offset = 0
            if past_key_values is not None:
                seqlen_offset = past_key_values.get_seq_length(self.layer_idx)
            q, k = self.rotary(q, k, seqlen_offset)
            q = rearrange(q, 'b n h d -> b h n d', h=self.num_heads)
            k = rearrange(k, 'b n h d -> b h n d', h=self.num_kv_heads)
        else:
            q = rearrange(q, 'b n (h d) -> b h n d', h=self.num_heads)
            if self.num_kv_groups > 1:
                k = repeat(k, 'b n (h d) -> b (h g) n d', h=self.num_kv_heads, g=self.num_kv_groups)
            else:
                k = rearrange(k, 'b n (h d) -> b h n d', h=self.num_kv_heads)
        if self.num_kv_groups > 1:
            v = repeat(v, 'b n (h d) -> b (h g) n d', h=self.num_kv_heads, g=self.num_kv_groups)
            f = repeat(f, 'b n (h m) -> b (h g) n m', h=self.num_kv_heads, g=self.num_kv_groups)
        else:
            v = rearrange(v, 'b n (h d) -> b h n d', h=self.num_kv_heads)
            f = rearrange(f, 'b n (h m) -> b h n m', h=self.num_kv_heads)

        if self.feature_map is not None:
            q, k, v = map(lambda x: ACT2FN[self.feature_map](x), (q, k, v))
        f = F.logsigmoid(f) / self.gate_logit_normalizer
        s = (1 - f.exp()).to(f.dtype)
        # dealing with left-padding
        if attention_mask is not None:
            s = s.mul_(attention_mask.view(attention_mask.shape[0], 1, -1, 1))
            v = v.mul_(attention_mask.view(attention_mask.shape[0], 1, -1, 1))

        recurrent_state = last_state[-2:] if use_cache else None
        o, recurrent_state = chunk_gated_abc(q, k, v, s, f,
                                             initial_state=recurrent_state,
                                             output_final_state=use_cache)
        if past_key_values is not None:
            if self.use_short_conv:
                if self.share_conv_kernel:
                    last_state = (conv_state,) + recurrent_state
                else:
                    last_state = (conv_state_q, conv_state_k, conv_state_v) + recurrent_state
            else:
                last_state = recurrent_state
            past_key_values.update(last_state, self.layer_idx, q.shape[2])

        o = rearrange(o, 'b h t d -> b t (h d)')
        if self.use_norm and not self.use_output_gate:
            o = swish(o)
            o = self.g_norm(o, self.o_proj.weight, self.o_proj.bias)
        elif self.use_output_gate and not self.use_norm:
            o = swiglu_linear(self.g_proj(hidden_states), o, self.o_proj.weight, self.o_proj.bias)
        elif self.use_output_gate and self.use_norm:
            o = self.g_norm(o, self.g_proj(hidden_states), self.o_proj.weight, self.o_proj.bias)
        else:
            o = self.o_proj(o)
        return o, None, past_key_values

    def init_state(self, batch_size: int) -> Tuple[torch.Tensor]:
        param = next(self.parameters())
        state = tuple()
        if self.use_short_conv:
            if self.share_conv_kernel:
                state += (param.new_zeros(batch_size, self.hidden_size, self.conv_size),)
            else:
                state += (param.new_zeros(batch_size, self.key_dim, self.conv_size),
                          param.new_zeros(batch_size, self.key_dim, self.conv_size),
                          param.new_zeros(batch_size, self.value_dim, self.conv_size))
        state += (param.new_zeros(batch_size, self.num_heads, self.head_k_dim, self.num_slots),
                  param.new_zeros(batch_size, self.num_heads, self.num_slots, self.head_v_dim))
        return state

    def state_size(self, sequence_length: int = 2048):
        return self.num_heads * self.key_dim * self.head_v_dim
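Note how the gate is parameterized here: `f = logsigmoid(f_proj(x)) / gate_logit_normalizer` keeps the log-forget-gate in $(-\infty, 0)$ with a softened slope, and `s = 1 - exp(f)` is its complement, so per slot the forget and write strengths sum to one, which makes the slot memory behave like a gated moving average; the exact slot recurrence lives in `chunk_gated_abc`.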
fla/layers/gla.py
ADDED
|
@@ -0,0 +1,268 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# -*- coding: utf-8 -*-

from __future__ import annotations

from typing import Optional, Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, repeat
from transformers.cache_utils import Cache

from fla.modules import FusedRMSNormSwishGate, RMSNorm, ShortConvolution
from fla.modules.activations import ACT2FN
from fla.ops.gla import chunk_gla, fused_chunk_gla, fused_recurrent_gla


class GatedLinearAttention(nn.Module):
    r"""
    The layer implementation for [Gated Linear Attention Transformers with Hardware-Efficient Training](https://arxiv.org/abs/2312.06635).  # noqa

    Args:
        mode (str, Optional):
            Which GLA kernel to use.
            Currently available: `chunk`, `fused_recurrent`, and `fused_chunk`.
            Default: `chunk`.
        hidden_size (int, Optional):
            The hidden size of the input. Default: 1024.
        expand_k (float, Optional):
            The expansion ratio for the key dim. Default: 0.5.
        expand_v (float, Optional):
            The expansion ratio for the value dim. Default: 1.0.
        num_heads (int, Optional):
            The number of heads. Default: 4.
        num_kv_heads (int, Optional):
            The number of key/value heads, used for MQA. Default: None.
        feature_map (str, Optional):
            Feature map function applied to queries/keys. Default: None.
        use_short_conv (bool, Optional):
            Whether to use short convolutions. Default: `False`.
        conv_size (int, Optional):
            The kernel size of the short convolution, only used when `use_short_conv` is `True`. Default: 4.
        conv_bias (bool, Optional):
            Whether to use bias in the short convolution, only used when `use_short_conv` is `True`. Default: `False`.
        share_conv_kernel (bool, Optional):
            Whether to apply convolutions before q/k/v mapping, only taking effect when `use_short_conv` is `True`. Default: `True`.
        use_output_gate (bool, Optional):
            Whether to use output gate. Default: `True`.
        gate_fn (str, Optional):
            The activation function for the output gate. Default: `swish`.
        elementwise_affine (bool, Optional):
            If `True`, applies elementwise affine to LayerNorm with learnable parameters. Default: `True`.
        norm_eps (float, Optional):
            The epsilon value for the layernorm/rmsnorm layer. Default: 1e-5.
        gate_logit_normalizer (int, Optional):
            The normalizer for the gate logits, applied after `logsigmoid`. Default: 16.
        gate_low_rank_dim (int, Optional):
            The low rank dim for the gate projection. Default: 16.
        clamp_min (float, Optional):
            The minimum value for the gate logits. Default: None.
        fuse_norm (bool, Optional):
            Whether to fuse the norm and the output gate for a better memory footprint. Default: `True`.
        layer_idx (int, Optional):
            The index of the layer. Default: None.
    """

    def __init__(
        self,
        mode: str = 'chunk',
        hidden_size: int = 1024,
        expand_k: float = 0.5,
        expand_v: float = 1.0,
        num_heads: int = 4,
        num_kv_heads: Optional[int] = None,
        feature_map: Optional[str] = None,
        use_short_conv: bool = False,
        conv_size: int = 4,
        conv_bias: bool = False,
        share_conv_kernel: bool = True,
        use_output_gate: bool = True,
        gate_fn: str = 'swish',
        elementwise_affine: Optional[bool] = True,
        norm_eps: float = 1e-5,
        gate_logit_normalizer: int = 16,
        gate_low_rank_dim: int = 16,
        clamp_min: Optional[float] = None,
        fuse_norm: bool = True,
        layer_idx: int = None,
    ) -> GatedLinearAttention:
        super().__init__()

        self.mode = mode
        self.hidden_size = hidden_size
        self.expand_k = expand_k
        self.expand_v = expand_v
        self.num_heads = num_heads
        self.num_kv_heads = num_kv_heads if num_kv_heads is not None else num_heads
        self.num_kv_groups = self.num_heads // self.num_kv_heads
        self.feature_map_fn = ACT2FN[feature_map] if feature_map is not None else None

        self.use_short_conv = use_short_conv
        self.conv_size = conv_size
        self.conv_bias = conv_bias
        self.share_conv_kernel = share_conv_kernel
        self.use_output_gate = use_output_gate

        self.key_dim = int(hidden_size * expand_k)
        self.value_dim = int(hidden_size * expand_v)
        self.key_dim_per_group = self.key_dim // self.num_kv_groups
        self.value_dim_per_group = self.value_dim // self.num_kv_groups
        self.clamp_min = clamp_min
        self.layer_idx = layer_idx

        assert mode in ['chunk', 'fused_recurrent', 'fused_chunk'], f"Not supported mode `{mode}`."
        assert self.key_dim % num_heads == 0, f"key dim must be divisible by num_heads of {num_heads}"
        assert self.value_dim % num_heads == 0, f"value dim must be divisible by num_heads of {num_heads}"

        self.head_qk_dim = self.key_dim // num_heads
        self.head_v_dim = self.value_dim // num_heads

        self.q_proj = nn.Linear(hidden_size, self.key_dim, bias=False)
        self.k_proj = nn.Linear(hidden_size, self.key_dim_per_group, bias=False)
        self.v_proj = nn.Linear(hidden_size, self.value_dim_per_group, bias=False)
        if self.use_output_gate:
            self.g_proj = nn.Linear(hidden_size, self.value_dim, bias=False)

        if use_short_conv:
            self.conv_size = conv_size
            if share_conv_kernel:
                self.h_conv1d = ShortConvolution(hidden_size, conv_size, activation='silu')
            else:
                self.q_conv1d = ShortConvolution(self.key_dim, conv_size, activation='silu')
                self.k_conv1d = ShortConvolution(self.key_dim_per_group, conv_size, activation='silu')
                self.v_conv1d = ShortConvolution(self.value_dim_per_group, conv_size, activation='silu')

        self.gk_proj = nn.Sequential(nn.Linear(hidden_size, gate_low_rank_dim, bias=False),
                                     nn.Linear(gate_low_rank_dim, self.key_dim_per_group, bias=True))
        self.o_proj = nn.Linear(self.value_dim, hidden_size, bias=False)

        if gate_fn == 'swish' and fuse_norm and use_output_gate:
            self.g_norm_swish_gate = FusedRMSNormSwishGate(self.head_v_dim, elementwise_affine, norm_eps)
            self.fuse_norm_and_gate = True
        else:
            self.fuse_norm_and_gate = False
            self.g_norm = RMSNorm(self.head_v_dim, elementwise_affine, norm_eps)
            self.gate_fn = ACT2FN[gate_fn]

        self.gate_logit_normalizer = gate_logit_normalizer

        self.apply(self._initialize_weights)

    def _initialize_weights(self, module: nn.Module):
        if getattr(module, "_is_hf_initialized", False):
            return
        if isinstance(module, nn.Linear):
            nn.init.xavier_uniform_(module.weight, gain=2 ** -2.5)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        module._is_hf_initialized = True

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Cache] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
        **kwargs
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Cache]]:
        # launching the triton kernel for just one token will actually be slower
        mode = 'fused_recurrent' if hidden_states.shape[1] == 1 else self.mode

        last_state = past_key_values[self.layer_idx] if use_cache else None
        if self.use_short_conv:
            conv_state = last_state[0] if use_cache else None
            if self.share_conv_kernel:
                # conv state is updated inplace
                hidden_states = self.h_conv1d(hidden_states, attention_mask, conv_state)
                q = self.q_proj(hidden_states)
                k = self.k_proj(hidden_states)
                v = self.v_proj(hidden_states)
            else:
                conv_state_q = last_state[0] if use_cache else None
                conv_state_k = last_state[1] if use_cache else None
                conv_state_v = last_state[2] if use_cache else None
                q = self.q_proj(hidden_states)
                k = self.k_proj(hidden_states)
                v = self.v_proj(hidden_states)
                q = self.q_conv1d(q, attention_mask, conv_state_q)
                k = self.k_conv1d(k, attention_mask, conv_state_k)
                v = self.v_conv1d(v, attention_mask, conv_state_v)
        else:
            q = self.q_proj(hidden_states)
            k = self.k_proj(hidden_states)
            v = self.v_proj(hidden_states)
        gk = self.gk_proj(hidden_states)

        if self.feature_map_fn is not None:
            q, k = map(self.feature_map_fn, (q, k))
        # dealing with left-padding
        if attention_mask is not None:
            v = v.mul_(attention_mask.unsqueeze(-1))
        q = rearrange(q, 'b l (h d) -> b h l d', h=self.num_heads)
        if self.num_kv_groups > 1:
            k, v, gk = (repeat(x, 'b l (h d) -> b (h g) l d', h=self.num_kv_heads, g=self.num_kv_groups) for x in (k, v, gk))
        else:
            k, v, gk = (rearrange(x, 'b l (h d) -> b h l d', h=self.num_kv_heads) for x in (k, v, gk))
        gk = F.logsigmoid(gk) / self.gate_logit_normalizer

        if self.clamp_min is not None:
            gk = torch.clamp_min(gk, self.clamp_min)

        recurrent_state = last_state[-1] if use_cache else None
        if mode == 'fused_recurrent':
            o, recurrent_state = fused_recurrent_gla(q, k, v, gk, initial_state=recurrent_state, output_final_state=use_cache)
        elif mode == 'fused_chunk':
            o, recurrent_state = fused_chunk_gla(q, k, v, gk, initial_state=recurrent_state, output_final_state=use_cache)
        elif mode == 'chunk':
            o, recurrent_state = chunk_gla(q, k, v, gk, initial_state=recurrent_state, output_final_state=use_cache)
        else:
            raise NotImplementedError(f"Not supported mode `{mode}`.")

        if past_key_values is not None:
            if self.use_short_conv:
                if self.share_conv_kernel:
                    last_state = (conv_state, recurrent_state)
                else:
                    last_state = (conv_state_q, conv_state_k, conv_state_v, recurrent_state)
            else:
                last_state = (recurrent_state,)
            past_key_values.update(last_state, self.layer_idx, q.shape[2])

        o = rearrange(o, 'b h l d -> b l h d')
        if self.use_output_gate:
            g = self.g_proj(hidden_states)
            if self.fuse_norm_and_gate:
                g = rearrange(g, 'b l (h d) -> b l h d', h=self.num_heads)
                o = self.g_norm_swish_gate(o, g)
                o = rearrange(o, 'b l h d -> b l (h d)')
            else:
                o = rearrange(self.g_norm(o), 'b l h d -> b l (h d)')
                o = o * self.gate_fn(g)
        else:
            o = rearrange(self.g_norm(o), 'b l h d -> b l (h d)')
        o = self.o_proj(o)

        return o, None, past_key_values

    def init_state(self, batch_size: int) -> Tuple[torch.Tensor]:
        param = next(self.parameters())
        state = tuple()
        if self.use_short_conv:
            if self.share_conv_kernel:
                state += (param.new_zeros(batch_size, self.hidden_size, self.conv_size),)
            else:
                state += (param.new_zeros(batch_size, self.key_dim, self.conv_size),
                          param.new_zeros(batch_size, self.key_dim, self.conv_size),
                          param.new_zeros(batch_size, self.value_dim, self.conv_size))
        state += (param.new_zeros(batch_size, self.num_heads, self.head_qk_dim, self.head_v_dim),)
        return state

    def state_size(self, **kwargs) -> int:
        state_size = self.key_dim * self.head_v_dim
        for module in self.children():
            if isinstance(module, ShortConvolution):
                state_size += module.state_size
        return state_size
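Since `GatedLinearAttention` above is self-contained, a quick shape check clarifies the expected I/O contract. A minimal smoke-test sketch, assuming a CUDA device with Triton available (the chunked kernels are GPU-only):

import torch
from fla.layers.gla import GatedLinearAttention

layer = GatedLinearAttention(hidden_size=1024, num_heads=4, mode='chunk').to(torch.bfloat16).cuda()
x = torch.randn(2, 128, 1024, dtype=torch.bfloat16, device='cuda')
o, _, _ = layer(x)   # forward returns (output, attentions placeholder, cache)
assert o.shape == x.shape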
fla/layers/hgrn.py
ADDED
@@ -0,0 +1,165 @@
# -*- coding: utf-8 -*-

# "Hierarchically Gated Recurrent Neural Network for Sequence Modeling" [https://arxiv.org/abs/2311.04823]

from __future__ import annotations

from typing import Optional, Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from transformers.cache_utils import Cache

from fla.modules import FusedRMSNormSwishGate, ShortConvolution
from fla.modules.activations import swiglu
from fla.ops.hgrn import chunk_hgrn, fused_recurrent_hgrn


class HGRNAttention(nn.Module):

    def __init__(
        self,
        mode: str = 'chunk',
        hidden_size: int = 1024,
        num_heads: Optional[int] = None,
        expand_ratio: Optional[int] = 1,
        use_short_conv: bool = False,
        conv_size: int = 4,
        conv_bias: bool = False,
        share_conv_kernel: bool = True,
        elementwise_affine: Optional[bool] = True,
        norm_eps: float = 1e-5,
        layer_idx: int = None
    ) -> HGRNAttention:
        super().__init__()

        self.mode = mode
        self.hidden_size = hidden_size
        self.num_heads = num_heads
        self.expand_ratio = expand_ratio
        self.input_dim = int(hidden_size * expand_ratio)
        self.head_dim = self.input_dim // self.num_heads

        self.use_short_conv = use_short_conv
        self.conv_size = conv_size
        self.conv_bias = conv_bias
        self.share_conv_kernel = share_conv_kernel

        self.layer_idx = layer_idx

        assert mode in ['chunk', 'fused_recurrent'], f"Not supported mode `{mode}`."
        assert self.hidden_size % num_heads == 0, f"hidden size must be divisible by num_heads of {num_heads}"

        self.i_proj = nn.Linear(hidden_size, self.input_dim, bias=False)
        self.f_proj = nn.Linear(hidden_size, self.input_dim, bias=False)
        self.g_proj = nn.Linear(hidden_size, self.input_dim, bias=False)

        if use_short_conv:
            self.conv_size = conv_size
            if share_conv_kernel:
                self.h_conv1d = ShortConvolution(hidden_size, conv_size, activation='silu')
            else:
                self.q_conv1d = ShortConvolution(self.input_dim, conv_size, activation='silu')
                self.f_conv1d = ShortConvolution(self.input_dim, conv_size, activation='silu')
                self.i_conv1d = ShortConvolution(self.input_dim, conv_size, activation='silu')

        self.g_norm = FusedRMSNormSwishGate(self.input_dim, elementwise_affine, norm_eps)
        self.o_proj = nn.Linear(self.input_dim, hidden_size, bias=False)

        self.apply(self._initialize_weights)

    def _initialize_weights(self, module: nn.Module):
        if getattr(module, "_is_hf_initialized", False):
            return
        if isinstance(module, nn.Linear):
            nn.init.xavier_uniform_(module.weight, gain=2 ** -2.5)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        module._is_hf_initialized = True

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Cache] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
        lower_bound: Optional[torch.Tensor] = None,
        **kwargs
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Cache]]:
        # launching the triton kernel for just one token will actually be slower
        mode = 'fused_recurrent' if hidden_states.shape[1] == 1 else self.mode

        last_state = past_key_values[self.layer_idx] if use_cache else None
        if self.use_short_conv:
            conv_state = last_state[0] if use_cache else None
            if self.share_conv_kernel:
                # conv state is updated inplace
                hidden_states = self.h_conv1d(hidden_states, attention_mask, conv_state)
                i = self.i_proj(hidden_states)
                f = self.f_proj(hidden_states)
            else:
                conv_state_i = last_state[2] if use_cache else None
                conv_state_f = last_state[1] if use_cache else None
                i = self.i_conv1d(self.i_proj(hidden_states), attention_mask, conv_state_i)
                f = self.f_conv1d(self.f_proj(hidden_states), attention_mask, conv_state_f)
        else:
            i = self.i_proj(hidden_states)
            f = self.f_proj(hidden_states)

        # the lower bound for the first layer is zero
        if lower_bound is None or self.layer_idx == 0:
            i, f = swiglu(i, 1 - f.sigmoid()), F.logsigmoid(f)
        else:
            g = lower_bound + (1 - lower_bound) * f.sigmoid()
            i, f = swiglu(i, 1 - g), g.log()

        # dealing with left-padding
        if attention_mask is not None:
            i = i.mul_(attention_mask.unsqueeze(-1))
        i, f = map(lambda x: rearrange(x, 'b l (h d) -> b h l d', h=self.num_heads), (i, f))

        recurrent_state = last_state[-1] if use_cache else None
        if mode == 'chunk':
            o, recurrent_state = chunk_hgrn(i, f, initial_state=recurrent_state, output_final_state=use_cache)
        elif mode == 'fused_recurrent':
            o, recurrent_state = fused_recurrent_hgrn(i, f, initial_state=recurrent_state, output_final_state=use_cache)
        else:
            raise NotImplementedError(f"Not supported mode `{mode}`.")

        if past_key_values is not None:
            if self.use_short_conv:
                if self.share_conv_kernel:
                    last_state = (conv_state, recurrent_state)
                else:
                    last_state = (conv_state_i, conv_state_f, recurrent_state)
            else:
                last_state = (recurrent_state,)
            past_key_values.update(last_state, self.layer_idx, i.shape[2])

        o = self.g_norm(self.g_proj(hidden_states), rearrange(o, 'b h l d -> b l (h d)'))
        o = self.o_proj(o)

        return o, None, past_key_values

    def init_state(self, batch_size: int) -> Tuple[torch.Tensor]:
        param = next(self.parameters())
        state = tuple()
        if self.use_short_conv:
            if self.share_conv_kernel:
                state += (param.new_zeros(batch_size, self.hidden_size, self.conv_size),)
            else:
                state += (param.new_zeros(batch_size, self.hidden_size, self.conv_size),
                          param.new_zeros(batch_size, self.hidden_size, self.conv_size),
                          param.new_zeros(batch_size, self.hidden_size, self.conv_size))
        state += (param.new_zeros(batch_size, self.num_heads, self.head_dim),)
        return state

    def state_size(self, **kwargs) -> int:
        state_size = self.hidden_size
        for module in self.children():
            if isinstance(module, ShortConvolution):
                state_size += module.state_size
        return state_size
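For intuition about what `chunk_hgrn`/`fused_recurrent_hgrn` compute: HGRN applies an elementwise recurrence h_t = g_t * h_{t-1} + i_t, and the layer above passes the forget gate in log space. A pure-PyTorch reference sketch of that recurrence (the log-space convention here is our reading of the code above, not a documented kernel contract):

import torch

def hgrn_reference(i: torch.Tensor, f_log: torch.Tensor) -> torch.Tensor:
    # i, f_log: (batch, heads, seq_len, dim); f_log is the log of the forget gate
    h = torch.zeros_like(i[:, :, 0])
    outputs = []
    for t in range(i.shape[2]):
        h = f_log[:, :, t].exp() * h + i[:, :, t]  # h_t = g_t * h_{t-1} + i_t
        outputs.append(h)
    return torch.stack(outputs, dim=2)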
fla/layers/hgrn2.py
ADDED
@@ -0,0 +1,186 @@
# -*- coding: utf-8 -*-

# "HGRN2: Gated Linear RNNs with State Expansion" [https://arxiv.org/abs/2404.07904]

from __future__ import annotations

from typing import Optional, Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from transformers.cache_utils import Cache

from fla.modules import RMSNorm, ShortConvolution
from fla.modules.activations import swish
from fla.ops.gla import chunk_gla, fused_chunk_gla, fused_recurrent_gla


class HGRN2Attention(nn.Module):

    def __init__(
        self,
        mode: str = 'chunk',
        hidden_size: int = 1024,
        num_heads: Optional[int] = None,
        expand_ratio: Optional[int] = 128,
        use_short_conv: bool = False,
        conv_size: int = 4,
        conv_bias: bool = False,
        share_conv_kernel: bool = True,
        elementwise_affine: Optional[bool] = True,
        norm_eps: float = 1e-5,
        layer_idx: int = None
    ) -> HGRN2Attention:
        super().__init__()

        self.mode = mode
        self.hidden_size = hidden_size

        if expand_ratio is None and num_heads is not None:
            expand_ratio = hidden_size // num_heads
        elif expand_ratio is not None and num_heads is None:
            num_heads = hidden_size // expand_ratio
        else:
            raise RuntimeError("One of `expand_ratio` or `num_heads` should be provided.")
        self.num_heads = num_heads
        self.expand_ratio = expand_ratio

        self.use_short_conv = use_short_conv
        self.conv_size = conv_size
        self.conv_bias = conv_bias
        self.share_conv_kernel = share_conv_kernel

        self.forget_dim = int(self.num_heads * self.expand_ratio)
        self.input_dim = hidden_size
        self.layer_idx = layer_idx

        assert mode in ['chunk', 'fused_recurrent', 'fused_chunk'], f"Not supported mode `{mode}`."
        assert self.forget_dim % num_heads == 0, f"forget dim must be divisible by num_heads of {num_heads}"
        assert self.input_dim % num_heads == 0, f"input dim must be divisible by num_heads of {num_heads}"

        self.head_f_dim = self.expand_ratio
        self.head_i_dim = self.hidden_size // num_heads

        self.q_proj = nn.Linear(hidden_size, self.forget_dim, bias=False)
        self.f_proj = nn.Linear(hidden_size, self.forget_dim, bias=False)
        self.i_proj = nn.Linear(hidden_size, self.input_dim, bias=False)

        if use_short_conv:
            self.conv_size = conv_size
            if share_conv_kernel:
                self.h_conv1d = ShortConvolution(hidden_size, conv_size, activation='silu')
            else:
                self.q_conv1d = ShortConvolution(self.forget_dim, conv_size, activation='silu')
                self.f_conv1d = ShortConvolution(self.forget_dim, conv_size, activation='silu')
                self.i_conv1d = ShortConvolution(self.input_dim, conv_size, activation='silu')

        self.g_norm = RMSNorm(self.hidden_size, elementwise_affine, norm_eps)
        self.o_proj = nn.Linear(self.input_dim, hidden_size, bias=False)

        self.apply(self._initialize_weights)

    def _initialize_weights(self, module: nn.Module):
        if getattr(module, "_is_hf_initialized", False):
            return
        if isinstance(module, nn.Linear):
            nn.init.xavier_uniform_(module.weight, gain=2 ** -2.5)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        module._is_hf_initialized = True

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Cache] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
        lower_bound: Optional[torch.Tensor] = None,
        **kwargs
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Cache]]:
        # launching the triton kernel for just one token will actually be slower
        mode = 'fused_recurrent' if hidden_states.shape[1] == 1 else self.mode

        last_state = past_key_values[self.layer_idx] if use_cache else None
        if self.use_short_conv:
            conv_state = last_state[0] if use_cache else None
            if self.share_conv_kernel:
                # conv state is updated inplace
                hidden_states = self.h_conv1d(hidden_states, attention_mask, conv_state)
                q = self.q_proj(hidden_states)
                f = self.f_proj(hidden_states)
                i = self.i_proj(hidden_states)
            else:
                conv_state_q = last_state[0] if use_cache else None
                conv_state_f = last_state[1] if use_cache else None
                conv_state_i = last_state[2] if use_cache else None
                q = self.q_proj(hidden_states)
                f = self.f_proj(hidden_states)
                i = self.i_proj(hidden_states)
                q = self.q_conv1d(q, attention_mask, conv_state_q)
                f = self.f_conv1d(f, attention_mask, conv_state_f)
                i = self.i_conv1d(i, attention_mask, conv_state_i)
        else:
            q = self.q_proj(hidden_states)
            f = self.f_proj(hidden_states)
            i = self.i_proj(hidden_states)

        # dealing with left-padding
        if attention_mask is not None:
            i = i.mul_(attention_mask.unsqueeze(-1))

        q = swish(q)
        # the lower bound for the first layer is zero
        if lower_bound is None or self.layer_idx == 0:
            k, g = 1 - f.sigmoid(), F.logsigmoid(f)
        else:
            g = lower_bound + (1 - lower_bound) * f.sigmoid()
            k, g = 1 - g, g.log()
        q, k, i, g = map(lambda x: rearrange(x, 'b l (h d) -> b h l d', h=self.num_heads), (q, k, i, g))

        recurrent_state = last_state[-1] if use_cache else None
        if mode == 'fused_recurrent':
            o, recurrent_state = fused_recurrent_gla(q, k, i, g, initial_state=recurrent_state, output_final_state=use_cache)
        elif mode == 'fused_chunk':
            o, recurrent_state = fused_chunk_gla(q, k, i, g, initial_state=recurrent_state, output_final_state=use_cache)
        elif mode == 'chunk':
            o, recurrent_state = chunk_gla(q, k, i, g, initial_state=recurrent_state, output_final_state=use_cache)
        else:
            raise NotImplementedError(f"Not supported mode `{mode}`.")

        if past_key_values is not None:
            if self.use_short_conv:
                if self.share_conv_kernel:
                    last_state = (conv_state, recurrent_state)
                else:
                    last_state = (conv_state_q, conv_state_f, conv_state_i, recurrent_state)
            else:
                last_state = (recurrent_state,)
            past_key_values.update(last_state, self.layer_idx, q.shape[2])

        o = self.g_norm(rearrange(o, 'b h l d -> b l (h d)'))
        o = self.o_proj(o)

        return o, None, past_key_values

    def init_state(self, batch_size: int) -> Tuple[torch.Tensor]:
        param = next(self.parameters())
        state = tuple()
        if self.use_short_conv:
            if self.share_conv_kernel:
                state += (param.new_zeros(batch_size, self.hidden_size, self.conv_size),)
            else:
                state += (param.new_zeros(batch_size, self.forget_dim, self.conv_size),
                          param.new_zeros(batch_size, self.forget_dim, self.conv_size),
                          param.new_zeros(batch_size, self.input_dim, self.conv_size))
        state += (param.new_zeros(batch_size, self.num_heads, self.head_f_dim, self.head_i_dim),)
        return state

    def state_size(self, **kwargs) -> int:
        state_size = self.forget_dim * self.head_i_dim
        for module in self.children():
            if isinstance(module, ShortConvolution):
                state_size += module.state_size
        return state_size
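Note the design choice here: HGRN2 reuses the GLA kernels instead of shipping its own, mapping swish(q) to the query, the complement of the forget gate (1 - g) to the key, the input branch to the value, and log g to the decay. A minimal usage sketch, assuming a CUDA device with Triton available:

import torch
from fla.layers.hgrn2 import HGRN2Attention

layer = HGRN2Attention(hidden_size=1024, expand_ratio=128, layer_idx=0).to(torch.bfloat16).cuda()
x = torch.randn(2, 128, 1024, dtype=torch.bfloat16, device='cuda')
o, _, _ = layer(x)
assert o.shape == x.shape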
fla/layers/linear_attn.py
ADDED
@@ -0,0 +1,156 @@
# -*- coding: utf-8 -*-

import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange

from fla.modules import RMSNorm
from fla.modules.feature_map import (DPFPFeatureMap, HadamardFeatureMap,
                                     HedgehogFeatureMap, T2RFeatureMap)
from fla.ops.linear_attn import (chunk_linear_attn, fused_chunk_linear_attn,
                                 fused_recurrent_linear_attn)


class LinearAttention(nn.Module):
    def __init__(
        self,
        hidden_size: int = 1024,
        expand_k: float = 1.0,
        expand_v: float = 1.0,
        num_heads: int = 8,
        mode: str = 'chunk',
        feature_map: str = 'elementwise_product',
        tie_feature_map_qk: bool = False,
        output_norm: str = 'rmsnorm',
        norm_q: bool = False,
        norm_k: bool = False,
        # standard linear attention normalization
        do_feature_map_norm: bool = False,
        elementwise_affine: bool = True,
        norm_eps: float = 1e-5,
        **kwargs,
    ):
        super().__init__()
        assert feature_map in ['elu', 'relu', 'hedgehog', 't2r', 'dpfp',
                               'identity', 'elementwise_product'], f"Not supported feature map `{feature_map}`."

        assert output_norm in ['rmsnorm', 'identity'], f"Not supported output norm `{output_norm}`."

        self.hidden_size = hidden_size
        self.mode = mode
        self.key_dim = int(hidden_size * expand_k)
        self.value_dim = int(hidden_size * expand_v)
        self.num_heads = num_heads

        assert mode in ['chunk', 'fused_chunk', 'fused_recurrent'], f"Not supported mode `{mode}`."
        assert self.key_dim % num_heads == 0, f"key dim must be divisible by num_heads of {num_heads}"
        assert self.value_dim % num_heads == 0, f"value dim must be divisible by num_heads of {num_heads}"

        self.head_qk_dim = self.key_dim // num_heads
        self.head_v_dim = self.value_dim // num_heads

        if feature_map == 'hedgehog':
            if tie_feature_map_qk:
                self.feature_map_q = self.feature_map_k = HedgehogFeatureMap(head_dim=self.head_qk_dim)
            else:
                self.feature_map_q = HedgehogFeatureMap(head_dim=self.head_qk_dim)
                self.feature_map_k = HedgehogFeatureMap(head_dim=self.head_qk_dim)

        elif feature_map == 't2r':
            if tie_feature_map_qk:
                self.feature_map_q = self.feature_map_k = T2RFeatureMap(head_dim=self.head_qk_dim)
            else:
                self.feature_map_q = T2RFeatureMap(head_dim=self.head_qk_dim)
                self.feature_map_k = T2RFeatureMap(head_dim=self.head_qk_dim)

        elif feature_map == 'elementwise_product':
            if tie_feature_map_qk:
                self.feature_map_q = self.feature_map_k = HadamardFeatureMap(head_dim=self.head_qk_dim)
            else:
                self.feature_map_q = HadamardFeatureMap(head_dim=self.head_qk_dim)
                self.feature_map_k = HadamardFeatureMap(head_dim=self.head_qk_dim)

        elif feature_map == 'dpfp':
            self.feature_map_q = DPFPFeatureMap(head_dim=self.head_qk_dim)
            self.feature_map_k = DPFPFeatureMap(head_dim=self.head_qk_dim)

        elif feature_map == 'elu':
            def elu(x):
                return F.elu(x) + 1
            self.feature_map_q = elu
            self.feature_map_k = elu

        elif feature_map == 'relu':
            self.feature_map_q = nn.ReLU()
            self.feature_map_k = nn.ReLU()

        elif feature_map == 'identity':
            self.feature_map_q = nn.Identity()
            self.feature_map_k = nn.Identity()
        else:
            raise NotImplementedError

        self.do_feature_map_norm = do_feature_map_norm
        if output_norm == 'rmsnorm':
            self.norm = RMSNorm(self.head_v_dim, elementwise_affine, norm_eps)
        elif output_norm == 'identity':
            self.norm = nn.Identity()
        else:
            raise NotImplementedError

        self.q_proj = nn.Linear(hidden_size, self.key_dim, bias=False)
        self.k_proj = nn.Linear(hidden_size, self.key_dim, bias=False)
        self.v_proj = nn.Linear(hidden_size, self.value_dim, bias=False)
        self.o_proj = nn.Linear(self.value_dim, hidden_size, bias=False)

        self.norm_q = norm_q
        self.norm_k = norm_k

        self.apply(self._initialize_weights)

    def _initialize_weights(self, module: nn.Module):
        if getattr(module, "_is_hf_initialized", False):
            return
        if isinstance(module, nn.Linear):
            nn.init.xavier_uniform_(module.weight, gain=2 ** -2.5)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        module._is_hf_initialized = True

    def forward(self, x):
        mode = self.mode
        q = rearrange(self.q_proj(x), 'b n (h d) -> b h n d', h=self.num_heads)
        k = rearrange(self.k_proj(x), 'b n (h d) -> b h n d', h=self.num_heads)
        v = rearrange(self.v_proj(x), 'b n (h d) -> b h n d', h=self.num_heads)
        q = self.feature_map_q(q)
        k = self.feature_map_k(k)
        if self.norm_q:
            q = q / (q.sum(-1, keepdim=True) + 1e-4)
        if self.norm_k:
            k = k / (k.sum(-1, keepdim=True) + 1e-4)

        if mode == 'chunk':
            o = chunk_linear_attn(q, k, v, normalize=self.do_feature_map_norm)
        elif mode == 'fused_chunk':
            o = fused_chunk_linear_attn(q, k, v, normalize=self.do_feature_map_norm)
        elif mode == 'fused_recurrent':
            o = fused_recurrent_linear_attn(q, k, v, normalize=self.do_feature_map_norm)
        else:
            raise NotImplementedError
        o = self.norm(o)
        o = rearrange(o, 'b h n d -> b n (h d)')
        o = self.o_proj(o)
        return o


if __name__ == '__main__':
    import torch
    batch = 4
    seq_len = 1024
    hidden_size = 1024
    x = torch.randn(batch, seq_len, hidden_size).to(torch.bfloat16).cuda().requires_grad_(True)
    model = LinearAttention(hidden_size, feature_map='dpfp').to(torch.bfloat16).cuda()
    y = model(x)
    print(y.shape)
    y.sum().backward()
    print(x.grad.shape)
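As a reference for what the chunked/fused kernels compute: causal linear attention replaces softmax scores with phi(q_t) . phi(k_s), so the output is a prefix sum over k v^T outer products. A quadratic-memory reference sketch (semantics only, not the fused kernel; `normalize` mirrors the `do_feature_map_norm` denominator above):

import torch

def linear_attn_reference(q, k, v, normalize=False, eps=1e-6):
    # q, k: (b, h, n, d_k) after the feature map; v: (b, h, n, d_v)
    kv = torch.einsum('bhnd,bhne->bhnde', k, v).cumsum(2)   # prefix sums of k v^T
    o = torch.einsum('bhnd,bhnde->bhne', q, kv)
    if normalize:
        z = torch.einsum('bhnd,bhnd->bhn', q, k.cumsum(2))  # phi(q) . running sum of phi(k)
        o = o / (z.unsqueeze(-1) + eps)
    return o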
fla/layers/multiscale_retention.py
ADDED
@@ -0,0 +1,271 @@
# -*- coding: utf-8 -*-

from __future__ import annotations

from typing import Optional, Tuple

import torch
import torch.nn as nn
from einops import rearrange, repeat
from transformers.activations import ACT2FN
from transformers.cache_utils import Cache

from fla.modules import FusedRMSNormSwishGate, RMSNorm, ShortConvolution
from fla.modules.rotary import RotaryEmbedding
from fla.ops.retention import (chunk_retention, fused_chunk_retention,
                               fused_recurrent_retention, parallel_retention)


class MultiScaleRetention(nn.Module):
    r"""
    The layer implementation for [Retentive Network: A Successor to Transformer for Large Language Models](https://arxiv.org/pdf/2307.08621.pdf).  # noqa

    Args:
        mode (str, Optional):
            Which Retention kernel to use.
            Currently available: `chunk`, `fused_recurrent`, `parallel`, and `fused_chunk`.
            Default: `fused_chunk`.
        hidden_size (int, Optional):
            The hidden size of the input. Default: 1024.
        expand_k (float, Optional):
            The expansion ratio for the key dim. Default: 1.0.
        expand_v (float, Optional):
            The expansion ratio for the value dim. Default: 2.0.
        num_heads (int, Optional):
            The number of heads. Default: 8.
        num_kv_heads (int, Optional):
            The number of key/value heads, used for MQA. Default: None.
        feature_map (str, Optional):
            Feature map function applied to queries/keys. Default: None.
        use_short_conv (bool, Optional):
            Whether to use short convolutions. Default: `False`.
        conv_size (int, Optional):
            The kernel size of the short convolution, only used when `use_short_conv` is `True`. Default: 4.
        conv_bias (bool, Optional):
            Whether to use bias in the short convolution, only used when `use_short_conv` is `True`. Default: `False`.
        share_conv_kernel (bool, Optional):
            Whether to apply convolutions before q/k/v mapping, only taking effect when `use_short_conv` is `True`. Default: `True`.
        use_output_gate (bool, Optional):
            Whether to use output gate. Default: `True`.
        gate_fn (str, Optional):
            The activation function for the output gate. Default: `swish`.
        elementwise_affine (bool, Optional):
            If `True`, applies elementwise affine to LayerNorm with learnable parameters. Default: `True`.
        norm_eps (float, Optional):
            The epsilon value for the layernorm/rmsnorm layer. Default: 1e-5.
        fuse_norm (bool, Optional):
            Whether to fuse the norm and the output gate for a better memory footprint. Default: `True`.
        layer_idx (int, Optional):
            The index of the layer. Default: None.
    """

    def __init__(
        self,
        mode: str = 'fused_chunk',
        hidden_size: int = 1024,
        expand_k: float = 1.0,
        expand_v: float = 2.0,
        num_heads: int = 8,
        num_kv_heads: Optional[int] = None,
        feature_map: Optional[str] = None,
        use_short_conv: bool = False,
        conv_size: int = 4,
        conv_bias: bool = False,
        share_conv_kernel: bool = True,
        use_output_gate: bool = True,
        gate_fn: str = 'swish',
        elementwise_affine: Optional[bool] = True,
        norm_eps: float = 1e-5,
        fuse_norm: bool = True,
        layer_idx: int = None,
        **kwargs
    ) -> MultiScaleRetention:
        super().__init__()

        self.mode = mode
        self.hidden_size = hidden_size
        self.expand_k = expand_k
        self.expand_v = expand_v
        self.num_heads = num_heads
        self.num_kv_heads = num_kv_heads if num_kv_heads is not None else num_heads
        self.num_kv_groups = self.num_heads // self.num_kv_heads
        self.feature_map_fn = ACT2FN[feature_map] if feature_map is not None else None

        self.use_short_conv = use_short_conv
        self.conv_size = conv_size
        self.conv_bias = conv_bias
        self.share_conv_kernel = share_conv_kernel
        self.use_output_gate = use_output_gate

        self.key_dim = int(hidden_size * expand_k)
        self.value_dim = int(hidden_size * expand_v)
        self.key_dim_per_group = self.key_dim // self.num_kv_groups
        self.value_dim_per_group = self.value_dim // self.num_kv_groups
        self.layer_idx = layer_idx

        assert mode in ['chunk', 'fused_chunk', 'parallel', 'fused_recurrent'], f"Not supported mode `{mode}`."
        assert self.key_dim % num_heads == 0, f"key dim must be divisible by num_heads of {num_heads}"
        assert self.value_dim % num_heads == 0, f"value dim must be divisible by num_heads of {num_heads}"

        self.head_qk_dim = self.key_dim // num_heads
        self.head_v_dim = self.value_dim // num_heads

        self.q_proj = nn.Linear(hidden_size, self.key_dim, bias=False)
        self.k_proj = nn.Linear(hidden_size, self.key_dim_per_group, bias=False)
        self.v_proj = nn.Linear(hidden_size, self.value_dim_per_group, bias=False)
        if self.use_output_gate:
            self.g_proj = nn.Linear(hidden_size, self.value_dim, bias=False)

        if use_short_conv:
            self.conv_size = conv_size
            if share_conv_kernel:
                self.h_conv1d = ShortConvolution(hidden_size, conv_size, activation='silu')
            else:
                self.q_conv1d = ShortConvolution(self.key_dim, conv_size, activation='silu')
                self.k_conv1d = ShortConvolution(self.key_dim_per_group, conv_size, activation='silu')
                self.v_conv1d = ShortConvolution(self.value_dim_per_group, conv_size, activation='silu')

        self.o_proj = nn.Linear(self.value_dim, hidden_size, bias=False)

        if gate_fn == 'swish' and fuse_norm and use_output_gate:
            self.g_norm_swish_gate = FusedRMSNormSwishGate(self.head_v_dim, elementwise_affine, norm_eps)
            self.fuse_norm_and_gate = True
        else:
            self.fuse_norm_and_gate = False
            self.g_norm = RMSNorm(self.head_v_dim, elementwise_affine, norm_eps)
            self.gate_fn = ACT2FN[gate_fn]

        # TODO: fix this issue
        # https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/ops/triton/rotary.py#L180
        # Ideally, we would want to support arbitrary d_head_qk
        assert self.head_qk_dim <= 256, "head_qk_dim must be less than or equal to 256"
        self.rotary = RotaryEmbedding(dim=self.head_qk_dim)

        self.apply(self._initialize_weights)

    def _initialize_weights(self, module: nn.Module):
        if getattr(module, "_is_hf_initialized", False):
            return
        if isinstance(module, nn.Linear):
            nn.init.xavier_uniform_(module.weight, gain=2 ** -2.5)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        module._is_hf_initialized = True

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Cache] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
        **kwargs
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Cache]]:
        # launching the triton kernel for just one token will actually be slower
        mode = 'fused_recurrent' if hidden_states.shape[1] == 1 else self.mode

        last_state = past_key_values[self.layer_idx] if use_cache else None
        if self.use_short_conv:
            conv_state = last_state[0] if use_cache else None
            if self.share_conv_kernel:
                # conv state is updated inplace
                hidden_states = self.h_conv1d(hidden_states, attention_mask, conv_state)
                q = self.q_proj(hidden_states)
                k = self.k_proj(hidden_states)
                v = self.v_proj(hidden_states)
            else:
                conv_state_q = last_state[0] if use_cache else None
                conv_state_k = last_state[1] if use_cache else None
                conv_state_v = last_state[2] if use_cache else None
                q = self.q_proj(hidden_states)
                k = self.k_proj(hidden_states)
                v = self.v_proj(hidden_states)
                q = self.q_conv1d(q, attention_mask, conv_state_q)
                k = self.k_conv1d(k, attention_mask, conv_state_k)
                v = self.v_conv1d(v, attention_mask, conv_state_v)
        else:
            q = self.q_proj(hidden_states)
            k = self.k_proj(hidden_states)
            v = self.v_proj(hidden_states)

        # dealing with left-padding
        if attention_mask is not None:
            v = v.mul_(attention_mask.unsqueeze(-1))
        q = rearrange(q, '... (h d) -> ... h d', h=self.num_heads)
        k = rearrange(k, '... (h d) -> ... h d', h=self.num_kv_heads)
        if self.feature_map_fn is not None:
            q, k = map(self.feature_map_fn, (q, k))

        seqlen_offset, max_seqlen = 0, None
        if past_key_values is not None:
            seqlen_offset = past_key_values.get_seq_length(self.layer_idx)
            max_seqlen = q.shape[1] + seqlen_offset
            if attention_mask is not None:
                # to eliminate the offsets of padding tokens
                seqlen_offset = seqlen_offset + attention_mask.sum(-1) - attention_mask.shape[-1]
                max_seqlen = q.shape[1] + max(seqlen_offset)
        q, k = self.rotary(q, k, seqlen_offset, max_seqlen)
        q = q.transpose(1, 2)
        if self.num_kv_groups > 1:
            k = repeat(k, 'b t h d -> b (h g) t d', h=self.num_kv_heads, g=self.num_kv_groups)
            v = repeat(v, 'b t (h d) -> b (h g) t d', h=self.num_kv_heads, g=self.num_kv_groups)
        else:
            k, v = rearrange(k, 'b t h d -> b h t d'), rearrange(v, 'b t (h d) -> b h t d', h=self.num_kv_heads)

        state = last_state[-1] if use_cache else None
        if mode == 'chunk':
            o, recurrent_state = chunk_retention(q, k, v, initial_state=state, output_final_state=use_cache)
        elif mode == 'fused_chunk':
            o, recurrent_state = fused_chunk_retention(q, k, v, initial_state=state, output_final_state=use_cache)
        elif mode == 'parallel':
            o, recurrent_state = parallel_retention(q, k, v, initial_state=state, output_final_state=use_cache)
        elif mode == 'fused_recurrent':
            o, recurrent_state = fused_recurrent_retention(q, k, v, initial_state=state, output_final_state=use_cache)
        else:
            raise NotImplementedError(f"Not supported mode `{mode}`.")

        if past_key_values is not None:
            if self.use_short_conv:
                if self.share_conv_kernel:
                    last_state = (conv_state, recurrent_state)
                else:
                    last_state = (conv_state_q, conv_state_k, conv_state_v, recurrent_state)
            else:
                last_state = (recurrent_state,)
            past_key_values.update(last_state, self.layer_idx, q.shape[2])

        o = rearrange(o, 'b h l d -> b l h d')
        if self.use_output_gate:
            g = self.g_proj(hidden_states)
            if self.fuse_norm_and_gate:
                g = rearrange(g, 'b l (h d) -> b l h d', h=self.num_heads)
                o = self.g_norm_swish_gate(o, g)
                o = rearrange(o, 'b l h d -> b l (h d)')
            else:
                o = rearrange(self.g_norm(o), 'b l h d -> b l (h d)')
                o = o * self.gate_fn(g)
        else:
            o = rearrange(self.g_norm(o), 'b l h d -> b l (h d)')
        o = self.o_proj(o)

        return o, None, past_key_values

    def init_state(self, batch_size: int) -> Tuple[torch.Tensor]:
        param = next(self.parameters())
        state = tuple()
        if self.use_short_conv:
            if self.share_conv_kernel:
                state += (param.new_zeros(batch_size, self.hidden_size, self.conv_size),)
            else:
                state += (param.new_zeros(batch_size, self.key_dim, self.conv_size),
                          param.new_zeros(batch_size, self.key_dim, self.conv_size),
                          param.new_zeros(batch_size, self.value_dim, self.conv_size))
        state += (param.new_zeros(batch_size, self.num_heads, self.head_qk_dim, self.head_v_dim),)
        return state

    def state_size(self, **kwargs) -> int:
        state_size = self.key_dim * self.head_v_dim
        for module in self.children():
            if isinstance(module, ShortConvolution):
                state_size += module.state_size
        return state_size
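Compared to GLA above, retention needs no gate projection: the per-head exponential decay is handled inside the retention kernels, and q/k additionally pass through rotary embeddings before the recurrence. A minimal usage sketch, assuming a CUDA device with Triton available:

import torch
from fla.layers.multiscale_retention import MultiScaleRetention

layer = MultiScaleRetention(hidden_size=1024, num_heads=8, mode='fused_chunk').to(torch.bfloat16).cuda()
x = torch.randn(2, 128, 1024, dtype=torch.bfloat16, device='cuda')
o, _, _ = layer(x)
assert o.shape == x.shape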
fla/layers/rebased.py
ADDED
@@ -0,0 +1,137 @@
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
|
| 3 |
+
"""
|
| 4 |
+
https://github.com/corl-team/rebased/blob/main/flash_linear_attention/fla/layers/rebased_fast.py
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from __future__ import annotations
|
| 8 |
+
|
| 9 |
+
from typing import Optional
|
| 10 |
+
|
| 11 |
+
import torch
|
| 12 |
+
import torch.nn as nn
|
| 13 |
+
from einops import rearrange
|
| 14 |
+
|
| 15 |
+
from fla.modules.feature_map import RebasedFeatureMap
|
| 16 |
+
from fla.ops.linear_attn import chunk_linear_attn, fused_chunk_linear_attn
|
| 17 |
+
from fla.ops.rebased import parallel_rebased
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class ReBasedLinearAttention(nn.Module):
|
| 21 |
+
def __init__(
|
| 22 |
+
self,
|
| 23 |
+
hidden_size: int,
|
| 24 |
+
l_max: int = 2048,
|
| 25 |
+
feature_dim: int = 16,
|
| 26 |
+
num_key_value_heads: int = 16,
|
| 27 |
+
num_heads: int = 16,
|
| 28 |
+
use_gamma: Optional[bool] = True,
|
| 29 |
+
use_beta: Optional[bool] = True,
|
| 30 |
+
normalize: Optional[bool] = True,
|
| 31 |
+
causal: bool = True,
|
| 32 |
+
eps: float = 1e-5,
|
| 33 |
+
mode: str = "parallel",
|
| 34 |
+
layer_idx: Optional[int] = None,
|
| 35 |
+
**kwargs
|
| 36 |
+
) -> ReBasedLinearAttention:
|
| 37 |
+
super().__init__()
|
| 38 |
+
self.hidden_size = hidden_size
|
| 39 |
+
self.l_max = l_max
|
| 40 |
+
self.mode = mode
|
| 41 |
+
assert self.mode in ["fused_chunk", "parallel", 'chunk']
|
| 42 |
+
|
| 43 |
+
# linear attention
|
| 44 |
+
self.feature_dim = feature_dim
|
| 45 |
+
self.num_key_value_heads = num_key_value_heads
|
| 46 |
+
self.num_heads = num_heads
|
| 47 |
+
self.head_dim = self.hidden_size // self.num_key_value_heads
|
| 48 |
+
self.use_gamma = use_gamma
|
| 49 |
+
self.use_beta = use_beta
|
| 50 |
+
self.normalize = normalize
|
| 51 |
+
self.causal = causal
|
| 52 |
+
|
| 53 |
+
self.feature_map = RebasedFeatureMap(self.feature_dim, use_gamma, use_beta, normalize)
|
| 54 |
+
self.q_proj = nn.Linear(self.hidden_size, self.feature_dim * self.num_heads, bias=False)
|
| 55 |
+
self.k_proj = nn.Linear(self.hidden_size, self.feature_dim * self.num_heads, bias=False)
|
| 56 |
+
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
|
| 57 |
+
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
|
| 58 |
+
self.dropout = nn.Identity()
|
| 59 |
+
self.eps = eps
|
| 60 |
+
|
| 61 |
+
self.apply(self._initialize_weights)
|
| 62 |
+
|
| 63 |
+
def _initialize_weights(self, module: nn.Module):
|
| 64 |
+
if getattr(module, "_is_hf_initialized", False):
|
| 65 |
+
return
|
| 66 |
+
if isinstance(module, nn.Linear):
|
| 67 |
+
nn.init.xavier_uniform_(module.weight, gain=2 ** -2.5)
|
| 68 |
+
if module.bias is not None:
|
| 69 |
+
nn.init.zeros_(module.bias)
|
| 70 |
+
module._is_hf_initialized = True
|
| 71 |
+
|
| 72 |
+
def forward(self, hidden_states: torch.Tensor, **kwargs):
|
| 73 |
+
mode = self.mode
|
| 74 |
+
q, k, v = self.q_proj(hidden_states), self.k_proj(hidden_states), self.v_proj(hidden_states)
|
| 75 |
+
q, k, v = map(lambda x: rearrange(x, "b l (h d) -> b h l d", h=self.num_heads), [q, k, v])
|
| 76 |
+
q, k = self.feature_map(q, flatten=(mode != 'parallel')), self.feature_map(k, flatten=(mode != 'parallel'))
|
| 77 |
+
if mode == "fused_chunk":
|
| 78 |
+
o = fused_chunk_linear_attn(q, k, v, normalize=True, scale=1)
|
| 79 |
+
elif mode == 'chunk':
|
| 80 |
+
o = chunk_linear_attn(q, k, v, normalize=True, scale=1)
|
| 81 |
+
elif mode == 'parallel':
|
| 82 |
+
assert q.shape[-1] <= 128
|
| 83 |
+
o = parallel_rebased(q, k, v, self.eps, True, True)
|
| 84 |
+
o = rearrange(o, "b h l d -> b l (h d)")
|
| 85 |
+
o = self.o_proj(o)
|
| 86 |
+
o = self.dropout(o)
|
| 87 |
+
return o
|
| 88 |
+
|
| 89 |
+
# https://github.com/HazyResearch/zoology/blob/main/zoology/mixers/based.py#L119
|
| 90 |
+
def forward_reference(self, hidden_states: torch.Tensor, filters: torch.Tensor = None, *args, **kwargs):
|
| 91 |
+
"""
|
| 92 |
+
x (torch.Tensor): tensor of shape (b, d, l)
|
| 93 |
+
y (torch.Tensor): tensor of shape (b, d, l)
|
| 94 |
+
"""
|
| 95 |
+
# hidden_states = hidden_states.transpose(1, 2)
|
| 96 |
+
b, l, _ = hidden_states.size()
|
| 97 |
+
q, k, v = self.q_proj(hidden_states), self.k_proj(hidden_states), self.v_proj(hidden_states)
|
| 98 |
+
|
| 99 |
+
q = q.view(b, l, self.num_heads, self.feature_dim).transpose(1, 2)
|
| 100 |
+
k = k.view(b, l, self.num_key_value_heads, self.feature_dim).transpose(1, 2)
|
| 101 |
+
v = v.view(b, l, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
| 102 |
+
|
| 103 |
+
# Linear attention
|
| 104 |
+
q, k = self.feature_map(q), self.feature_map(k)
|
| 105 |
+
q, k, v = q.unsqueeze(-2), k.unsqueeze(-2), v.unsqueeze(-1)
|
| 106 |
+
|
| 107 |
+
# Compute attention
|
| 108 |
+
if self.causal:
|
| 109 |
+
y = ((q * (k * v).cumsum(2)).sum(-1) / ((q * k.cumsum(2)).sum(-1) + self.eps))
|
| 110 |
+
else:
|
| 111 |
+
y = ((q * (k * v).sum(2, True)).sum(-1) / ((q * k.sum(2, True)).sum(-1) + self.eps))
|
| 112 |
+
y = rearrange(y, 'b h l d -> b l (h d)')
|
| 113 |
+
y = self.o_proj(y.to(hidden_states.dtype))
|
| 114 |
+
y = self.dropout(y)
|
| 115 |
+
return y.to(hidden_states.dtype)
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
if __name__ == '__main__':
|
| 119 |
+
batch = 4
|
| 120 |
+
seq_len = 1024
|
| 121 |
+
hidden_size = 1024
|
| 122 |
+
dtype = torch.float32
|
| 123 |
+
x = torch.randn(batch, seq_len, hidden_size).to(dtype).cuda().requires_grad_(True)
|
| 124 |
+
dy = torch.randn(batch, seq_len, hidden_size).to(dtype).cuda()
|
| 125 |
+
model = ReBasedLinearAttention(hidden_size=hidden_size, mode='parallel').to(dtype).cuda()
|
| 126 |
+
|
| 127 |
+
y = model(x)
|
| 128 |
+
y.backward(dy, retain_graph=True)
|
| 129 |
+
x_grad, x.grad = x.grad, None
|
| 130 |
+
print(model.mode)
|
| 131 |
+
model.mode = 'fused_chunk'
|
| 132 |
+
y2 = model(x)
|
| 133 |
+
print(model.mode)
|
| 134 |
+
y2.backward(dy)
|
| 135 |
+
# assert y.allclose(y2, 0, 1e-4), breakpoint()
|
| 136 |
+
# assert x_grad.allclose(x.grad, 0, 1e-4), breakpoint()
|
| 137 |
+
print("Pass")
|
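For context, `RebasedFeatureMap` (imported above) realizes the ReBased idea of making Based's quadratic kernel learnable: per the paper, the map is roughly phi(x) = (gamma * norm(x) + beta)^2, with the `use_gamma`/`use_beta`/`normalize` flags toggling each piece. A toy stand-in sketching just that shape of computation (my paraphrase under those assumptions, not the library's implementation, which also supports `flatten`ing the outer-product features for the chunked kernels):

import torch
import torch.nn.functional as F

def toy_rebased_feature_map(x, gamma, beta, normalize=True):
    # x: (..., feature_dim); gamma/beta: (feature_dim,) would be learnable in the real layer
    if normalize:
        x = F.layer_norm(x, x.shape[-1:])
    return (gamma * x + beta) ** 2  # element-wise square keeps features non-negative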
fla/layers/rwkv6.py
ADDED
|
@@ -0,0 +1,264 @@
# -*- coding: utf-8 -*-

# "Eagle and Finch: RWKV with Matrix-Valued States and Dynamic Recurrence"[https://arxiv.org/abs/2404.05892]

from __future__ import annotations

from typing import Optional, Tuple

import torch
import torch.nn as nn
from einops import rearrange
from transformers.activations import ACT2FN
from transformers.cache_utils import Cache

from fla.modules import FusedLayerNormSwishGate, LayerNorm
from fla.ops.rwkv6 import chunk_rwkv6, fused_recurrent_rwkv6


class RWKV6Attention(nn.Module):

    def __init__(
        self,
        mode: str = 'chunk',
        hidden_size: int = 1024,
        expand_k: float = 0.5,
        expand_v: float = 1.0,
        num_heads: int = 4,
        gate_fn: str = 'swish',
        proj_low_rank_dim: int = 32,
        gate_low_rank_dim: int = 64,
        fuse_norm: bool = True,
        elementwise_affine: Optional[bool] = True,
        norm_eps: float = 1e-5,
        layer_idx: int = None,
        **kwargs
    ) -> RWKV6Attention:
        super().__init__()

        self.mode = mode
        self.hidden_size = hidden_size
        self.expand_k = expand_k
        self.expand_v = expand_v
        self.num_heads = num_heads
        self.proj_low_rank_dim = proj_low_rank_dim
        self.gate_low_rank_dim = gate_low_rank_dim

        self.key_dim = int(hidden_size * expand_k)
        self.value_dim = int(hidden_size * expand_v)
        self.layer_idx = layer_idx

        assert mode in ['chunk', 'fused_recurrent'], f"Not supported mode `{mode}`."
        assert self.key_dim % num_heads == 0, f"key dim must be divisible by num_heads of {num_heads}"
        assert self.value_dim % num_heads == 0, f"value dim must be divisible by num_heads of {num_heads}"

        self.head_qk_dim = self.key_dim // num_heads
        self.head_v_dim = self.value_dim // num_heads

        self.time_shift = nn.ZeroPad2d((0, 0, 1, -1))
        self.x_proj = nn.Sequential(
            LerpLinear(hidden_size, proj_low_rank_dim * 5),
            nn.Tanh(),
            nn.Linear(proj_low_rank_dim * 5, hidden_size, bias=True)
        )
        self.r_proj = DDLerpLinear(hidden_size, self.key_dim)
        self.w_proj = DDLerpLinear(hidden_size, self.key_dim, low_rank_dim=gate_low_rank_dim)
        self.k_proj = DDLerpLinear(hidden_size, self.key_dim)
        self.v_proj = DDLerpLinear(hidden_size, self.value_dim)
        self.g_proj = DDLerpLinear(hidden_size, self.value_dim)
        self.bonus = nn.Parameter(torch.zeros(num_heads, self.head_qk_dim))

        self.o_proj = nn.Linear(self.value_dim, hidden_size, bias=False)

        if gate_fn == 'swish' and fuse_norm:
            self.g_norm_swish_gate = FusedLayerNormSwishGate(self.head_v_dim, elementwise_affine, norm_eps)
            self.fuse_norm_and_gate = True
        else:
            self.fuse_norm_and_gate = False
            self.g_norm = LayerNorm(self.head_v_dim, elementwise_affine, norm_eps)
            self.gate_fn = ACT2FN[gate_fn]

        self.apply(self._initialize_weights)

    def _initialize_weights(self, module: nn.Module):
        if getattr(module, "_is_hf_initialized", False):
            return
        if isinstance(module, nn.Linear):
            nn.init.xavier_uniform_(module.weight, gain=2 ** -2.5)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        if isinstance(module, nn.Parameter):
            nn.init.xavier_uniform_(module, gain=2 ** -2.5)
        module._is_hf_initialized = True

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Cache] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
        **kwargs
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Cache]]:
        batch_size, seq_len, hidden_size = hidden_states.size()
        # launching the triton kernel for just one token will actually be slower
        mode = 'fused_recurrent' if hidden_states.shape[1] == 1 else self.mode

        delta = self.time_shift(hidden_states) - hidden_states
        x = self.x_proj[0](hidden_states, delta).view(batch_size, seq_len, -1, self.proj_low_rank_dim)
        r, w, k, v, g = torch.einsum('b l n r, n r d -> b l n d',
                                     self.x_proj[1](x),
                                     self.x_proj[2].weight.view(5, -1, hidden_size)).unbind(-2)
        r = self.r_proj(hidden_states, r, delta)
        w = self.w_proj(hidden_states, w, delta)
        k = self.k_proj(hidden_states, k, delta)
        v = self.v_proj(hidden_states, v, delta)
        g = self.g_proj(hidden_states, g, delta)

        # dealing with left-padding
        if attention_mask is not None:
            v = v.mul_(attention_mask.unsqueeze(-1))
        r, w, k, v = map(lambda x: rearrange(x, 'b l (h d) -> b h l d', h=self.num_heads), (r, w, k, v))
        w = -torch.exp(w)
        u = self.bonus

        last_state = past_key_values[self.layer_idx] if use_cache else None
        state = last_state[-1] if use_cache else None
        if mode == 'fused_recurrent':
            o, recurrent_state = fused_recurrent_rwkv6(r, k, v, w, u, initial_state=state, output_final_state=use_cache)
        elif mode == 'chunk':
            o, recurrent_state = chunk_rwkv6(r, k, v, w, u, initial_state=state, output_final_state=use_cache)
        else:
            raise NotImplementedError(f"Not supported mode `{mode}`.")

        if past_key_values is not None:
            past_key_values.update((recurrent_state,), self.layer_idx, r.shape[2])

        o = rearrange(o, 'b h l d -> b l h d')
        if self.fuse_norm_and_gate:
            g = rearrange(g, 'b l (h d) -> b l h d', h=self.num_heads)
            o = self.g_norm_swish_gate(o, g)
            o = rearrange(o, 'b l h d -> b l (h d)')
        else:
            o = self.g_norm(o)
            o = rearrange(o, 'b l h d -> b l (h d)')
            o = o * self.gate_fn(g)
        o = self.o_proj(o)

        return o, None, past_key_values

    def init_state(self, batch_size: int) -> Tuple[torch.Tensor]:
        param = next(self.parameters())
        state = (param.new_zeros(batch_size, self.num_heads, self.head_qk_dim, self.head_v_dim),)
        return state

    def state_size(self, **kwargs) -> int:
        state_size = self.key_dim * self.head_v_dim
        return state_size
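For orientation, a minimal shape-level smoke test of the layer above; a sketch only, assuming a CUDA device and the fla Triton kernels installed, with arbitrary batch and sequence sizes:

import torch
from fla.layers.rwkv6 import RWKV6Attention

layer = RWKV6Attention(hidden_size=1024, num_heads=4).to(torch.bfloat16).cuda()
x = torch.randn(2, 128, 1024, dtype=torch.bfloat16, device='cuda')
o, _, _ = layer(x)          # forward returns (output, attentions=None, past_key_values)
assert o.shape == x.shape   # output keeps the (batch, seq_len, hidden_size) shape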
class LoRA(nn.Module):

    def __init__(
        self,
        input_dim: int,
        output_dim: int,
        low_rank_dim: int,
        bias: Optional[bool] = True
    ):
        super().__init__()

        self.input_dim = input_dim
        self.output_dim = output_dim
        self.low_rank_dim = low_rank_dim
        self.bias = bias

        self.lora = nn.Sequential(
            nn.Linear(input_dim, low_rank_dim, bias=False),
            nn.Tanh(),
            nn.Linear(low_rank_dim, output_dim, bias=bias)
        )

    def __repr__(self) -> str:
        s = f"{self.__class__.__name__}("
        s += f"input_dim={self.input_dim}, low_rank_dim={self.low_rank_dim}, output_dim={self.output_dim}"
        if not self.bias:
            s += f", bias={self.bias}"
        s += ")"
        return s

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.lora(x)


class LerpLinear(nn.Module):

    def __init__(
        self,
        input_dim: int,
        output_dim: int,
        low_rank_dim: Optional[int] = None
    ):
        super().__init__()

        self.input_dim = input_dim
        self.output_dim = output_dim
        self.low_rank_dim = low_rank_dim

        self.time_shift = nn.ZeroPad2d((0, 0, 1, -1))
        if low_rank_dim is None:
            self.linear = nn.Linear(input_dim, output_dim, bias=False)
        else:
            self.linear = LoRA(input_dim, output_dim, low_rank_dim)
        self.mu = nn.Parameter(torch.zeros(input_dim))

    def __repr__(self) -> str:
        s = f"{self.__class__.__name__}({self.input_dim}, {self.output_dim}"
        if self.low_rank_dim is not None:
            s += f", low_rank_dim={self.low_rank_dim}"
        s += ")"
        return s

    def forward(self, x: torch.Tensor, delta: Optional[torch.Tensor] = None) -> torch.Tensor:
        if delta is None:
            shifted = self.time_shift(x)
            if len(shifted.shape) == 2:
                shifted = shifted.unsqueeze(1)
            delta = shifted - x
        return self.linear(x + delta * self.mu)


class DDLerpLinear(nn.Module):

    def __init__(
        self,
        input_dim: int,
        output_dim: int,
        low_rank_dim: Optional[int] = None
    ):
        super().__init__()

        self.input_dim = input_dim
        self.output_dim = output_dim
        self.low_rank_dim = low_rank_dim

        self.time_shift = nn.ZeroPad2d((0, 0, 1, -1))
        if low_rank_dim is None:
            self.linear = nn.Linear(input_dim, output_dim, bias=False)
        else:
            self.linear = LoRA(input_dim, output_dim, low_rank_dim)

    def __repr__(self) -> str:
        s = f"{self.__class__.__name__}({self.input_dim}, {self.output_dim}"
        if self.low_rank_dim is not None:
            s += f", low_rank_dim={self.low_rank_dim}"
        s += ")"
        return s

    def forward(self, x: torch.Tensor, mu: torch.Tensor, delta: Optional[torch.Tensor] = None) -> torch.Tensor:
        if delta is None:
            shifted = self.time_shift(x)
            if len(shifted.shape) == 2:
                shifted = shifted.unsqueeze(1)
            delta = shifted - x
        return self.linear(x + delta * mu)
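A note on the `nn.ZeroPad2d((0, 0, 1, -1))` trick used throughout this file: on a `(batch, seq_len, hidden)` tensor it pads one step at the front of the sequence axis and crops one at the back, i.e. it implements the RWKV token shift to x_{t-1}, and `shifted - x` is the delta the Lerp/DDLerp layers interpolate along. A small self-contained check (values chosen arbitrarily):

import torch
import torch.nn as nn

time_shift = nn.ZeroPad2d((0, 0, 1, -1))  # pad 1 step before the sequence, drop the last step
x = torch.arange(6.).view(1, 3, 2)        # (batch=1, seq_len=3, hidden=2)
shifted = time_shift(x)
# shifted[:, 0] is all zeros and shifted[:, t] == x[:, t - 1] for t >= 1
assert torch.equal(shifted[:, 1:], x[:, :-1]) and shifted[:, 0].eq(0).all()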
fla/layers/simple_gla.py
ADDED
|
@@ -0,0 +1,143 @@
# -*- coding: utf-8 -*-

from __future__ import annotations

from typing import Optional

import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from transformers.activations import ACT2FN

from fla.modules import FusedRMSNormSwishGate, RMSNorm
from fla.ops.simple_gla import chunk_simple_gla


class SimpleGatedLinearAttention(nn.Module):
    r"""
    The layer implementation for [Gated Linear Attention Transformers with Hardware-Efficient Training](https://arxiv.org/abs/2312.06635).  # noqa
    This layer calls the simplified GLA kernel in which the gating is head-wise instead of elementwise.

    Args:
        mode (str, Optional):
            Which GLA kernel to use.
            Currently available: `chunk`.
            Default: `chunk`.
        hidden_size (int, Optional):
            The hidden size of the input. Default: 1024.
        expand_k (float, Optional):
            The expansion ratio for the key dim. Default: 1.0.
        expand_v (float, Optional):
            The expansion ratio for the value dim. Default: 2.0.
        num_heads (int, Optional):
            The number of heads. Default: 4.
        gate_fn (str, Optional):
            The activation function for the output gate. Default: `swish`.
        elementwise_affine (bool, Optional):
            If `True`, applies elementwise affine to LayerNorm with learnable parameters. Default: `True`.
        norm_eps (float, Optional):
            The epsilon value for the layernorm/rmsnorm layer. Default: 1e-5.
        gate_logit_normalizer (int, Optional):
            The normalizer for the gate logits, applied after `logsigmoid`. Default: 16.
        fuse_norm (bool, Optional):
            Whether to fuse the norm and the output gate for better memory footprint. Default: `True`.
        layer_idx (int, Optional):
            The index of the layer. Default: None.
    """

    def __init__(
        self,
        mode: str = 'chunk',
        hidden_size: int = 1024,
        expand_k: float = 1.0,
        expand_v: float = 2.0,
        num_heads: int = 4,
        gate_fn: str = 'swish',
        elementwise_affine: Optional[bool] = True,
        norm_eps: float = 1e-5,
        gate_logit_normalizer: int = 16,
        fuse_norm: bool = True,
        **kwargs
    ) -> SimpleGatedLinearAttention:
        super().__init__()
        self.hidden_size = hidden_size

        self.mode = mode
        self.key_dim = int(hidden_size * expand_k)
        self.value_dim = int(hidden_size * expand_v)
        assert mode in ['chunk'], f"Not supported mode `{mode}`."
        assert self.key_dim % num_heads == 0, f"key dim must be divisible by num_heads of {num_heads}"
        assert self.value_dim % num_heads == 0, f"value dim must be divisible by num_heads of {num_heads}"
        self.num_heads = num_heads
        self.head_qk_dim = self.key_dim // num_heads
        self.head_v_dim = self.value_dim // num_heads
        self.gate_fn = ACT2FN[gate_fn]

        self.q_proj = nn.Linear(hidden_size, self.key_dim, bias=False)
        self.k_proj = nn.Linear(hidden_size, self.key_dim, bias=False)
        self.v_proj = nn.Linear(hidden_size, self.value_dim, bias=False)
        self.g_proj = nn.Linear(hidden_size, self.value_dim, bias=False)

        self.gk_proj = nn.Linear(hidden_size, self.num_heads)
        self.o_proj = nn.Linear(self.value_dim, hidden_size, bias=False)

        if gate_fn == 'swish' and fuse_norm:
            self.g_norm_swish_gate = FusedRMSNormSwishGate(self.head_v_dim, elementwise_affine, norm_eps)
            self.fuse_norm_and_gate = True
        else:
            self.fuse_norm_and_gate = False
            self.g_norm = RMSNorm(self.head_v_dim, elementwise_affine, norm_eps)

        self.gate_logit_normalizer = gate_logit_normalizer

        self.apply(self._initialize_weights)

    def _initialize_weights(self, module: nn.Module):
        if getattr(module, "_is_hf_initialized", False):
            return
        if isinstance(module, nn.Linear):
            nn.init.xavier_uniform_(module.weight, gain=2 ** -2.5)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        module._is_hf_initialized = True

    def forward(self, x):
        mode = self.mode
        q = rearrange(self.q_proj(x), 'b n (h d) -> b h n d', h=self.num_heads)
        k = rearrange(self.k_proj(x), 'b n (h d) -> b h n d', h=self.num_heads)
        v = rearrange(self.v_proj(x), 'b n (h d) -> b h n d', h=self.num_heads)
        gk = rearrange(self.gk_proj(x), 'b n h -> b h n')
        gk = (F.logsigmoid(gk) / self.gate_logit_normalizer)

        if mode == 'chunk':
            o = chunk_simple_gla(q, k, v, gk)
        else:
            raise NotImplementedError(f"Not supported mode `{mode}`.")

        o = rearrange(o, 'b h l d -> b l h d')
        g = self.g_proj(x)

        if self.fuse_norm_and_gate:
            g = rearrange(g, 'b l (h d) -> b l h d', h=self.num_heads)
            o = self.g_norm_swish_gate(o, g)
            o = rearrange(o, 'b l h d -> b l (h d)')
        else:
            o = self.g_norm(o)
            o = rearrange(o, 'b l h d -> b l (h d)')
            o = o * self.gate_fn(g)
        o = self.o_proj(o)
        return o


if __name__ == '__main__':
    batch = 4
    seq_len = 1024

    hidden_size = 2048
    x = torch.randn(batch, seq_len, hidden_size).to(torch.bfloat16).cuda().requires_grad_(True)
    model = SimpleGatedLinearAttention(hidden_size=hidden_size, mode='chunk').to(torch.bfloat16).cuda()
    y = model(x)
    print(y.shape)
    y.sum().backward()
    print(x.grad.shape)
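For intuition, the head-wise gated recurrence that `chunk_simple_gla` evaluates in chunks can be written as a naive per-token loop. This is a reference sketch of the recurrence only (slow, for checking shapes and semantics; my reading of the kernel's contract rather than its actual implementation, and any scaling of q is omitted): the state S of each head decays by a scalar exp(g_t) before each rank-1 update.

import torch

def simple_gla_reference(q, k, v, g):
    # q, k: (b, h, n, d_k); v: (b, h, n, d_v); g: (b, h, n) log-space head-wise gates
    b, h, n, d_k = q.shape
    d_v = v.shape[-1]
    S = q.new_zeros(b, h, d_k, d_v)
    outs = []
    for t in range(n):
        # decay the whole per-head state by a scalar, then add the rank-1 update k_t v_t^T
        S = g[:, :, t, None, None].exp() * S + k[:, :, t, :, None] * v[:, :, t, None, :]
        outs.append(torch.einsum('bhk,bhkv->bhv', q[:, :, t], S))
    return torch.stack(outs, dim=2)  # (b, h, n, d_v)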
fla/models/__init__.py
ADDED
|
@@ -0,0 +1,29 @@
# -*- coding: utf-8 -*-

from fla.models.abc import ABCConfig, ABCForCausalLM, ABCModel
from fla.models.delta_net import (DeltaNetConfig, DeltaNetForCausalLM,
                                  DeltaNetModel)
from fla.models.gla import GLAConfig, GLAForCausalLM, GLAModel
from fla.models.hgrn import HGRNConfig, HGRNForCausalLM, HGRNModel
from fla.models.hgrn2 import HGRN2Config, HGRN2ForCausalLM, HGRN2Model
from fla.models.linear_attn import (LinearAttentionConfig,
                                    LinearAttentionForCausalLM,
                                    LinearAttentionModel)
from fla.models.mamba import MambaConfig, MambaForCausalLM, MambaModel
from fla.models.retnet import RetNetConfig, RetNetForCausalLM, RetNetModel
from fla.models.rwkv6 import RWKV6Config, RWKV6ForCausalLM, RWKV6Model
from fla.models.transformer import (TransformerConfig, TransformerForCausalLM,
                                    TransformerModel)

__all__ = [
    'ABCConfig', 'ABCForCausalLM', 'ABCModel',
    'DeltaNetConfig', 'DeltaNetForCausalLM', 'DeltaNetModel',
    'GLAConfig', 'GLAForCausalLM', 'GLAModel',
    'HGRNConfig', 'HGRNForCausalLM', 'HGRNModel',
    'HGRN2Config', 'HGRN2ForCausalLM', 'HGRN2Model',
    'LinearAttentionConfig', 'LinearAttentionForCausalLM', 'LinearAttentionModel',
    'MambaConfig', 'MambaForCausalLM', 'MambaModel',
    'RetNetConfig', 'RetNetForCausalLM', 'RetNetModel',
    'RWKV6Config', 'RWKV6ForCausalLM', 'RWKV6Model',
    'TransformerConfig', 'TransformerForCausalLM', 'TransformerModel'
]
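Because each sub-package registers its config and model classes with the `transformers` Auto factories on import (see the per-model `__init__.py` files below), a model can be built through the Auto API once `fla.models` has been imported. A minimal sketch, assuming the config exposes these common fields; the sizes are placeholders, not a recommended setup:

from transformers import AutoModelForCausalLM
import fla.models  # triggers AutoConfig/AutoModel registration for all fla architectures

config = fla.models.RWKV6Config(hidden_size=512, num_hidden_layers=4, vocab_size=32000)
model = AutoModelForCausalLM.from_config(config)  # dispatches to RWKV6ForCausalLM via the registry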
fla/models/abc/__init__.py
ADDED
|
@@ -0,0 +1,13 @@
# -*- coding: utf-8 -*-

from transformers import AutoConfig, AutoModel, AutoModelForCausalLM

from fla.models.abc.configuration_abc import ABCConfig
from fla.models.abc.modeling_abc import ABCForCausalLM, ABCModel

AutoConfig.register(ABCConfig.model_type, ABCConfig)
AutoModel.register(ABCConfig, ABCModel)
AutoModelForCausalLM.register(ABCConfig, ABCForCausalLM)


__all__ = ['ABCConfig', 'ABCForCausalLM', 'ABCModel']
fla/models/abc/configuration_abc.py
ADDED
|
@@ -0,0 +1,74 @@
# -*- coding: utf-8 -*-

from typing import Optional

from transformers.configuration_utils import PretrainedConfig


class ABCConfig(PretrainedConfig):

    model_type = 'abc'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(
        self,
        vocab_size: int = 32000,
        hidden_size: int = 2048,
        gate_low_rank_dim: int = 16,
        clamp_min: float = -32,
        clamp_max: float = 32,
        hidden_ratio: Optional[int] = 4,
        intermediate_size: Optional[int] = None,
        num_hidden_layers: int = 24,
        num_heads: int = 4,
        num_slots: Optional[int] = 64,
        use_short_conv: bool = True,
        conv_size: int = 4,
        share_conv_kernel: bool = True,
        expand_k: float = 0.5,
        expand_v: float = 1,
        hidden_act: str = "swish",
        max_position_embeddings: int = 2048,
        elementwise_affine: Optional[bool] = True,
        norm_eps: float = 1e-6,
        use_cache: bool = True,
        pad_token_id: int = None,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        initializer_range: float = 0.02,
        tie_word_embeddings: bool = False,
        fuse_norm: bool = True,
        fuse_cross_entropy: bool = True,
        **kwargs
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.gate_low_rank_dim = gate_low_rank_dim
        self.clamp_min = clamp_min
        self.clamp_max = clamp_max
        self.hidden_ratio = hidden_ratio
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_heads = num_heads
        self.num_slots = num_slots
        self.use_short_conv = use_short_conv
        self.conv_size = conv_size
        self.share_conv_kernel = share_conv_kernel
        self.expand_k = expand_k
        self.expand_v = expand_v
        self.hidden_act = hidden_act
        self.elementwise_affine = elementwise_affine
        self.norm_eps = norm_eps
        self.use_cache = use_cache
        self.initializer_range = initializer_range
        self.fuse_cross_entropy = fuse_cross_entropy
        self.fuse_norm = fuse_norm

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
fla/models/abc/modeling_abc.py
ADDED
|
@@ -0,0 +1,394 @@
# -*- coding: utf-8 -*-

from __future__ import annotations

import math
import warnings
from typing import List, Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.utils.checkpoint
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (BaseModelOutputWithPast,
                                           CausalLMOutputWithPast)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging

from fla.layers.abc import ABCAttention
from fla.models.abc.configuration_abc import ABCConfig
from fla.models.utils import RecurrentCache
from fla.modules import FusedCrossEntropyLoss, RMSNorm
from fla.modules.activations import swiglu_linear

logger = logging.get_logger(__name__)


class ABCMLP(nn.Module):

    def __init__(
        self,
        hidden_size: int,
        hidden_ratio: Optional[int] = None,
        intermediate_size: Optional[int] = None,
        hidden_act: str = 'swish'
    ) -> ABCMLP:
        super().__init__()

        self.hidden_size = hidden_size
        # the final number of params is `hidden_ratio * hidden_size^2`
        # `intermediate_size` is chosen to be a multiple of 256 closest to `2/3 * hidden_size * hidden_ratio`
        if hidden_ratio is None:
            hidden_ratio = 4
        if intermediate_size is None:
            intermediate_size = int(hidden_size * hidden_ratio * 2 / 3)
            intermediate_size = 256 * ((intermediate_size + 256 - 1) // 256)
        self.hidden_ratio = hidden_ratio
        self.intermediate_size = intermediate_size

        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size * 2, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[hidden_act]

    def forward(self, x):
        y = self.gate_proj(x)
        gate, y = y.chunk(2, -1)
        return swiglu_linear(gate, y, self.down_proj.weight, self.down_proj.bias)
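To make the rounding comment above concrete: with the default hidden_size=2048 and hidden_ratio=4, int(2048 * 4 * 2 / 3) = 5461, and rounding up to the next multiple of 256 gives 5632, so the SwiGLU branch is 5632 wide while gate_proj emits 2 * 5632 features. As a one-line check:

hidden_size, hidden_ratio = 2048, 4  # the defaults used above
intermediate_size = int(hidden_size * hidden_ratio * 2 / 3)
assert 256 * ((intermediate_size + 256 - 1) // 256) == 5632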
class ABCBlock(nn.Module):
    def __init__(self, config: ABCConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size

        self.attn_norm = RMSNorm(hidden_size=config.hidden_size, eps=config.norm_eps)
        self.attn = ABCAttention(
            hidden_size=config.hidden_size,
            expand_k=config.expand_k,
            expand_v=config.expand_v,
            num_heads=config.num_heads,
            num_slots=config.num_slots,
            use_short_conv=config.use_short_conv,
            conv_size=config.conv_size,
            share_conv_kernel=config.share_conv_kernel,
            gate_fn=config.hidden_act,
            elementwise_affine=config.elementwise_affine,
            norm_eps=config.norm_eps,
            clamp_min=config.clamp_min,
            clamp_max=config.clamp_max,
            fuse_norm=config.fuse_norm,
            layer_idx=layer_idx
        )
        self.mlp_norm = RMSNorm(hidden_size=config.hidden_size, eps=config.norm_eps)
        self.mlp = ABCMLP(
            hidden_size=config.hidden_size,
            hidden_ratio=config.hidden_ratio,
            intermediate_size=config.intermediate_size,
            hidden_act=config.hidden_act
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Tuple[List[torch.Tensor]]] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:

        residual = hidden_states

        hidden_states = self.attn_norm(hidden_states)
        hidden_states, attentions, past_key_values = self.attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions
        )
        hidden_states, residual = self.mlp_norm(hidden_states, residual, True)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states, attentions, past_key_values)

        return outputs


class ABCPreTrainedModel(PreTrainedModel):

    config_class = ABCConfig
    supports_gradient_checkpointing = True
    _no_split_modules = ['ABCBlock']

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(
        self,
        module: nn.Module,
        rescale_prenorm_residual: bool = True,
        num_residuals_per_layer: int = 2,
    ):
        if isinstance(module, (nn.Linear, nn.Conv1d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()

        if rescale_prenorm_residual:
            # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
            # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
            # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
            # > -- GPT-2 :: https://openai.com/blog/better-language-models/
            #
            # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
            for name, p in module.named_parameters():
                if name in ["o_proj.weight", "down_proj.weight"]:
                    # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
                    # Following Pytorch init, except scale by 1/sqrt(2 * n_layer)
                    # We need to reinit p since this code could be called multiple times
                    # Having just p *= scale would repeatedly scale it down
                    with torch.no_grad():
                        p /= math.sqrt(num_residuals_per_layer * self.config.num_hidden_layers)
class ABCModel(ABCPreTrainedModel):
|
| 163 |
+
|
| 164 |
+
def __init__(self, config: ABCConfig):
|
| 165 |
+
super().__init__(config)
|
| 166 |
+
self.padding_idx = config.pad_token_id
|
| 167 |
+
self.vocab_size = config.vocab_size
|
| 168 |
+
|
| 169 |
+
self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
|
| 170 |
+
self.layers = nn.ModuleList([ABCBlock(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
|
| 171 |
+
self.norm = RMSNorm(config.hidden_size, eps=config.norm_eps)
|
| 172 |
+
|
| 173 |
+
self.gradient_checkpointing = False
|
| 174 |
+
|
| 175 |
+
self.post_init()
|
| 176 |
+
|
| 177 |
+
def get_input_embeddings(self):
|
| 178 |
+
return self.embeddings
|
| 179 |
+
|
| 180 |
+
def set_input_embeddings(self, value):
|
| 181 |
+
self.embeddings = value
|
| 182 |
+
|
| 183 |
+
def forward(
|
| 184 |
+
self,
|
| 185 |
+
input_ids: Optional[torch.LongTensor] = None,
|
| 186 |
+
attention_mask: Optional[torch.Tensor] = None, # noqa
|
| 187 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 188 |
+
past_key_values: Optional[Tuple[List[torch.Tensor]]] = None,
|
| 189 |
+
use_cache: Optional[bool] = None,
|
| 190 |
+
output_attentions: Optional[bool] = None,
|
| 191 |
+
output_hidden_states: Optional[bool] = None,
|
| 192 |
+
return_dict: Optional[bool] = None
|
| 193 |
+
) -> Union[Tuple, BaseModelOutputWithPast]:
|
| 194 |
+
if output_attentions:
|
| 195 |
+
warnings.warn("`ABCModel` does not `output_attentions` now, setting it to `False`.")
|
| 196 |
+
output_attentions = False
|
| 197 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| 198 |
+
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 199 |
+
use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
|
| 200 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 201 |
+
|
| 202 |
+
# retrieve input_ids and inputs_embeds
|
| 203 |
+
if input_ids is not None and inputs_embeds is not None:
|
| 204 |
+
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
|
| 205 |
+
elif input_ids is not None:
|
| 206 |
+
batch_size = input_ids.shape[0]
|
| 207 |
+
elif inputs_embeds is not None:
|
| 208 |
+
batch_size = inputs_embeds.shape[0]
|
| 209 |
+
else:
|
| 210 |
+
raise ValueError("You have to specify either input_ids or inputs_embeds")
|
| 211 |
+
|
| 212 |
+
if inputs_embeds is None:
|
| 213 |
+
inputs_embeds = self.embeddings(input_ids)
|
| 214 |
+
hidden_states = inputs_embeds
|
| 215 |
+
|
| 216 |
+
if use_cache:
|
| 217 |
+
if past_key_values is None:
|
| 218 |
+
past_key_values = [layer.attn.init_state(batch_size) for layer in self.layers]
|
| 219 |
+
if not isinstance(past_key_values, RecurrentCache):
|
| 220 |
+
past_key_values = RecurrentCache.from_legacy_cache(past_key_values)
|
| 221 |
+
|
| 222 |
+
if self.gradient_checkpointing and self.training:
|
| 223 |
+
if use_cache:
|
| 224 |
+
logger.warning_once(
|
| 225 |
+
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
|
| 226 |
+
)
|
| 227 |
+
use_cache = False
|
| 228 |
+
|
| 229 |
+
all_hidden_states = () if output_hidden_states else None
|
| 230 |
+
all_attns = () if output_attentions else None
|
| 231 |
+
for layer in self.layers:
|
| 232 |
+
if output_hidden_states:
|
| 233 |
+
all_hidden_states += (hidden_states,)
|
| 234 |
+
|
| 235 |
+
if self.gradient_checkpointing and self.training:
|
| 236 |
+
hidden_states, attentions, past_key_values = self._gradient_checkpointing_func(
|
| 237 |
+
layer.__call__,
|
| 238 |
+
hidden_states,
|
| 239 |
+
attention_mask,
|
| 240 |
+
past_key_values,
|
| 241 |
+
use_cache,
|
| 242 |
+
output_attentions
|
| 243 |
+
)
|
| 244 |
+
else:
|
| 245 |
+
hidden_states, attentions, past_key_values = layer(
|
| 246 |
+
hidden_states,
|
| 247 |
+
attention_mask,
|
| 248 |
+
past_key_values=past_key_values,
|
| 249 |
+
use_cache=use_cache,
|
| 250 |
+
output_attentions=output_attentions
|
| 251 |
+
)
|
| 252 |
+
|
| 253 |
+
if output_attentions:
|
| 254 |
+
all_attns += (attentions,)
|
| 255 |
+
|
| 256 |
+
hidden_states = self.norm(hidden_states)
|
| 257 |
+
|
| 258 |
+
# add hidden states from the last decoder layer
|
| 259 |
+
if output_hidden_states:
|
| 260 |
+
all_hidden_states += (hidden_states,)
|
| 261 |
+
|
| 262 |
+
next_cache = None
|
| 263 |
+
if use_cache:
|
| 264 |
+
next_cache = past_key_values.to_legacy_cache()
|
| 265 |
+
if not return_dict:
|
| 266 |
+
return tuple(x for x in [hidden_states, next_cache, all_hidden_states, all_attns] if x is not None)
|
| 267 |
+
return BaseModelOutputWithPast(
|
| 268 |
+
last_hidden_state=hidden_states,
|
| 269 |
+
past_key_values=next_cache,
|
| 270 |
+
hidden_states=all_hidden_states,
|
| 271 |
+
attentions=all_attns
|
| 272 |
+
)
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
class ABCForCausalLM(ABCPreTrainedModel):
|
| 276 |
+
_tied_weights_keys = ["lm_head.weight"]
|
| 277 |
+
|
| 278 |
+
def __init__(self, config):
|
| 279 |
+
super().__init__(config)
|
| 280 |
+
self.model = ABCModel(config)
|
| 281 |
+
self.vocab_size = config.vocab_size
|
| 282 |
+
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
|
| 283 |
+
|
| 284 |
+
# Initialize weights and apply final processing
|
| 285 |
+
self.post_init()
|
| 286 |
+
|
| 287 |
+
def get_input_embeddings(self):
|
| 288 |
+
return self.model.embeddings
|
| 289 |
+
|
| 290 |
+
def set_input_embeddings(self, value):
|
| 291 |
+
self.model.embeddings = value
|
| 292 |
+
|
| 293 |
+
def get_output_embeddings(self):
|
| 294 |
+
return self.lm_head
|
| 295 |
+
|
| 296 |
+
def set_output_embeddings(self, new_embeddings):
|
| 297 |
+
self.lm_head = new_embeddings
|
| 298 |
+
|
| 299 |
+
def set_decoder(self, decoder):
|
| 300 |
+
self.model = decoder
|
| 301 |
+
|
| 302 |
+
def get_decoder(self):
|
| 303 |
+
return self.model
|
| 304 |
+
|
| 305 |
+
def generate(self, *args, **kwargs):
|
| 306 |
+
try:
|
| 307 |
+
return super().generate(*args, **kwargs)
|
| 308 |
+
except AttributeError as exception:
|
| 309 |
+
if 'past_key_values' in str(exception):
|
| 310 |
+
raise AttributeError(
|
| 311 |
+
f"You tried to call `generate` with a decoding strategy that manipulates `past_key_values`, "
|
| 312 |
+
f"which is not supported for {self.__class__.__name__}. "
|
| 313 |
+
f"Try another generation strategy instead. "
|
| 314 |
+
f"For the available generation strategies, check this doc: "
|
| 315 |
+
f"https://huggingface.co/docs/transformers/en/generation_strategies#decoding-strategies"
|
| 316 |
+
)
|
| 317 |
+
else:
|
| 318 |
+
raise exception
|
| 319 |
+
|
| 320 |
+
def prepare_inputs_for_generation(
|
| 321 |
+
self,
|
| 322 |
+
input_ids: torch.LongTensor = None,
|
| 323 |
+
past_key_values: Optional[Tuple[List[torch.Tensor]]] = None,
|
| 324 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 325 |
+
**kwargs
|
| 326 |
+
):
|
| 327 |
+
# only last token for `inputs_ids` if the `past_key_values` is passed along.
|
| 328 |
+
if past_key_values is not None:
|
| 329 |
+
if not isinstance(past_key_values, RecurrentCache):
|
| 330 |
+
past_key_values = RecurrentCache.from_legacy_cache(past_key_values, input_ids.shape[1] - 1)
|
| 331 |
+
input_ids = input_ids[:, -1:]
|
| 332 |
+
|
| 333 |
+
# if `inputs_embeds` are passed, we only want to use them in the 1st generation step
|
| 334 |
+
if inputs_embeds is not None and past_key_values is None:
|
| 335 |
+
model_inputs = {'inputs_embeds': inputs_embeds}
|
| 336 |
+
else:
|
| 337 |
+
model_inputs = {'input_ids': input_ids}
|
| 338 |
+
model_inputs['past_key_values'] = past_key_values
|
| 339 |
+
return model_inputs
|
| 340 |
+
|
| 341 |
+
def forward(
|
| 342 |
+
self,
|
| 343 |
+
input_ids: torch.LongTensor = None,
|
| 344 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 345 |
+
inputs_embeds: Optional[torch.Tensor] = None,
|
| 346 |
+
past_key_values: Optional[Tuple[List[torch.Tensor]]] = None,
|
| 347 |
+
labels: Optional[torch.LongTensor] = None,
|
| 348 |
+
use_cache: Optional[bool] = None,
|
| 349 |
+
output_attentions: Optional[bool] = None,
|
| 350 |
+
output_hidden_states: Optional[bool] = None,
|
| 351 |
+
return_dict: Optional[bool] = None,
|
| 352 |
+
) -> Union[Tuple, CausalLMOutputWithPast]:
|
| 353 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| 354 |
+
output_hidden_states = (
|
| 355 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 356 |
+
)
|
| 357 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 358 |
+
|
| 359 |
+
outputs = self.model(
|
| 360 |
+
input_ids=input_ids,
|
| 361 |
+
attention_mask=attention_mask,
|
| 362 |
+
inputs_embeds=inputs_embeds,
|
| 363 |
+
past_key_values=past_key_values,
|
| 364 |
+
use_cache=use_cache,
|
| 365 |
+
output_attentions=output_attentions,
|
| 366 |
+
output_hidden_states=output_hidden_states,
|
| 367 |
+
return_dict=return_dict
|
| 368 |
+
)
|
| 369 |
+
|
| 370 |
+
hidden_states = outputs[0]
|
| 371 |
+
logits = self.lm_head(hidden_states)
|
| 372 |
+
|
| 373 |
+
loss = None
|
| 374 |
+
if labels is not None:
|
| 375 |
+
if self.config.fuse_cross_entropy:
|
| 376 |
+
loss_fct = FusedCrossEntropyLoss(inplace_backward=True)
|
| 377 |
+
else:
|
| 378 |
+
loss_fct = nn.CrossEntropyLoss()
|
| 379 |
+
# Enable model parallelism
|
| 380 |
+
labels = labels.to(logits.device)
|
| 381 |
+
labels = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], loss_fct.ignore_index)), 1)
|
| 382 |
+
loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
|
| 383 |
+
|
| 384 |
+
if not return_dict:
|
| 385 |
+
output = (logits,) + outputs[1:]
|
| 386 |
+
return (loss,) + output if loss is not None else output
|
| 387 |
+
|
| 388 |
+
return CausalLMOutputWithPast(
|
| 389 |
+
loss=loss,
|
| 390 |
+
logits=logits,
|
| 391 |
+
past_key_values=outputs.past_key_values,
|
| 392 |
+
hidden_states=outputs.hidden_states,
|
| 393 |
+
attentions=outputs.attentions,
|
| 394 |
+
)
|
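As a usage sketch (not part of the library): since the cache here is a fixed-size recurrent state rather than growing key/value tensors, incremental decoding feeds one token at a time while threading `past_key_values` through. The sizes are placeholders and the fla Triton kernels are assumed available, i.e. a CUDA device:

import torch
from fla.models import ABCConfig, ABCForCausalLM

# tiny placeholder sizes; a real checkpoint would come from `from_pretrained`
model = ABCForCausalLM(ABCConfig(hidden_size=256, num_hidden_layers=2, vocab_size=1000)).cuda().eval()
input_ids = torch.randint(0, 1000, (1, 8), device='cuda')

with torch.no_grad():
    out = model(input_ids=input_ids, use_cache=True)  # prefill: consume the prompt once
    past, next_id = out.past_key_values, out.logits[:, -1].argmax(-1, keepdim=True)
    for _ in range(4):                                # decode: one token per step through the state
        out = model(input_ids=next_id, past_key_values=past, use_cache=True)
        past, next_id = out.past_key_values, out.logits[:, -1].argmax(-1, keepdim=True)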
fla/models/delta_net/__init__.py
ADDED
|
@@ -0,0 +1,14 @@
# -*- coding: utf-8 -*-

from transformers import AutoConfig, AutoModel, AutoModelForCausalLM

from fla.models.delta_net.configuration_delta_net import \
    DeltaNetConfig
from fla.models.delta_net.modeling_delta_net import (
    DeltaNetForCausalLM, DeltaNetModel)

AutoConfig.register(DeltaNetConfig.model_type, DeltaNetConfig)
AutoModel.register(DeltaNetConfig, DeltaNetModel)
AutoModelForCausalLM.register(DeltaNetConfig, DeltaNetForCausalLM)

__all__ = ['DeltaNetConfig', 'DeltaNetForCausalLM', 'DeltaNetModel']
fla/models/delta_net/configuration_delta_net.py
ADDED
|
@@ -0,0 +1,77 @@
# -*- coding: utf-8 -*-

from typing import Optional

from transformers.configuration_utils import PretrainedConfig


class DeltaNetConfig(PretrainedConfig):

    model_type = 'delta_net'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(
        self,
        vocab_size: int = 32000,
        hidden_size: int = 2048,
        expand_k: int = 1,
        expand_v: int = 1,
        use_gate: bool = False,
        use_short_conv: bool = True,
        conv_size: int = 4,
        share_conv_kernel: bool = False,
        use_rope: bool = False,
        use_beta: bool = True,
        use_output_norm: bool = True,
        hidden_ratio: Optional[int] = 4,
        intermediate_size: Optional[int] = None,
        num_hidden_layers: int = 24,
        num_heads: int = 4,
        attn_mode: str = "chunk",
        qk_norm: str = 'l2',
        qk_activation: str = 'silu',
        chunk_size: int = 64,
        hidden_act: str = "swish",
        max_position_embeddings: int = 2048,
        rms_norm_eps: float = 1e-6,
        use_cache: bool = True,
        pad_token_id: int = None,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        tie_word_embeddings: bool = False,
        initializer_range: float = 0.02,
        fuse_cross_entropy: bool = True,
        **kwargs
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.expand_k = expand_k
        self.expand_v = expand_v
        self.hidden_ratio = hidden_ratio
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_heads = num_heads
        self.attn_mode = attn_mode
        self.chunk_size = chunk_size
        self.hidden_act = hidden_act
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.initializer_range = initializer_range
        self.fuse_cross_entropy = fuse_cross_entropy
        self.use_gate = use_gate
        self.use_short_conv = use_short_conv
        self.conv_size = conv_size
        self.share_conv_kernel = share_conv_kernel
        self.use_rope = use_rope
        self.use_beta = use_beta
        self.use_output_norm = use_output_norm
        self.qk_norm = qk_norm
        self.qk_activation = qk_activation

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
fla/models/delta_net/modeling_delta_net.py
ADDED
|
@@ -0,0 +1,405 @@
# -*- coding: utf-8 -*-

from __future__ import annotations

import math
import warnings
from typing import List, Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.utils.checkpoint
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (BaseModelOutputWithPast,
                                           CausalLMOutputWithPast)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging

from fla.layers.delta_net import DeltaNet
from fla.models.delta_net.configuration_delta_net import DeltaNetConfig
from fla.models.utils import RecurrentCache
from fla.modules import FusedCrossEntropyLoss, RMSNorm
from fla.modules.activations import swiglu_linear

logger = logging.get_logger(__name__)


class DeltaNetMLP(nn.Module):

    def __init__(
        self,
        hidden_size: int,
        hidden_ratio: Optional[int] = None,
        intermediate_size: Optional[int] = None,
        hidden_act: str = 'swish'
    ) -> DeltaNetMLP:
        super().__init__()

        self.hidden_size = hidden_size
        # the final number of params is `hidden_ratio * hidden_size^2`
        # `intermediate_size` is chosen to be a multiple of 256 closest to `2/3 * hidden_size * hidden_ratio`
        if hidden_ratio is None:
            hidden_ratio = 4
        if intermediate_size is None:
            intermediate_size = int(hidden_size * hidden_ratio * 2 / 3)
            intermediate_size = 256 * ((intermediate_size + 256 - 1) // 256)
        self.hidden_ratio = hidden_ratio
        self.intermediate_size = intermediate_size

        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size * 2, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[hidden_act]

    def forward(self, x):
        y = self.gate_proj(x)
        gate, y = y.chunk(2, -1)
        return swiglu_linear(gate, y, self.down_proj.weight, self.down_proj.bias)


class DeltaNetBlock(nn.Module):
    def __init__(self, config: DeltaNetConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size

        self.attn_norm = RMSNorm(hidden_size=config.hidden_size, eps=config.rms_norm_eps)
        self.attn = DeltaNet(
            mode=config.attn_mode,
            hidden_size=config.hidden_size,
            expand_k=config.expand_k,
            expand_v=config.expand_v,
            num_heads=config.num_heads,
            use_gate=config.use_gate,
            use_rope=config.use_rope,
            use_beta=config.use_beta,
            use_short_conv=config.use_short_conv,
            use_output_norm=config.use_output_norm,
            conv_size=config.conv_size,
            share_conv_kernel=config.share_conv_kernel,
            layer_idx=layer_idx,
            qk_norm=config.qk_norm,
            qk_activation=config.qk_activation
        )
        self.mlp_norm = RMSNorm(hidden_size=config.hidden_size, eps=config.rms_norm_eps)
        self.mlp = DeltaNetMLP(
            hidden_size=config.hidden_size,
            hidden_ratio=config.hidden_ratio,
            intermediate_size=config.intermediate_size,
            hidden_act=config.hidden_act
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Tuple[List[torch.Tensor]]] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:

        residual = hidden_states

        hidden_states = self.attn_norm(hidden_states)
        hidden_states, attentions, past_key_values = self.attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions
        )
        hidden_states, residual = self.mlp_norm(hidden_states, residual, True)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states, attentions, past_key_values)

        return outputs
class DeltaNetPreTrainedModel(PreTrainedModel):
|
| 120 |
+
|
| 121 |
+
config_class = DeltaNetConfig
|
| 122 |
+
supports_gradient_checkpointing = True
|
| 123 |
+
_no_split_modules = ['DeltaNetBlock']
|
| 124 |
+
|
| 125 |
+
def __init__(self, *inputs, **kwargs):
|
| 126 |
+
super().__init__(*inputs, **kwargs)
|
| 127 |
+
|
| 128 |
+
def _init_weights(
|
| 129 |
+
self,
|
| 130 |
+
module: nn.Module,
|
| 131 |
+
rescale_prenorm_residual: bool = True,
|
| 132 |
+
num_residuals_per_layer: int = 2,
|
| 133 |
+
):
|
| 134 |
+
if isinstance(module, (nn.Linear, nn.Conv1d)):
|
| 135 |
+
# Slightly different from the TF version which uses truncated_normal for initialization
|
| 136 |
+
# cf https://github.com/pytorch/pytorch/pull/5617
|
| 137 |
+
nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
|
| 138 |
+
if module.bias is not None:
|
| 139 |
+
nn.init.zeros_(module.bias)
|
| 140 |
+
elif isinstance(module, nn.Embedding):
|
| 141 |
+
nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
|
| 142 |
+
if module.padding_idx is not None:
|
| 143 |
+
module.weight.data[module.padding_idx].zero_()
|
| 144 |
+
|
| 145 |
+
if rescale_prenorm_residual:
|
| 146 |
+
# Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
|
| 147 |
+
# > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
|
| 148 |
+
# > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
|
| 149 |
+
# > -- GPT-2 :: https://openai.com/blog/better-language-models/
|
| 150 |
+
#
|
| 151 |
+
# Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
|
| 152 |
+
for name, p in module.named_parameters():
|
| 153 |
+
if name in ["o_proj.weight", "down_proj.weight"]:
|
| 154 |
+
# Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
|
| 155 |
+
# Following Pytorch init, except scale by 1/sqrt(2 * n_layer)
|
| 156 |
+
# We need to reinit p since this code could be called multiple times
|
| 157 |
+
# Having just p *= scale would repeatedly scale it down
|
| 158 |
+
with torch.no_grad():
|
| 159 |
+
p /= math.sqrt(num_residuals_per_layer * self.config.num_hidden_layers)
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
class DeltaNetModel(DeltaNetPreTrainedModel):
|
| 163 |
+
|
| 164 |
+
def __init__(self, config: DeltaNetConfig):
|
| 165 |
+
super().__init__(config)
|
| 166 |
+
self.padding_idx = config.pad_token_id
|
| 167 |
+
self.vocab_size = config.vocab_size
|
| 168 |
+
|
| 169 |
+
self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
|
| 170 |
+
self.layers = nn.ModuleList([DeltaNetBlock(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
|
| 171 |
+
self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
| 172 |
+
|
| 173 |
+
self.gradient_checkpointing = False
|
| 174 |
+
|
| 175 |
+
self.post_init()
|
| 176 |
+
|
| 177 |
+
def get_input_embeddings(self):
|
| 178 |
+
return self.embeddings
|
| 179 |
+
|
| 180 |
+
def set_input_embeddings(self, value):
|
| 181 |
+
self.embeddings = value
|
| 182 |
+
|
| 183 |
+
def forward(
|
| 184 |
+
self,
|
| 185 |
+
input_ids: Optional[torch.LongTensor] = None,
|
| 186 |
+
attention_mask: Optional[torch.Tensor] = None, # noqa
|
| 187 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 188 |
+
past_key_values: Optional[Tuple[List[torch.Tensor]]] = None,
|
| 189 |
+
use_cache: Optional[bool] = None,
|
| 190 |
+
output_attentions: Optional[bool] = None,
|
| 191 |
+
output_hidden_states: Optional[bool] = None,
|
| 192 |
+
return_dict: Optional[bool] = None
|
| 193 |
+
) -> Union[Tuple, BaseModelOutputWithPast]:
|
| 194 |
+
if output_attentions:
|
| 195 |
+
warnings.warn("`DeltaNetModel` does not `output_attentions` now, setting it to `False`.")
|
| 196 |
+
output_attentions = False
|
| 197 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| 198 |
+
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 199 |
+
use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
|
| 200 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 201 |
+
|
| 202 |
+
# retrieve input_ids and inputs_embeds
|
| 203 |
+
if input_ids is not None and inputs_embeds is not None:
|
| 204 |
+
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
|
| 205 |
+
elif input_ids is not None:
|
| 206 |
+
batch_size = input_ids.shape[0]
|
| 207 |
+
elif inputs_embeds is not None:
|
| 208 |
+
batch_size = inputs_embeds.shape[0]
|
| 209 |
+
else:
|
| 210 |
+
raise ValueError("You have to specify either input_ids or inputs_embeds")
|
| 211 |
+
|
| 212 |
+
if inputs_embeds is None:
|
| 213 |
+
inputs_embeds = self.embeddings(input_ids)
|
| 214 |
+
hidden_states = inputs_embeds
|
| 215 |
+
|
| 216 |
+
if use_cache:
|
| 217 |
+
if past_key_values is None:
|
| 218 |
+
past_key_values = [layer.attn.init_state(batch_size) for layer in self.layers]
|
| 219 |
+
if not isinstance(past_key_values, RecurrentCache):
|
| 220 |
+
past_key_values = RecurrentCache.from_legacy_cache(past_key_values)
|
| 221 |
+
|
| 222 |
+
if self.gradient_checkpointing and self.training:
|
| 223 |
+
if use_cache:
|
| 224 |
+
logger.warning_once(
|
| 225 |
+
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
|
| 226 |
+
)
|
| 227 |
+
use_cache = False
|
| 228 |
+
|
| 229 |
+
all_hidden_states = () if output_hidden_states else None
|
| 230 |
+
all_attns = () if output_attentions else None
|
| 231 |
+
for layer in self.layers:
|
| 232 |
+
if output_hidden_states:
|
| 233 |
+
all_hidden_states += (hidden_states,)
|
| 234 |
+
|
| 235 |
+
if self.gradient_checkpointing and self.training:
|
| 236 |
+
hidden_states, attentions, past_key_values = self._gradient_checkpointing_func(
|
| 237 |
+
layer.__call__,
|
| 238 |
+
hidden_states,
|
| 239 |
+
attention_mask,
|
| 240 |
+
past_key_values,
|
| 241 |
+
use_cache,
|
| 242 |
+
output_attentions
|
| 243 |
+
)
|
| 244 |
+
else:
|
| 245 |
+
hidden_states, attentions, past_key_values = layer(
|
| 246 |
+
hidden_states,
|
| 247 |
+
attention_mask=attention_mask,
|
| 248 |
+
past_key_values=past_key_values,
|
| 249 |
+
use_cache=use_cache,
|
| 250 |
+
output_attentions=output_attentions
|
| 251 |
+
)
|
| 252 |
+
|
| 253 |
+
if output_attentions:
|
| 254 |
+
all_attns += (attentions,)
|
| 255 |
+
|
| 256 |
+
hidden_states = self.norm(hidden_states)
|
| 257 |
+
|
| 258 |
+
# add hidden states from the last decoder layer
|
| 259 |
+
if output_hidden_states:
|
| 260 |
+
all_hidden_states += (hidden_states,)
|
| 261 |
+
|
| 262 |
+
next_cache = past_key_values
|
| 263 |
+
# if use_cache:
|
| 264 |
+
# next_cache = past_key_values.to_legacy_cache()
|
| 265 |
+
if not return_dict:
|
| 266 |
+
return tuple(x for x in [hidden_states, next_cache, all_hidden_states, all_attns] if x is not None)
|
| 267 |
+
return BaseModelOutputWithPast(
|
| 268 |
+
last_hidden_state=hidden_states,
|
| 269 |
+
past_key_values=next_cache,
|
| 270 |
+
hidden_states=all_hidden_states,
|
| 271 |
+
attentions=all_attns
|
| 272 |
+
)
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
class DeltaNetForCausalLM(DeltaNetPreTrainedModel):
|
| 276 |
+
_tied_weights_keys = ["lm_head.weight"]
|
| 277 |
+
|
| 278 |
+
def __init__(self, config):
|
| 279 |
+
super().__init__(config)
|
| 280 |
+
self.model = DeltaNetModel(config)
|
| 281 |
+
self.vocab_size = config.vocab_size
|
| 282 |
+
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
|
| 283 |
+
|
| 284 |
+
# Initialize weights and apply final processing
|
| 285 |
+
self.post_init()
|
| 286 |
+
|
| 287 |
+
def get_input_embeddings(self):
|
| 288 |
+
return self.model.embeddings
|
| 289 |
+
|
| 290 |
+
def set_input_embeddings(self, value):
|
| 291 |
+
self.model.embeddings = value
|
| 292 |
+
|
| 293 |
+
def get_output_embeddings(self):
|
| 294 |
+
return self.lm_head
|
| 295 |
+
|
| 296 |
+
def set_output_embeddings(self, new_embeddings):
|
| 297 |
+
self.lm_head = new_embeddings
|
| 298 |
+
|
| 299 |
+
def set_decoder(self, decoder):
|
| 300 |
+
self.model = decoder
|
| 301 |
+
|
| 302 |
+
def get_decoder(self):
|
| 303 |
+
return self.model
|
| 304 |
+
|
| 305 |
+
def generate(self, *args, **kwargs):
|
| 306 |
+
try:
|
| 307 |
+
return super().generate(*args, **kwargs)
|
| 308 |
+
except AttributeError as exception:
|
| 309 |
+
if 'past_key_values' in str(exception):
|
| 310 |
+
raise AttributeError(
|
| 311 |
+
f"You tried to call `generate` with a decoding strategy that manipulates `past_key_values`, "
|
| 312 |
+
f"which is not supported for {self.__class__.__name__}. "
|
| 313 |
+
f"Try another generation strategy instead. "
|
| 314 |
+
f"For the available generation strategies, check this doc: "
|
| 315 |
+
f"https://huggingface.co/docs/transformers/en/generation_strategies#decoding-strategies"
|
| 316 |
+
)
|
| 317 |
+
else:
|
| 318 |
+
raise exception
|
| 319 |
+
|
| 320 |
+
def prepare_inputs_for_generation(
|
| 321 |
+
self,
|
| 322 |
+
input_ids: torch.LongTensor = None,
|
| 323 |
+
past_key_values: Optional[Tuple[List[torch.Tensor]]] = None,
|
| 324 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 325 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 326 |
+
**kwargs
|
| 327 |
+
):
|
| 328 |
+
# only last token for `inputs_ids` if the `past_key_values` is passed along.
|
| 329 |
+
if past_key_values is not None:
|
| 330 |
+
if not isinstance(past_key_values, RecurrentCache):
|
| 331 |
+
past_key_values = RecurrentCache.from_legacy_cache(past_key_values, input_ids.shape[1] - 1)
|
| 332 |
+
# breakpoint()
|
| 333 |
+
input_ids, attention_mask = input_ids[:, -1:], attention_mask[:, -1:]
|
| 334 |
+
|
| 335 |
+
# if `inputs_embeds` are passed, we only want to use them in the 1st generation step
|
| 336 |
+
if inputs_embeds is not None and past_key_values is None:
|
| 337 |
+
model_inputs = {'inputs_embeds': inputs_embeds}
|
| 338 |
+
else:
|
| 339 |
+
# The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
|
| 340 |
+
# recompiles graphs as the stride of the inputs is a guard.
|
| 341 |
+
# Ref: https://github.com/huggingface/transformers/pull/29114
|
| 342 |
+
# TODO: use `next_tokens` directly instead.
|
| 343 |
+
model_inputs = {'input_ids': input_ids.contiguous()}
|
| 344 |
+
|
| 345 |
+
model_inputs.update({
|
| 346 |
+
'past_key_values': past_key_values,
|
| 347 |
+
'use_cache': kwargs.get('use_cache'),
|
| 348 |
+
'attention_mask': attention_mask,
|
| 349 |
+
})
|
| 350 |
+
return model_inputs
|
| 351 |
+
|
| 352 |
+
def forward(
|
| 353 |
+
self,
|
| 354 |
+
input_ids: torch.LongTensor = None,
|
| 355 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 356 |
+
inputs_embeds: Optional[torch.Tensor] = None,
|
| 357 |
+
past_key_values: Optional[Tuple[List[torch.Tensor]]] = None,
|
| 358 |
+
labels: Optional[torch.LongTensor] = None,
|
| 359 |
+
use_cache: Optional[bool] = None,
|
| 360 |
+
output_attentions: Optional[bool] = None,
|
| 361 |
+
output_hidden_states: Optional[bool] = None,
|
| 362 |
+
return_dict: Optional[bool] = None,
|
| 363 |
+
) -> Union[Tuple, CausalLMOutputWithPast]:
|
| 364 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| 365 |
+
output_hidden_states = (
|
| 366 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 367 |
+
)
|
| 368 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 369 |
+
|
| 370 |
+
outputs = self.model(
|
| 371 |
+
input_ids=input_ids,
|
| 372 |
+
attention_mask=attention_mask,
|
| 373 |
+
inputs_embeds=inputs_embeds,
|
| 374 |
+
past_key_values=past_key_values,
|
| 375 |
+
use_cache=use_cache,
|
| 376 |
+
output_attentions=output_attentions,
|
| 377 |
+
output_hidden_states=output_hidden_states,
|
| 378 |
+
return_dict=return_dict
|
| 379 |
+
)
|
| 380 |
+
|
| 381 |
+
hidden_states = outputs[0]
|
| 382 |
+
logits = self.lm_head(hidden_states)
|
| 383 |
+
|
| 384 |
+
loss = None
|
| 385 |
+
if labels is not None:
|
| 386 |
+
if self.config.fuse_cross_entropy:
|
| 387 |
+
loss_fct = FusedCrossEntropyLoss(inplace_backward=True)
|
| 388 |
+
else:
|
| 389 |
+
loss_fct = nn.CrossEntropyLoss()
|
| 390 |
+
# Enable model parallelism
|
| 391 |
+
labels = labels.to(logits.device)
|
| 392 |
+
labels = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], loss_fct.ignore_index)), 1)
|
| 393 |
+
loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
|
| 394 |
+
|
| 395 |
+
if not return_dict:
|
| 396 |
+
output = (logits,) + outputs[1:]
|
| 397 |
+
return (loss,) + output if loss is not None else output
|
| 398 |
+
|
| 399 |
+
return CausalLMOutputWithPast(
|
| 400 |
+
loss=loss,
|
| 401 |
+
logits=logits,
|
| 402 |
+
past_key_values=outputs.past_key_values,
|
| 403 |
+
hidden_states=outputs.hidden_states,
|
| 404 |
+
attentions=outputs.attentions,
|
| 405 |
+
)
|
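A quick check on the `DeltaNetMLP` sizing rule above: despite the comment saying "closest to", the expression is a ceiling, rounding `2/3 * hidden_size * hidden_ratio` up to the next multiple of 256. A minimal sketch of the arithmetic (plain Python, not part of the commit):

```python
# DeltaNetMLP intermediate_size for the default-style hidden_size=2048, hidden_ratio=4
intermediate_size = int(2048 * 4 * 2 / 3)                         # 5461
intermediate_size = 256 * ((intermediate_size + 256 - 1) // 256)  # ceil -> 5632 = 22 * 256
assert intermediate_size == 5632
# gate_proj then maps 2048 -> 2 * 5632 (gate and value halves); down_proj maps 5632 -> 2048
```

Note that the nearest multiple of 256 to 5461 would actually be 5376; the code always rounds up.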
fla/models/gla/__init__.py
ADDED
|
@@ -0,0 +1,13 @@

# -*- coding: utf-8 -*-

from transformers import AutoConfig, AutoModel, AutoModelForCausalLM

from fla.models.gla.configuration_gla import GLAConfig
from fla.models.gla.modeling_gla import GLAForCausalLM, GLAModel

AutoConfig.register(GLAConfig.model_type, GLAConfig)
AutoModel.register(GLAConfig, GLAModel)
AutoModelForCausalLM.register(GLAConfig, GLAForCausalLM)


__all__ = ['GLAConfig', 'GLAForCausalLM', 'GLAModel']
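Because this `__init__` registers the GLA classes with the `transformers` Auto registry at import time, the stock Auto API can resolve the `'gla'` model type once the module has been imported. A minimal usage sketch (assumes `fla` is installed; `for_model` and `from_config` are standard `transformers` APIs):

```python
from transformers import AutoConfig, AutoModelForCausalLM

import fla.models.gla  # noqa: F401  (importing runs the registrations above)

config = AutoConfig.for_model('gla')              # resolves to GLAConfig
model = AutoModelForCausalLM.from_config(config)  # builds a GLAForCausalLM
print(type(config).__name__, type(model).__name__)
```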
fla/models/gla/configuration_gla.py
ADDED
|
@@ -0,0 +1,80 @@

# -*- coding: utf-8 -*-

from typing import Optional

from transformers.configuration_utils import PretrainedConfig


class GLAConfig(PretrainedConfig):

    model_type = 'gla'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(
        self,
        vocab_size: int = 32000,
        hidden_size: int = 2048,
        expand_k: float = 0.5,
        expand_v: float = 1,
        hidden_ratio: Optional[int] = 4,
        intermediate_size: Optional[int] = None,
        num_hidden_layers: int = 24,
        num_heads: int = 4,
        num_kv_heads: Optional[int] = None,
        feature_map: Optional[str] = None,
        attn_mode: str = "chunk",
        use_short_conv: bool = False,
        conv_size: int = 4,
        share_conv_kernel: bool = True,
        use_output_gate: bool = True,
        clamp_min: Optional[float] = None,
        hidden_act: str = "swish",
        max_position_embeddings: int = 2048,
        elementwise_affine: Optional[bool] = True,
        norm_eps: float = 1e-6,
        use_gk: bool = True,
        use_gv: bool = False,
        use_cache: bool = True,
        pad_token_id: Optional[int] = None,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        tie_word_embeddings: bool = False,
        initializer_range: float = 0.02,
        fuse_norm: bool = True,
        fuse_cross_entropy: bool = True,
        **kwargs
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.expand_k = expand_k
        self.expand_v = expand_v
        self.hidden_ratio = hidden_ratio
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_heads = num_heads
        self.num_kv_heads = num_kv_heads
        self.feature_map = feature_map
        self.attn_mode = attn_mode
        self.clamp_min = clamp_min
        self.hidden_act = hidden_act
        self.elementwise_affine = elementwise_affine
        self.norm_eps = norm_eps
        self.use_gk = use_gk
        self.use_gv = use_gv
        self.use_cache = use_cache
        self.initializer_range = initializer_range
        self.fuse_norm = fuse_norm
        self.fuse_cross_entropy = fuse_cross_entropy
        self.use_short_conv = use_short_conv
        self.conv_size = conv_size
        self.share_conv_kernel = share_conv_kernel
        self.use_output_gate = use_output_gate

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
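`expand_k`/`expand_v` scale the key/value projection widths relative to `hidden_size` inside the `GatedLinearAttention` layer this config feeds, so the defaults halve the key dimension; that reading follows `fla/layers/gla.py` elsewhere in this commit rather than anything in this file. A deliberately tiny configuration for smoke tests:

```python
from fla.models.gla import GLAConfig

config = GLAConfig(
    vocab_size=1000,      # every keyword below matches the __init__ signature above
    hidden_size=256,
    num_hidden_layers=2,
    num_heads=4,
)
# default expand_k=0.5, expand_v=1 -> 128-dim keys, 256-dim values in total
print(int(config.hidden_size * config.expand_k), int(config.hidden_size * config.expand_v))
```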
fla/models/gla/modeling_gla.py
ADDED
|
@@ -0,0 +1,403 @@

# -*- coding: utf-8 -*-

from __future__ import annotations

import math
import warnings
from typing import List, Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.utils.checkpoint
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (BaseModelOutputWithPast,
                                           CausalLMOutputWithPast)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging

from fla.layers.gla import GatedLinearAttention
from fla.models.gla.configuration_gla import GLAConfig
from fla.models.utils import RecurrentCache
from fla.modules import FusedCrossEntropyLoss, RMSNorm
from fla.modules.activations import swiglu_linear

logger = logging.get_logger(__name__)


class GLAMLP(nn.Module):

    def __init__(
        self,
        hidden_size: int,
        hidden_ratio: Optional[int] = None,
        intermediate_size: Optional[int] = None,
        hidden_act: str = 'swish'
    ) -> None:
        super().__init__()

        self.hidden_size = hidden_size
        # the final number of params is `hidden_ratio * hidden_size^2`
        # `intermediate_size` is chosen to be a multiple of 256 closest to `2/3 * hidden_size * hidden_ratio`
        if hidden_ratio is None:
            hidden_ratio = 4
        if intermediate_size is None:
            intermediate_size = int(hidden_size * hidden_ratio * 2 / 3)
            intermediate_size = 256 * ((intermediate_size + 256 - 1) // 256)
        self.hidden_ratio = hidden_ratio
        self.intermediate_size = intermediate_size

        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size * 2, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[hidden_act]

    def forward(self, x):
        y = self.gate_proj(x)
        gate, y = y.chunk(2, -1)
        return swiglu_linear(gate, y, self.down_proj.weight, self.down_proj.bias)


class GLABlock(nn.Module):

    def __init__(self, config: GLAConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size

        self.attn_norm = RMSNorm(hidden_size=config.hidden_size, eps=config.norm_eps)
        self.attn = GatedLinearAttention(
            mode=config.attn_mode,
            hidden_size=config.hidden_size,
            expand_k=config.expand_k,
            expand_v=config.expand_v,
            num_heads=config.num_heads,
            num_kv_heads=config.num_kv_heads,
            feature_map=config.feature_map,
            use_short_conv=config.use_short_conv,
            conv_size=config.conv_size,
            share_conv_kernel=config.share_conv_kernel,
            use_output_gate=config.use_output_gate,
            gate_fn=config.hidden_act,
            elementwise_affine=config.elementwise_affine,
            norm_eps=config.norm_eps,
            clamp_min=config.clamp_min,
            fuse_norm=config.fuse_norm,
            layer_idx=layer_idx
        )
        self.mlp_norm = RMSNorm(hidden_size=config.hidden_size, eps=config.norm_eps)
        self.mlp = GLAMLP(
            hidden_size=config.hidden_size,
            hidden_ratio=config.hidden_ratio,
            intermediate_size=config.intermediate_size,
            hidden_act=config.hidden_act
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Tuple[List[torch.Tensor]]] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        residual = hidden_states
        hidden_states = self.attn_norm(hidden_states)
        hidden_states, attentions, past_key_values = self.attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions
        )
        hidden_states, residual = self.mlp_norm(hidden_states, residual, True)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states, attentions, past_key_values)

        return outputs


class GLAPreTrainedModel(PreTrainedModel):

    config_class = GLAConfig
    supports_gradient_checkpointing = True
    _no_split_modules = ['GLABlock']

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(
        self,
        module: nn.Module,
        rescale_prenorm_residual: bool = True,
        num_residuals_per_layer: int = 2,
    ):
        if isinstance(module, (nn.Linear, nn.Conv1d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()

        if rescale_prenorm_residual:
            # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
            #   > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
            #   > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
            #   > -- GPT-2 :: https://openai.com/blog/better-language-models/
            #
            # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
            for name, p in module.named_parameters():
                if name in ["o_proj.weight", "down_proj.weight"]:
                    # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
                    # Following Pytorch init, except scale by 1/sqrt(2 * n_layer)
                    # We need to reinit p since this code could be called multiple times
                    # Having just p *= scale would repeatedly scale it down
                    with torch.no_grad():
                        p /= math.sqrt(num_residuals_per_layer * self.config.num_hidden_layers)


class GLAModel(GLAPreTrainedModel):

    def __init__(self, config: GLAConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList([GLABlock(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
        self.norm = RMSNorm(config.hidden_size, eps=config.norm_eps)

        self.gradient_checkpointing = False

        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings

    def set_input_embeddings(self, value):
        self.embeddings = value

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,  # noqa
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Tuple[List[torch.Tensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        if output_attentions:
            warnings.warn("`GLAModel` does not support `output_attentions` for now, setting it to `False`.")
            output_attentions = False
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            batch_size = input_ids.shape[0]
        elif inputs_embeds is not None:
            batch_size = inputs_embeds.shape[0]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embeddings(input_ids)
        hidden_states = inputs_embeds

        if use_cache:
            if past_key_values is None:
                past_key_values = [layer.attn.init_state(batch_size) for layer in self.layers]
            if not isinstance(past_key_values, RecurrentCache):
                past_key_values = RecurrentCache.from_legacy_cache(past_key_values)

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        all_hidden_states = () if output_hidden_states else None
        all_attns = () if output_attentions else None
        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                hidden_states, attentions, past_key_values = self._gradient_checkpointing_func(
                    layer.__call__,
                    hidden_states,
                    attention_mask,
                    past_key_values,
                    use_cache,
                    output_attentions
                )
            else:
                hidden_states, attentions, past_key_values = layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    past_key_values=past_key_values,
                    use_cache=use_cache,
                    output_attentions=output_attentions
                )

            if output_attentions:
                all_attns += (attentions,)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = None
        if use_cache:
            next_cache = past_key_values.to_legacy_cache()
        if not return_dict:
            return tuple(x for x in [hidden_states, next_cache, all_hidden_states, all_attns] if x is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_attns
        )


class GLAForCausalLM(GLAPreTrainedModel):

    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = GLAModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embeddings

    def set_input_embeddings(self, value):
        self.model.embeddings = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    def generate(self, *args, **kwargs):
        try:
            return super().generate(*args, **kwargs)
        except AttributeError as exception:
            if 'past_key_values' in str(exception):
                raise AttributeError(
                    f"You tried to call `generate` with a decoding strategy that manipulates `past_key_values`, "
                    f"which is not supported for {self.__class__.__name__}. "
                    f"Try another generation strategy instead. "
                    f"For the available generation strategies, check this doc: "
                    f"https://huggingface.co/docs/transformers/en/generation_strategies#decoding-strategies"
                )
            else:
                raise exception

    def prepare_inputs_for_generation(
        self,
        input_ids: torch.LongTensor = None,
        past_key_values: Optional[Tuple[List[torch.Tensor]]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        **kwargs
    ):
        # keep only the last token of `input_ids` if `past_key_values` is passed along
        if past_key_values is not None:
            if not isinstance(past_key_values, RecurrentCache):
                past_key_values = RecurrentCache.from_legacy_cache(past_key_values, input_ids.shape[1] - 1)
            input_ids, attention_mask = input_ids[:, -1:], attention_mask[:, -1:]
        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {'inputs_embeds': inputs_embeds}
        else:
            # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
            # recompiles graphs as the stride of the inputs is a guard.
            # Ref: https://github.com/huggingface/transformers/pull/29114
            # TODO: use `next_tokens` directly instead.
            model_inputs = {'input_ids': input_ids.contiguous()}

        model_inputs.update({
            'past_key_values': past_key_values,
            'use_cache': kwargs.get('use_cache'),
            'attention_mask': attention_mask,
        })
        return model_inputs

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        past_key_values: Optional[Tuple[List[torch.Tensor]]] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict
        )

        hidden_states = outputs[0]
        logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            if self.config.fuse_cross_entropy:
                loss_fct = FusedCrossEntropyLoss(inplace_backward=True)
            else:
                loss_fct = nn.CrossEntropyLoss()
            # Enable model parallelism
            labels = labels.to(logits.device)
            labels = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], loss_fct.ignore_index)), 1)
            loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
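One detail worth noting in `GLAForCausalLM.forward`: rather than slicing the logits, it shifts `labels` one step left and pads the final position with `ignore_index`, so logits and labels stay the same length. A small smoke-test sketch (the chunked attention kernels in `fla` are written in Triton, so this assumes a CUDA device; `fuse_cross_entropy=False` keeps the loss on plain `nn.CrossEntropyLoss`):

```python
import torch
from fla.models.gla import GLAConfig, GLAForCausalLM

config = GLAConfig(vocab_size=1000, hidden_size=128, num_hidden_layers=2,
                   num_heads=2, fuse_cross_entropy=False)
model = GLAForCausalLM(config).cuda()

input_ids = torch.randint(0, 1000, (1, 8), device='cuda')
out = model(input_ids=input_ids, labels=input_ids)
# positions 0..6 predict tokens 1..7; the padded last label is ignored by the loss
print(out.loss.item(), tuple(out.logits.shape))  # scalar loss, (1, 8, 1000)
```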
fla/models/hgrn/__init__.py
ADDED
|
@@ -0,0 +1,13 @@

# -*- coding: utf-8 -*-

from transformers import AutoConfig, AutoModel, AutoModelForCausalLM

from fla.models.hgrn.configuration_hgrn import HGRNConfig
from fla.models.hgrn.modeling_hgrn import HGRNForCausalLM, HGRNModel

AutoConfig.register(HGRNConfig.model_type, HGRNConfig)
AutoModel.register(HGRNConfig, HGRNModel)
AutoModelForCausalLM.register(HGRNConfig, HGRNForCausalLM)


__all__ = ['HGRNConfig', 'HGRNForCausalLM', 'HGRNModel']
fla/models/hgrn/configuration_hgrn.py
ADDED
|
@@ -0,0 +1,66 @@

# -*- coding: utf-8 -*-

from typing import Optional

from transformers.configuration_utils import PretrainedConfig


class HGRNConfig(PretrainedConfig):

    model_type = 'hgrn'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(
        self,
        attn_mode: str = "chunk",
        vocab_size: int = 32000,
        hidden_size: int = 2048,
        num_hidden_layers: int = 24,
        num_heads: Optional[int] = 1,
        expand_ratio: Optional[int] = 1,
        use_short_conv: bool = False,
        conv_size: int = 4,
        share_conv_kernel: bool = True,
        use_lower_bound: bool = True,
        hidden_ratio: Optional[int] = 4,
        intermediate_size: Optional[int] = None,
        hidden_act: str = "swish",
        max_position_embeddings: int = 2048,
        elementwise_affine: Optional[bool] = True,
        norm_eps: float = 1e-6,
        use_cache: bool = True,
        pad_token_id: Optional[int] = None,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        tie_word_embeddings: bool = False,
        initializer_range: float = 0.02,
        fuse_cross_entropy: bool = True,
        **kwargs
    ):
        self.attn_mode = attn_mode
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_heads = num_heads
        self.expand_ratio = expand_ratio
        self.use_short_conv = use_short_conv
        self.conv_size = conv_size
        self.share_conv_kernel = share_conv_kernel
        self.use_lower_bound = use_lower_bound
        self.hidden_ratio = hidden_ratio
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.elementwise_affine = elementwise_affine
        self.norm_eps = norm_eps
        self.use_cache = use_cache
        self.initializer_range = initializer_range
        self.fuse_cross_entropy = fuse_cross_entropy

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
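`use_lower_bound=True` above gives each layer a learned floor on its forget gates: `HGRNModel` in the next file holds a `(num_hidden_layers, hidden_size)` parameter, softmaxes it across layers, and takes a cumulative sum shifted so the first layer's floor is zero, which makes the floors grow monotonically with depth. A small numeric sketch of that transform (plain PyTorch, not part of the commit):

```python
import torch

num_hidden_layers, hidden_size = 4, 3
lower_bounds = torch.zeros(num_hidden_layers, hidden_size)  # the parameter at init

lb = lower_bounds.softmax(0)   # uniform 1/num_layers per layer at init
lb = lb.cumsum(0) - lb[0]      # shift so layer 0 has a zero lower bound
print(lb[:, 0])                # tensor([0.0000, 0.2500, 0.5000, 0.7500])
```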
fla/models/hgrn/modeling_hgrn.py
ADDED
|
@@ -0,0 +1,407 @@

# -*- coding: utf-8 -*-

from __future__ import annotations

import math
import warnings
from typing import List, Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.utils.checkpoint
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (BaseModelOutputWithPast,
                                           CausalLMOutputWithPast)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging

from fla.layers.hgrn import HGRNAttention
from fla.models.hgrn.configuration_hgrn import HGRNConfig
from fla.models.utils import RecurrentCache
from fla.modules import FusedCrossEntropyLoss, RMSNorm
from fla.modules.activations import swiglu_linear

logger = logging.get_logger(__name__)


class HGRNMLP(nn.Module):

    def __init__(
        self,
        hidden_size: int,
        hidden_ratio: Optional[int] = None,
        intermediate_size: Optional[int] = None,
        hidden_act: str = 'swish'
    ) -> None:
        super().__init__()

        self.hidden_size = hidden_size
        # the final number of params is `hidden_ratio * hidden_size^2`
        # `intermediate_size` is chosen to be a multiple of 256 closest to `2/3 * hidden_size * hidden_ratio`
        if hidden_ratio is None:
            hidden_ratio = 4
        if intermediate_size is None:
            intermediate_size = int(hidden_size * hidden_ratio * 2 / 3)
            intermediate_size = 256 * ((intermediate_size + 256 - 1) // 256)
        self.hidden_ratio = hidden_ratio
        self.intermediate_size = intermediate_size

        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size * 2, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[hidden_act]

    def forward(self, x):
        y = self.gate_proj(x)
        gate, y = y.chunk(2, -1)
        return swiglu_linear(gate, y, self.down_proj.weight, self.down_proj.bias)


class HGRNBlock(nn.Module):

    def __init__(self, config: HGRNConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size

        self.attn_norm = RMSNorm(hidden_size=config.hidden_size, eps=config.norm_eps)
        self.attn = HGRNAttention(
            mode=config.attn_mode,
            hidden_size=config.hidden_size,
            num_heads=config.num_heads,
            expand_ratio=config.expand_ratio,
            use_short_conv=config.use_short_conv,
            conv_size=config.conv_size,
            share_conv_kernel=config.share_conv_kernel,
            elementwise_affine=config.elementwise_affine,
            norm_eps=config.norm_eps,
            layer_idx=layer_idx
        )
        self.mlp_norm = RMSNorm(hidden_size=config.hidden_size, eps=config.norm_eps)
        self.mlp = HGRNMLP(
            hidden_size=config.hidden_size,
            hidden_ratio=config.hidden_ratio,
            intermediate_size=config.intermediate_size,
            hidden_act=config.hidden_act
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Tuple[List[torch.Tensor]]] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
        lower_bound: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        residual = hidden_states
        hidden_states = self.attn_norm(hidden_states)
        hidden_states, attentions, past_key_values = self.attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            lower_bound=lower_bound
        )
        hidden_states, residual = self.mlp_norm(hidden_states, residual, True)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states, attentions, past_key_values)

        return outputs


class HGRNPreTrainedModel(PreTrainedModel):

    config_class = HGRNConfig
    supports_gradient_checkpointing = True
    _no_split_modules = ['HGRNBlock']

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(
        self,
        module: nn.Module,
        rescale_prenorm_residual: bool = True,
        num_residuals_per_layer: int = 2,
    ):
        if isinstance(module, (nn.Linear, nn.Conv1d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()

        if rescale_prenorm_residual:
            # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
            #   > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
            #   > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
            #   > -- GPT-2 :: https://openai.com/blog/better-language-models/
            #
            # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
            for name, p in module.named_parameters():
                if name in ["o_proj.weight", "down_proj.weight"]:
                    # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
                    # Following Pytorch init, except scale by 1/sqrt(2 * n_layer)
                    # We need to reinit p since this code could be called multiple times
                    # Having just p *= scale would repeatedly scale it down
                    with torch.no_grad():
                        p /= math.sqrt(num_residuals_per_layer * self.config.num_hidden_layers)


class HGRNModel(HGRNPreTrainedModel):

    def __init__(self, config: HGRNConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        if config.use_lower_bound:
            self.lower_bounds = nn.Parameter(torch.zeros(config.num_hidden_layers, config.hidden_size))
        self.layers = nn.ModuleList([HGRNBlock(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
        self.norm = RMSNorm(config.hidden_size, eps=config.norm_eps)

        self.gradient_checkpointing = False

        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings

    def set_input_embeddings(self, value):
        self.embeddings = value

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,  # noqa
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Tuple[List[torch.Tensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        if output_attentions:
            warnings.warn("`HGRNModel` does not support `output_attentions` for now, setting it to `False`.")
            output_attentions = False
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            batch_size = input_ids.shape[0]
        elif inputs_embeds is not None:
            batch_size = inputs_embeds.shape[0]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embeddings(input_ids)
        hidden_states = inputs_embeds

        if use_cache:
            if past_key_values is None:
                past_key_values = [layer.attn.init_state(batch_size) for layer in self.layers]
            if not isinstance(past_key_values, RecurrentCache):
                past_key_values = RecurrentCache.from_legacy_cache(past_key_values)

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        all_hidden_states = () if output_hidden_states else None
        all_attns = () if output_attentions else None

        if self.config.use_lower_bound:
            # softmax over layers + shifted cumsum: per-layer forget-gate floors that grow with depth
            lower_bounds = self.lower_bounds.softmax(0)
            lower_bounds = lower_bounds.cumsum(0) - lower_bounds[0]
        for i, layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            lower_bound = lower_bounds[i] if self.config.use_lower_bound else None
            if self.gradient_checkpointing and self.training:
                hidden_states, attentions, past_key_values = self._gradient_checkpointing_func(
                    layer.__call__,
                    hidden_states,
                    attention_mask,
                    past_key_values,
                    use_cache,
                    output_attentions,
                    lower_bound
                )
            else:
                hidden_states, attentions, past_key_values = layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    past_key_values=past_key_values,
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                    lower_bound=lower_bound
                )

            if output_attentions:
                all_attns += (attentions,)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = None
        if use_cache:
            next_cache = past_key_values.to_legacy_cache()
        if not return_dict:
            return tuple(x for x in [hidden_states, next_cache, all_hidden_states, all_attns] if x is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_attns
        )


class HGRNForCausalLM(HGRNPreTrainedModel):

    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = HGRNModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embeddings

    def set_input_embeddings(self, value):
        self.model.embeddings = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder
| 305 |
+
|
| 306 |
+
def get_decoder(self):
|
| 307 |
+
return self.model
|
| 308 |
+
|
| 309 |
+
def generate(self, *args, **kwargs):
|
| 310 |
+
try:
|
| 311 |
+
return super().generate(*args, **kwargs)
|
| 312 |
+
except AttributeError as exception:
|
| 313 |
+
if 'past_key_values' in str(exception):
|
| 314 |
+
raise AttributeError(
|
| 315 |
+
f"You tried to call `generate` with a decoding strategy that manipulates `past_key_values`, "
|
| 316 |
+
f"which is not supported for {self.__class__.__name__}. "
|
| 317 |
+
f"Try another generation strategy instead. "
|
| 318 |
+
f"For the available generation strategies, check this doc: "
|
| 319 |
+
f"https://huggingface.co/docs/transformers/en/generation_strategies#decoding-strategies"
|
| 320 |
+
)
|
| 321 |
+
else:
|
| 322 |
+
raise exception
|
| 323 |
+
|
| 324 |
+
def prepare_inputs_for_generation(
|
| 325 |
+
self,
|
| 326 |
+
input_ids: torch.LongTensor = None,
|
| 327 |
+
past_key_values: Optional[Tuple[List[torch.Tensor]]] = None,
|
| 328 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 329 |
+
inputs_embeds: Optional[torch.Tensor] = None,
|
| 330 |
+
**kwargs
|
| 331 |
+
):
|
| 332 |
+
# only last token for `inputs_ids` if the `past_key_values` is passed along.
|
| 333 |
+
if past_key_values is not None:
|
| 334 |
+
if not isinstance(past_key_values, RecurrentCache):
|
| 335 |
+
past_key_values = RecurrentCache.from_legacy_cache(past_key_values, input_ids.shape[1] - 1)
|
| 336 |
+
input_ids, attention_mask = input_ids[:, -1:], attention_mask[:, -1:]
|
| 337 |
+
# if `inputs_embeds` are passed, we only want to use them in the 1st generation step
|
| 338 |
+
if inputs_embeds is not None and past_key_values is None:
|
| 339 |
+
model_inputs = {'inputs_embeds': inputs_embeds}
|
| 340 |
+
else:
|
| 341 |
+
# The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
|
| 342 |
+
# recompiles graphs as the stride of the inputs is a guard.
|
| 343 |
+
# Ref: https://github.com/huggingface/transformers/pull/29114
|
| 344 |
+
# TODO: use `next_tokens` directly instead.
|
| 345 |
+
model_inputs = {'input_ids': input_ids.contiguous()}
|
| 346 |
+
|
| 347 |
+
model_inputs.update({
|
| 348 |
+
'past_key_values': past_key_values,
|
| 349 |
+
'use_cache': kwargs.get('use_cache'),
|
| 350 |
+
'attention_mask': attention_mask,
|
| 351 |
+
})
|
| 352 |
+
return model_inputs
|
| 353 |
+
|
| 354 |
+
def forward(
|
| 355 |
+
self,
|
| 356 |
+
input_ids: torch.LongTensor = None,
|
| 357 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 358 |
+
inputs_embeds: Optional[torch.Tensor] = None,
|
| 359 |
+
past_key_values: Optional[Tuple[List[torch.Tensor]]] = None,
|
| 360 |
+
labels: Optional[torch.LongTensor] = None,
|
| 361 |
+
use_cache: Optional[bool] = None,
|
| 362 |
+
output_attentions: Optional[bool] = None,
|
| 363 |
+
output_hidden_states: Optional[bool] = None,
|
| 364 |
+
return_dict: Optional[bool] = None,
|
| 365 |
+
) -> Union[Tuple, CausalLMOutputWithPast]:
|
| 366 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| 367 |
+
output_hidden_states = (
|
| 368 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 369 |
+
)
|
| 370 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 371 |
+
|
| 372 |
+
outputs = self.model(
|
| 373 |
+
input_ids=input_ids,
|
| 374 |
+
attention_mask=attention_mask,
|
| 375 |
+
inputs_embeds=inputs_embeds,
|
| 376 |
+
past_key_values=past_key_values,
|
| 377 |
+
use_cache=use_cache,
|
| 378 |
+
output_attentions=output_attentions,
|
| 379 |
+
output_hidden_states=output_hidden_states,
|
| 380 |
+
return_dict=return_dict
|
| 381 |
+
)
|
| 382 |
+
|
| 383 |
+
hidden_states = outputs[0]
|
| 384 |
+
logits = self.lm_head(hidden_states)
|
| 385 |
+
|
| 386 |
+
loss = None
|
| 387 |
+
if labels is not None:
|
| 388 |
+
if self.config.fuse_cross_entropy:
|
| 389 |
+
loss_fct = FusedCrossEntropyLoss(inplace_backward=True)
|
| 390 |
+
else:
|
| 391 |
+
loss_fct = nn.CrossEntropyLoss()
|
| 392 |
+
# Enable model parallelism
|
| 393 |
+
labels = labels.to(logits.device)
|
| 394 |
+
labels = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], loss_fct.ignore_index)), 1)
|
| 395 |
+
loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
|
| 396 |
+
|
| 397 |
+
if not return_dict:
|
| 398 |
+
output = (logits,) + outputs[1:]
|
| 399 |
+
return (loss,) + output if loss is not None else output
|
| 400 |
+
|
| 401 |
+
return CausalLMOutputWithPast(
|
| 402 |
+
loss=loss,
|
| 403 |
+
logits=logits,
|
| 404 |
+
past_key_values=outputs.past_key_values,
|
| 405 |
+
hidden_states=outputs.hidden_states,
|
| 406 |
+
attentions=outputs.attentions,
|
| 407 |
+
)
|
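A quick note on the `rescale_prenorm_residual` branch above: with two residual additions per block, dividing `o_proj.weight` and `down_proj.weight` by `sqrt(2 * num_hidden_layers)` shrinks their effective init scale. A minimal sketch of the arithmetic (the 24-layer depth and 0.02 std are just the config defaults here, not measurements):

import math

initializer_range = 0.02   # default `initializer_range` in these configs
num_hidden_layers = 24     # default depth
scale = math.sqrt(2 * num_hidden_layers)
print(scale)                      # ~6.93
print(initializer_range / scale)  # effective std after rescaling, ~0.00289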
fla/models/hgrn2/__init__.py
ADDED
@@ -0,0 +1,13 @@
+# -*- coding: utf-8 -*-
+
+from transformers import AutoConfig, AutoModel, AutoModelForCausalLM
+
+from fla.models.hgrn2.configuration_hgrn2 import HGRN2Config
+from fla.models.hgrn2.modeling_hgrn2 import HGRN2ForCausalLM, HGRN2Model
+
+AutoConfig.register(HGRN2Config.model_type, HGRN2Config)
+AutoModel.register(HGRN2Config, HGRN2Model)
+AutoModelForCausalLM.register(HGRN2Config, HGRN2ForCausalLM)
+
+
+__all__ = ['HGRN2Config', 'HGRN2ForCausalLM', 'HGRN2Model']
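Because this module registers `HGRN2Config` with the `Auto*` classes at import time, the model type becomes reachable through the standard transformers entry points. A minimal sketch, assuming `fla` is installed and importing this submodule has run the registrations:

from transformers import AutoConfig, AutoModelForCausalLM

import fla.models.hgrn2  # noqa: F401  -- side effect: registers the 'hgrn2' model type

config = AutoConfig.for_model('hgrn2')            # builds a default HGRN2Config
model = AutoModelForCausalLM.from_config(config)  # an untrained HGRN2ForCausalLM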
fla/models/hgrn2/configuration_hgrn2.py
ADDED
@@ -0,0 +1,66 @@
+# -*- coding: utf-8 -*-
+
+from typing import Optional
+
+from transformers.configuration_utils import PretrainedConfig
+
+
+class HGRN2Config(PretrainedConfig):
+
+    model_type = 'hgrn2'
+    keys_to_ignore_at_inference = ['past_key_values']
+
+    def __init__(
+        self,
+        vocab_size: int = 32000,
+        hidden_size: int = 2048,
+        num_hidden_layers: int = 24,
+        attn_mode: str = "chunk",
+        num_heads: Optional[int] = None,
+        expand_ratio: Optional[int] = 128,
+        use_short_conv: bool = False,
+        conv_size: int = 4,
+        share_conv_kernel: bool = True,
+        use_lower_bound: bool = True,
+        hidden_ratio: Optional[int] = 4,
+        intermediate_size: Optional[int] = None,
+        hidden_act: str = "swish",
+        max_position_embeddings: int = 2048,
+        elementwise_affine: Optional[bool] = True,
+        norm_eps: float = 1e-6,
+        use_cache: bool = True,
+        pad_token_id: int = None,
+        bos_token_id: int = 1,
+        eos_token_id: int = 2,
+        tie_word_embeddings: bool = False,
+        initializer_range: float = 0.02,
+        fuse_cross_entropy: bool = True,
+        **kwargs
+    ):
+        self.vocab_size = vocab_size
+        self.max_position_embeddings = max_position_embeddings
+        self.hidden_size = hidden_size
+        self.num_hidden_layers = num_hidden_layers
+        self.attn_mode = attn_mode
+        self.num_heads = num_heads
+        self.expand_ratio = expand_ratio
+        self.use_short_conv = use_short_conv
+        self.conv_size = conv_size
+        self.share_conv_kernel = share_conv_kernel
+        self.use_lower_bound = use_lower_bound
+        self.hidden_ratio = hidden_ratio
+        self.intermediate_size = intermediate_size
+        self.hidden_act = hidden_act
+        self.elementwise_affine = elementwise_affine
+        self.norm_eps = norm_eps
+        self.use_cache = use_cache
+        self.initializer_range = initializer_range
+        self.fuse_cross_entropy = fuse_cross_entropy
+
+        super().__init__(
+            pad_token_id=pad_token_id,
+            bos_token_id=bos_token_id,
+            eos_token_id=eos_token_id,
+            tie_word_embeddings=tie_word_embeddings,
+            **kwargs,
+        )
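Since `HGRN2Config` subclasses `PretrainedConfig`, it inherits the usual JSON round trip. A small smoke-test sketch (the tiny sizes and the `tiny-hgrn2` path are illustrative only):

from fla.models.hgrn2 import HGRN2Config

config = HGRN2Config(vocab_size=1000, hidden_size=128, num_hidden_layers=2)
config.save_pretrained('tiny-hgrn2')                  # writes tiny-hgrn2/config.json
reloaded = HGRN2Config.from_pretrained('tiny-hgrn2')  # reads it back
assert reloaded.model_type == 'hgrn2'
assert reloaded.hidden_size == 128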
fla/models/hgrn2/modeling_hgrn2.py
ADDED
@@ -0,0 +1,407 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import annotations
+
+import math
+import warnings
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.nn as nn
+import torch.utils.checkpoint
+from transformers.activations import ACT2FN
+from transformers.modeling_outputs import (BaseModelOutputWithPast,
+                                           CausalLMOutputWithPast)
+from transformers.modeling_utils import PreTrainedModel
+from transformers.utils import logging
+
+from fla.layers.hgrn2 import HGRN2Attention
+from fla.models.hgrn2.configuration_hgrn2 import HGRN2Config
+from fla.models.utils import RecurrentCache
+from fla.modules import FusedCrossEntropyLoss, RMSNorm
+from fla.modules.activations import swiglu_linear
+
+logger = logging.get_logger(__name__)
+
+
+class HGRN2MLP(nn.Module):
+
+    def __init__(
+        self,
+        hidden_size: int,
+        hidden_ratio: Optional[int] = None,
+        intermediate_size: Optional[int] = None,
+        hidden_act: str = 'swish'
+    ) -> HGRN2MLP:
+        super().__init__()
+
+        self.hidden_size = hidden_size
+        # the final number of params is `hidden_ratio * hidden_size^2`
+        # `intermediate_size` is chosen to be a multiple of 256 closest to `2/3 * hidden_size * hidden_ratio`
+        if hidden_ratio is None:
+            hidden_ratio = 4
+        if intermediate_size is None:
+            intermediate_size = int(hidden_size * hidden_ratio * 2 / 3)
+            intermediate_size = 256 * ((intermediate_size + 256 - 1) // 256)
+        self.hidden_ratio = hidden_ratio
+        self.intermediate_size = intermediate_size
+
+        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size * 2, bias=False)
+        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
+        self.act_fn = ACT2FN[hidden_act]
+
+    def forward(self, x):
+        y = self.gate_proj(x)
+        gate, y = y.chunk(2, -1)
+        return swiglu_linear(gate, y, self.down_proj.weight, self.down_proj.bias)
+
+
+class HGRN2Block(nn.Module):
+    def __init__(self, config: HGRN2Config, layer_idx: int):
+        super().__init__()
+        self.hidden_size = config.hidden_size
+
+        self.attn_norm = RMSNorm(hidden_size=config.hidden_size, eps=config.norm_eps)
+        self.attn = HGRN2Attention(
+            mode=config.attn_mode,
+            hidden_size=config.hidden_size,
+            num_heads=config.num_heads,
+            expand_ratio=config.expand_ratio,
+            use_short_conv=config.use_short_conv,
+            conv_size=config.conv_size,
+            share_conv_kernel=config.share_conv_kernel,
+            elementwise_affine=config.elementwise_affine,
+            norm_eps=config.norm_eps,
+            layer_idx=layer_idx
+        )
+        self.mlp_norm = RMSNorm(hidden_size=config.hidden_size, eps=config.norm_eps)
+        self.mlp = HGRN2MLP(
+            hidden_size=config.hidden_size,
+            hidden_ratio=config.hidden_ratio,
+            intermediate_size=config.intermediate_size,
+            hidden_act=config.hidden_act
+        )
+
+    def forward(
+        self,
+        hidden_states: torch.Tensor,
+        attention_mask: Optional[torch.Tensor] = None,
+        past_key_values: Optional[Tuple[List[torch.Tensor]]] = None,
+        use_cache: Optional[bool] = False,
+        output_attentions: Optional[bool] = False,
+        lower_bound: Optional[torch.Tensor] = False,
+        **kwargs,
+    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+        residual = hidden_states
+        hidden_states = self.attn_norm(hidden_states)
+        hidden_states, attentions, past_key_values = self.attn(
+            hidden_states=hidden_states,
+            attention_mask=attention_mask,
+            past_key_values=past_key_values,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            lower_bound=lower_bound
+        )
+        hidden_states, residual = self.mlp_norm(hidden_states, residual, True)
+        hidden_states = self.mlp(hidden_states)
+        hidden_states = residual + hidden_states
+
+        outputs = (hidden_states, attentions, past_key_values)
+
+        return outputs
+
+
+class HGRN2PreTrainedModel(PreTrainedModel):
+
+    config_class = HGRN2Config
+    supports_gradient_checkpointing = True
+    _no_split_modules = ['HGRN2Block']
+
+    def __init__(self, *inputs, **kwargs):
+        super().__init__(*inputs, **kwargs)
+
+    def _init_weights(
+        self,
+        module: nn.Module,
+        rescale_prenorm_residual: bool = True,
+        num_residuals_per_layer: int = 2,
+    ):
+        if isinstance(module, (nn.Linear, nn.Conv1d)):
+            # Slightly different from the TF version which uses truncated_normal for initialization
+            # cf https://github.com/pytorch/pytorch/pull/5617
+            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
+            if module.bias is not None:
+                nn.init.zeros_(module.bias)
+        elif isinstance(module, nn.Embedding):
+            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
+            if module.padding_idx is not None:
+                module.weight.data[module.padding_idx].zero_()
+
+        if rescale_prenorm_residual:
+            # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
+            # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
+            # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
+            # > -- GPT-2 :: https://openai.com/blog/better-language-models/
+            #
+            # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
+            for name, p in module.named_parameters():
+                if name in ["o_proj.weight", "down_proj.weight"]:
+                    # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
+                    # Following Pytorch init, except scale by 1/sqrt(2 * n_layer)
+                    # We need to reinit p since this code could be called multiple times
+                    # Having just p *= scale would repeatedly scale it down
+                    with torch.no_grad():
+                        p /= math.sqrt(num_residuals_per_layer * self.config.num_hidden_layers)
+
+
+class HGRN2Model(HGRN2PreTrainedModel):
+
+    def __init__(self, config: HGRN2Config):
+        super().__init__(config)
+        self.padding_idx = config.pad_token_id
+        self.vocab_size = config.vocab_size
+
+        self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
+        if config.use_lower_bound:
+            self.lower_bounds = nn.Parameter(torch.zeros(config.num_hidden_layers, config.hidden_size))
+        self.layers = nn.ModuleList([HGRN2Block(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
+        self.norm = RMSNorm(config.hidden_size, eps=config.norm_eps)
+
+        self.gradient_checkpointing = False
+
+        self.post_init()
+
+    def get_input_embeddings(self):
+        return self.embeddings
+
+    def set_input_embeddings(self, value):
+        self.embeddings = value
+
+    def forward(
+        self,
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.Tensor] = None,  # noqa
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        past_key_values: Optional[Tuple[List[torch.Tensor]]] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None
+    ) -> Union[Tuple, BaseModelOutputWithPast]:
+        if output_attentions:
+            warnings.warn("`HGRN2Model` does not support `output_attentions` now, setting it to `False`.")
+            output_attentions = False
+        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+        use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        # retrieve input_ids and inputs_embeds
+        if input_ids is not None and inputs_embeds is not None:
+            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+        elif input_ids is not None:
+            batch_size = input_ids.shape[0]
+        elif inputs_embeds is not None:
+            batch_size = inputs_embeds.shape[0]
+        else:
+            raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+        if inputs_embeds is None:
+            inputs_embeds = self.embeddings(input_ids)
+        hidden_states = inputs_embeds
+
+        if use_cache:
+            if past_key_values is None:
+                past_key_values = [layer.attn.init_state(batch_size) for layer in self.layers]
+            if not isinstance(past_key_values, RecurrentCache):
+                past_key_values = RecurrentCache.from_legacy_cache(past_key_values)
+
+        if self.gradient_checkpointing and self.training:
+            if use_cache:
+                logger.warning_once(
+                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+                )
+                use_cache = False
+
+        all_hidden_states = () if output_hidden_states else None
+        all_attns = () if output_attentions else None
+
+        if self.config.use_lower_bound:
+            lower_bounds = self.lower_bounds.softmax(0)
+            lower_bounds = lower_bounds.cumsum(0) - lower_bounds[0]
+        for i, layer in enumerate(self.layers):
+            if output_hidden_states:
+                all_hidden_states += (hidden_states,)
+
+            lower_bound = lower_bounds[i] if self.config.use_lower_bound else None
+            if self.gradient_checkpointing and self.training:
+                hidden_states, attentions, past_key_values = self._gradient_checkpointing_func(
+                    layer.__call__,
+                    hidden_states,
+                    attention_mask,
+                    past_key_values,
+                    use_cache,
+                    output_attentions,
+                    lower_bound
+                )
+            else:
+                hidden_states, attentions, past_key_values = layer(
+                    hidden_states,
+                    attention_mask=attention_mask,
+                    past_key_values=past_key_values,
+                    use_cache=use_cache,
+                    output_attentions=output_attentions,
+                    lower_bound=lower_bound
+                )
+
+            if output_attentions:
+                all_attns += (attentions,)
+
+        hidden_states = self.norm(hidden_states)
+
+        # add hidden states from the last decoder layer
+        if output_hidden_states:
+            all_hidden_states += (hidden_states,)
+
+        next_cache = None
+        if use_cache:
+            next_cache = past_key_values.to_legacy_cache()
+        if not return_dict:
+            return tuple(x for x in [hidden_states, next_cache, all_hidden_states, all_attns] if x is not None)
+        return BaseModelOutputWithPast(
+            last_hidden_state=hidden_states,
+            past_key_values=next_cache,
+            hidden_states=all_hidden_states,
+            attentions=all_attns
+        )
+
+
+class HGRN2ForCausalLM(HGRN2PreTrainedModel):
+    _tied_weights_keys = ["lm_head.weight"]
+
+    def __init__(self, config):
+        super().__init__(config)
+        self.model = HGRN2Model(config)
+        self.vocab_size = config.vocab_size
+        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    def get_input_embeddings(self):
+        return self.model.embeddings
+
+    def set_input_embeddings(self, value):
+        self.model.embeddings = value
+
+    def get_output_embeddings(self):
+        return self.lm_head
+
+    def set_output_embeddings(self, new_embeddings):
+        self.lm_head = new_embeddings
+
+    def set_decoder(self, decoder):
+        self.model = decoder
+
+    def get_decoder(self):
+        return self.model
+
+    def generate(self, *args, **kwargs):
+        try:
+            return super().generate(*args, **kwargs)
+        except AttributeError as exception:
+            if 'past_key_values' in str(exception):
+                raise AttributeError(
+                    f"You tried to call `generate` with a decoding strategy that manipulates `past_key_values`, "
+                    f"which is not supported for {self.__class__.__name__}. "
+                    f"Try another generation strategy instead. "
+                    f"For the available generation strategies, check this doc: "
+                    f"https://huggingface.co/docs/transformers/en/generation_strategies#decoding-strategies"
+                )
+            else:
+                raise exception
+
+    def prepare_inputs_for_generation(
+        self,
+        input_ids: torch.LongTensor = None,
+        past_key_values: Optional[Tuple[List[torch.Tensor]]] = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        inputs_embeds: Optional[torch.Tensor] = None,
+        **kwargs
+    ):
+        # only last token for `input_ids` if the `past_key_values` is passed along.
+        if past_key_values is not None:
+            if not isinstance(past_key_values, RecurrentCache):
+                past_key_values = RecurrentCache.from_legacy_cache(past_key_values, input_ids.shape[1] - 1)
+            input_ids, attention_mask = input_ids[:, -1:], attention_mask[:, -1:]
+        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+        if inputs_embeds is not None and past_key_values is None:
+            model_inputs = {'inputs_embeds': inputs_embeds}
+        else:
+            # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
+            # recompiles graphs as the stride of the inputs is a guard.
+            # Ref: https://github.com/huggingface/transformers/pull/29114
+            # TODO: use `next_tokens` directly instead.
+            model_inputs = {'input_ids': input_ids.contiguous()}
+
+        model_inputs.update({
+            'past_key_values': past_key_values,
+            'use_cache': kwargs.get('use_cache'),
+            'attention_mask': attention_mask,
+        })
+        return model_inputs
+
+    def forward(
+        self,
+        input_ids: torch.LongTensor = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        inputs_embeds: Optional[torch.Tensor] = None,
+        past_key_values: Optional[Tuple[List[torch.Tensor]]] = None,
+        labels: Optional[torch.LongTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple, CausalLMOutputWithPast]:
+        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+        output_hidden_states = (
+            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+        )
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        outputs = self.model(
+            input_ids=input_ids,
+            attention_mask=attention_mask,
+            inputs_embeds=inputs_embeds,
+            past_key_values=past_key_values,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict
+        )
+
+        hidden_states = outputs[0]
+        logits = self.lm_head(hidden_states)
+
+        loss = None
+        if labels is not None:
+            if self.config.fuse_cross_entropy:
+                loss_fct = FusedCrossEntropyLoss(inplace_backward=True)
+            else:
+                loss_fct = nn.CrossEntropyLoss()
+            # Enable model parallelism
+            labels = labels.to(logits.device)
+            labels = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], loss_fct.ignore_index)), 1)
+            loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
+
+        if not return_dict:
+            output = (logits,) + outputs[1:]
+            return (loss,) + output if loss is not None else output
+
+        return CausalLMOutputWithPast(
+            loss=loss,
+            logits=logits,
+            past_key_values=outputs.past_key_values,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+        )
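The `HGRN2MLP` sizing comment in the file above is worth unpacking: the SwiGLU intermediate width is `2/3 * hidden_ratio * hidden_size`, rounded up to a multiple of 256 (so "closest" in the comment is really "next"). Worked through for the default `hidden_size=2048`, `hidden_ratio=4`:

hidden_size, hidden_ratio = 2048, 4
intermediate_size = int(hidden_size * hidden_ratio * 2 / 3)       # 5461
intermediate_size = 256 * ((intermediate_size + 256 - 1) // 256)  # ceil to a multiple of 256
print(intermediate_size)  # 5632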
fla/models/linear_attn/__init__.py
ADDED
@@ -0,0 +1,14 @@
+# -*- coding: utf-8 -*-
+
+from transformers import AutoConfig, AutoModel, AutoModelForCausalLM
+
+from fla.models.linear_attn.configuration_linear_attn import \
+    LinearAttentionConfig
+from fla.models.linear_attn.modeling_linear_attn import (
+    LinearAttentionForCausalLM, LinearAttentionModel)
+
+AutoConfig.register(LinearAttentionConfig.model_type, LinearAttentionConfig)
+AutoModel.register(LinearAttentionConfig, LinearAttentionModel)
+AutoModelForCausalLM.register(LinearAttentionConfig, LinearAttentionForCausalLM)
+
+__all__ = ['LinearAttentionConfig', 'LinearAttentionForCausalLM', 'LinearAttentionModel']
fla/models/linear_attn/configuration_linear_attn.py
ADDED
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+
+from typing import Optional
+
+from transformers.configuration_utils import PretrainedConfig
+
+
+class LinearAttentionConfig(PretrainedConfig):
+
+    model_type = 'linear_attn'
+    keys_to_ignore_at_inference = ['past_key_values']
+
+    def __init__(
+        self,
+        vocab_size: int = 32000,
+        hidden_size: int = 2048,
+        expand_k: int = 1,
+        expand_v: int = 1,
+        hidden_ratio: Optional[int] = 4,
+        intermediate_size: Optional[int] = None,
+        num_hidden_layers: int = 24,
+        num_heads: int = 4,
+        attn_mode: str = "fused_chunk",
+        feature_map: str = "elementwise_product",
+        tie_feature_map_qk: bool = False,
+        norm_q: bool = False,
+        norm_k: bool = False,
+        norm_feature_map: bool = False,
+        hidden_act: str = "swish",
+        max_position_embeddings: int = 2048,
+        elementwise_affine: Optional[bool] = True,
+        norm_eps: float = 1e-6,
+        use_cache: bool = True,
+        pad_token_id: int = None,
+        bos_token_id: int = 1,
+        eos_token_id: int = 2,
+        tie_word_embeddings: bool = False,
+        initializer_range: float = 0.02,
+        fuse_cross_entropy: bool = True,
+        **kwargs
+    ):
+        self.vocab_size = vocab_size
+        self.max_position_embeddings = max_position_embeddings
+        self.hidden_size = hidden_size
+        self.expand_k = expand_k
+        self.expand_v = expand_v
+        self.hidden_ratio = hidden_ratio
+        self.intermediate_size = intermediate_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_heads = num_heads
+        self.attn_mode = attn_mode
+        self.feature_map = feature_map
+        self.tie_feature_map_qk = tie_feature_map_qk
+        self.norm_q = norm_q
+        self.norm_k = norm_k
+        self.norm_feature_map = norm_feature_map
+        self.hidden_act = hidden_act
+        self.elementwise_affine = elementwise_affine
+        self.norm_eps = norm_eps
+        self.use_cache = use_cache
+        self.initializer_range = initializer_range
+        self.fuse_cross_entropy = fuse_cross_entropy
+
+        super().__init__(
+            pad_token_id=pad_token_id,
+            bos_token_id=bos_token_id,
+            eos_token_id=eos_token_id,
+            tie_word_embeddings=tie_word_embeddings,
+            **kwargs,
+        )
fla/models/linear_attn/modeling_linear_attn.py
ADDED
|
@@ -0,0 +1,424 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import math
|
| 6 |
+
import warnings
|
| 7 |
+
from typing import List, Optional, Tuple, Union
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
import torch.nn as nn
|
| 11 |
+
import torch.utils.checkpoint
|
| 12 |
+
from transformers.activations import ACT2FN
|
| 13 |
+
from transformers.cache_utils import Cache, DynamicCache
|
| 14 |
+
from transformers.modeling_outputs import (BaseModelOutputWithPast,
|
| 15 |
+
CausalLMOutputWithPast)
|
| 16 |
+
from transformers.modeling_utils import PreTrainedModel
|
| 17 |
+
from transformers.utils import logging
|
| 18 |
+
|
| 19 |
+
from fla.layers.linear_attn import LinearAttention
|
| 20 |
+
from fla.models.linear_attn.configuration_linear_attn import \
|
| 21 |
+
LinearAttentionConfig
|
| 22 |
+
from fla.modules import FusedCrossEntropyLoss, RMSNorm
|
| 23 |
+
from fla.modules.activations import swiglu_linear
|
| 24 |
+
|
| 25 |
+
logger = logging.get_logger(__name__)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class LinearAttentionMLP(nn.Module):
|
| 29 |
+
def __init__(
|
| 30 |
+
self,
|
| 31 |
+
hidden_size: int,
|
| 32 |
+
hidden_ratio: Optional[int] = None,
|
| 33 |
+
intermediate_size: Optional[int] = None,
|
| 34 |
+
hidden_act: str = 'swish'
|
| 35 |
+
) -> LinearAttentionMLP:
|
| 36 |
+
super().__init__()
|
| 37 |
+
|
| 38 |
+
self.hidden_size = hidden_size
|
| 39 |
+
# the final number of params is `hidden_ratio * hidden_size^2`
|
| 40 |
+
# `intermediate_size` is chosen to be a multiple of 256 closest to `2/3 * hidden_size * hidden_ratio`
|
| 41 |
+
if hidden_ratio is None:
|
| 42 |
+
hidden_ratio = 4
|
| 43 |
+
if intermediate_size is None:
|
| 44 |
+
intermediate_size = int(hidden_size * hidden_ratio * 2 / 3)
|
| 45 |
+
intermediate_size = 256 * ((intermediate_size + 256 - 1) // 256)
|
| 46 |
+
self.hidden_ratio = hidden_ratio
|
| 47 |
+
self.intermediate_size = intermediate_size
|
| 48 |
+
|
| 49 |
+
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size * 2, bias=False)
|
| 50 |
+
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
|
| 51 |
+
self.act_fn = ACT2FN[hidden_act]
|
| 52 |
+
|
| 53 |
+
def forward(self, x):
|
| 54 |
+
y = self.gate_proj(x)
|
| 55 |
+
gate, y = y.chunk(2, -1)
|
| 56 |
+
return swiglu_linear(gate, y, self.down_proj.weight, self.down_proj.bias)
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
class LinearAttentionBlock(nn.Module):
|
| 60 |
+
def __init__(self, config: LinearAttentionConfig, layer_idx: int):
|
| 61 |
+
super().__init__()
|
| 62 |
+
self.hidden_size = config.hidden_size
|
| 63 |
+
|
| 64 |
+
self.attn_norm = RMSNorm(hidden_size=config.hidden_size, eps=config.norm_eps)
|
| 65 |
+
self.attn = LinearAttention(
|
| 66 |
+
hidden_size=config.hidden_size,
|
| 67 |
+
expand_k=config.expand_k,
|
| 68 |
+
expand_v=config.expand_v,
|
| 69 |
+
num_heads=config.num_heads,
|
| 70 |
+
mode=config.attn_mode,
|
| 71 |
+
feature_map=config.feature_map,
|
| 72 |
+
tie_feature_map_qk=config.tie_feature_map_qk,
|
| 73 |
+
norm_q=config.norm_q,
|
| 74 |
+
norm_k=config.norm_k,
|
| 75 |
+
do_feature_map_norm=config.norm_feature_map,
|
| 76 |
+
elementwise_affine=config.elementwise_affine,
|
| 77 |
+
norm_eps=config.norm_eps,
|
| 78 |
+
layer_idx=layer_idx
|
| 79 |
+
)
|
| 80 |
+
self.mlp_norm = RMSNorm(hidden_size=config.hidden_size, eps=config.norm_eps)
|
| 81 |
+
self.mlp = LinearAttentionMLP(
|
| 82 |
+
hidden_size=config.hidden_size,
|
| 83 |
+
hidden_ratio=config.hidden_ratio,
|
| 84 |
+
intermediate_size=config.intermediate_size,
|
| 85 |
+
hidden_act=config.hidden_act
|
| 86 |
+
)
|
| 87 |
+
|
| 88 |
+
def forward(
|
| 89 |
+
self,
|
| 90 |
+
hidden_states: torch.Tensor,
|
| 91 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 92 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 93 |
+
past_key_value: Optional[Tuple[torch.Tensor]] = None,
|
| 94 |
+
output_attentions: Optional[bool] = False,
|
| 95 |
+
use_cache: Optional[bool] = False,
|
| 96 |
+
**kwargs,
|
| 97 |
+
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
|
| 98 |
+
|
| 99 |
+
residual = hidden_states
|
| 100 |
+
# currently not supported
|
| 101 |
+
attn_weights, present_key_value = None, None
|
| 102 |
+
|
| 103 |
+
hidden_states = self.attn_norm(hidden_states)
|
| 104 |
+
hidden_states = self.attn(hidden_states)
|
| 105 |
+
hidden_states, residual = self.mlp_norm(hidden_states, residual, True)
|
| 106 |
+
hidden_states = self.mlp(hidden_states)
|
| 107 |
+
hidden_states = residual + hidden_states
|
| 108 |
+
|
| 109 |
+
outputs = (hidden_states,)
|
| 110 |
+
|
| 111 |
+
if output_attentions:
|
| 112 |
+
outputs += (attn_weights,)
|
| 113 |
+
|
| 114 |
+
if use_cache:
|
| 115 |
+
outputs += (present_key_value,)
|
| 116 |
+
|
| 117 |
+
return outputs
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
class LinearAttentionPreTrainedModel(PreTrainedModel):
|
| 121 |
+
config_class = LinearAttentionConfig
|
| 122 |
+
supports_gradient_checkpointing = True
|
| 123 |
+
_no_split_modules = ['LinearAttentionBlock']
|
| 124 |
+
|
| 125 |
+
def __init__(self, *inputs, **kwargs):
|
| 126 |
+
super().__init__(*inputs, **kwargs)
|
| 127 |
+
|
| 128 |
+
def _init_weights(
|
| 129 |
+
self,
|
| 130 |
+
module: nn.Module,
|
| 131 |
+
rescale_prenorm_residual: bool = True,
|
| 132 |
+
num_residuals_per_layer: int = 2,
|
| 133 |
+
):
|
| 134 |
+
if isinstance(module, (nn.Linear, nn.Conv1d)):
|
| 135 |
+
# Slightly different from the TF version which uses truncated_normal for initialization
|
| 136 |
+
# cf https://github.com/pytorch/pytorch/pull/5617
|
| 137 |
+
nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
|
| 138 |
+
if module.bias is not None:
|
| 139 |
+
nn.init.zeros_(module.bias)
|
| 140 |
+
elif isinstance(module, nn.Embedding):
|
| 141 |
+
nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
|
| 142 |
+
if module.padding_idx is not None:
|
| 143 |
+
module.weight.data[module.padding_idx].zero_()
|
| 144 |
+
|
| 145 |
+
if rescale_prenorm_residual:
|
| 146 |
+
# Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
|
| 147 |
+
# > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
|
| 148 |
+
# > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
|
| 149 |
+
# > -- GPT-2 :: https://openai.com/blog/better-language-models/
|
| 150 |
+
#
|
| 151 |
+
# Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
|
| 152 |
+
for name, p in module.named_parameters():
|
| 153 |
+
if name in ["o_proj.weight", "down_proj.weight"]:
|
| 154 |
+
# Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
|
| 155 |
+
# Following Pytorch init, except scale by 1/sqrt(2 * n_layer)
|
| 156 |
+
# We need to reinit p since this code could be called multiple times
|
| 157 |
+
# Having just p *= scale would repeatedly scale it down
|
| 158 |
+
with torch.no_grad():
|
| 159 |
+
p /= math.sqrt(num_residuals_per_layer * self.config.num_hidden_layers)
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
class LinearAttentionModel(LinearAttentionPreTrainedModel):
|
| 163 |
+
|
| 164 |
+
def __init__(self, config: LinearAttentionConfig):
|
| 165 |
+
super().__init__(config)
|
| 166 |
+
self.padding_idx = config.pad_token_id
|
| 167 |
+
self.vocab_size = config.vocab_size
|
| 168 |
+
|
| 169 |
+
self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
|
| 170 |
+
self.layers = nn.ModuleList(
|
| 171 |
+
[LinearAttentionBlock(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
|
| 172 |
+
)
|
| 173 |
+
self.norm = RMSNorm(config.hidden_size, eps=config.norm_eps)
|
| 174 |
+
|
| 175 |
+
self.gradient_checkpointing = False
|
| 176 |
+
|
| 177 |
+
self.post_init()
|
| 178 |
+
|
| 179 |
+
def get_input_embeddings(self):
|
| 180 |
+
return self.embeddings
|
| 181 |
+
|
| 182 |
+
def set_input_embeddings(self, value):
|
| 183 |
+
self.embeddings = value
|
| 184 |
+
|
| 185 |
+
def forward(
|
| 186 |
+
self,
|
| 187 |
+
input_ids: torch.LongTensor = None,
|
| 188 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 189 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 190 |
+
past_key_values: Optional[List[torch.FloatTensor]] = None,
|
| 191 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 192 |
+
use_cache: Optional[bool] = None,
|
| 193 |
+
output_attentions: Optional[bool] = None,
|
| 194 |
+
output_hidden_states: Optional[bool] = None,
|
| 195 |
+
return_dict: Optional[bool] = None,
|
| 196 |
+
) -> Union[Tuple, BaseModelOutputWithPast]:
|
| 197 |
+
if output_attentions:
|
| 198 |
+
warnings.warn(
|
| 199 |
+
"`LinearAttentionModel` does not support output attention weights now, "
|
| 200 |
+
"so `output_attentions` is set to `False`."
|
| 201 |
+
)
|
| 202 |
+
output_attentions = False
|
| 203 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| 204 |
+
output_hidden_states = (
|
| 205 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 206 |
+
)
|
| 207 |
+
use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
|
| 208 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 209 |
+
|
| 210 |
+
# retrieve input_ids and inputs_embeds
|
| 211 |
+
if input_ids is not None and inputs_embeds is not None:
|
| 212 |
+
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
|
| 213 |
+
elif input_ids is not None:
|
| 214 |
+
_, seq_length = input_ids.shape[:2]
|
| 215 |
+
elif inputs_embeds is not None:
|
| 216 |
+
_, seq_length = inputs_embeds.shape[:2]
|
| 217 |
+
else:
|
| 218 |
+
raise ValueError("You have to specify either input_ids or inputs_embeds")
|
| 219 |
+
|
| 220 |
+
past_key_values_length = 0
|
| 221 |
+
if use_cache:
|
| 222 |
+
use_legacy_cache = not isinstance(past_key_values, Cache)
|
| 223 |
+
if use_legacy_cache:
|
| 224 |
+
past_key_values = DynamicCache.from_legacy_cache(past_key_values)
|
| 225 |
+
past_key_values_length = past_key_values.get_usable_length(seq_length)
|
| 226 |
+
|
| 227 |
+
if position_ids is None:
|
| 228 |
+
device = input_ids.device if input_ids is not None else inputs_embeds.device
|
| 229 |
+
position_ids = torch.arange(
|
| 230 |
+
past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
|
| 231 |
+
)
|
| 232 |
+
position_ids = position_ids.unsqueeze(0)
|
| 233 |
+
|
| 234 |
+
if inputs_embeds is None:
|
| 235 |
+
inputs_embeds = self.embeddings(input_ids)
|
| 236 |
+
|
| 237 |
+
# embed positions
|
| 238 |
+
hidden_states = inputs_embeds
|
| 239 |
+
|
| 240 |
+
if self.gradient_checkpointing and self.training:
|
| 241 |
+
if use_cache:
|
| 242 |
+
logger.warning_once(
|
| 243 |
+
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
|
| 244 |
+
)
|
| 245 |
+
use_cache = False
|
| 246 |
+
|
| 247 |
+
# decoder layers
|
| 248 |
+
all_hidden_states = () if output_hidden_states else None
|
| 249 |
+
all_self_attns = () if output_attentions else None
|
| 250 |
+
next_decoder_cache = None
|
| 251 |
+
|
| 252 |
+
for decoder_layer in self.layers:
|
| 253 |
+
if output_hidden_states:
|
| 254 |
+
all_hidden_states += (hidden_states,)
|
| 255 |
+
|
| 256 |
+
if self.gradient_checkpointing and self.training:
|
| 257 |
+
layer_outputs = self._gradient_checkpointing_func(
|
| 258 |
+
decoder_layer.__call__,
|
| 259 |
+
hidden_states,
|
| 260 |
+
attention_mask,
|
| 261 |
+
position_ids,
|
| 262 |
+
past_key_values,
|
| 263 |
+
output_attentions,
|
| 264 |
+
use_cache,
|
| 265 |
+
)
|
| 266 |
+
else:
|
| 267 |
+
layer_outputs = decoder_layer(
|
| 268 |
+
hidden_states,
|
| 269 |
+
attention_mask=attention_mask,
|
| 270 |
+
position_ids=position_ids,
|
| 271 |
+
past_key_value=past_key_values,
|
| 272 |
+
output_attentions=output_attentions,
|
| 273 |
+
use_cache=use_cache,
|
| 274 |
+
)
|
| 275 |
+
|
| 276 |
+
hidden_states = layer_outputs[0]
|
| 277 |
+
|
| 278 |
+
if use_cache:
|
| 279 |
+
next_decoder_cache = layer_outputs[2 if output_attentions else 1]
|
| 280 |
+
|
| 281 |
+
if output_attentions:
|
| 282 |
+
all_self_attns += (layer_outputs[1],)
|
| 283 |
+
|
| 284 |
+
hidden_states = self.norm(hidden_states)
|
| 285 |
+
|
| 286 |
+
# add hidden states from the last decoder layer
|
| 287 |
+
if output_hidden_states:
|
| 288 |
+
all_hidden_states += (hidden_states,)
|
| 289 |
+
|
| 290 |
+
next_cache = None
|
| 291 |
+
if use_cache:
|
| 292 |
+
next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache
|
| 293 |
+
if not return_dict:
|
| 294 |
+
return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
|
| 295 |
+
return BaseModelOutputWithPast(
|
| 296 |
+
last_hidden_state=hidden_states,
|
| 297 |
+
past_key_values=next_cache,
|
| 298 |
+
hidden_states=all_hidden_states,
|
| 299 |
+
attentions=all_self_attns,
|
| 300 |
+
)
|
| 301 |
+
|
| 302 |
+
|
| 303 |
+
class LinearAttentionForCausalLM(LinearAttentionPreTrainedModel):
|
| 304 |
+
_tied_weights_keys = ["lm_head.weight"]
|
| 305 |
+
|
| 306 |
+
def __init__(self, config):
|
| 307 |
+
super().__init__(config)
|
| 308 |
+
self.model = LinearAttentionModel(config)
|
| 309 |
+
self.vocab_size = config.vocab_size
|
| 310 |
+
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
|
| 311 |
+
|
| 312 |
+
# Initialize weights and apply final processing
|
| 313 |
+
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embeddings

    def set_input_embeddings(self, value):
        self.model.embeddings = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    def generate(self, *args, **kwargs):
        try:
            return super().generate(*args, **kwargs)
        except AttributeError as exc:
            # Expected exception: "AttributeError: '(object name)' object has no attribute 'past_key_values'"
            if 'past_key_values' in str(exc):
                raise AttributeError(
                    f"You tried to call `generate` with a decoding strategy that manipulates `past_key_values`, "
                    f"which is not supported for {self.__class__.__name__}. "
                    f"Try another generation strategy instead. "
                    f"For the available generation strategies, check this doc: "
                    f"https://huggingface.co/docs/transformers/en/generation_strategies#decoding-strategies"
                )
            else:
                raise exc

    def prepare_inputs_for_generation(
        self,
        input_ids: torch.LongTensor = None,
        state: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        **kwargs
    ):
        # use only the last token of `input_ids` if a recurrent state is passed along
        if state is not None:
            input_ids = input_ids[:, -1].unsqueeze(-1)

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and state is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}
        model_inputs["state"] = state
        return model_inputs

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]
        logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            if self.config.fuse_cross_entropy:
                loss_fct = FusedCrossEntropyLoss(inplace_backward=True)
            else:
                loss_fct = nn.CrossEntropyLoss()
            # Enable model parallelism
            labels = labels.to(logits.device)
            labels = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], loss_fct.ignore_index)), 1)
            loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
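
A note on the loss computation in `forward` above: instead of slicing the logits, the labels are shifted left by one and the last position is padded with `ignore_index`, so logits and labels keep the same sequence length. A minimal sketch of the equivalence (hypothetical tensors, plain PyTorch, not part of the repo):

import torch

loss_fct = torch.nn.CrossEntropyLoss()   # ignore_index defaults to -100
logits = torch.randn(2, 5, 10)           # [batch, seq_len, vocab]
labels = torch.randint(0, 10, (2, 5))    # [batch, seq_len]

# Shift labels left and pad with ignore_index, exactly as in the forward pass above.
shifted = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], loss_fct.ignore_index)), 1)
loss = loss_fct(logits.view(-1, 10), shifted.view(-1))

# Equivalent to the conventional shift applied to both tensors.
ref = loss_fct(logits[:, :-1].reshape(-1, 10), labels[:, 1:].reshape(-1))
assert torch.allclose(loss, ref)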
fla/models/mamba/__init__.py
ADDED
@@ -0,0 +1,14 @@
# -*- coding: utf-8 -*-

from transformers import AutoConfig, AutoModel, AutoModelForCausalLM

from fla.models.mamba.configuration_mamba import MambaConfig
from fla.models.mamba.modeling_mamba import (MambaBlock, MambaForCausalLM,
                                             MambaModel)

AutoConfig.register(MambaConfig.model_type, MambaConfig, True)
AutoModel.register(MambaConfig, MambaModel, True)
AutoModelForCausalLM.register(MambaConfig, MambaForCausalLM, True)


__all__ = ['MambaConfig', 'MambaForCausalLM', 'MambaModel', 'MambaBlock']
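
Because this `__init__.py` registers the classes with the `transformers` Auto* factories (the third positional argument `True` is `exist_ok`, so an already-registered "mamba" entry is overwritten), the model becomes reachable through the generic API. A minimal sketch, assuming `fla.models` is importable so the registration side effects run:

import fla.models  # noqa: F401  (triggers the AutoConfig/AutoModel registrations above)
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.for_model("mamba")             # resolves to fla's MambaConfig
model = AutoModelForCausalLM.from_config(config)   # builds MambaForCausalLM with random weights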
fla/models/mamba/configuration_mamba.py
ADDED
@@ -0,0 +1,156 @@
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MAMBA configuration"""

import math

from transformers.configuration_utils import PretrainedConfig


class MambaConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`MambaModel`]. It is used to instantiate a MAMBA
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the MAMBA
    [state-spaces/mamba-2.8b](https://huggingface.co/state-spaces/mamba-2.8b) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 32000):
            Vocabulary size of the MAMBA model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`MambaModel`].
        hidden_size (`int`, *optional*, defaults to 2048):
            Dimensionality of the embeddings and hidden states.
        state_size (`int`, *optional*, defaults to 16):
            Shape of the state space latents.
        num_hidden_layers (`int`, *optional*, defaults to 48):
            Number of hidden layers in the model.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
            The epsilon to use in the layer normalization layers.
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 1):
            The id of the beginning of sentence token in the vocabulary.
        eos_token_id (`int`, *optional*, defaults to 2):
            The id of the end of sentence token in the vocabulary.
        expand (`int`, *optional*, defaults to 2):
            Expanding factor used to determine the intermediate size.
        conv_kernel (`int`, *optional*, defaults to 4):
            Size of the convolution kernel.
        use_bias (`bool`, *optional*, defaults to `False`):
            Whether or not to use bias in `in_proj` and `out_proj` of the mixer block.
        use_conv_bias (`bool`, *optional*, defaults to `True`):
            Whether or not to use bias in the convolution layer of the mixer block.
        hidden_act (`str`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        initializer_range (`float`, *optional*, defaults to 0.1):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        residual_in_fp32 (`bool`, *optional*, defaults to `False`):
            Whether or not residuals should be in `float32`.
            If set to `False`, residuals will keep the same `dtype` as the rest of the model.
        time_step_rank (`Union[int, str]`, *optional*, defaults to `"auto"`):
            Rank of the discretization projection matrix.
            `"auto"` means that it will default to `math.ceil(self.hidden_size / 16)`.
        time_step_scale (`float`, *optional*, defaults to 1.0):
            Scale used to scale `dt_proj.bias`.
        time_step_min (`float`, *optional*, defaults to 0.001):
            Minimum `time_step` used to bound `dt_proj.bias`.
        time_step_max (`float`, *optional*, defaults to 0.1):
            Maximum `time_step` used to bound `dt_proj.bias`.
        time_step_init_scheme (`str`, *optional*, defaults to `"random"`):
            Init scheme used for `dt_proj.weight`. Should be one of `["random", "constant"]`.
        time_step_floor (`float`, *optional*, defaults to 0.0001):
            Minimum clamping value of the `dt_proj.bias` layer initialization.
        rescale_prenorm_residual (`bool`, *optional*, defaults to `False`):
            Whether or not to rescale `out_proj` weights when initializing.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the cache should be used.
        fuse_norm (`bool`, *optional*, defaults to `True`):
            Whether or not to use the fused RMSNorm kernel.
        fuse_cross_entropy (`bool`, *optional*, defaults to `True`):
            Whether or not to use the fused cross-entropy loss.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether or not to tie the input and output word embeddings.


    Example:

    ```python
    >>> from transformers import MambaConfig, MambaModel

    >>> # Initializing a Mamba configuration
    >>> configuration = MambaConfig()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = MambaModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "mamba"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2048,
        state_size=16,
        num_hidden_layers=48,
        layer_norm_epsilon=1e-5,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        expand=2,
        conv_kernel=4,
        use_bias=False,
        use_conv_bias=True,
        hidden_act="silu",
        initializer_range=0.1,
        residual_in_fp32=False,
        time_step_rank="auto",
        time_step_scale=1.0,
        time_step_min=0.001,
        time_step_max=0.1,
        time_step_init_scheme="random",
        time_step_floor=1e-4,
        rescale_prenorm_residual=False,
        use_cache=True,
        fuse_norm: bool = True,
        fuse_cross_entropy: bool = True,
        tie_word_embeddings: bool = False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.state_size = state_size
        self.num_hidden_layers = num_hidden_layers
        self.layer_norm_epsilon = layer_norm_epsilon
        self.conv_kernel = conv_kernel
        self.expand = expand
        self.intermediate_size = int(expand * self.hidden_size)
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.use_bias = use_bias
        self.use_conv_bias = use_conv_bias
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.time_step_rank = math.ceil(self.hidden_size / 16) if time_step_rank == "auto" else time_step_rank
        self.time_step_scale = time_step_scale
        self.time_step_min = time_step_min
        self.time_step_max = time_step_max
        self.time_step_init_scheme = time_step_init_scheme
        self.time_step_floor = time_step_floor
        self.rescale_prenorm_residual = rescale_prenorm_residual
        self.residual_in_fp32 = residual_in_fp32
        self.use_cache = use_cache
        self.fuse_cross_entropy = fuse_cross_entropy
        self.fuse_norm = fuse_norm

        super().__init__(
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            pad_token_id=pad_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
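
A quick worked check of the two fields derived in `__init__` above, using the defaults (values computed here for illustration only):

import math

from fla.models.mamba import MambaConfig

config = MambaConfig()  # hidden_size=2048, expand=2, time_step_rank="auto"
assert config.intermediate_size == int(2 * 2048) == 4096
assert config.time_step_rank == math.ceil(2048 / 16) == 128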
fla/models/mamba/modeling_mamba.py
ADDED
@@ -0,0 +1,605 @@
# coding=utf-8
# Copyright 2024 state-spaces/mamba org and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch MAMBA model."""

import math
from dataclasses import dataclass
from typing import Any, Dict, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from transformers.activations import ACT2FN
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import ModelOutput, logging

from fla.models.mamba.configuration_mamba import MambaConfig
from fla.modules import FusedCrossEntropyLoss, RMSNorm

logger = logging.get_logger(__name__)

try:
    from mamba_ssm.ops.selective_scan_interface import (mamba_inner_fn,
                                                        selective_scan_fn)
    from mamba_ssm.ops.triton.selective_state_update import \
        selective_state_update
except ImportError:
    selective_state_update, selective_scan_fn, mamba_inner_fn = None, None, None

try:
    from causal_conv1d import causal_conv1d_fn, causal_conv1d_update
except ImportError:
    causal_conv1d_update, causal_conv1d_fn = None, None

is_fast_path_available = all(
    (selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn)
)


class MambaCache:
    def __init__(self, config, batch_size, dtype=torch.float16, device=None):
        self.seqlen_offset = 0
        self.dtype = dtype
        intermediate_size = config.intermediate_size
        ssm_state_size = config.state_size
        conv_kernel_size = config.conv_kernel

        self.conv_states = {
            i: torch.zeros(batch_size, intermediate_size, conv_kernel_size, device=device, dtype=dtype)
            for i in range(config.num_hidden_layers)
        }
        self.ssm_states = {
            i: torch.zeros(batch_size, intermediate_size, ssm_state_size, device=device, dtype=dtype)
            for i in range(config.num_hidden_layers)
        }
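
# Note on the cache above (descriptive comment, not in the original file): each layer keeps
# two rolling buffers that make O(1) per-token decoding possible:
#   conv_states[i]: [batch_size, intermediate_size, conv_kernel]  -- the last `conv_kernel`
#                   inputs of the depthwise causal convolution
#   ssm_states[i]:  [batch_size, intermediate_size, state_size]   -- the recurrent SSM state h_t
# `seqlen_offset > 0` is how the mixers below detect single-token decoding mode.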


class MambaMixer(nn.Module):
    """
    Compute ∆, A, B, C, and D, the state space parameters, and compute the `contextualized_states`.
    A and D are input-independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective);
    ∆, B, C are input-dependent (this is a key difference between Mamba and the linear time-invariant S4,
    and is why Mamba is called a **selective** state space model).
    """
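
    # A sketch (not in the original file) of the discretized recurrence that `slow_forward`
    # below implements; `discrete_A`/`discrete_B` are the Ā/B̄ of the Mamba paper:
    #   Ā_t = exp(Δ_t * A)                  (zero-order-hold discretization of A)
    #   B̄_t = Δ_t * B_t                     (simplified Euler discretization of B)
    #   h_t = Ā_t ⊙ h_{t-1} + B̄_t * x_t     (selective scan state update)
    #   y_t = C_t · h_t + D ⊙ x_t           (output, before the SiLU gating)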

    def __init__(self, config, layer_idx):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.ssm_state_size = config.state_size
        self.conv_kernel_size = config.conv_kernel
        self.intermediate_size = config.intermediate_size
        self.time_step_rank = config.time_step_rank
        self.layer_idx = layer_idx
        self.use_conv_bias = config.use_conv_bias
        self.conv1d = nn.Conv1d(
            in_channels=self.intermediate_size,
            out_channels=self.intermediate_size,
            bias=config.use_conv_bias,
            kernel_size=config.conv_kernel,
            groups=self.intermediate_size,
            padding=config.conv_kernel - 1,
        )

        self.activation = config.hidden_act
        self.act = ACT2FN[config.hidden_act]

        # projection of the input hidden states
        self.in_proj = nn.Linear(self.hidden_size, self.intermediate_size * 2, bias=config.use_bias)
        # selective projection used to make dt, B and C input-dependent
        self.x_proj = nn.Linear(self.intermediate_size, self.time_step_rank + self.ssm_state_size * 2, bias=False)
        # time step projection (discretization)
        self.dt_proj = nn.Linear(self.time_step_rank, self.intermediate_size, bias=True)

        # S4D real initialization. These are not discretized!
        # The core is to load them, compute the discrete states, then write the updated state. Keeps the memory bounded
        A = torch.arange(1, self.ssm_state_size + 1, dtype=torch.float32)[None, :]
        A = A.expand(self.intermediate_size, -1).contiguous()

        self.A_log = nn.Parameter(torch.log(A))
        self.D = nn.Parameter(torch.ones(self.intermediate_size))
        self.out_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.use_bias)
        self.use_bias = config.use_bias

        if not is_fast_path_available:
            logger.warning_once(
                "The fast path is not available because one of "
                "`(selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn)`"
                " is None. Falling back to the naive implementation. "
                "To install follow https://github.com/state-spaces/mamba/#installation and"
                " https://github.com/Dao-AILab/causal-conv1d"
            )

    def cuda_kernels_forward(self, hidden_states: torch.Tensor, cache_params: Optional[MambaCache] = None):
        # 1. Gated MLP's linear projection
        projected_states = self.in_proj(hidden_states).transpose(1, 2)

        if self.training and cache_params is None:  # doesn't support outputting the states -> used for training
            contextualized_states = mamba_inner_fn(
                projected_states,
                self.conv1d.weight,
                self.conv1d.bias if self.use_conv_bias else None,
                self.x_proj.weight,
                self.dt_proj.weight,
                self.out_proj.weight,
                self.out_proj.bias.float() if self.use_bias else None,
                -torch.exp(self.A_log.float()),
                None,  # input-dependent B
                None,  # input-dependent C
                self.D.float(),
                delta_bias=self.dt_proj.bias.float(),
                delta_softplus=True,
            )

        else:
            hidden_states, gate = projected_states.chunk(2, dim=1)

            # 2. Convolution sequence transformation
            conv_weights = self.conv1d.weight.view(self.conv1d.weight.size(0), self.conv1d.weight.size(2))
            if cache_params is not None and cache_params.seqlen_offset > 0:
                hidden_states = causal_conv1d_update(
                    hidden_states.squeeze(-1),
                    cache_params.conv_states[self.layer_idx],
                    conv_weights,
                    self.conv1d.bias,
                    self.activation,
                )
                hidden_states = hidden_states.unsqueeze(-1)
            else:
                if cache_params is not None:
                    conv_states = nn.functional.pad(
                        hidden_states, (self.conv_kernel_size - hidden_states.shape[-1], 0)
                    )
                    cache_params.conv_states[self.layer_idx].copy_(conv_states)
                hidden_states = causal_conv1d_fn(
                    hidden_states, conv_weights, self.conv1d.bias, activation=self.activation
                )

            # 3. State Space Model sequence transformation
            # 3.a. input-varying initialization of time_step, B and C
            ssm_parameters = self.x_proj(hidden_states.transpose(1, 2))
            time_step, B, C = torch.split(
                ssm_parameters, [self.time_step_rank, self.ssm_state_size, self.ssm_state_size], dim=-1
            )
            discrete_time_step = self.dt_proj.weight @ time_step.transpose(1, 2)

            A = -torch.exp(self.A_log.float())
            # 3.c perform the recurrence y ← SSM(A, B, C)(x)
            time_proj_bias = self.dt_proj.bias.float() if hasattr(self.dt_proj, "bias") else None
            if cache_params is not None and cache_params.seqlen_offset > 0:
                scan_outputs = selective_state_update(
                    cache_params.ssm_states[self.layer_idx],
                    hidden_states[..., 0],
                    discrete_time_step[..., 0],
                    A,
                    B[:, 0],
                    C[:, 0],
                    self.D,
                    gate[..., 0],
                    time_proj_bias,
                    dt_softplus=True,
                ).unsqueeze(-1)
            else:
                scan_outputs, ssm_state = selective_scan_fn(
                    hidden_states,
                    discrete_time_step,
                    A,
                    B.transpose(1, 2),
                    C.transpose(1, 2),
                    self.D.float(),
                    gate,
                    time_proj_bias,
                    delta_softplus=True,
                    return_last_state=True,
                )
                if ssm_state is not None and cache_params is not None:
                    cache_params.ssm_states[self.layer_idx].copy_(ssm_state)

            # 4. Final linear projection
            contextualized_states = self.out_proj(scan_outputs.transpose(1, 2))
        return contextualized_states

    # fmt: off
    def slow_forward(self, input_states, cache_params: Optional[MambaCache] = None):
        batch_size, seq_len, _ = input_states.shape
        dtype = input_states.dtype
        # 1. Gated MLP's linear projection
        # [batch, 2 * intermediate_size, seq_len]
        projected_states = self.in_proj(input_states).transpose(1, 2)
        hidden_states, gate = projected_states.chunk(2, dim=1)

        # 2. Convolution sequence transformation
        if cache_params is not None:
            ssm_state = cache_params.ssm_states[self.layer_idx].clone()
            if cache_params.seqlen_offset > 0:
                # [batch, intermediate_size, conv_kernel_size]
                conv_state = cache_params.conv_states[self.layer_idx]
                conv_state = torch.roll(conv_state, shifts=-1, dims=-1)
                conv_state[:, :, -1] = hidden_states[:, :, 0]
                cache_params.conv_states[self.layer_idx].copy_(conv_state)
                hidden_states = torch.sum(conv_state * self.conv1d.weight[:, 0, :], dim=-1)
                if self.use_conv_bias:
                    hidden_states += self.conv1d.bias
                # [batch, intermediate_size, 1] : decoding
                hidden_states = self.act(hidden_states).to(dtype).unsqueeze(-1)
            else:
                conv_state = nn.functional.pad(
                    hidden_states,
                    (self.conv_kernel_size - hidden_states.shape[-1], 0)
                )
                cache_params.conv_states[self.layer_idx].copy_(conv_state)
                # [batch, intermediate_size, seq_len]
                hidden_states = self.act(self.conv1d(hidden_states)[..., :seq_len])
        else:
            ssm_state = torch.zeros(
                (batch_size, self.intermediate_size, self.ssm_state_size),
                device=hidden_states.device, dtype=dtype
            )
            # [batch, intermediate_size, seq_len]
            hidden_states = self.act(self.conv1d(hidden_states)[..., :seq_len])

        # 3. State Space Model sequence transformation
        # 3.a. Selection: [batch, seq_len, self.time_step_rank + self.ssm_state_size * 2]
        ssm_parameters = self.x_proj(hidden_states.transpose(1, 2))
        time_step, B, C = torch.split(
            ssm_parameters, [self.time_step_rank, self.ssm_state_size, self.ssm_state_size], dim=-1
        )
        # [batch, seq_len, intermediate_size]
        discrete_time_step = self.dt_proj(time_step)
        # [batch, intermediate_size, seq_len]
        discrete_time_step = nn.functional.softplus(discrete_time_step).transpose(1, 2)

        # 3.b. Discretization: B and C to [batch, seq_len, intermediate_size, ssm_state_size] (SRAM)
        # [intermediate_size, ssm_state_size]
        A = -torch.exp(self.A_log.float())
        # [batch, intermediate_size, seq_len, ssm_state_size]
        discrete_A = torch.exp(A[None, :, None, :] * discrete_time_step[:, :, :, None])
        # [batch, intermediate_size, seq_len, ssm_state_size]
        discrete_B = discrete_time_step[:, :, :, None] * B[:, None, :, :].float()
        deltaB_u = discrete_B * hidden_states[:, :, :, None].float()

        # 3.c perform the recurrence y ← SSM(A, B, C)(x)
        scan_outputs = []
        for i in range(seq_len):
            # [batch, intermediate_size, ssm_state_size]
            ssm_state = discrete_A[:, :, i, :] * ssm_state + deltaB_u[:, :, i, :]
            # [batch, intermediate_size, 1]
            scan_output = torch.matmul(ssm_state.to(dtype), C[:, i, :].unsqueeze(-1))
            scan_outputs.append(scan_output[:, :, 0])
        # [batch, intermediate_size, seq_len]
        scan_output = torch.stack(scan_outputs, dim=-1)
        scan_output = scan_output + (hidden_states * self.D[None, :, None])
        scan_output = (scan_output * self.act(gate))

        if cache_params is not None:
            cache_params.ssm_states[self.layer_idx].copy_(ssm_state)

        # 4. Final linear projection
        # [batch, seq_len, hidden_size]
        contextualized_states = self.out_proj(scan_output.transpose(1, 2))
        return contextualized_states
    # fmt: on

    def forward(self, hidden_states, cache_params: Optional[MambaCache] = None):
        if is_fast_path_available and "cuda" in self.x_proj.weight.device.type:
            return self.cuda_kernels_forward(hidden_states, cache_params)
        return self.slow_forward(hidden_states, cache_params)
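
    # Dispatch note (descriptive comment, not in the original): the fused CUDA path requires
    # the optional `mamba_ssm` and `causal_conv1d` packages and CUDA-resident weights;
    # otherwise the pure-PyTorch `slow_forward` reference path above is used.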


class MambaBlock(nn.Module):
    def __init__(self, config, layer_idx):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.residual_in_fp32 = config.residual_in_fp32
        self.norm = RMSNorm(config.hidden_size, eps=config.layer_norm_epsilon)
        self.mixer = MambaMixer(config, layer_idx=layer_idx)

    def forward(self, hidden_states, cache_params: Optional[MambaCache] = None):
        residual = hidden_states
        hidden_states = self.norm(hidden_states)
        # if self.residual_in_fp32:
        #     residual = residual.to(torch.float32)
        hidden_states = self.mixer(hidden_states, cache_params=cache_params)
        hidden_states = residual + hidden_states
        return hidden_states


class MambaPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = MambaConfig
    base_model_prefix = "backbone"
    _no_split_modules = ["MambaBlock"]
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, MambaMixer):
            module.A_log._no_weight_decay = True
            module.D._no_weight_decay = True

            dt_init_std = self.config.time_step_rank**-0.5 * self.config.time_step_scale
            if self.config.time_step_init_scheme == "constant":
                nn.init.constant_(module.dt_proj.weight, dt_init_std)
            elif self.config.time_step_init_scheme == "random":
                nn.init.uniform_(module.dt_proj.weight, -dt_init_std, dt_init_std)

            dt = torch.exp(
                torch.rand(self.config.intermediate_size)
                * (math.log(self.config.time_step_max) - math.log(self.config.time_step_min))
                + math.log(self.config.time_step_min)
            ).clamp(min=self.config.time_step_floor)
            # Inverse of softplus: https://github.com/pytorch/pytorch/issues/72759
            inv_dt = dt + torch.log(-torch.expm1(-dt))
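            # Illustrative check (not in the original): softplus(inv_dt) recovers dt exactly,
            # i.e. torch.allclose(nn.functional.softplus(inv_dt), dt) holds up to fp error.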
            with torch.no_grad():
                module.dt_proj.bias.copy_(inv_dt)
            module.dt_proj.bias._no_reinit = True

        if isinstance(module, nn.Linear):
            if module.bias is not None:
                if not getattr(module.bias, "_no_reinit", False):
                    nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            nn.init.normal_(module.weight, std=self.config.initializer_range)

        if self.config.rescale_prenorm_residual:
            # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
            # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
            # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
            # > -- GPT-2 :: https://openai.com/blog/better-language-models/
            #
            # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
            for name, p in module.named_parameters():
                if name in ["out_proj.weight"]:
                    # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
                    # Following Pytorch init, except scale by 1/sqrt(2 * n_layer)
                    # We need to reinit p since this code could be called multiple times
                    # Having just p *= scale would repeatedly scale it down
                    nn.init.kaiming_uniform_(p, a=math.sqrt(5))
                    with torch.no_grad():
                        p /= math.sqrt(self.config.num_hidden_layers)


@dataclass
class MambaOutput(ModelOutput):
    """
    Class for the MAMBA model outputs.

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        cache_params (`MambaCache`):
            The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to
            avoid providing the old `input_ids`.

            Includes both the state space model state matrices after the selective scan, and the convolutional states.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*,
            returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
    """

    last_hidden_state: Optional[torch.FloatTensor] = None
    cache_params: Optional[MambaCache] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None


@dataclass
class MambaCausalLMOutput(ModelOutput):
    """
    Base class for causal language model (or autoregressive) outputs.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Language modeling loss (for next-token prediction).
        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        cache_params (`MambaCache`):
            The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to
            avoid providing the old `input_ids`.

            Includes both the state space model state matrices after the selective scan, and the convolutional states.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*,
            returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: Optional[torch.FloatTensor] = None
    cache_params: Optional[MambaCache] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None


class MambaModel(MambaPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
        self.layers = nn.ModuleList([MambaBlock(config, layer_idx=idx) for idx in range(config.num_hidden_layers)])

        self.gradient_checkpointing = False
        self.norm_f = RMSNorm(config.hidden_size, eps=config.layer_norm_epsilon)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings

    def set_input_embeddings(self, new_embeddings):
        self.embeddings = new_embeddings

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        cache_params: Optional[MambaCache] = None,
        use_cache: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs,  # `attention_mask` is passed by the tokenizer and we don't want it
    ) -> Union[Tuple, MambaOutput]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if (input_ids is None) ^ (inputs_embeds is not None):  # ^ is python for xor
            raise ValueError(
                "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
            )

        if inputs_embeds is None:
            inputs_embeds = self.embeddings(input_ids)

        if self.gradient_checkpointing and self.training and use_cache:
            use_cache = False

        if cache_params is None and use_cache:
            cache_params = MambaCache(
                self.config, inputs_embeds.size(0), device=inputs_embeds.device, dtype=inputs_embeds.dtype
            )

        hidden_states = inputs_embeds
        all_hidden_states = () if output_hidden_states else None
        for mixer_block in self.layers:
            if self.gradient_checkpointing and self.training:
                hidden_states = self._gradient_checkpointing_func(mixer_block.__call__, hidden_states, cache_params)
            else:
                hidden_states = mixer_block(hidden_states, cache_params=cache_params)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if use_cache:
            cache_params.seqlen_offset += inputs_embeds.shape[1]

        hidden_states = self.norm_f(hidden_states)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, cache_params, all_hidden_states] if v is not None)

        return MambaOutput(
            last_hidden_state=hidden_states,
            cache_params=cache_params if use_cache else None,
            hidden_states=all_hidden_states,
        )


class MambaForCausalLM(MambaPreTrainedModel):
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.backbone = MambaModel(config)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def get_input_embeddings(self):
        return self.backbone.get_input_embeddings()

    def set_input_embeddings(self, new_embeddings):
        return self.backbone.set_input_embeddings(new_embeddings)

    def _update_model_kwargs_for_generation(
        self, outputs: ModelOutput, model_kwargs: Dict[str, Any], **kwargs
    ) -> Dict[str, Any]:
        model_kwargs["cache_params"] = outputs.get("cache_params", None)
        return model_kwargs

    def prepare_inputs_for_generation(
        self, input_ids, cache_params: Optional[MambaCache] = None, inputs_embeds=None, attention_mask=None, **kwargs
    ):
        # use only the last token of `input_ids` if the cache is passed along
        if cache_params is not None:
            input_ids = input_ids[:, -1].unsqueeze(-1)

        if inputs_embeds is not None and cache_params is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        model_inputs["cache_params"] = cache_params
        return model_inputs

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        cache_params: Optional[MambaCache] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        use_cache: Optional[bool] = None,
        **kwargs,  # for now we need this for generation
    ) -> Union[Tuple, MambaCausalLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
            are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        mamba_outputs = self.backbone(
            input_ids,
            cache_params=cache_params,
            inputs_embeds=inputs_embeds,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            use_cache=use_cache,
        )
        hidden_states = mamba_outputs[0]
        logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            if self.config.fuse_cross_entropy:
                loss_fct = FusedCrossEntropyLoss(inplace_backward=True)
            else:
                loss_fct = nn.CrossEntropyLoss()
            # Enable model parallelism
            labels = labels.to(logits.device)
            labels = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], loss_fct.ignore_index)), 1)
            loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (logits,) + mamba_outputs[1:]
            return (loss,) + output if loss is not None else output

        return MambaCausalLMOutput(
            loss=loss,
            logits=logits,
            cache_params=mamba_outputs.cache_params,
            hidden_states=mamba_outputs.hidden_states,
        )
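
A minimal end-to-end sketch of `MambaForCausalLM` above (illustrative only: it uses a tiny random-weight config, and assumes `fla.modules` and its fused kernels import cleanly on the target machine):

import torch

from fla.models.mamba import MambaConfig, MambaForCausalLM

config = MambaConfig(num_hidden_layers=2, hidden_size=256, vocab_size=1000)  # tiny, for illustration
model = MambaForCausalLM(config).eval()

input_ids = torch.randint(0, 1000, (1, 8))
with torch.no_grad():
    out = model(input_ids, use_cache=True)                    # prefill; returns logits + cache_params
    next_token = out.logits[:, -1].argmax(-1, keepdim=True)   # greedy pick of the next token
    out = model(next_token, cache_params=out.cache_params, use_cache=True)  # O(1) decoding step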
fla/models/retnet/__init__.py
ADDED
@@ -0,0 +1,13 @@
# -*- coding: utf-8 -*-

from transformers import AutoConfig, AutoModel, AutoModelForCausalLM

from fla.models.retnet.configuration_retnet import RetNetConfig
from fla.models.retnet.modeling_retnet import RetNetForCausalLM, RetNetModel

AutoConfig.register(RetNetConfig.model_type, RetNetConfig)
AutoModel.register(RetNetConfig, RetNetModel)
AutoModelForCausalLM.register(RetNetConfig, RetNetForCausalLM)


__all__ = ['RetNetConfig', 'RetNetForCausalLM', 'RetNetModel']