Add 8-bit quantized model
- LICENSE +202 -0
- README.md +126 -0
- added_tokens.json +26 -0
- chat_template.jinja +54 -0
- config.json +51 -0
- configuration_dream.py +87 -0
- generation_config.json +16 -0
- generation_utils.py +570 -0
- merges.txt +0 -0
- model-00001-of-00002.safetensors +3 -0
- model-00002-of-00002.safetensors +3 -0
- model.safetensors.index.json +738 -0
- modeling_dream.py +839 -0
- special_tokens_map.json +36 -0
- tokenization_dream.py +340 -0
- tokenizer_config.json +222 -0
- training_utils.py +271 -0
- vocab.json +0 -0
LICENSE
ADDED
@@ -0,0 +1,202 @@

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
README.md
ADDED
@@ -0,0 +1,126 @@
---
license: apache-2.0
base_model:
- elyza/ELYZA-Diffusion-Instruct-1.0-Dream-7B
language:
- en
- ja
---

# ELYZA-Diffusion-Instruct-1.0-Dream-7B (8-bit)

This is an **8-bit quantized version** of [ELYZA-Diffusion-Instruct-1.0-Dream-7B](https://huggingface.co/elyza/ELYZA-Diffusion-Instruct-1.0-Dream-7B), originally released by [ELYZA, Inc.](https://www.elyza.co.jp/).

## Model Description

**ELYZA-Diffusion-Instruct-1.0-Dream-7B** is a Japanese-adapted **diffusion language model** based on the open-source diffusion LLM [Dream-v0-Instruct-7B](https://huggingface.co/Dream-org/Dream-v0-Instruct-7B), further pretrained and instruction-tuned on large-scale Japanese data.

## License

Apache License 2.0

## Important Note for 8-bit Version

When using this 8-bit quantized model, **you must use `alg="origin"`** for diffusion generation. The `alg="entropy"` algorithm used in the original model is not compatible with 8-bit quantization.

**Note**: Because of this algorithm change (`alg="origin"` instead of `alg="entropy"`), generation quality may differ from the original model. Please evaluate the output quality for your specific use case.

## Usage Example (diffusion-bnb-8bit.py)

```python
import torch
from transformers import AutoModel, AutoTokenizer

def clear_screen():
    # ANSI escape code to clear the screen and move the cursor to the top left
    print("\033[H\033[J", end="")

model_path = "high-u/ELYZA-Diffusion-Instruct-1.0-Dream-7B-8bit"

model = AutoModel.from_pretrained(
    model_path,
    trust_remote_code=True,
    device_map="auto",
).eval()

tokenizer = AutoTokenizer.from_pretrained(
    model_path,
    trust_remote_code=True,
)

if tokenizer.chat_template is None:
    try:
        with open("chat_template.jinja", "r") as f:
            tokenizer.chat_template = f.read()
    except FileNotFoundError:
        print("Warning: chat_template.jinja not found")

messages = [
    # System prompt (Japanese): "Always answer in Markdown. Give each topic an
    # appropriate heading, emphasize key points with bullet lists and bold
    # text, and keep the answer visually well organized."
    {"role": "system", "content": "回答は必ずMarkdown形式で記述してください。トピックごとに適切な見出しを付け、重要なポイントは箇条書きや太字で強調し、視覚的に分かりやすく整理された文章で答えてください。"},
    # User prompt (Japanese): "Why is the sky blue?"
    {"role": "user", "content": "空はなぜ青い?"}
]

inputs = tokenizer.apply_chat_template(
    messages,
    return_tensors="pt",
    return_dict=True,
    add_generation_prompt=True,
)

input_ids = inputs.input_ids.to("cuda")
attention_mask = inputs.attention_mask.to("cuda")

def stream_visualizer(step, x, logits):
    # Redraw the full sequence at each denoising step, showing still-masked
    # positions as ".." so the diffusion process is visible in the terminal.
    decoded = tokenizer.decode(x[0], skip_special_tokens=False)
    mask_token = tokenizer.mask_token if tokenizer.mask_token else "<|mask|>"
    display_text = decoded.replace(mask_token, "..")
    clear_screen()
    print(f"--- Diffusion Step {step} ---")
    print(display_text)
    return x

with torch.inference_mode():
    output = model.diffusion_generate(
        input_ids,
        attention_mask=attention_mask,
        max_new_tokens=512,
        steps=256,
        temperature=0.5,
        top_p=0.95,
        alg="origin",
        generation_tokens_hook_func=stream_visualizer
    )
```
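
After generation finishes, you usually want only the newly generated tokens. A minimal follow-up sketch, assuming `return_dict_in_generate` is left at its default (`False`), in which case `diffusion_generate` returns a plain tensor of token ids:

```python
# The output has shape [batch, prompt_len + max_new_tokens]; the prompt
# occupies the first input_ids.shape[1] positions, so slice it off.
generated = output[0][input_ids.shape[1]:]
print(tokenizer.decode(generated, skip_special_tokens=True))
```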

## Tested Environment

This model was tested in the following environment:

- **GPU**: NVIDIA RTX 5070 Ti (16GB VRAM)
- **Python**: >= 3.13
- **Dependencies**:
  - accelerate >= 1.12.0
  - bitsandbytes >= 0.45.0
  - protobuf >= 6.33.4
  - sentencepiece >= 0.2.1
  - torch >= 2.11.0
  - transformers == 4.46.2

## Installation

```bash
pip install accelerate bitsandbytes protobuf sentencepiece torch "transformers==4.46.2"
```

## Original Model

For more details on the model design and training setup, please refer to:
- [Original Model Card](https://huggingface.co/elyza/ELYZA-Diffusion-Instruct-1.0-Dream-7B)
- [ELYZA Technical Blog](https://zenn.dev/elyza/articles/f9dd010e895a34)

## Acknowledgments

This model is a quantized version of [ELYZA-Diffusion-Instruct-1.0-Dream-7B](https://huggingface.co/elyza/ELYZA-Diffusion-Instruct-1.0-Dream-7B). I would like to express my sincere gratitude to ELYZA, Inc. for releasing such an excellent model to the open-source community.
added_tokens.json
ADDED
@@ -0,0 +1,26 @@
{
  "</tool_call>": 151658,
  "<tool_call>": 151657,
  "<|beginoftext|>": 151665,
  "<|box_end|>": 151649,
  "<|box_start|>": 151648,
  "<|endoftext|>": 151643,
  "<|file_sep|>": 151664,
  "<|fim_middle|>": 151660,
  "<|fim_pad|>": 151662,
  "<|fim_prefix|>": 151659,
  "<|fim_suffix|>": 151661,
  "<|im_end|>": 151645,
  "<|im_start|>": 151644,
  "<|image_pad|>": 151655,
  "<|mask|>": 151666,
  "<|object_ref_end|>": 151647,
  "<|object_ref_start|>": 151646,
  "<|quad_end|>": 151651,
  "<|quad_start|>": 151650,
  "<|repo_name|>": 151663,
  "<|video_pad|>": 151656,
  "<|vision_end|>": 151653,
  "<|vision_pad|>": 151654,
  "<|vision_start|>": 151652
}
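
These ids line up with `mask_token_id` (151666) and `pad_token_id`/`eos_token_id` (151643) in `config.json` below. A quick sanity check with the tokenizer (a sketch; the repo id is the one from the README):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(
    "high-u/ELYZA-Diffusion-Instruct-1.0-Dream-7B-8bit", trust_remote_code=True
)
# <|mask|> is the diffusion mask token filled in during denoising
print(tok.convert_tokens_to_ids("<|mask|>"))       # 151666
print(tok.convert_tokens_to_ids("<|endoftext|>"))  # 151643
```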
chat_template.jinja
ADDED
@@ -0,0 +1,54 @@
{%- if tools %}
    {{- '<|im_start|>system\n' }}
    {%- if messages[0]['role'] == 'system' %}
        {{- messages[0]['content'] }}
    {%- else %}
        {{- 'You are a helpful assistant.' }}
    {%- endif %}
    {{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
    {%- for tool in tools %}
        {{- "\n" }}
        {{- tool | tojson }}
    {%- endfor %}
    {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
{%- else %}
    {%- if messages[0]['role'] == 'system' %}
        {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}
    {%- else %}
        {{- '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}
    {%- endif %}
{%- endif %}
{%- for message in messages %}
    {%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %}
        {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
    {%- elif message.role == "assistant" %}
        {{- '<|im_start|>' + message.role }}
        {%- if message.content %}
            {{- '\n' + message.content }}
        {%- endif %}
        {%- for tool_call in message.tool_calls %}
            {%- if tool_call.function is defined %}
                {%- set tool_call = tool_call.function %}
            {%- endif %}
            {{- '\n<tool_call>\n{"name": "' }}
            {{- tool_call.name }}
            {{- '", "arguments": ' }}
            {{- tool_call.arguments | tojson }}
            {{- '}\n</tool_call>' }}
        {%- endfor %}
        {{- '<|im_end|>\n' }}
    {%- elif message.role == "tool" %}
        {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
            {{- '<|im_start|>user' }}
        {%- endif %}
        {{- '\n<tool_response>\n' }}
        {{- message.content }}
        {{- '\n</tool_response>' }}
        {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
            {{- '<|im_end|>\n' }}
        {%- endif %}
    {%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
    {{- '<|im_start|>assistant\n' }}
{%- endif %}
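
The template above is the standard Qwen-style ChatML format, including a tool-calling branch. A hedged sketch of exercising that branch (`tools=` is accepted by `apply_chat_template` in the pinned transformers version; the `get_weather` schema is hypothetical, for illustration only):

```python
tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",  # hypothetical function, illustration only
        "description": "Look up the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}]
prompt = tokenizer.apply_chat_template(
    [{"role": "user", "content": "What is the weather in Tokyo?"}],
    tools=tools,
    add_generation_prompt=True,
    tokenize=False,
)
print(prompt)  # the system block now embeds the <tools>...</tools> signatures
```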
config.json
ADDED
@@ -0,0 +1,51 @@
{
  "_name_or_path": "elyza/ELYZA-Diffusion-Instruct-1.0-Dream-7B",
  "architectures": [
    "DreamModel"
  ],
  "attention_dropout": 0.0,
  "auto_map": {
    "AutoConfig": "Dream-org/Dream-v0-Instruct-7B--configuration_dream.DreamConfig",
    "AutoModel": "Dream-org/Dream-v0-Instruct-7B--modeling_dream.DreamModel"
  },
  "bos_token_id": 151643,
  "eos_token_id": 151643,
  "hidden_act": "silu",
  "hidden_size": 3584,
  "initializer_range": 0.02,
  "intermediate_size": 18944,
  "mask_token_id": 151666,
  "max_position_embeddings": 131072,
  "max_window_layers": 28,
  "model_type": "Dream",
  "num_attention_heads": 28,
  "num_hidden_layers": 28,
  "num_key_value_heads": 4,
  "pad_token_id": 151643,
  "quantization_config": {
    "_load_in_4bit": false,
    "_load_in_8bit": true,
    "bnb_4bit_compute_dtype": "float32",
    "bnb_4bit_quant_storage": "uint8",
    "bnb_4bit_quant_type": "fp4",
    "bnb_4bit_use_double_quant": false,
    "llm_int8_enable_fp32_cpu_offload": false,
    "llm_int8_has_fp16_weight": false,
    "llm_int8_skip_modules": null,
    "llm_int8_threshold": 6.0,
    "load_in_4bit": false,
    "load_in_8bit": true,
    "quant_method": "bitsandbytes"
  },
  "rms_norm_eps": 1e-06,
  "rope_scaling": null,
  "rope_theta": 1000000.0,
  "sliding_window": null,
  "tie_word_embeddings": false,
  "torch_dtype": "float16",
  "transformers_version": "4.46.2",
  "use_cache": true,
  "use_mrope": false,
  "use_sliding_window": false,
  "vocab_size": 152064
}
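
Because `quantization_config` is baked into this `config.json` with `load_in_8bit: true` and `quant_method: bitsandbytes`, a plain `from_pretrained` (as in the README example) already loads the weights through bitsandbytes LLM.int8(); no explicit `BitsAndBytesConfig` argument is needed. A small check after loading (a sketch; the exact printed representation depends on the transformers version):

```python
# After: model = AutoModel.from_pretrained(model_path, trust_remote_code=True, device_map="auto")
print(model.config.quantization_config)  # should report load_in_8bit=True, quant_method "bitsandbytes"
print(f"{model.get_memory_footprint() / 2**30:.1f} GiB")  # int8 weights: well below the fp16 footprint
```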
configuration_dream.py
ADDED
@@ -0,0 +1,87 @@
# mypy: ignore-errors
# isort: skip_file
# coding=utf-8
# Copyright 2024 The Dream team, HKUNLP Group and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dream model configuration"""

from transformers.configuration_utils import PretrainedConfig
from transformers.modeling_rope_utils import rope_config_validation
from transformers.utils import logging

logger = logging.get_logger(__name__)


class DreamConfig(PretrainedConfig):
    model_type = "Dream"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size: int = 151936,
        hidden_size: int = 4096,
        intermediate_size: int = 22016,
        num_hidden_layers: int = 32,
        num_attention_heads: int = 32,
        num_key_value_heads: int = 32,
        hidden_act: str = "silu",
        max_position_embeddings: int = 32768,
        initializer_range: float = 0.02,
        rms_norm_eps: float = 1e-6,
        use_cache: bool = False,  # cache not used in diffusion
        tie_word_embeddings: bool = False,
        rope_theta: float = 10000.0,
        rope_scaling: dict = None,
        use_sliding_window: bool = False,
        sliding_window: int = 4096,
        max_window_layers: int = 28,
        attention_dropout: float = 0.0,
        mask_token_id: int = 151666,
        pad_token_id: int = 151643,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.use_sliding_window = use_sliding_window
        self.sliding_window = sliding_window if use_sliding_window else None
        self.max_window_layers = max_window_layers

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.attention_dropout = attention_dropout
        # Validate the correctness of rotary position embeddings parameters
        # BC: if there is a 'type' field, move it to 'rope_type'.
        if self.rope_scaling is not None and "type" in self.rope_scaling:
            self.rope_scaling["rope_type"] = self.rope_scaling["type"]
        rope_config_validation(self)

        super().__init__(
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
        self.mask_token_id = mask_token_id
        self.pad_token_id = pad_token_id
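
The class defaults above are Qwen2-style base values; this checkpoint overrides them through `config.json` (hidden size 3584, 28 layers, grouped-query attention with 4 KV heads). A sketch constructing the config with this repo's values explicitly, assuming `configuration_dream.py` is importable from the working directory:

```python
from configuration_dream import DreamConfig

# Values taken from config.json above; unlisted fields keep the class defaults.
config = DreamConfig(
    vocab_size=152064,
    hidden_size=3584,
    intermediate_size=18944,
    num_hidden_layers=28,
    num_attention_heads=28,
    num_key_value_heads=4,
    max_position_embeddings=131072,
    rope_theta=1000000.0,
)
print(config.model_type, config.mask_token_id)  # Dream 151666
```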
generation_config.json
ADDED
@@ -0,0 +1,16 @@
{
  "_from_model_config": true,
  "alg": "origin",
  "alg_temp": null,
  "bos_token_id": 151643,
  "eos_token_id": 151643,
  "eps": 0.001,
  "mask_token_id": null,
  "output_history": false,
  "pad_token_id": 151643,
  "steps": 512,
  "temperature": 0.0,
  "top_k": null,
  "top_p": null,
  "transformers_version": "4.46.2"
}
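
These are the defaults `diffusion_generate` falls back to: greedy decoding (`temperature: 0.0`), 512 denoising steps, and `alg: "origin"`. Any keyword argument overrides them per call, which is exactly what the README example does:

```python
# The README call restated against these defaults:
# steps 512 -> 256, temperature 0.0 -> 0.5, top_p null -> 0.95; alg stays "origin".
output = model.diffusion_generate(
    input_ids,
    attention_mask=attention_mask,
    max_new_tokens=512,
    steps=256,
    temperature=0.5,
    top_p=0.95,
    alg="origin",
)
```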
generation_utils.py
ADDED
@@ -0,0 +1,570 @@
# mypy: ignore-errors
# isort: skip_file
# coding=utf-8
# Copyright 2024 The Dream team, HKUNLP Group and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import warnings
from dataclasses import dataclass
from typing import Any, Dict, Optional, Tuple, Union

import torch
import torch.distributions as dists
from torch.nn import functional as F
from transformers import __version__
from transformers.generation.configuration_utils import GenerationConfig
from transformers.utils import (
    ModelOutput,
    is_torchdynamo_compiling,
    logging,
)

logger = logging.get_logger(__name__)


def top_p_logits(logits, top_p=None):
    sorted_logits, sorted_indices = torch.sort(logits, descending=True)
    cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
    sorted_indices_to_remove = cumulative_probs > top_p
    # Shift the indices to the right to keep the first token above the threshold
    sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
    sorted_indices_to_remove[..., 0] = 0

    mask = torch.zeros_like(logits, dtype=torch.bool, device=logits.device)
    mask = mask.scatter_(-1, sorted_indices, sorted_indices_to_remove)
    logits = logits.masked_fill(mask, torch.finfo(logits.dtype).min)
    return logits


def top_k_logits(logits, top_k=None):
    top_k = min(top_k, logits.size(-1))  # Safety check
    # Remove all tokens with a probability less than the last token of the top-k
    indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
    logits = logits.masked_fill(indices_to_remove, torch.finfo(logits.dtype).min)
    return logits


def sample_tokens(
    logits,
    temperature=0.0,
    top_p=None,
    top_k=None,
    margin_confidence=False,
    neg_entropy=False,
    use_ori_logits=False,
):
    original_dtype = logits.dtype
    logits = logits.to(torch.float32)
    if use_ori_logits:
        ori_logits = logits.clone()
    if temperature > 0:
        logits = logits / temperature
    if top_p is not None and top_p < 1:
        logits = top_p_logits(logits, top_p)
    if top_k is not None:
        logits = top_k_logits(logits, top_k)
    probs = torch.softmax(logits, dim=-1)

    if temperature > 0:
        x0 = dists.Categorical(probs=probs).sample()
        if use_ori_logits:
            confidence = torch.gather(torch.softmax(ori_logits, dim=-1), -1, x0.unsqueeze(-1)).squeeze(-1)
        else:
            confidence = torch.gather(probs, -1, x0.unsqueeze(-1)).squeeze(-1)
    else:
        confidence, x0 = probs.max(dim=-1)

    if margin_confidence:
        if use_ori_logits:
            sorted_probs, _ = torch.sort(torch.softmax(ori_logits, dim=-1), dim=-1, descending=True)
        else:
            sorted_probs, _ = torch.sort(probs, dim=-1, descending=True)
        # Extract top1 and top2 probabilities
        top1_probs = sorted_probs[:, 0]
        top2_probs = sorted_probs[:, 1]
        # Calculate confidence as top1 - top2
        confidence = top1_probs - top2_probs

    if neg_entropy:
        epsilon = 1e-10
        log_probs = torch.log(probs + epsilon)
        confidence = torch.sum(probs * log_probs, dim=-1)

    return confidence.to(original_dtype), x0
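
# --- Editorial usage sketch (not part of the original file) ---
# sample_tokens returns one (confidence, token) pair per masked position.
# With temperature=0 it is a greedy argmax and confidence is the max
# probability; with neg_entropy=True the "confidence" becomes the negative
# entropy of the distribution (higher = more peaked), presumably what the
# original model's alg="entropy" ranks positions by. For example:
#
#   logits = torch.randn(4, 152064)                  # 4 masked positions
#   conf, x0 = sample_tokens(logits, temperature=0.5, top_p=0.95)
#   conf.shape, x0.shape                             # torch.Size([4]), torch.Size([4])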
| 106 |
+
|
| 107 |
+
|
| 108 |
+
@dataclass
|
| 109 |
+
class DreamModelOutput(ModelOutput):
|
| 110 |
+
sequences: torch.LongTensor = None
|
| 111 |
+
history: Optional[Tuple[torch.FloatTensor]] = None
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
class DreamGenerationConfig(GenerationConfig):
|
| 115 |
+
def __init__(self, **kwargs):
|
| 116 |
+
self.temperature: float = kwargs.pop("temperature", 0.0)
|
| 117 |
+
self.top_p: Optional[float] = kwargs.pop("top_p", None)
|
| 118 |
+
self.top_k: Optional[int] = kwargs.pop("top_k", None)
|
| 119 |
+
self.max_length = kwargs.pop("max_length", 20)
|
| 120 |
+
self.max_new_tokens = kwargs.pop("max_new_tokens", None)
|
| 121 |
+
# diffusion specific params
|
| 122 |
+
self.eps: float = kwargs.pop("eps", 1e-12)
|
| 123 |
+
self.steps: int = kwargs.pop("steps", 512)
|
| 124 |
+
self.alg: str = kwargs.pop("alg", "origin")
|
| 125 |
+
self.alg_temp: Optional[float] = kwargs.pop("alg_temp", None)
|
| 126 |
+
self.eos_penalty: Optional[float] = kwargs.pop("eos_penalty", 0)
|
| 127 |
+
|
| 128 |
+
# Parameters that define the output variables of `generate`
|
| 129 |
+
self.num_return_sequences: int = kwargs.pop("num_return_sequences", 1)
|
| 130 |
+
self.return_dict_in_generate: bool = kwargs.pop("return_dict_in_generate", False)
|
| 131 |
+
self.output_history: bool = kwargs.pop("output_history", False)
|
| 132 |
+
|
| 133 |
+
# Special tokens that can be used at generation time
|
| 134 |
+
self.mask_token_id = kwargs.pop("mask_token_id", None)
|
| 135 |
+
self.pad_token_id = kwargs.pop("pad_token_id", None)
|
| 136 |
+
self.bos_token_id = kwargs.pop("bos_token_id", None)
|
| 137 |
+
self.eos_token_id = kwargs.pop("eos_token_id", None)
|
| 138 |
+
|
| 139 |
+
# Wild card
|
| 140 |
+
self.generation_kwargs = kwargs.pop("generation_kwargs", {})
|
| 141 |
+
|
| 142 |
+
# The remaining attributes do not parametrize `.generate()`, but are informative and/or used by the hub
|
| 143 |
+
# interface.
|
| 144 |
+
self._from_model_config = kwargs.pop("_from_model_config", False)
|
| 145 |
+
self._commit_hash = kwargs.pop("_commit_hash", None)
|
| 146 |
+
self.transformers_version = kwargs.pop("transformers_version", __version__)
|
| 147 |
+
|
| 148 |
+
# Additional attributes without default values
|
| 149 |
+
if not self._from_model_config:
|
| 150 |
+
# we don't want to copy values from the model config if we're initializing a `GenerationConfig` from a
|
| 151 |
+
# model's default configuration file
|
| 152 |
+
for key, value in kwargs.items():
|
| 153 |
+
try:
|
| 154 |
+
setattr(self, key, value)
|
| 155 |
+
except AttributeError as err:
|
| 156 |
+
logger.error(f"Can't set {key} with value {value} for {self}")
|
| 157 |
+
raise err
|
| 158 |
+
|
| 159 |
+
# Validate the values of the attributes
|
| 160 |
+
self.validate(is_init=True)
|
| 161 |
+
|
| 162 |
+
def validate(self, is_init=False):
|
| 163 |
+
pass
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
class DreamGenerationMixin:
|
| 167 |
+
@staticmethod
|
| 168 |
+
def _expand_inputs_for_generation(
|
| 169 |
+
expand_size: int = 1,
|
| 170 |
+
input_ids: Optional[torch.LongTensor] = None,
|
| 171 |
+
attention_mask: Optional[torch.LongTensor] = None,
|
| 172 |
+
) -> Tuple[torch.LongTensor, Dict[str, Any]]:
|
| 173 |
+
"""Expands tensors from [batch_size, ...] to [batch_size * expand_size, ...]"""
|
| 174 |
+
# Do not call torch.repeat_interleave if expand_size is 1 because it clones
|
| 175 |
+
# the input tensor and thus requires more memory although no change is applied
|
| 176 |
+
if expand_size == 1:
|
| 177 |
+
return input_ids, attention_mask
|
| 178 |
+
if input_ids is not None:
|
| 179 |
+
input_ids = input_ids.repeat_interleave(expand_size, dim=0)
|
| 180 |
+
if attention_mask is not None:
|
| 181 |
+
attention_mask = attention_mask.repeat_interleave(expand_size, dim=0)
|
| 182 |
+
return input_ids, attention_mask
|
| 183 |
+
|
| 184 |
+
def _validate_generated_length(self, generation_config, input_ids_length, has_default_max_length):
|
| 185 |
+
"""Performs validation related to the resulting generated length"""
|
| 186 |
+
|
| 187 |
+
# Can't throw warnings/exceptions during compilation
|
| 188 |
+
if is_torchdynamo_compiling():
|
| 189 |
+
return
|
| 190 |
+
|
| 191 |
+
# 1. Max length warnings related to poor parameterization
|
| 192 |
+
if has_default_max_length and generation_config.max_new_tokens is None and generation_config.max_length == 20:
|
| 193 |
+
# 20 is the default max_length of the generation config
|
| 194 |
+
warnings.warn(
|
| 195 |
+
f"Using the model-agnostic default `max_length` (={generation_config.max_length}) to control the "
|
| 196 |
+
"generation length. We recommend setting `max_new_tokens` to control the maximum length of the "
|
| 197 |
+
"generation.",
|
| 198 |
+
UserWarning,
|
| 199 |
+
)
|
| 200 |
+
if input_ids_length >= generation_config.max_length:
|
| 201 |
+
input_ids_string = "input_ids"
|
| 202 |
+
raise ValueError(
|
| 203 |
+
f"Input length of {input_ids_string} is {input_ids_length}, but `max_length` is set to"
|
| 204 |
+
f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider"
|
| 205 |
+
" increasing `max_length` or, better yet, setting `max_new_tokens`."
|
| 206 |
+
)
|
| 207 |
+
|
| 208 |
+
def _prepare_generated_length(
|
| 209 |
+
self,
|
| 210 |
+
generation_config,
|
| 211 |
+
has_default_max_length,
|
| 212 |
+
input_ids_length,
|
| 213 |
+
):
|
| 214 |
+
"""Prepared max and min length in generation configs to avoid clashes between similar attributes"""
|
| 215 |
+
|
| 216 |
+
if generation_config.max_new_tokens is not None:
|
| 217 |
+
if not has_default_max_length and generation_config.max_length is not None:
|
| 218 |
+
logger.warning(
|
| 219 |
+
f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(="
|
| 220 |
+
f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. "
|
| 221 |
+
"Please refer to the documentation for more information. "
|
| 222 |
+
"(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)"
|
| 223 |
+
)
|
| 224 |
+
generation_config.max_length = generation_config.max_new_tokens + input_ids_length
|
| 225 |
+
|
| 226 |
+
elif has_default_max_length:
|
| 227 |
+
if generation_config.max_length == DreamGenerationConfig().max_length:
|
| 228 |
+
generation_config.max_length = generation_config.max_length + input_ids_length
|
| 229 |
+
max_position_embeddings = getattr(self.config, "max_position_embeddings", None)
|
| 230 |
+
if max_position_embeddings is not None:
|
| 231 |
+
generation_config.max_length = min(generation_config.max_length, max_position_embeddings)
|
| 232 |
+
|
| 233 |
+
return generation_config
|
| 234 |
+
|
| 235 |
+
def _prepare_generation_config(
|
| 236 |
+
self, generation_config: Optional[DreamGenerationConfig], **kwargs: Dict
|
| 237 |
+
) -> DreamGenerationConfig:
|
| 238 |
+
"""
|
| 239 |
+
Prepares the base generation config, then applies any generation configuration options from kwargs. This
|
| 240 |
+
function handles retrocompatibility with respect to configuration files.
|
| 241 |
+
"""
|
| 242 |
+
# priority: `generation_config` argument > `model.generation_config` (the default generation config)
|
| 243 |
+
using_model_generation_config = False
|
| 244 |
+
if generation_config is None:
|
| 245 |
+
generation_config = DreamGenerationConfig.from_model_config(self.config)
|
| 246 |
+
using_model_generation_config = True
|
| 247 |
+
|
| 248 |
+
# `torch.compile` can't compile `copy.deepcopy`, arguments in `kwargs` that are part of `generation_config`
|
| 249 |
+
# will mutate the object with `.update`. As such, passing these arguments through `kwargs` is disabled -- an
|
| 250 |
+
# exception will be raised in `_validate_model_kwargs`
|
| 251 |
+
if not is_torchdynamo_compiling():
|
| 252 |
+
generation_config = copy.deepcopy(generation_config)
|
| 253 |
+
_kwargs = generation_config.update(**kwargs)
|
| 254 |
+
# If `generation_config` is provided, let's fallback ALL special tokens to the default values for the model
|
| 255 |
+
if not using_model_generation_config:
|
| 256 |
+
if generation_config.bos_token_id is None:
|
| 257 |
+
generation_config.bos_token_id = self.generation_config.bos_token_id
|
| 258 |
+
if generation_config.eos_token_id is None:
|
| 259 |
+
generation_config.eos_token_id = self.generation_config.eos_token_id
|
| 260 |
+
if generation_config.pad_token_id is None:
|
| 261 |
+
generation_config.pad_token_id = self.generation_config.pad_token_id
|
| 262 |
+
if generation_config.mask_token_id is None:
|
| 263 |
+
generation_config.mask_token_id = self.generation_config.mask_token_id
|
| 264 |
+
|
| 265 |
+
return generation_config
|
| 266 |
+
|
| 267 |
+
def _prepare_special_tokens(
|
| 268 |
+
self,
|
| 269 |
+
generation_config: DreamGenerationConfig,
|
| 270 |
+
device: Optional[Union[torch.device, str]] = None,
|
| 271 |
+
):
|
| 272 |
+
"""
|
| 273 |
+
Prepares the special tokens for generation, overwriting the generation config with their processed versions
|
| 274 |
+
converted to tensor.
|
| 275 |
+
|
| 276 |
+
Note that `generation_config` is changed in place and stops being serializable after this method is called.
|
| 277 |
+
That is no problem if called within `generate` (`generation_config` is a local copy that doesn't leave the
|
| 278 |
+
function). However, if called outside `generate`, consider creating a copy of `generation_config` first.
|
| 279 |
+
"""
|
| 280 |
+
|
| 281 |
+
# Convert special tokens to tensors
|
| 282 |
+
def _tensor_or_none(token, device=None):
|
| 283 |
+
if token is None:
|
| 284 |
+
return token
|
| 285 |
+
|
| 286 |
+
device = device if device is not None else self.device
|
| 287 |
+
if isinstance(token, torch.Tensor):
|
| 288 |
+
return token.to(device)
|
| 289 |
+
return torch.tensor(token, device=device, dtype=torch.long)
|
| 290 |
+
|
| 291 |
+
bos_token_tensor = _tensor_or_none(generation_config.bos_token_id, device=device)
|
| 292 |
+
eos_token_tensor = _tensor_or_none(generation_config.eos_token_id, device=device)
|
| 293 |
+
pad_token_tensor = _tensor_or_none(generation_config.pad_token_id, device=device)
|
| 294 |
+
mask_token_tensor = _tensor_or_none(generation_config.mask_token_id, device=device)
|
| 295 |
+
|
| 296 |
+
# We can have more than one eos token. Always treat it as a 1D tensor (when it exists).
|
| 297 |
+
if eos_token_tensor is not None and eos_token_tensor.ndim == 0:
|
| 298 |
+
eos_token_tensor = eos_token_tensor.unsqueeze(0)
|
| 299 |
+
|
| 300 |
+
# Set pad token if unset (and there are conditions to do so)
|
| 301 |
+
if pad_token_tensor is None and eos_token_tensor is not None:
|
| 302 |
+
pad_token_tensor = eos_token_tensor[0]
|
| 303 |
+
logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{pad_token_tensor} for open-end generation.")
|
| 304 |
+
|
| 305 |
+
# Update generation config with the updated special tokens tensors
|
| 306 |
+
# NOTE: this must be written into a different attribute name than the one holding the original special tokens
|
| 307 |
+
# (in their non-tensor form), in order to enable end-to-end compilation. See
|
| 308 |
+
# https://pytorch.org/docs/stable/torch.compiler_cudagraph_trees.html#limitations
|
| 309 |
+
generation_config._bos_token_tensor = bos_token_tensor
|
| 310 |
+
generation_config._eos_token_tensor = eos_token_tensor
|
| 311 |
+
generation_config._pad_token_tensor = pad_token_tensor
|
| 312 |
+
generation_config._mask_token_tensor = mask_token_tensor
|
| 313 |
+
|
| 314 |
+
@torch.no_grad()
|
| 315 |
+
def diffusion_generate(
|
| 316 |
+
self,
|
| 317 |
+
inputs: Optional[torch.Tensor] = None,
|
| 318 |
+
generation_config: Optional[DreamGenerationConfig] = None,
|
| 319 |
+
**kwargs,
|
| 320 |
+
) -> Union[DreamModelOutput, torch.LongTensor]:
|
| 321 |
+
# 1. Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call
|
| 322 |
+
generation_config = self._prepare_generation_config(generation_config, **kwargs)
|
| 323 |
+
generation_tokens_hook_func = kwargs.pop("generation_tokens_hook_func", lambda step, x, logits: x)
|
| 324 |
+
generation_logits_hook_func = kwargs.pop("generation_logits_hook_func", lambda step, x, logits: logits)
|
| 325 |
+
|
| 326 |
+
# 2. Define model inputs
|
| 327 |
+
assert inputs is not None
|
| 328 |
+
input_ids = inputs
|
| 329 |
+
device = input_ids.device
|
| 330 |
+
attention_mask = kwargs.pop("attention_mask", None)
|
| 331 |
+
self._prepare_special_tokens(generation_config, device=device)
|
| 332 |
+
|
| 333 |
+
# 3. Prepare `max_length`.
|
| 334 |
+
input_ids_length = input_ids.shape[-1]
|
| 335 |
+
has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
|
| 336 |
+
generation_config = self._prepare_generated_length(
|
| 337 |
+
generation_config=generation_config,
|
| 338 |
+
has_default_max_length=has_default_max_length,
|
| 339 |
+
input_ids_length=input_ids_length,
|
| 340 |
+
)
|
| 341 |
+
|
| 342 |
+
self._validate_generated_length(generation_config, input_ids_length, has_default_max_length)
|
| 343 |
+
|
| 344 |
+
# 4. Check input_ids
|
| 345 |
+
if not is_torchdynamo_compiling() and self.device.type != input_ids.device.type:
|
| 346 |
+
warnings.warn(
|
| 347 |
+
"You are calling .generate() with the `input_ids` being on a device type different"
|
| 348 |
+
f" than your model's device. `input_ids` is on {input_ids.device.type}, whereas the model"
|
| 349 |
+
f" is on {self.device.type}. You may experience unexpected behaviors or slower generation."
|
| 350 |
+
" Please make sure that you have put `input_ids` to the"
|
| 351 |
+
f" correct device by calling for example input_ids = input_ids.to('{self.device.type}') before"
|
| 352 |
+
" running `.generate()`.",
|
| 353 |
+
UserWarning,
|
| 354 |
+
)
|
| 355 |
+
if (
|
| 356 |
+
hasattr(generation_config, "pad_token_id")
|
| 357 |
+
and torch.any(input_ids == generation_config.pad_token_id)
|
| 358 |
+
and attention_mask is None
|
| 359 |
+
):
|
| 360 |
+
warnings.warn(
|
| 361 |
+
"Padding was detected but no attention mask is passed here. For correct "
|
| 362 |
+
"generation results, please set `attention_mask` when batch-padding inputs.",
|
| 363 |
+
UserWarning,
|
| 364 |
+
)
|
| 365 |
+
|
| 366 |
+
input_ids, attention_mask = self._expand_inputs_for_generation(
|
| 367 |
+
expand_size=generation_config.num_return_sequences,
|
| 368 |
+
input_ids=input_ids,
|
| 369 |
+
attention_mask=attention_mask,
|
| 370 |
+
)
|
| 371 |
+
|
| 372 |
+
result = self._sample(
|
| 373 |
+
input_ids,
|
| 374 |
+
attention_mask=attention_mask,
|
| 375 |
+
generation_config=generation_config,
|
| 376 |
+
generation_tokens_hook_func=generation_tokens_hook_func,
|
| 377 |
+
generation_logits_hook_func=generation_logits_hook_func,
|
| 378 |
+
)
|
| 379 |
+
return result
|
| 380 |
+
|
| 381 |
+
def _sample(
|
| 382 |
+
self,
|
| 383 |
+
input_ids: torch.LongTensor,
|
| 384 |
+
attention_mask: Optional[torch.LongTensor],
|
| 385 |
+
generation_config: DreamGenerationConfig,
|
| 386 |
+
generation_tokens_hook_func,
|
| 387 |
+
generation_logits_hook_func,
|
| 388 |
+
) -> Union[DreamModelOutput, torch.LongTensor]:
|
| 389 |
+
# init values
|
| 390 |
+
output_history = generation_config.output_history
|
| 391 |
+
return_dict_in_generate = generation_config.return_dict_in_generate
|
| 392 |
+
max_length = generation_config.max_length
|
| 393 |
+
mask_token_id = generation_config.mask_token_id
|
| 394 |
+
pad_token_id = generation_config.pad_token_id
|
| 395 |
+
steps = generation_config.steps
|
| 396 |
+
eps = generation_config.eps
|
| 397 |
+
alg = generation_config.alg
|
| 398 |
+
alg_temp = generation_config.alg_temp
|
| 399 |
+
eos_penalty = generation_config.eos_penalty
|
| 400 |
+
temperature = generation_config.temperature
|
| 401 |
+
top_p = generation_config.top_p
|
| 402 |
+
top_k = generation_config.top_k
|
| 403 |
+
# print(eos_penalty)
|
| 404 |
+
histories = [] if (return_dict_in_generate and output_history) else None
|
| 405 |
+
|
| 406 |
+
# pad input_ids to max_length
|
| 407 |
+
x = F.pad(input_ids, (0, max_length - input_ids.shape[1]), value=mask_token_id)
|
| 408 |
+
|
| 409 |
+
if attention_mask is not None:
|
| 410 |
+
# we do not mask the [MASK] tokens so value = 1.0
|
| 411 |
+
attention_mask = F.pad(attention_mask, (0, max_length - attention_mask.shape[1]), value=1.0)
|
| 412 |
+
tok_idx = attention_mask.long().cumsum(-1) - 1
|
| 413 |
+
tok_idx.masked_fill_(attention_mask == 0, 1)
|
| 414 |
+
# attention_mask is of shape [B, N]
|
| 415 |
+
# broadcast to [B, 1, N, N]
|
| 416 |
+
attention_mask = torch.logical_and(
|
| 417 |
+
attention_mask.unsqueeze(1).unsqueeze(-2),
|
| 418 |
+
attention_mask.unsqueeze(1).unsqueeze(-1),
|
| 419 |
+
)
|
| 420 |
+
else:
|
| 421 |
+
tok_idx = None
|
| 422 |
+
attention_mask = "full"
|
| 423 |
+
|
| 424 |
+
timesteps = torch.linspace(1, eps, steps + 1, device=x.device)
|
| 425 |
+
|
| 426 |
+
# this allows user-defined token control of the intermediate steps
|
| 427 |
+
x = generation_tokens_hook_func(None, x, None)
|
| 428 |
+
masked_seq_len = (x == mask_token_id).sum() // x.shape[0]
|
| 429 |
+
for i in range(steps):
|
| 430 |
+
mask_index = x == mask_token_id
|
| 431 |
+
logits = self(x, attention_mask, tok_idx).logits
|
| 432 |
+
logits = torch.cat([logits[:, :1], logits[:, :-1]], dim=1)
|
| 433 |
+
|
| 434 |
+
# this allows user-defined logits control of the intermediate steps
|
| 435 |
+
logits = generation_logits_hook_func(i, x, logits)
|
| 436 |
+
|
| 437 |
+
mask_logits = logits[mask_index]
|
| 438 |
+
t = timesteps[i]
|
| 439 |
+
s = timesteps[i + 1]
|
| 440 |
+
# print(eps)
|
| 441 |
+
mask_logits[:, pad_token_id] += eos_penalty * torch.log(1 - t + eps)
|
| 442 |
+
if alg == "origin":
|
| 443 |
+
p_transfer = 1 - s / t if i < steps - 1 else 1
|
| 444 |
+
x0 = torch.zeros_like(x[mask_index], device=self.device, dtype=torch.long) + mask_token_id
|
| 445 |
+
transfer_index_t_s = torch.rand(*x0.shape, device=self.device) < p_transfer
|
| 446 |
+
_, x0[transfer_index_t_s] = sample_tokens(
|
| 447 |
+
mask_logits[transfer_index_t_s],
|
| 448 |
+
temperature=temperature,
|
| 449 |
+
top_p=top_p,
|
| 450 |
+
top_k=top_k,
|
| 451 |
+
)
|
| 452 |
+
x[mask_index] = x0.clone()
|
| 453 |
+
elif alg.startswith("transition"):
|
| 454 |
+
transition_step = int(alg.split("_")[1])
|
| 455 |
+
confidence, x0 = sample_tokens(mask_logits, temperature=temperature, top_p=top_p, top_k=top_k)
|
| 456 |
+
if i < transition_step:
|
| 457 |
+
number_transfer_tokens = 1
|
| 458 |
+
else:
|
| 459 |
+
# at 0 <= i <= transition_step, the number of transfer tokens is 1
|
| 460 |
+
# at i = transition_step, the number of transfer tokens is transition_step
|
| 461 |
+
# at i = steps, the number of transfer tokens is seq_len
|
| 462 |
+
# a * transition_step + b = transition_step
|
| 463 |
+
# a * steps + b = seq_len
|
| 464 |
+
denoised_tokens = int(
|
| 465 |
+
((masked_seq_len - transition_step) * (i + 1) + transition_step * (steps - masked_seq_len))
|
| 466 |
+
/ (steps - transition_step)
|
| 467 |
+
)
|
| 468 |
+
already_denoised_tokens = masked_seq_len - mask_index.sum() // x.shape[0]
|
| 469 |
+
number_transfer_tokens = denoised_tokens - already_denoised_tokens
|
| 470 |
+
|
| 471 |
+
full_confidence = torch.full_like(x, -torch.inf, device=self.device, dtype=confidence.dtype)
|
| 472 |
+
full_confidence[mask_index] = confidence
|
| 473 |
+
if number_transfer_tokens > 0:
|
| 474 |
+
if alg_temp is None or alg_temp == 0:
|
| 475 |
+
_, transfer_index = torch.topk(full_confidence, number_transfer_tokens)
|
| 476 |
+
else:
|
| 477 |
+
full_confidence = full_confidence / alg_temp
|
| 478 |
+
full_confidence = F.softmax(full_confidence, dim=-1)
|
| 479 |
+
transfer_index = torch.multinomial(full_confidence, num_samples=number_transfer_tokens)
|
| 480 |
+
x_ = torch.zeros_like(x, device=self.device, dtype=torch.long) + mask_token_id
|
| 481 |
+
x_[mask_index] = x0.clone()
|
| 482 |
+
row_indices = torch.arange(x.size(0), device=self.device).unsqueeze(1).expand_as(transfer_index)
|
| 483 |
+
x[row_indices, transfer_index] = x_[row_indices, transfer_index]
|
| 484 |
+
else:
|
| 485 |
+
if alg == "maskgit_plus" or alg == "maskgit_plus_ar" or alg == "ar":
|
| 486 |
+
confidence, x0 = sample_tokens(mask_logits, temperature=temperature, top_p=top_p, top_k=top_k)
|
| 487 |
+
elif alg == "maskgit_plus_ori":
|
| 488 |
+
confidence, x0 = sample_tokens(
|
| 489 |
+
mask_logits,
|
| 490 |
+
temperature=temperature,
|
| 491 |
+
top_p=top_p,
|
| 492 |
+
top_k=top_k,
|
| 493 |
+
use_ori_logits=True,
|
| 494 |
+
)
|
| 495 |
+
elif alg == "topk_margin":
|
| 496 |
+
confidence, x0 = sample_tokens(
|
| 497 |
+
mask_logits,
|
| 498 |
+
temperature=temperature,
|
| 499 |
+
top_p=top_p,
|
| 500 |
+
top_k=top_k,
|
| 501 |
+
margin_confidence=True,
|
| 502 |
+
)
|
| 503 |
+
elif alg == "topk_margin_ori":
|
| 504 |
+
confidence, x0 = sample_tokens(
|
| 505 |
+
mask_logits,
|
| 506 |
+
temperature=temperature,
|
| 507 |
+
top_p=top_p,
|
| 508 |
+
top_k=top_k,
|
| 509 |
+
margin_confidence=True,
|
| 510 |
+
use_ori_logits=True,
|
| 511 |
+
)
|
| 512 |
+
elif alg == "entropy":
|
| 513 |
+
confidence, x0 = sample_tokens(
|
| 514 |
+
mask_logits,
|
| 515 |
+
temperature,
|
| 516 |
+
top_p=top_p,
|
| 517 |
+
top_k=top_k,
|
| 518 |
+
neg_entropy=True,
|
| 519 |
+
)
|
| 520 |
+
elif alg == "entropy_ori":
|
| 521 |
+
confidence, x0 = sample_tokens(
|
| 522 |
+
mask_logits,
|
| 523 |
+
temperature,
|
| 524 |
+
top_p=top_p,
|
| 525 |
+
top_k=top_k,
|
| 526 |
+
neg_entropy=True,
|
| 527 |
+
use_ori_logits=True,
|
| 528 |
+
)
|
| 529 |
+
else:
|
| 530 |
+
raise RuntimeError(f"Unknown alg: {alg}")
|
| 531 |
+
num_mask_token = mask_index.sum() / mask_index.shape[0]
|
| 532 |
+
number_transfer_tokens = int(num_mask_token * (1 - s / t)) if i < steps - 1 else int(num_mask_token)
|
| 533 |
+
full_confidence = torch.full_like(x, -torch.inf, device=self.device, dtype=logits.dtype)
|
| 534 |
+
full_confidence[mask_index] = confidence
|
| 535 |
+
|
| 536 |
+
if alg == "ar":
|
| 537 |
+
number_transfer_tokens = 1
|
| 538 |
+
|
| 539 |
+
if number_transfer_tokens > 0:
|
| 540 |
+
if alg_temp is None or alg_temp == 0:
|
| 541 |
+
_, transfer_index = torch.topk(full_confidence, number_transfer_tokens)
|
| 542 |
+
else:
|
| 543 |
+
full_confidence = full_confidence / alg_temp
|
| 544 |
+
full_confidence = F.softmax(full_confidence, dim=-1)
|
| 545 |
+
transfer_index = torch.multinomial(full_confidence, num_samples=number_transfer_tokens)
|
| 546 |
+
if alg == "maskgit_plus_ar" or alg == "ar":
|
| 547 |
+
full_confidence = torch.full_like(x, -torch.inf, device=self.device, dtype=logits.dtype)
|
| 548 |
+
full_confidence[mask_index] = 1.0
|
| 549 |
+
_, transfer_index = torch.topk(
|
| 550 |
+
full_confidence, number_transfer_tokens
|
| 551 |
+
) # NOTE: ignoring alg_temp
|
| 552 |
+
|
| 553 |
+
x_ = torch.zeros_like(x, device=self.device, dtype=torch.long) + mask_token_id
|
| 554 |
+
x_[mask_index] = x0.clone()
|
| 555 |
+
row_indices = torch.arange(x.size(0), device=self.device).unsqueeze(1).expand_as(transfer_index)
|
| 556 |
+
x[row_indices, transfer_index] = x_[row_indices, transfer_index]
|
| 557 |
+
|
| 558 |
+
# this allows user-defined token control of the intermediate steps
|
| 559 |
+
x = generation_tokens_hook_func(i, x, logits)
|
| 560 |
+
|
| 561 |
+
if histories is not None:
|
| 562 |
+
histories.append(x.clone())
|
| 563 |
+
|
| 564 |
+
if return_dict_in_generate:
|
| 565 |
+
return DreamModelOutput(
|
| 566 |
+
sequences=x,
|
| 567 |
+
history=histories,
|
| 568 |
+
)
|
| 569 |
+
else:
|
| 570 |
+
return x
|
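The two hook arguments threaded through `_sample` are the extension point for constrained decoding: `generation_tokens_hook_func` is called once as `hook(None, x, None)` before the loop and as `hook(i, x, logits)` after every step, and whatever it returns becomes the working sequence, while `generation_logits_hook_func` can rewrite the logits before each sampling step. A minimal sketch of a token hook that re-pins a prompt prefix on every step; `prompt_ids` is a hypothetical tensor, not part of this repository:

import torch

def make_prompt_freeze_hook(prompt_ids: torch.LongTensor):
    """Build a generation_tokens_hook_func that keeps a prompt prefix fixed.

    _sample calls the hook as hook(None, x, None) before the first step and
    as hook(i, x, logits) after each denoising step; the returned tensor
    replaces x for the rest of the loop.
    """
    prompt_len = prompt_ids.shape[1]

    def hook(step, x, logits):
        # Overwrite the leading positions so later steps can never remask
        # or rewrite the conditioning tokens.
        x[:, :prompt_len] = prompt_ids
        return x

    return hook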
merges.txt
ADDED
The diff for this file is too large to render. See raw diff.
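merges.txt stores the tokenizer's byte-pair-encoding merge rules, one pair per line in priority order. As a rough illustration of how such a list is consumed (the toy merge table below is invented, not taken from this file):

# Toy BPE merge rules in priority order; real entries come from merges.txt.
merges = [("l", "o"), ("lo", "w")]
ranks = {pair: i for i, pair in enumerate(merges)}

def bpe(tokens):
    # Repeatedly apply the highest-priority applicable merge until none apply.
    while True:
        pairs = [(tokens[i], tokens[i + 1]) for i in range(len(tokens) - 1)]
        mergeable = [p for p in pairs if p in ranks]
        if not mergeable:
            return tokens
        best = min(mergeable, key=ranks.get)  # lowest rank = learned earliest
        i = pairs.index(best)
        tokens = tokens[:i] + [tokens[i] + tokens[i + 1]] + tokens[i + 2:]

print(bpe(["l", "o", "w"]))  # ['low']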
model-00001-of-00002.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0ba7ab179a5a5a940f55b8ee2cec6fd08b32a4807ce2cf24bc1c518e361983d6
size 4987678542
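This entry is a git-lfs pointer rather than the shard itself: the repository stores a three-line stub recording the spec version, the SHA-256 of the actual payload, and its size in bytes (about 5.0 GB here), which is resolved to the real blob at download time. A small sketch, not part of this repository, of reading such a pointer:

def parse_lfs_pointer(path: str) -> dict:
    # Each line of an LFS pointer file is a "key value" pair.
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

# For the shard above this would yield:
# {"version": "https://git-lfs.github.com/spec/v1",
#  "oid": "sha256:0ba7ab17...",
#  "size": "4987678542"}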
model-00002-of-00002.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4d55d668277527294d6b525d5e9079fa97ef7d52c79c9158766ed7f7850e71b9
size 3723908574
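Together the two shards hold the roughly 8.7 GB 8-bit checkpoint; the index file that follows maps every tensor name, including the quantization-scale (`SCB`) and `weight_format` entries saved alongside each int8 weight, to the shard that contains it. A minimal sketch, assuming the index and shards have been downloaded locally, of resolving a single tensor:

import json
from safetensors import safe_open

# Hypothetical local paths; in practice huggingface_hub fetches these files.
with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.layers.0.mlp.down_proj.weight"
shard = index["weight_map"][name]  # e.g. "model-00001-of-00002.safetensors"
with safe_open(shard, framework="pt") as f:
    tensor = f.get_tensor(name)  # int8 weight; its scale is stored under "...down_proj.SCB"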
model.safetensors.index.json
ADDED
@@ -0,0 +1,738 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"metadata": {
|
| 3 |
+
"total_size": 8711507140
|
| 4 |
+
},
|
| 5 |
+
"weight_map": {
|
| 6 |
+
"lm_head.weight": "model-00002-of-00002.safetensors",
|
| 7 |
+
"model.embed_tokens.weight": "model-00001-of-00002.safetensors",
|
| 8 |
+
"model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 9 |
+
"model.layers.0.mlp.down_proj.SCB": "model-00001-of-00002.safetensors",
|
| 10 |
+
"model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 11 |
+
"model.layers.0.mlp.down_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 12 |
+
"model.layers.0.mlp.gate_proj.SCB": "model-00001-of-00002.safetensors",
|
| 13 |
+
"model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
|
| 14 |
+
"model.layers.0.mlp.gate_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 15 |
+
"model.layers.0.mlp.up_proj.SCB": "model-00001-of-00002.safetensors",
|
| 16 |
+
"model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 17 |
+
"model.layers.0.mlp.up_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 18 |
+
"model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 19 |
+
"model.layers.0.self_attn.k_proj.SCB": "model-00001-of-00002.safetensors",
|
| 20 |
+
"model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
|
| 21 |
+
"model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
|
| 22 |
+
"model.layers.0.self_attn.k_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 23 |
+
"model.layers.0.self_attn.o_proj.SCB": "model-00001-of-00002.safetensors",
|
| 24 |
+
"model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
| 25 |
+
"model.layers.0.self_attn.o_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 26 |
+
"model.layers.0.self_attn.q_proj.SCB": "model-00001-of-00002.safetensors",
|
| 27 |
+
"model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
|
| 28 |
+
"model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
|
| 29 |
+
"model.layers.0.self_attn.q_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 30 |
+
"model.layers.0.self_attn.v_proj.SCB": "model-00001-of-00002.safetensors",
|
| 31 |
+
"model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
|
| 32 |
+
"model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
|
| 33 |
+
"model.layers.0.self_attn.v_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 34 |
+
"model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 35 |
+
"model.layers.1.mlp.down_proj.SCB": "model-00001-of-00002.safetensors",
|
| 36 |
+
"model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 37 |
+
"model.layers.1.mlp.down_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 38 |
+
"model.layers.1.mlp.gate_proj.SCB": "model-00001-of-00002.safetensors",
|
| 39 |
+
"model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
|
| 40 |
+
"model.layers.1.mlp.gate_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 41 |
+
"model.layers.1.mlp.up_proj.SCB": "model-00001-of-00002.safetensors",
|
| 42 |
+
"model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 43 |
+
"model.layers.1.mlp.up_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 44 |
+
"model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 45 |
+
"model.layers.1.self_attn.k_proj.SCB": "model-00001-of-00002.safetensors",
|
| 46 |
+
"model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
|
| 47 |
+
"model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
|
| 48 |
+
"model.layers.1.self_attn.k_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 49 |
+
"model.layers.1.self_attn.o_proj.SCB": "model-00001-of-00002.safetensors",
|
| 50 |
+
"model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
| 51 |
+
"model.layers.1.self_attn.o_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 52 |
+
"model.layers.1.self_attn.q_proj.SCB": "model-00001-of-00002.safetensors",
|
| 53 |
+
"model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
|
| 54 |
+
"model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
|
| 55 |
+
"model.layers.1.self_attn.q_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 56 |
+
"model.layers.1.self_attn.v_proj.SCB": "model-00001-of-00002.safetensors",
|
| 57 |
+
"model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
|
| 58 |
+
"model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
|
| 59 |
+
"model.layers.1.self_attn.v_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 60 |
+
"model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 61 |
+
"model.layers.10.mlp.down_proj.SCB": "model-00001-of-00002.safetensors",
|
| 62 |
+
"model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 63 |
+
"model.layers.10.mlp.down_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 64 |
+
"model.layers.10.mlp.gate_proj.SCB": "model-00001-of-00002.safetensors",
|
| 65 |
+
"model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
|
| 66 |
+
"model.layers.10.mlp.gate_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 67 |
+
"model.layers.10.mlp.up_proj.SCB": "model-00001-of-00002.safetensors",
|
| 68 |
+
"model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 69 |
+
"model.layers.10.mlp.up_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 70 |
+
"model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 71 |
+
"model.layers.10.self_attn.k_proj.SCB": "model-00001-of-00002.safetensors",
|
| 72 |
+
"model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
|
| 73 |
+
"model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
|
| 74 |
+
"model.layers.10.self_attn.k_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 75 |
+
"model.layers.10.self_attn.o_proj.SCB": "model-00001-of-00002.safetensors",
|
| 76 |
+
"model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
| 77 |
+
"model.layers.10.self_attn.o_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 78 |
+
"model.layers.10.self_attn.q_proj.SCB": "model-00001-of-00002.safetensors",
|
| 79 |
+
"model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
|
| 80 |
+
"model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
|
| 81 |
+
"model.layers.10.self_attn.q_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 82 |
+
"model.layers.10.self_attn.v_proj.SCB": "model-00001-of-00002.safetensors",
|
| 83 |
+
"model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
|
| 84 |
+
"model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
|
| 85 |
+
"model.layers.10.self_attn.v_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 86 |
+
"model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 87 |
+
"model.layers.11.mlp.down_proj.SCB": "model-00001-of-00002.safetensors",
|
| 88 |
+
"model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 89 |
+
"model.layers.11.mlp.down_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 90 |
+
"model.layers.11.mlp.gate_proj.SCB": "model-00001-of-00002.safetensors",
|
| 91 |
+
"model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
|
| 92 |
+
"model.layers.11.mlp.gate_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 93 |
+
"model.layers.11.mlp.up_proj.SCB": "model-00001-of-00002.safetensors",
|
| 94 |
+
"model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 95 |
+
"model.layers.11.mlp.up_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 96 |
+
"model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 97 |
+
"model.layers.11.self_attn.k_proj.SCB": "model-00001-of-00002.safetensors",
|
| 98 |
+
"model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
|
| 99 |
+
"model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
|
| 100 |
+
"model.layers.11.self_attn.k_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 101 |
+
"model.layers.11.self_attn.o_proj.SCB": "model-00001-of-00002.safetensors",
|
| 102 |
+
"model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
| 103 |
+
"model.layers.11.self_attn.o_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 104 |
+
"model.layers.11.self_attn.q_proj.SCB": "model-00001-of-00002.safetensors",
|
| 105 |
+
"model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
|
| 106 |
+
"model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
|
| 107 |
+
"model.layers.11.self_attn.q_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 108 |
+
"model.layers.11.self_attn.v_proj.SCB": "model-00001-of-00002.safetensors",
|
| 109 |
+
"model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
|
| 110 |
+
"model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
|
| 111 |
+
"model.layers.11.self_attn.v_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 112 |
+
"model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 113 |
+
"model.layers.12.mlp.down_proj.SCB": "model-00001-of-00002.safetensors",
|
| 114 |
+
"model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 115 |
+
"model.layers.12.mlp.down_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 116 |
+
"model.layers.12.mlp.gate_proj.SCB": "model-00001-of-00002.safetensors",
|
| 117 |
+
"model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
|
| 118 |
+
"model.layers.12.mlp.gate_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 119 |
+
"model.layers.12.mlp.up_proj.SCB": "model-00001-of-00002.safetensors",
|
| 120 |
+
"model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 121 |
+
"model.layers.12.mlp.up_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 122 |
+
"model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 123 |
+
"model.layers.12.self_attn.k_proj.SCB": "model-00001-of-00002.safetensors",
|
| 124 |
+
"model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
|
| 125 |
+
"model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
|
| 126 |
+
"model.layers.12.self_attn.k_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 127 |
+
"model.layers.12.self_attn.o_proj.SCB": "model-00001-of-00002.safetensors",
|
| 128 |
+
"model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
| 129 |
+
"model.layers.12.self_attn.o_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 130 |
+
"model.layers.12.self_attn.q_proj.SCB": "model-00001-of-00002.safetensors",
|
| 131 |
+
"model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
|
| 132 |
+
"model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
|
| 133 |
+
"model.layers.12.self_attn.q_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 134 |
+
"model.layers.12.self_attn.v_proj.SCB": "model-00001-of-00002.safetensors",
|
| 135 |
+
"model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
|
| 136 |
+
"model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
|
| 137 |
+
"model.layers.12.self_attn.v_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 138 |
+
"model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 139 |
+
"model.layers.13.mlp.down_proj.SCB": "model-00001-of-00002.safetensors",
|
| 140 |
+
"model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 141 |
+
"model.layers.13.mlp.down_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 142 |
+
"model.layers.13.mlp.gate_proj.SCB": "model-00001-of-00002.safetensors",
|
| 143 |
+
"model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
|
| 144 |
+
"model.layers.13.mlp.gate_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 145 |
+
"model.layers.13.mlp.up_proj.SCB": "model-00001-of-00002.safetensors",
|
| 146 |
+
"model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 147 |
+
"model.layers.13.mlp.up_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 148 |
+
"model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 149 |
+
"model.layers.13.self_attn.k_proj.SCB": "model-00001-of-00002.safetensors",
|
| 150 |
+
"model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
|
| 151 |
+
"model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
|
| 152 |
+
"model.layers.13.self_attn.k_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 153 |
+
"model.layers.13.self_attn.o_proj.SCB": "model-00001-of-00002.safetensors",
|
| 154 |
+
"model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
| 155 |
+
"model.layers.13.self_attn.o_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 156 |
+
"model.layers.13.self_attn.q_proj.SCB": "model-00001-of-00002.safetensors",
|
| 157 |
+
"model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
|
| 158 |
+
"model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
|
| 159 |
+
"model.layers.13.self_attn.q_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 160 |
+
"model.layers.13.self_attn.v_proj.SCB": "model-00001-of-00002.safetensors",
|
| 161 |
+
"model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
|
| 162 |
+
"model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
|
| 163 |
+
"model.layers.13.self_attn.v_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 164 |
+
"model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 165 |
+
"model.layers.14.mlp.down_proj.SCB": "model-00001-of-00002.safetensors",
|
| 166 |
+
"model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 167 |
+
"model.layers.14.mlp.down_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 168 |
+
"model.layers.14.mlp.gate_proj.SCB": "model-00001-of-00002.safetensors",
|
| 169 |
+
"model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
|
| 170 |
+
"model.layers.14.mlp.gate_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 171 |
+
"model.layers.14.mlp.up_proj.SCB": "model-00001-of-00002.safetensors",
|
| 172 |
+
"model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 173 |
+
"model.layers.14.mlp.up_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 174 |
+
"model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 175 |
+
"model.layers.14.self_attn.k_proj.SCB": "model-00001-of-00002.safetensors",
|
| 176 |
+
"model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
|
| 177 |
+
"model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
|
| 178 |
+
"model.layers.14.self_attn.k_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 179 |
+
"model.layers.14.self_attn.o_proj.SCB": "model-00001-of-00002.safetensors",
|
| 180 |
+
"model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
| 181 |
+
"model.layers.14.self_attn.o_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 182 |
+
"model.layers.14.self_attn.q_proj.SCB": "model-00001-of-00002.safetensors",
|
| 183 |
+
"model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
|
| 184 |
+
"model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
|
| 185 |
+
"model.layers.14.self_attn.q_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 186 |
+
"model.layers.14.self_attn.v_proj.SCB": "model-00001-of-00002.safetensors",
|
| 187 |
+
"model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
|
| 188 |
+
"model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
|
| 189 |
+
"model.layers.14.self_attn.v_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 190 |
+
"model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 191 |
+
"model.layers.15.mlp.down_proj.SCB": "model-00001-of-00002.safetensors",
|
| 192 |
+
"model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 193 |
+
"model.layers.15.mlp.down_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 194 |
+
"model.layers.15.mlp.gate_proj.SCB": "model-00001-of-00002.safetensors",
|
| 195 |
+
"model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
|
| 196 |
+
"model.layers.15.mlp.gate_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 197 |
+
"model.layers.15.mlp.up_proj.SCB": "model-00001-of-00002.safetensors",
|
| 198 |
+
"model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 199 |
+
"model.layers.15.mlp.up_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 200 |
+
"model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 201 |
+
"model.layers.15.self_attn.k_proj.SCB": "model-00001-of-00002.safetensors",
|
| 202 |
+
"model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
|
| 203 |
+
"model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
|
| 204 |
+
"model.layers.15.self_attn.k_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 205 |
+
"model.layers.15.self_attn.o_proj.SCB": "model-00001-of-00002.safetensors",
|
| 206 |
+
"model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
| 207 |
+
"model.layers.15.self_attn.o_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 208 |
+
"model.layers.15.self_attn.q_proj.SCB": "model-00001-of-00002.safetensors",
|
| 209 |
+
"model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
|
| 210 |
+
"model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
|
| 211 |
+
"model.layers.15.self_attn.q_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 212 |
+
"model.layers.15.self_attn.v_proj.SCB": "model-00001-of-00002.safetensors",
|
| 213 |
+
"model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
|
| 214 |
+
"model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
|
| 215 |
+
"model.layers.15.self_attn.v_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 216 |
+
"model.layers.16.input_layernorm.weight": "model-00002-of-00002.safetensors",
|
| 217 |
+
"model.layers.16.mlp.down_proj.SCB": "model-00002-of-00002.safetensors",
|
| 218 |
+
"model.layers.16.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
|
| 219 |
+
"model.layers.16.mlp.down_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 220 |
+
"model.layers.16.mlp.gate_proj.SCB": "model-00001-of-00002.safetensors",
|
| 221 |
+
"model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
|
| 222 |
+
"model.layers.16.mlp.gate_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 223 |
+
"model.layers.16.mlp.up_proj.SCB": "model-00001-of-00002.safetensors",
|
| 224 |
+
"model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 225 |
+
"model.layers.16.mlp.up_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 226 |
+
"model.layers.16.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
|
| 227 |
+
"model.layers.16.self_attn.k_proj.SCB": "model-00001-of-00002.safetensors",
|
| 228 |
+
"model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
|
| 229 |
+
"model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
|
| 230 |
+
"model.layers.16.self_attn.k_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 231 |
+
"model.layers.16.self_attn.o_proj.SCB": "model-00001-of-00002.safetensors",
|
| 232 |
+
"model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
| 233 |
+
"model.layers.16.self_attn.o_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 234 |
+
"model.layers.16.self_attn.q_proj.SCB": "model-00001-of-00002.safetensors",
|
| 235 |
+
"model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
|
| 236 |
+
"model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
|
| 237 |
+
"model.layers.16.self_attn.q_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 238 |
+
"model.layers.16.self_attn.v_proj.SCB": "model-00001-of-00002.safetensors",
|
| 239 |
+
"model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
|
| 240 |
+
"model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
|
| 241 |
+
"model.layers.16.self_attn.v_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 242 |
+
"model.layers.17.input_layernorm.weight": "model-00002-of-00002.safetensors",
|
| 243 |
+
"model.layers.17.mlp.down_proj.SCB": "model-00002-of-00002.safetensors",
|
| 244 |
+
"model.layers.17.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
|
| 245 |
+
"model.layers.17.mlp.down_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 246 |
+
"model.layers.17.mlp.gate_proj.SCB": "model-00002-of-00002.safetensors",
|
| 247 |
+
"model.layers.17.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
|
| 248 |
+
"model.layers.17.mlp.gate_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 249 |
+
"model.layers.17.mlp.up_proj.SCB": "model-00002-of-00002.safetensors",
|
| 250 |
+
"model.layers.17.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
|
| 251 |
+
"model.layers.17.mlp.up_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 252 |
+
"model.layers.17.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
|
| 253 |
+
"model.layers.17.self_attn.k_proj.SCB": "model-00002-of-00002.safetensors",
|
| 254 |
+
"model.layers.17.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
|
| 255 |
+
"model.layers.17.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
|
| 256 |
+
"model.layers.17.self_attn.k_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 257 |
+
"model.layers.17.self_attn.o_proj.SCB": "model-00002-of-00002.safetensors",
|
| 258 |
+
"model.layers.17.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
|
| 259 |
+
"model.layers.17.self_attn.o_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 260 |
+
"model.layers.17.self_attn.q_proj.SCB": "model-00002-of-00002.safetensors",
|
| 261 |
+
"model.layers.17.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
|
| 262 |
+
"model.layers.17.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
|
| 263 |
+
"model.layers.17.self_attn.q_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 264 |
+
"model.layers.17.self_attn.v_proj.SCB": "model-00002-of-00002.safetensors",
|
| 265 |
+
"model.layers.17.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
|
| 266 |
+
"model.layers.17.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
|
| 267 |
+
"model.layers.17.self_attn.v_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 268 |
+
"model.layers.18.input_layernorm.weight": "model-00002-of-00002.safetensors",
|
| 269 |
+
"model.layers.18.mlp.down_proj.SCB": "model-00002-of-00002.safetensors",
|
| 270 |
+
"model.layers.18.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
|
| 271 |
+
"model.layers.18.mlp.down_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 272 |
+
"model.layers.18.mlp.gate_proj.SCB": "model-00002-of-00002.safetensors",
|
| 273 |
+
"model.layers.18.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
|
| 274 |
+
"model.layers.18.mlp.gate_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 275 |
+
"model.layers.18.mlp.up_proj.SCB": "model-00002-of-00002.safetensors",
|
| 276 |
+
"model.layers.18.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
|
| 277 |
+
"model.layers.18.mlp.up_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 278 |
+
"model.layers.18.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
|
| 279 |
+
"model.layers.18.self_attn.k_proj.SCB": "model-00002-of-00002.safetensors",
|
| 280 |
+
"model.layers.18.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
|
| 281 |
+
"model.layers.18.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
|
| 282 |
+
"model.layers.18.self_attn.k_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 283 |
+
"model.layers.18.self_attn.o_proj.SCB": "model-00002-of-00002.safetensors",
|
| 284 |
+
"model.layers.18.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
|
| 285 |
+
"model.layers.18.self_attn.o_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 286 |
+
"model.layers.18.self_attn.q_proj.SCB": "model-00002-of-00002.safetensors",
|
| 287 |
+
"model.layers.18.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
|
| 288 |
+
"model.layers.18.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
|
| 289 |
+
"model.layers.18.self_attn.q_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 290 |
+
"model.layers.18.self_attn.v_proj.SCB": "model-00002-of-00002.safetensors",
|
| 291 |
+
"model.layers.18.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
|
| 292 |
+
"model.layers.18.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
|
| 293 |
+
"model.layers.18.self_attn.v_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 294 |
+
"model.layers.19.input_layernorm.weight": "model-00002-of-00002.safetensors",
|
| 295 |
+
"model.layers.19.mlp.down_proj.SCB": "model-00002-of-00002.safetensors",
|
| 296 |
+
"model.layers.19.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
|
| 297 |
+
"model.layers.19.mlp.down_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 298 |
+
"model.layers.19.mlp.gate_proj.SCB": "model-00002-of-00002.safetensors",
|
| 299 |
+
"model.layers.19.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
|
| 300 |
+
"model.layers.19.mlp.gate_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 301 |
+
"model.layers.19.mlp.up_proj.SCB": "model-00002-of-00002.safetensors",
|
| 302 |
+
"model.layers.19.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
|
| 303 |
+
"model.layers.19.mlp.up_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 304 |
+
"model.layers.19.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
|
| 305 |
+
"model.layers.19.self_attn.k_proj.SCB": "model-00002-of-00002.safetensors",
|
| 306 |
+
"model.layers.19.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
|
| 307 |
+
"model.layers.19.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
|
| 308 |
+
"model.layers.19.self_attn.k_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 309 |
+
"model.layers.19.self_attn.o_proj.SCB": "model-00002-of-00002.safetensors",
|
| 310 |
+
"model.layers.19.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
|
| 311 |
+
"model.layers.19.self_attn.o_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 312 |
+
"model.layers.19.self_attn.q_proj.SCB": "model-00002-of-00002.safetensors",
|
| 313 |
+
"model.layers.19.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
|
| 314 |
+
"model.layers.19.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
|
| 315 |
+
"model.layers.19.self_attn.q_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 316 |
+
"model.layers.19.self_attn.v_proj.SCB": "model-00002-of-00002.safetensors",
|
| 317 |
+
"model.layers.19.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
|
| 318 |
+
"model.layers.19.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
|
| 319 |
+
"model.layers.19.self_attn.v_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 320 |
+
"model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 321 |
+
"model.layers.2.mlp.down_proj.SCB": "model-00001-of-00002.safetensors",
|
| 322 |
+
"model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 323 |
+
"model.layers.2.mlp.down_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 324 |
+
"model.layers.2.mlp.gate_proj.SCB": "model-00001-of-00002.safetensors",
|
| 325 |
+
"model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
|
| 326 |
+
"model.layers.2.mlp.gate_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 327 |
+
"model.layers.2.mlp.up_proj.SCB": "model-00001-of-00002.safetensors",
|
| 328 |
+
"model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 329 |
+
"model.layers.2.mlp.up_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 330 |
+
"model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 331 |
+
"model.layers.2.self_attn.k_proj.SCB": "model-00001-of-00002.safetensors",
|
| 332 |
+
"model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
|
| 333 |
+
"model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
|
| 334 |
+
"model.layers.2.self_attn.k_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 335 |
+
"model.layers.2.self_attn.o_proj.SCB": "model-00001-of-00002.safetensors",
|
| 336 |
+
"model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
| 337 |
+
"model.layers.2.self_attn.o_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 338 |
+
"model.layers.2.self_attn.q_proj.SCB": "model-00001-of-00002.safetensors",
|
| 339 |
+
"model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
|
| 340 |
+
"model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
|
| 341 |
+
"model.layers.2.self_attn.q_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 342 |
+
"model.layers.2.self_attn.v_proj.SCB": "model-00001-of-00002.safetensors",
|
| 343 |
+
"model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
|
| 344 |
+
"model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
|
| 345 |
+
"model.layers.2.self_attn.v_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 346 |
+
"model.layers.20.input_layernorm.weight": "model-00002-of-00002.safetensors",
|
| 347 |
+
"model.layers.20.mlp.down_proj.SCB": "model-00002-of-00002.safetensors",
|
| 348 |
+
"model.layers.20.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
|
| 349 |
+
"model.layers.20.mlp.down_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 350 |
+
"model.layers.20.mlp.gate_proj.SCB": "model-00002-of-00002.safetensors",
|
| 351 |
+
"model.layers.20.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
|
| 352 |
+
"model.layers.20.mlp.gate_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 353 |
+
"model.layers.20.mlp.up_proj.SCB": "model-00002-of-00002.safetensors",
|
| 354 |
+
"model.layers.20.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
|
| 355 |
+
"model.layers.20.mlp.up_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 356 |
+
"model.layers.20.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
|
| 357 |
+
"model.layers.20.self_attn.k_proj.SCB": "model-00002-of-00002.safetensors",
|
| 358 |
+
"model.layers.20.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
|
| 359 |
+
"model.layers.20.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
|
| 360 |
+
"model.layers.20.self_attn.k_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 361 |
+
"model.layers.20.self_attn.o_proj.SCB": "model-00002-of-00002.safetensors",
|
| 362 |
+
"model.layers.20.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
|
| 363 |
+
"model.layers.20.self_attn.o_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 364 |
+
"model.layers.20.self_attn.q_proj.SCB": "model-00002-of-00002.safetensors",
|
| 365 |
+
"model.layers.20.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
|
| 366 |
+
"model.layers.20.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
|
| 367 |
+
"model.layers.20.self_attn.q_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 368 |
+
"model.layers.20.self_attn.v_proj.SCB": "model-00002-of-00002.safetensors",
|
| 369 |
+
"model.layers.20.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
|
| 370 |
+
"model.layers.20.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
|
| 371 |
+
"model.layers.20.self_attn.v_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 372 |
+
"model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
|
| 373 |
+
"model.layers.21.mlp.down_proj.SCB": "model-00002-of-00002.safetensors",
|
| 374 |
+
"model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
|
| 375 |
+
"model.layers.21.mlp.down_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 376 |
+
"model.layers.21.mlp.gate_proj.SCB": "model-00002-of-00002.safetensors",
|
| 377 |
+
"model.layers.21.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
|
| 378 |
+
"model.layers.21.mlp.gate_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 379 |
+
"model.layers.21.mlp.up_proj.SCB": "model-00002-of-00002.safetensors",
|
| 380 |
+
"model.layers.21.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
|
| 381 |
+
"model.layers.21.mlp.up_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 382 |
+
"model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
|
| 383 |
+
"model.layers.21.self_attn.k_proj.SCB": "model-00002-of-00002.safetensors",
|
| 384 |
+
"model.layers.21.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
|
| 385 |
+
"model.layers.21.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
|
| 386 |
+
"model.layers.21.self_attn.k_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 387 |
+
"model.layers.21.self_attn.o_proj.SCB": "model-00002-of-00002.safetensors",
|
| 388 |
+
"model.layers.21.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
|
| 389 |
+
"model.layers.21.self_attn.o_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 390 |
+
"model.layers.21.self_attn.q_proj.SCB": "model-00002-of-00002.safetensors",
|
| 391 |
+
"model.layers.21.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
|
| 392 |
+
"model.layers.21.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
|
| 393 |
+
"model.layers.21.self_attn.q_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 394 |
+
"model.layers.21.self_attn.v_proj.SCB": "model-00002-of-00002.safetensors",
|
| 395 |
+
"model.layers.21.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
|
| 396 |
+
"model.layers.21.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
|
| 397 |
+
"model.layers.21.self_attn.v_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 398 |
+
"model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
|
| 399 |
+
"model.layers.22.mlp.down_proj.SCB": "model-00002-of-00002.safetensors",
|
| 400 |
+
"model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
|
| 401 |
+
"model.layers.22.mlp.down_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 402 |
+
"model.layers.22.mlp.gate_proj.SCB": "model-00002-of-00002.safetensors",
|
| 403 |
+
"model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
|
| 404 |
+
"model.layers.22.mlp.gate_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 405 |
+
"model.layers.22.mlp.up_proj.SCB": "model-00002-of-00002.safetensors",
|
| 406 |
+
"model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
|
| 407 |
+
"model.layers.22.mlp.up_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 408 |
+
"model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
|
| 409 |
+
"model.layers.22.self_attn.k_proj.SCB": "model-00002-of-00002.safetensors",
|
| 410 |
+
"model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
|
| 411 |
+
"model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
|
| 412 |
+
"model.layers.22.self_attn.k_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 413 |
+
"model.layers.22.self_attn.o_proj.SCB": "model-00002-of-00002.safetensors",
|
| 414 |
+
"model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
|
| 415 |
+
"model.layers.22.self_attn.o_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 416 |
+
"model.layers.22.self_attn.q_proj.SCB": "model-00002-of-00002.safetensors",
|
| 417 |
+
"model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
|
| 418 |
+
"model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
|
| 419 |
+
"model.layers.22.self_attn.q_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 420 |
+
"model.layers.22.self_attn.v_proj.SCB": "model-00002-of-00002.safetensors",
|
| 421 |
+
"model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
|
| 422 |
+
"model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
|
| 423 |
+
"model.layers.22.self_attn.v_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 424 |
+
"model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
|
| 425 |
+
"model.layers.23.mlp.down_proj.SCB": "model-00002-of-00002.safetensors",
|
| 426 |
+
"model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
|
| 427 |
+
"model.layers.23.mlp.down_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 428 |
+
"model.layers.23.mlp.gate_proj.SCB": "model-00002-of-00002.safetensors",
|
| 429 |
+
"model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
|
| 430 |
+
"model.layers.23.mlp.gate_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 431 |
+
"model.layers.23.mlp.up_proj.SCB": "model-00002-of-00002.safetensors",
|
| 432 |
+
"model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
|
| 433 |
+
"model.layers.23.mlp.up_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 434 |
+
"model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
|
| 435 |
+
"model.layers.23.self_attn.k_proj.SCB": "model-00002-of-00002.safetensors",
|
| 436 |
+
"model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
|
| 437 |
+
"model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
|
| 438 |
+
"model.layers.23.self_attn.k_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 439 |
+
"model.layers.23.self_attn.o_proj.SCB": "model-00002-of-00002.safetensors",
|
| 440 |
+
"model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
|
| 441 |
+
"model.layers.23.self_attn.o_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 442 |
+
"model.layers.23.self_attn.q_proj.SCB": "model-00002-of-00002.safetensors",
|
| 443 |
+
"model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
|
| 444 |
+
"model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
|
| 445 |
+
"model.layers.23.self_attn.q_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 446 |
+
"model.layers.23.self_attn.v_proj.SCB": "model-00002-of-00002.safetensors",
|
| 447 |
+
"model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
|
| 448 |
+
"model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
|
| 449 |
+
"model.layers.23.self_attn.v_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 450 |
+
"model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
|
| 451 |
+
"model.layers.24.mlp.down_proj.SCB": "model-00002-of-00002.safetensors",
|
| 452 |
+
"model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
|
| 453 |
+
"model.layers.24.mlp.down_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 454 |
+
"model.layers.24.mlp.gate_proj.SCB": "model-00002-of-00002.safetensors",
|
| 455 |
+
"model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
|
| 456 |
+
"model.layers.24.mlp.gate_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 457 |
+
"model.layers.24.mlp.up_proj.SCB": "model-00002-of-00002.safetensors",
|
| 458 |
+
"model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
|
| 459 |
+
"model.layers.24.mlp.up_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 460 |
+
"model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
|
| 461 |
+
"model.layers.24.self_attn.k_proj.SCB": "model-00002-of-00002.safetensors",
|
| 462 |
+
"model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
|
| 463 |
+
"model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
|
| 464 |
+
"model.layers.24.self_attn.k_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 465 |
+
"model.layers.24.self_attn.o_proj.SCB": "model-00002-of-00002.safetensors",
|
| 466 |
+
"model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
|
| 467 |
+
"model.layers.24.self_attn.o_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 468 |
+
"model.layers.24.self_attn.q_proj.SCB": "model-00002-of-00002.safetensors",
|
| 469 |
+
"model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
|
| 470 |
+
"model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
|
| 471 |
+
"model.layers.24.self_attn.q_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 472 |
+
"model.layers.24.self_attn.v_proj.SCB": "model-00002-of-00002.safetensors",
|
| 473 |
+
"model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
|
| 474 |
+
"model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
|
| 475 |
+
"model.layers.24.self_attn.v_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 476 |
+
"model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
|
| 477 |
+
"model.layers.25.mlp.down_proj.SCB": "model-00002-of-00002.safetensors",
|
| 478 |
+
"model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
|
| 479 |
+
"model.layers.25.mlp.down_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 480 |
+
"model.layers.25.mlp.gate_proj.SCB": "model-00002-of-00002.safetensors",
|
| 481 |
+
"model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
|
| 482 |
+
"model.layers.25.mlp.gate_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 483 |
+
"model.layers.25.mlp.up_proj.SCB": "model-00002-of-00002.safetensors",
|
| 484 |
+
"model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
|
| 485 |
+
"model.layers.25.mlp.up_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 486 |
+
"model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
|
| 487 |
+
"model.layers.25.self_attn.k_proj.SCB": "model-00002-of-00002.safetensors",
|
| 488 |
+
"model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
|
| 489 |
+
"model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
|
| 490 |
+
"model.layers.25.self_attn.k_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 491 |
+
"model.layers.25.self_attn.o_proj.SCB": "model-00002-of-00002.safetensors",
|
| 492 |
+
"model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
|
| 493 |
+
"model.layers.25.self_attn.o_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 494 |
+
"model.layers.25.self_attn.q_proj.SCB": "model-00002-of-00002.safetensors",
|
| 495 |
+
"model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
|
| 496 |
+
"model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
|
| 497 |
+
"model.layers.25.self_attn.q_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 498 |
+
"model.layers.25.self_attn.v_proj.SCB": "model-00002-of-00002.safetensors",
|
| 499 |
+
"model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
|
| 500 |
+
"model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
|
| 501 |
+
"model.layers.25.self_attn.v_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 502 |
+
"model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
|
| 503 |
+
"model.layers.26.mlp.down_proj.SCB": "model-00002-of-00002.safetensors",
|
| 504 |
+
"model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
|
| 505 |
+
"model.layers.26.mlp.down_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 506 |
+
"model.layers.26.mlp.gate_proj.SCB": "model-00002-of-00002.safetensors",
|
| 507 |
+
"model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
|
| 508 |
+
"model.layers.26.mlp.gate_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 509 |
+
"model.layers.26.mlp.up_proj.SCB": "model-00002-of-00002.safetensors",
|
| 510 |
+
"model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
|
| 511 |
+
"model.layers.26.mlp.up_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 512 |
+
"model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
|
| 513 |
+
"model.layers.26.self_attn.k_proj.SCB": "model-00002-of-00002.safetensors",
|
| 514 |
+
"model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
|
| 515 |
+
"model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
|
| 516 |
+
"model.layers.26.self_attn.k_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 517 |
+
"model.layers.26.self_attn.o_proj.SCB": "model-00002-of-00002.safetensors",
|
| 518 |
+
"model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
|
| 519 |
+
"model.layers.26.self_attn.o_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 520 |
+
"model.layers.26.self_attn.q_proj.SCB": "model-00002-of-00002.safetensors",
|
| 521 |
+
"model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
|
| 522 |
+
"model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
|
| 523 |
+
"model.layers.26.self_attn.q_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 524 |
+
"model.layers.26.self_attn.v_proj.SCB": "model-00002-of-00002.safetensors",
|
| 525 |
+
"model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
|
| 526 |
+
"model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
|
| 527 |
+
"model.layers.26.self_attn.v_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 528 |
+
"model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
|
| 529 |
+
"model.layers.27.mlp.down_proj.SCB": "model-00002-of-00002.safetensors",
|
| 530 |
+
"model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
|
| 531 |
+
"model.layers.27.mlp.down_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 532 |
+
"model.layers.27.mlp.gate_proj.SCB": "model-00002-of-00002.safetensors",
|
| 533 |
+
"model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
|
| 534 |
+
"model.layers.27.mlp.gate_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 535 |
+
"model.layers.27.mlp.up_proj.SCB": "model-00002-of-00002.safetensors",
|
| 536 |
+
"model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
|
| 537 |
+
"model.layers.27.mlp.up_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 538 |
+
"model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
|
| 539 |
+
"model.layers.27.self_attn.k_proj.SCB": "model-00002-of-00002.safetensors",
|
| 540 |
+
"model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
|
| 541 |
+
"model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
|
| 542 |
+
"model.layers.27.self_attn.k_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 543 |
+
"model.layers.27.self_attn.o_proj.SCB": "model-00002-of-00002.safetensors",
|
| 544 |
+
"model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
|
| 545 |
+
"model.layers.27.self_attn.o_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 546 |
+
"model.layers.27.self_attn.q_proj.SCB": "model-00002-of-00002.safetensors",
|
| 547 |
+
"model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
|
| 548 |
+
"model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
|
| 549 |
+
"model.layers.27.self_attn.q_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 550 |
+
"model.layers.27.self_attn.v_proj.SCB": "model-00002-of-00002.safetensors",
|
| 551 |
+
"model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
|
| 552 |
+
"model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
|
| 553 |
+
"model.layers.27.self_attn.v_proj.weight_format": "model-00002-of-00002.safetensors",
|
| 554 |
+
"model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 555 |
+
"model.layers.3.mlp.down_proj.SCB": "model-00001-of-00002.safetensors",
|
| 556 |
+
"model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 557 |
+
"model.layers.3.mlp.down_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 558 |
+
"model.layers.3.mlp.gate_proj.SCB": "model-00001-of-00002.safetensors",
|
| 559 |
+
"model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
|
| 560 |
+
"model.layers.3.mlp.gate_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 561 |
+
"model.layers.3.mlp.up_proj.SCB": "model-00001-of-00002.safetensors",
|
| 562 |
+
"model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 563 |
+
"model.layers.3.mlp.up_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 564 |
+
"model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 565 |
+
"model.layers.3.self_attn.k_proj.SCB": "model-00001-of-00002.safetensors",
|
| 566 |
+
"model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
|
| 567 |
+
"model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
|
| 568 |
+
"model.layers.3.self_attn.k_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 569 |
+
"model.layers.3.self_attn.o_proj.SCB": "model-00001-of-00002.safetensors",
|
| 570 |
+
"model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
| 571 |
+
"model.layers.3.self_attn.o_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 572 |
+
"model.layers.3.self_attn.q_proj.SCB": "model-00001-of-00002.safetensors",
|
| 573 |
+
"model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
|
| 574 |
+
"model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
|
| 575 |
+
"model.layers.3.self_attn.q_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 576 |
+
"model.layers.3.self_attn.v_proj.SCB": "model-00001-of-00002.safetensors",
|
| 577 |
+
"model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
|
| 578 |
+
"model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
|
| 579 |
+
"model.layers.3.self_attn.v_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 580 |
+
"model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 581 |
+
"model.layers.4.mlp.down_proj.SCB": "model-00001-of-00002.safetensors",
|
| 582 |
+
"model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 583 |
+
"model.layers.4.mlp.down_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 584 |
+
"model.layers.4.mlp.gate_proj.SCB": "model-00001-of-00002.safetensors",
|
| 585 |
+
"model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
|
| 586 |
+
"model.layers.4.mlp.gate_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 587 |
+
"model.layers.4.mlp.up_proj.SCB": "model-00001-of-00002.safetensors",
|
| 588 |
+
"model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 589 |
+
"model.layers.4.mlp.up_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 590 |
+
"model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 591 |
+
"model.layers.4.self_attn.k_proj.SCB": "model-00001-of-00002.safetensors",
|
| 592 |
+
"model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
|
| 593 |
+
"model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
|
| 594 |
+
"model.layers.4.self_attn.k_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 595 |
+
"model.layers.4.self_attn.o_proj.SCB": "model-00001-of-00002.safetensors",
|
| 596 |
+
"model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
| 597 |
+
"model.layers.4.self_attn.o_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 598 |
+
"model.layers.4.self_attn.q_proj.SCB": "model-00001-of-00002.safetensors",
|
| 599 |
+
"model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
|
| 600 |
+
"model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
|
| 601 |
+
"model.layers.4.self_attn.q_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 602 |
+
"model.layers.4.self_attn.v_proj.SCB": "model-00001-of-00002.safetensors",
|
| 603 |
+
"model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
|
| 604 |
+
"model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
|
| 605 |
+
"model.layers.4.self_attn.v_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 606 |
+
"model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 607 |
+
"model.layers.5.mlp.down_proj.SCB": "model-00001-of-00002.safetensors",
|
| 608 |
+
"model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 609 |
+
"model.layers.5.mlp.down_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 610 |
+
"model.layers.5.mlp.gate_proj.SCB": "model-00001-of-00002.safetensors",
|
| 611 |
+
"model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
|
| 612 |
+
"model.layers.5.mlp.gate_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 613 |
+
"model.layers.5.mlp.up_proj.SCB": "model-00001-of-00002.safetensors",
|
| 614 |
+
"model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 615 |
+
"model.layers.5.mlp.up_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 616 |
+
"model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 617 |
+
"model.layers.5.self_attn.k_proj.SCB": "model-00001-of-00002.safetensors",
|
| 618 |
+
"model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
|
| 619 |
+
"model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
|
| 620 |
+
"model.layers.5.self_attn.k_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 621 |
+
"model.layers.5.self_attn.o_proj.SCB": "model-00001-of-00002.safetensors",
|
| 622 |
+
"model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
| 623 |
+
"model.layers.5.self_attn.o_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 624 |
+
"model.layers.5.self_attn.q_proj.SCB": "model-00001-of-00002.safetensors",
|
| 625 |
+
"model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
|
| 626 |
+
"model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
|
| 627 |
+
"model.layers.5.self_attn.q_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 628 |
+
"model.layers.5.self_attn.v_proj.SCB": "model-00001-of-00002.safetensors",
|
| 629 |
+
"model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
|
| 630 |
+
"model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
|
| 631 |
+
"model.layers.5.self_attn.v_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 632 |
+
"model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 633 |
+
"model.layers.6.mlp.down_proj.SCB": "model-00001-of-00002.safetensors",
|
| 634 |
+
"model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 635 |
+
"model.layers.6.mlp.down_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 636 |
+
"model.layers.6.mlp.gate_proj.SCB": "model-00001-of-00002.safetensors",
|
| 637 |
+
"model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
|
| 638 |
+
"model.layers.6.mlp.gate_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 639 |
+
"model.layers.6.mlp.up_proj.SCB": "model-00001-of-00002.safetensors",
|
| 640 |
+
"model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 641 |
+
"model.layers.6.mlp.up_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 642 |
+
"model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 643 |
+
"model.layers.6.self_attn.k_proj.SCB": "model-00001-of-00002.safetensors",
|
| 644 |
+
"model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
|
| 645 |
+
"model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
|
| 646 |
+
"model.layers.6.self_attn.k_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 647 |
+
"model.layers.6.self_attn.o_proj.SCB": "model-00001-of-00002.safetensors",
|
| 648 |
+
"model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
| 649 |
+
"model.layers.6.self_attn.o_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 650 |
+
"model.layers.6.self_attn.q_proj.SCB": "model-00001-of-00002.safetensors",
|
| 651 |
+
"model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
|
| 652 |
+
"model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
|
| 653 |
+
"model.layers.6.self_attn.q_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 654 |
+
"model.layers.6.self_attn.v_proj.SCB": "model-00001-of-00002.safetensors",
|
| 655 |
+
"model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
|
| 656 |
+
"model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
|
| 657 |
+
"model.layers.6.self_attn.v_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 658 |
+
"model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 659 |
+
"model.layers.7.mlp.down_proj.SCB": "model-00001-of-00002.safetensors",
|
| 660 |
+
"model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 661 |
+
"model.layers.7.mlp.down_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 662 |
+
"model.layers.7.mlp.gate_proj.SCB": "model-00001-of-00002.safetensors",
|
| 663 |
+
"model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
|
| 664 |
+
"model.layers.7.mlp.gate_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 665 |
+
"model.layers.7.mlp.up_proj.SCB": "model-00001-of-00002.safetensors",
|
| 666 |
+
"model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 667 |
+
"model.layers.7.mlp.up_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 668 |
+
"model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 669 |
+
"model.layers.7.self_attn.k_proj.SCB": "model-00001-of-00002.safetensors",
|
| 670 |
+
"model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
|
| 671 |
+
"model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
|
| 672 |
+
"model.layers.7.self_attn.k_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 673 |
+
"model.layers.7.self_attn.o_proj.SCB": "model-00001-of-00002.safetensors",
|
| 674 |
+
"model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
| 675 |
+
"model.layers.7.self_attn.o_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 676 |
+
"model.layers.7.self_attn.q_proj.SCB": "model-00001-of-00002.safetensors",
|
| 677 |
+
"model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
|
| 678 |
+
"model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
|
| 679 |
+
"model.layers.7.self_attn.q_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 680 |
+
"model.layers.7.self_attn.v_proj.SCB": "model-00001-of-00002.safetensors",
|
| 681 |
+
"model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
|
| 682 |
+
"model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
|
| 683 |
+
"model.layers.7.self_attn.v_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 684 |
+
"model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 685 |
+
"model.layers.8.mlp.down_proj.SCB": "model-00001-of-00002.safetensors",
|
| 686 |
+
"model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 687 |
+
"model.layers.8.mlp.down_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 688 |
+
"model.layers.8.mlp.gate_proj.SCB": "model-00001-of-00002.safetensors",
|
| 689 |
+
"model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
|
| 690 |
+
"model.layers.8.mlp.gate_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 691 |
+
"model.layers.8.mlp.up_proj.SCB": "model-00001-of-00002.safetensors",
|
| 692 |
+
"model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 693 |
+
"model.layers.8.mlp.up_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 694 |
+
"model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 695 |
+
"model.layers.8.self_attn.k_proj.SCB": "model-00001-of-00002.safetensors",
|
| 696 |
+
"model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
|
| 697 |
+
"model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
|
| 698 |
+
"model.layers.8.self_attn.k_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 699 |
+
"model.layers.8.self_attn.o_proj.SCB": "model-00001-of-00002.safetensors",
|
| 700 |
+
"model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
| 701 |
+
"model.layers.8.self_attn.o_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 702 |
+
"model.layers.8.self_attn.q_proj.SCB": "model-00001-of-00002.safetensors",
|
| 703 |
+
"model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
|
| 704 |
+
"model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
|
| 705 |
+
"model.layers.8.self_attn.q_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 706 |
+
"model.layers.8.self_attn.v_proj.SCB": "model-00001-of-00002.safetensors",
|
| 707 |
+
"model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
|
| 708 |
+
"model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
|
| 709 |
+
"model.layers.8.self_attn.v_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 710 |
+
"model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 711 |
+
"model.layers.9.mlp.down_proj.SCB": "model-00001-of-00002.safetensors",
|
| 712 |
+
"model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 713 |
+
"model.layers.9.mlp.down_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 714 |
+
"model.layers.9.mlp.gate_proj.SCB": "model-00001-of-00002.safetensors",
|
| 715 |
+
"model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
|
| 716 |
+
"model.layers.9.mlp.gate_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 717 |
+
"model.layers.9.mlp.up_proj.SCB": "model-00001-of-00002.safetensors",
|
| 718 |
+
"model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 719 |
+
"model.layers.9.mlp.up_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 720 |
+
"model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 721 |
+
"model.layers.9.self_attn.k_proj.SCB": "model-00001-of-00002.safetensors",
|
| 722 |
+
"model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
|
| 723 |
+
"model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
|
| 724 |
+
"model.layers.9.self_attn.k_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 725 |
+
"model.layers.9.self_attn.o_proj.SCB": "model-00001-of-00002.safetensors",
|
| 726 |
+
"model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
| 727 |
+
"model.layers.9.self_attn.o_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 728 |
+
"model.layers.9.self_attn.q_proj.SCB": "model-00001-of-00002.safetensors",
|
| 729 |
+
"model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
|
| 730 |
+
"model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
|
| 731 |
+
"model.layers.9.self_attn.q_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 732 |
+
"model.layers.9.self_attn.v_proj.SCB": "model-00001-of-00002.safetensors",
|
| 733 |
+
"model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
|
| 734 |
+
"model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
|
| 735 |
+
"model.layers.9.self_attn.v_proj.weight_format": "model-00001-of-00002.safetensors",
|
| 736 |
+
"model.norm.weight": "model-00002-of-00002.safetensors"
|
| 737 |
+
}
|
| 738 |
+
}
|
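
The weight map above pairs every parameter name with the shard that stores it; the `SCB` entries appear to be the per-row scaling constants that bitsandbytes keeps alongside each int8 weight. A minimal sketch of looking up a tensor's shard from this index (the repo id below is illustrative, not the actual repository name):

import json
from huggingface_hub import hf_hub_download

# Hypothetical repo id; substitute the real model repository.
index_path = hf_hub_download("your-namespace/Dream-7B-8bit", "model.safetensors.index.json")
with open(index_path) as f:
    weight_map = json.load(f)["weight_map"]

# Each key is a parameter name; each value names the shard file that holds it.
print(weight_map["model.norm.weight"])  # -> "model-00002-of-00002.safetensors"
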
modeling_dream.py
ADDED
@@ -0,0 +1,839 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: ignore-errors
|
| 2 |
+
# isort: skip_file
|
| 3 |
+
# coding=utf-8
|
| 4 |
+
# Copyright 2024 The Dream team, HKUNLP Group and the HuggingFace Inc. team. All rights reserved.
|
| 5 |
+
#
|
| 6 |
+
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
|
| 7 |
+
# and OPT and Qwen implementations in this library. It has been modified from its
|
| 8 |
+
# original forms to accommodate minor architectural differences compared
|
| 9 |
+
# to GPT-NeoX and OPT and Qwen used by the Meta AI and Qwen team that trained the model.
|
| 10 |
+
#
|
| 11 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 12 |
+
# you may not use this file except in compliance with the License.
|
| 13 |
+
# You may obtain a copy of the License at
|
| 14 |
+
#
|
| 15 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 16 |
+
#
|
| 17 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 18 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 19 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 20 |
+
# See the License for the specific language governing permissions and
|
| 21 |
+
# limitations under the License.
|
| 22 |
+
"""PyTorch Dream model."""
|
| 23 |
+
|
| 24 |
+
import math
|
| 25 |
+
import os
|
| 26 |
+
from typing import List, Optional, Tuple, Union
|
| 27 |
+
|
| 28 |
+
import torch
|
| 29 |
+
import torch.utils.checkpoint
|
| 30 |
+
from torch import nn
|
| 31 |
+
from transformers import PretrainedConfig
|
| 32 |
+
from transformers.activations import ACT2FN
|
| 33 |
+
from transformers.cache_utils import Cache, DynamicCache
|
| 34 |
+
from transformers.modeling_outputs import (
|
| 35 |
+
BaseModelOutput,
|
| 36 |
+
MaskedLMOutput,
|
| 37 |
+
)
|
| 38 |
+
from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS
|
| 39 |
+
from transformers.modeling_utils import PreTrainedModel
|
| 40 |
+
from transformers.utils import (
|
| 41 |
+
is_flash_attn_2_available,
|
| 42 |
+
logging,
|
| 43 |
+
)
|
| 44 |
+
|
| 45 |
+
from .configuration_dream import DreamConfig
|
| 46 |
+
from .generation_utils import DreamGenerationConfig, DreamGenerationMixin
|
| 47 |
+
from .training_utils import get_prompt_lengths_from_labels, loss_function, simple_uniform_mask
|
| 48 |
+
|
| 49 |
+
if is_flash_attn_2_available():
|
| 50 |
+
pass
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
logger = logging.get_logger(__name__)
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
_CHECKPOINT_FOR_DOC = "Dream-7B"
|
| 57 |
+
_CONFIG_FOR_DOC = "DreamConfig"
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
# Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Dream
|
| 61 |
+
class DreamRMSNorm(nn.Module):
|
| 62 |
+
def __init__(self, hidden_size, eps=1e-6):
|
| 63 |
+
"""
|
| 64 |
+
DreamRMSNorm is equivalent to T5LayerNorm
|
| 65 |
+
"""
|
| 66 |
+
super().__init__()
|
| 67 |
+
self.weight = nn.Parameter(torch.ones(hidden_size))
|
| 68 |
+
self.variance_epsilon = eps
|
| 69 |
+
|
| 70 |
+
def forward(self, hidden_states):
|
| 71 |
+
input_dtype = hidden_states.dtype
|
| 72 |
+
hidden_states = hidden_states.to(torch.float32)
|
| 73 |
+
variance = hidden_states.pow(2).mean(-1, keepdim=True)
|
| 74 |
+
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
|
| 75 |
+
return self.weight * hidden_states.to(input_dtype)
|
| 76 |
+
|
| 77 |
+
def extra_repr(self):
|
| 78 |
+
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
|
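
A standalone numeric sketch of what this normalization computes (values are illustrative):

import torch

x = torch.tensor([[3.0, 4.0]])                # one token, hidden_size = 2
variance = x.pow(2).mean(-1, keepdim=True)    # mean of squares = 12.5
normed = x * torch.rsqrt(variance + 1e-6)     # -> roughly [0.8485, 1.1314]
# DreamRMSNorm then scales by its learned per-channel weight (ones at init),
# so at initialization the output equals `normed`.
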


# Copied from transformers.models.llama.modeling_llama.LlamaRotaryEmbedding with Llama->Dream
class DreamRotaryEmbedding(nn.Module):
    def __init__(
        self,
        dim=None,
        max_position_embeddings=2048,
        base=10000,
        device=None,
        scaling_factor=1.0,
        rope_type="default",
        config: Optional[DreamConfig] = None,
    ):
        super().__init__()
        # TODO (joao): remove the `if` below, only used for BC
        self.rope_kwargs = {}
        if config is None:
            logger.warning_once(
                "`DreamRotaryEmbedding` can now be fully parameterized by passing the model config through the "
                "`config` argument. All other arguments will be removed in v4.46"
            )
            self.rope_kwargs = {
                "rope_type": rope_type,
                "factor": scaling_factor,
                "dim": dim,
                "base": base,
                "max_position_embeddings": max_position_embeddings,
            }
            self.rope_type = rope_type
            self.max_seq_len_cached = max_position_embeddings
            self.original_max_seq_len = max_position_embeddings
        else:
            # BC: "rope_type" was originally "type"
            if config.rope_scaling is not None:
                self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
            else:
                self.rope_type = "default"
            self.max_seq_len_cached = config.max_position_embeddings
            self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, **self.rope_kwargs)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    def reset_parameters(self):
        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, self.inv_freq.device, **self.rope_kwargs)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    def _dynamic_frequency_update(self, position_ids, device):
        """
        dynamic RoPE layers should recompute `inv_freq` in the following situations:
        1 - growing beyond the cached sequence length (allow scaling)
        2 - the current sequence length is in the original scale (avoid losing precision with small sequences)
        """
        seq_len = torch.max(position_ids) + 1
        if seq_len > self.max_seq_len_cached:  # growth
            inv_freq, self.attention_scaling = self.rope_init_fn(
                self.config, device, seq_len=seq_len, **self.rope_kwargs
            )
            self.register_buffer("inv_freq", inv_freq, persistent=False)  # TODO joao: may break with compilation
            self.max_seq_len_cached = seq_len

        if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len:  # reset
            self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
            self.max_seq_len_cached = self.original_max_seq_len

    @torch.no_grad()
    def forward(self, x, position_ids):
        if "dynamic" in self.rope_type:
            self._dynamic_frequency_update(position_ids, device=x.device)

        # Core RoPE block
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
        position_ids_expanded = position_ids[:, None, :].float()
        # Force float32 (see https://github.com/huggingface/transformers/pull/29285)
        device_type = x.device.type
        device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos()
            sin = emb.sin()

        # Advanced RoPE types (e.g. yarn) apply a post-processing scaling factor, equivalent to scaling attention
        cos = cos * self.attention_scaling
        sin = sin * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
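
For intuition, the default `rope_init_fn` produces the classic RoPE inverse-frequency schedule; a sketch under that assumption (the `base` and `head_dim` values are illustrative):

import torch

base, head_dim = 10000.0, 128
inv_freq = 1.0 / (base ** (torch.arange(0, head_dim, 2).float() / head_dim))
# inv_freq[0] == 1.0 is the fastest-rotating pair; later entries decay toward 1/base.
# forward() outer-multiplies these frequencies with the position ids, duplicates the
# half-dim frequencies to cover head_dim, and returns their cos/sin.
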

# Copied from transformers.models.llama.modeling_llama.rotate_half
def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


# Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed
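
The rotation these two helpers implement can be checked in isolation using the functions above; a sketch with illustrative shapes. Norms are preserved because each dimension pair undergoes a plain 2-D rotation, so relative position enters only through the q·k dot product:

import torch

q = torch.randn(1, 1, 4, 8)   # (batch, heads, seq_len, head_dim)
k = torch.randn(1, 1, 4, 8)
inv_freq = 1.0 / (10000 ** (torch.arange(0, 8, 2).float() / 8))
freqs = torch.outer(torch.arange(4).float(), inv_freq)   # (seq_len, head_dim // 2)
emb = torch.cat((freqs, freqs), dim=-1)                  # (seq_len, head_dim)
cos, sin = emb.cos()[None], emb.sin()[None]              # add a batch dimension

q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin)
assert torch.allclose(q_rot.norm(dim=-1), q.norm(dim=-1), atol=1e-5)
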

# Copied from transformers.models.mistral.modeling_mistral.MistralMLP with Mistral->Dream
class DreamMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        return self.down_proj(self.act_fn(self.gate_proj(hidden_state)) * self.up_proj(hidden_state))
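
The forward pass is a gated (SwiGLU-style) MLP. The same computation in plain tensor ops, assuming the usual SiLU `hidden_act` (sizes illustrative):

import torch
import torch.nn.functional as F

hidden, inter = 8, 32
x = torch.randn(1, 3, hidden)
w_gate, w_up = torch.randn(inter, hidden), torch.randn(inter, hidden)
w_down = torch.randn(hidden, inter)

# down_proj(act_fn(gate_proj(x)) * up_proj(x))
out = (F.silu(x @ w_gate.T) * (x @ w_up.T)) @ w_down.T   # -> (1, 3, hidden)
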

# Copied from transformers.models.llama.modeling_llama.repeat_kv
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
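
The docstring's equivalence claim is easy to verify directly against the function above; a small sketch:

import torch

kv = torch.randn(2, 4, 16, 64)   # (batch, num_key_value_heads, seq_len, head_dim)
assert torch.equal(repeat_kv(kv, 3), torch.repeat_interleave(kv, repeats=3, dim=1))
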

class DreamAttention(nn.Module):
    """
    Multi-headed attention from the 'Attention Is All You Need' paper. Modified to use sliding window attention, as in
    Longformer and "Generating Long Sequences with Sparse Transformers".
    """

    def __init__(self, config: DreamConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        if layer_idx is None:
            logger.warning_once(
                f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will "
                "lead to errors during the forward call, if caching is used. Please make sure to provide a "
                "`layer_idx` when creating this class."
            )

        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.num_key_value_heads = config.num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.max_position_embeddings = config.max_position_embeddings
        self.rope_theta = config.rope_theta
        self.is_causal = False
        self.attention_dropout = config.attention_dropout

        if (self.head_dim * self.num_heads) != self.hidden_size:
            raise ValueError(
                f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
                f" and `num_heads`: {self.num_heads})."
            )
        self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=True)
        self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
        self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
        self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)

        self.rotary_emb = DreamRotaryEmbedding(config=self.config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,  # will become mandatory in v4.46
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        if position_embeddings is None:
            logger.warning_once(
                "The attention layers in this model are transitioning from computing the RoPE embeddings internally "
                "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed "
                "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.46 `position_ids` will be "
                "removed and `position_embeddings` will be mandatory."
            )
            cos, sin = self.rotary_emb(value_states, position_ids)
        else:
            cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}  # Specific to RoPE models
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # repeat k/v heads if n_kv_heads < n_heads
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
        if attention_mask is not None:  # no matter the length, we just slice it
            causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
            attn_weights = attn_weights + causal_mask

        # upcast attention to fp32
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
        attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
        attn_output = torch.matmul(attn_weights, value_states)

        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)

        attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value
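
The eager path above reduces to scaled dot products, an additive mask, and an fp32 softmax; a compact standalone sketch with illustrative shapes:

import math
import torch

q = torch.randn(1, 2, 5, 8)   # (batch, heads, q_len, head_dim)
k, v = torch.randn(1, 2, 5, 8), torch.randn(1, 2, 5, 8)

scores = q @ k.transpose(2, 3) / math.sqrt(q.shape[-1])
# When a mask is supplied, an additive term (0 where visible, a large negative
# value where hidden) is added here; note the module sets `is_causal = False`.
probs = torch.softmax(scores.float(), dim=-1).to(q.dtype)
out = probs @ v               # -> (1, 2, 5, 8)
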

class DreamSdpaAttention(DreamAttention):
    """
    Dream attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
    `DreamAttention`, as the weights of the module stay untouched. The only changes are on the forward pass, to adapt
    to the SDPA API.
    """

    # Adapted from DreamAttention.forward
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,  # will become mandatory in v4.46
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        if output_attentions:
            # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
            logger.warning_once(
                "DreamModel is using DreamSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
                'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
            )
            return super().forward(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_value=past_key_value,
                output_attentions=output_attentions,
                use_cache=use_cache,
            )

        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        if position_embeddings is None:
            logger.warning_once(
                "The attention layers in this model are transitioning from computing the RoPE embeddings internally "
                "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed "
                "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.46 `position_ids` will be "
                "removed and `position_embeddings` will be mandatory."
            )
            cos, sin = self.rotary_emb(value_states, position_ids)
        else:
            cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}  # Specific to RoPE models
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        # causal_mask = attention_mask
        # if attention_mask is not None:  # no matter the length, we just slice it
        #     causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]

        # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
        # Reference: https://github.com/pytorch/pytorch/issues/112577.
        if query_states.device.type == "cuda" and attention_mask is not None:
            query_states = query_states.contiguous()
            key_states = key_states.contiguous()
            value_states = value_states.contiguous()

        # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
        # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
        # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
        # is_causal = True if causal_mask is None and q_len > 1 else False

        attn_output = torch.nn.functional.scaled_dot_product_attention(
            query_states,
            key_states,
            value_states,
            attn_mask=attention_mask if isinstance(attention_mask, torch.Tensor) else None,
            dropout_p=self.attention_dropout if self.training else 0.0,
            is_causal=False,  # hard coded
        )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.view(bsz, q_len, self.hidden_size)

        attn_output = self.o_proj(attn_output)

        return attn_output, None, past_key_value
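
Note the hard-coded `is_causal=False`: with no mask supplied, every position attends to every other, which is what a diffusion-style denoiser needs (no left-to-right constraint). A minimal sketch of the equivalent bidirectional SDPA call:

import torch
import torch.nn.functional as F

q = k = v = torch.randn(1, 2, 5, 8)   # (batch, heads, seq_len, head_dim)
out = F.scaled_dot_product_attention(q, k, v, attn_mask=None, is_causal=False)
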

class DreamDecoderLayer(nn.Module):
    def __init__(self, config: DreamConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size

        if config.sliding_window and config._attn_implementation != "flash_attention_2":
            logger.warning_once(
                f"Sliding Window Attention is enabled but not implemented for `{config._attn_implementation}`; "
                "unexpected results may be encountered."
            )

        # self.self_attn = Dream_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx)
        self.self_attn = DreamSdpaAttention(config, layer_idx)

        self.mlp = DreamMLP(config)
        self.input_layernorm = DreamRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = DreamRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,  # will become mandatory in v4.46
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
                `(batch, sequence_length)` where padding elements are indicated by 0.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence.
            position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
                Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
                with `head_dim` being the embedding dimension of each attention head.
            kwargs (`dict`, *optional*):
                Arbitrary kwargs to be ignored, used for FSDP and other methods that inject code
                into the model
        """

        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        if use_cache:
            outputs += (present_key_value,)

        return outputs


class DreamPreTrainedModel(PreTrainedModel):
    config_class = DreamConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["DreamDecoderLayer"]
    _skip_keys_device_placement = "past_key_values"
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Optional[Union[str, os.PathLike]],
        *model_args,
        config: Optional[Union[PretrainedConfig, str, os.PathLike]] = None,
        cache_dir: Optional[Union[str, os.PathLike]] = None,
        ignore_mismatched_sizes: bool = False,
        force_download: bool = False,
        local_files_only: bool = False,
        token: Optional[Union[str, bool]] = None,
        revision: str = "main",
        use_safetensors: Optional[bool] = None,
        weights_only: bool = True,
        **kwargs,
    ):
        _model = super().from_pretrained(
            pretrained_model_name_or_path,
            *model_args,
            config=config,
            cache_dir=cache_dir,
            ignore_mismatched_sizes=ignore_mismatched_sizes,
            force_download=force_download,
            local_files_only=local_files_only,
            token=token,
            revision=revision,
            use_safetensors=use_safetensors,
            weights_only=weights_only,
            **kwargs,
        )
        # NOTE(Lin): we need to override the generation config
        # because the generation config loaded in `from_pretrained`
        # does not include all the attributes of DreamGenerationConfig
        resume_download = kwargs.get("resume_download", None)
        proxies = kwargs.get("proxies", None)
        subfolder = kwargs.get("subfolder", "")
        from_auto_class = kwargs.get("_from_auto", False)
        from_pipeline = kwargs.get("_from_pipeline", None)
        _model.generation_config = DreamGenerationConfig.from_pretrained(
            pretrained_model_name_or_path,
            cache_dir=cache_dir,
            force_download=force_download,
            resume_download=resume_download,
            proxies=proxies,
            local_files_only=local_files_only,
            token=token,
            revision=revision,
            subfolder=subfolder,
            _from_auto=from_auto_class,
            _from_pipeline=from_pipeline,
        )
        return _model
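
A hedged usage sketch of the loading path this override serves; the repo id is illustrative, and `trust_remote_code=True` is assumed to be required because the modeling code ships with the checkpoint:

import torch
from transformers import AutoModel

model = AutoModel.from_pretrained(
    "your-namespace/Dream-7B-8bit",   # hypothetical repo id
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
)
# After loading, `model.generation_config` is a DreamGenerationConfig, so
# diffusion-specific generation attributes are preserved.
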

class DreamBaseModel(DreamPreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`DreamDecoderLayer`]

    Args:
        config: DreamConfig
    """

    def __init__(self, config: DreamConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [DreamDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self._attn_implementation = config._attn_implementation
        self.norm = DreamRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = DreamRotaryEmbedding(config=config)

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache()

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)
|
| 685 |
+
|
| 686 |
+
# decoder layers
|
| 687 |
+
all_hidden_states = () if output_hidden_states else None
|
| 688 |
+
all_self_attns = () if output_attentions else None
|
| 689 |
+
|
| 690 |
+
for decoder_layer in self.layers:
|
| 691 |
+
if output_hidden_states:
|
| 692 |
+
all_hidden_states += (hidden_states,)
|
| 693 |
+
|
| 694 |
+
if self.gradient_checkpointing and self.training:
|
| 695 |
+
layer_outputs = self._gradient_checkpointing_func(
|
| 696 |
+
decoder_layer.__call__,
|
| 697 |
+
hidden_states,
|
| 698 |
+
attention_mask,
|
| 699 |
+
position_ids,
|
| 700 |
+
past_key_values,
|
| 701 |
+
output_attentions,
|
| 702 |
+
use_cache,
|
| 703 |
+
cache_position,
|
| 704 |
+
position_embeddings,
|
| 705 |
+
)
|
| 706 |
+
else:
|
| 707 |
+
layer_outputs = decoder_layer(
|
| 708 |
+
hidden_states,
|
| 709 |
+
attention_mask=attention_mask,
|
| 710 |
+
position_ids=position_ids,
|
| 711 |
+
past_key_value=past_key_values,
|
| 712 |
+
output_attentions=output_attentions,
|
| 713 |
+
use_cache=use_cache,
|
| 714 |
+
cache_position=cache_position,
|
| 715 |
+
position_embeddings=position_embeddings,
|
| 716 |
+
)
|
| 717 |
+
|
| 718 |
+
hidden_states = layer_outputs[0]
|
| 719 |
+
|
| 720 |
+
if output_attentions:
|
| 721 |
+
all_self_attns += (layer_outputs[1],)
|
| 722 |
+
|
| 723 |
+
hidden_states = self.norm(hidden_states)
|
| 724 |
+
|
| 725 |
+
# add hidden states from the last decoder layer
|
| 726 |
+
if output_hidden_states:
|
| 727 |
+
all_hidden_states += (hidden_states,)
|
| 728 |
+
|
| 729 |
+
if not return_dict:
|
| 730 |
+
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attns] if v is not None)
|
| 731 |
+
return BaseModelOutput(
|
| 732 |
+
last_hidden_state=hidden_states,
|
| 733 |
+
hidden_states=all_hidden_states,
|
| 734 |
+
attentions=all_self_attns,
|
| 735 |
+
)
|
| 736 |
+
|
| 737 |
+
|
| 738 |
+
class DreamModel(DreamGenerationMixin, DreamPreTrainedModel):
|
| 739 |
+
_tied_weights_keys = ["lm_head.weight"]
|
| 740 |
+
|
| 741 |
+
def __init__(self, config):
|
| 742 |
+
super().__init__(config)
|
| 743 |
+
self.model = DreamBaseModel(config)
|
| 744 |
+
self.vocab_size = config.vocab_size
|
| 745 |
+
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
|
| 746 |
+
self.loss_function = loss_function
|
| 747 |
+
|
| 748 |
+
# Initialize weights and apply final processing
|
| 749 |
+
self.post_init()
|
| 750 |
+
|
| 751 |
+
def reset_rope_parameters(self):
|
| 752 |
+
self.model.rotary_emb.reset_parameters()
|
| 753 |
+
for layer in self.model.layers:
|
| 754 |
+
layer.self_attn.rotary_emb.reset_parameters()
|
| 755 |
+
|
| 756 |
+
def get_input_embeddings(self):
|
| 757 |
+
return self.model.embed_tokens
|
| 758 |
+
|
| 759 |
+
def set_input_embeddings(self, value):
|
| 760 |
+
self.model.embed_tokens = value
|
| 761 |
+
|
| 762 |
+
def get_output_embeddings(self):
|
| 763 |
+
return self.lm_head
|
| 764 |
+
|
| 765 |
+
def set_output_embeddings(self, new_embeddings):
|
| 766 |
+
self.lm_head = new_embeddings
|
| 767 |
+
|
| 768 |
+
def set_decoder(self, decoder):
|
| 769 |
+
self.model = decoder
|
| 770 |
+
|
| 771 |
+
def get_decoder(self):
|
| 772 |
+
return self.model
|
| 773 |
+
|
| 774 |
+
def forward(
|
| 775 |
+
self,
|
| 776 |
+
input_ids: torch.LongTensor = None,
|
| 777 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 778 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 779 |
+
past_key_values: Optional[List[torch.FloatTensor]] = None,
|
| 780 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 781 |
+
labels: Optional[torch.LongTensor] = None,
|
| 782 |
+
use_cache: Optional[bool] = None,
|
| 783 |
+
output_attentions: Optional[bool] = None,
|
| 784 |
+
output_hidden_states: Optional[bool] = None,
|
| 785 |
+
return_dict: Optional[bool] = None,
|
| 786 |
+
cache_position: Optional[torch.LongTensor] = None,
|
| 787 |
+
num_logits_to_keep: int = 0,
|
| 788 |
+
**loss_kwargs,
|
| 789 |
+
) -> Union[Tuple, MaskedLMOutput]:
|
| 790 |
+
attention_mask = attention_mask.to(dtype=torch.bool) if attention_mask is not None else None
|
| 791 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| 792 |
+
output_hidden_states = (
|
| 793 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 794 |
+
)
|
| 795 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 796 |
+
|
| 797 |
+
if self.training and labels is not None:
|
| 798 |
+
prompt_lengths = get_prompt_lengths_from_labels(labels, attention_mask)
|
| 799 |
+
masked_input_ids, masked, _ = simple_uniform_mask(
|
| 800 |
+
input_ids,
|
| 801 |
+
prompt_lengths,
|
| 802 |
+
self.config.mask_token_id,
|
| 803 |
+
protect_eos_id=self.config.pad_token_id,
|
| 804 |
+
pad_id=self.config.pad_token_id,
|
| 805 |
+
)
|
| 806 |
+
input_ids = masked_input_ids
|
| 807 |
+
|
| 808 |
+
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
|
| 809 |
+
outputs = self.model(
|
| 810 |
+
input_ids=input_ids,
|
| 811 |
+
attention_mask=attention_mask,
|
| 812 |
+
position_ids=position_ids,
|
| 813 |
+
past_key_values=past_key_values,
|
| 814 |
+
inputs_embeds=inputs_embeds,
|
| 815 |
+
use_cache=use_cache,
|
| 816 |
+
output_attentions=output_attentions,
|
| 817 |
+
output_hidden_states=output_hidden_states,
|
| 818 |
+
return_dict=return_dict,
|
| 819 |
+
cache_position=cache_position,
|
| 820 |
+
)
|
| 821 |
+
|
| 822 |
+
hidden_states = outputs[0]
|
| 823 |
+
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
|
| 824 |
+
logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :])
|
| 825 |
+
|
| 826 |
+
loss = torch.tensor(0.0, device=logits.device)
|
| 827 |
+
if self.training and labels is not None:
|
| 828 |
+
loss = self.loss_function(logits, labels, masked, self.vocab_size, **loss_kwargs)
|
| 829 |
+
|
| 830 |
+
if not return_dict:
|
| 831 |
+
output = (logits,) + outputs[1:]
|
| 832 |
+
return (loss,) + output if loss is not None else output
|
| 833 |
+
|
| 834 |
+
return MaskedLMOutput(
|
| 835 |
+
loss=loss,
|
| 836 |
+
logits=logits,
|
| 837 |
+
hidden_states=outputs.hidden_states,
|
| 838 |
+
attentions=outputs.attentions,
|
| 839 |
+
)
|
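For orientation, the training path above re-noises each batch on the fly: labels mark the prompt boundary, `simple_uniform_mask` replaces a random fraction of the response with the mask token, and the masked-LM loss is taken only on the replaced positions. A toy sketch of a single supervised step (the repo id and token ids are illustrative, not the real ones):

```python
# Sketch: one supervised step through DreamModel (repo id and ids are illustrative).
import torch
from transformers import AutoModel

model = AutoModel.from_pretrained("org/dream-checkpoint", trust_remote_code=True)
model.train()

input_ids = torch.randint(0, 1000, (1, 16))
labels = input_ids.clone()
labels[:, :6] = -100  # first 6 tokens are the prompt; no loss there

out = model(input_ids=input_ids, labels=labels)  # forward masks the response internally
out.loss.backward()                              # MaskedLMOutput.loss drives the update
```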
special_tokens_map.json
ADDED
@@ -0,0 +1,36 @@
{
  "additional_special_tokens": [
    "<|beginoftext|>",
    "<|mask|>",
    "<|im_start|>",
    "<|im_end|>"
  ],
  "bos_token": {
    "content": "<|beginoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "mask_token": {
    "content": "<|mask|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
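These entries mirror the ids registered in `tokenizer_config.json` below (for example, `<|mask|>` is id 151666). A quick sanity check, assuming an illustrative repo id:

```python
# Sketch: inspecting the special-token wiring (repo id is illustrative).
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("org/dream-checkpoint", trust_remote_code=True)
assert tok.mask_token == "<|mask|>"
assert tok.convert_tokens_to_ids("<|mask|>") == 151666  # per tokenizer_config.json
assert tok.pad_token == tok.eos_token == "<|endoftext|>"
```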
tokenization_dream.py
ADDED
@@ -0,0 +1,340 @@
# coding=utf-8
# Copyright 2024 The Dream team, HKUNLP Group and The HuggingFace Inc. team. All rights reserved.
#
# This code is based on Qwen's implementations in this library.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for Dream."""

import json
import os
import unicodedata
from functools import lru_cache
from typing import Optional, Tuple

import regex as re

from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
from transformers.utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}


MAX_MODEL_INPUT_SIZES = {"dream/dream-tokenizer": 32768}

PRETOKENIZE_REGEX = r"""(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+"""


@lru_cache()
# Copied from transformers.models.gpt2.tokenization_gpt2.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings. We specifically avoid mapping to whitespace/control
    characters that the bpe code barfs on.

    The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
    if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
    decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
    tables between utf-8 bytes and unicode strings.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


# Copied from transformers.models.gpt2.tokenization_gpt2.get_pairs
def get_pairs(word):
    """
    Return set of symbol pairs in a word.

    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class DreamTokenizer(PreTrainedTokenizer):
    """
    Construct a Dream tokenizer. Based on byte-level Byte-Pair-Encoding.

    As with GPT2Tokenizer, this tokenizer has been trained to treat spaces like parts of the tokens, so a word will
    be encoded differently depending on whether it is at the beginning of the sentence (without space) or not:

    ```python
    >>> from transformers import AutoTokenizer

    >>> tokenizer = AutoTokenizer.from_pretrained("Dream-org/Dream-v0-Base-7B", trust_remote_code=True)
    >>> tokenizer("Hello world")["input_ids"]
    [9707, 1879]

    >>> tokenizer(" Hello world")["input_ids"]
    [21927, 1879]
    ```
    This is expected.

    You should not use GPT2Tokenizer instead, because of the different pretokenization rules.

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
    this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        merges_file (`str`):
            Path to the merges file.
        errors (`str`, *optional*, defaults to `"replace"`):
            Paradigm to follow when decoding bytes to UTF-8. See
            [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
        unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        bos_token (`str`, *optional*):
            The beginning of sequence token. Not applicable for this tokenizer.
        eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The end of sequence token.
        pad_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The token used for padding, for example when batching sequences of different lengths.
        clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
            Whether or not the model should clean up the spaces that were added when splitting the input text during
            the tokenization process. Not applicable to this tokenizer, since tokenization does not add spaces.
        split_special_tokens (`bool`, *optional*, defaults to `False`):
            Whether or not the special tokens should be split during the tokenization process. The default behavior is
            to not split special tokens. This means that if `<|endoftext|>` is the `eos_token`, then
            `tokenizer.tokenize("<|endoftext|>")` will give `['<|endoftext|>']`. Otherwise, if
            `split_special_tokens=True`, then `tokenizer.tokenize("<|endoftext|>")` will give `['<', '|', 'endo',
            'ft', 'ext', '|', '>']`. This argument is only supported for `slow` tokenizers for the moment.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        unk_token="<|endoftext|>",
        bos_token=None,
        eos_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        clean_up_tokenization_spaces=False,
        split_special_tokens=False,
        **kwargs,
    ):
        # Dream vocab does not contain control tokens; added tokens need to be special
        bos_token = (
            AddedToken(bos_token, lstrip=False, rstrip=False, special=True, normalized=False)
            if isinstance(bos_token, str)
            else bos_token
        )
        eos_token = (
            AddedToken(eos_token, lstrip=False, rstrip=False, special=True, normalized=False)
            if isinstance(eos_token, str)
            else eos_token
        )
        unk_token = (
            AddedToken(unk_token, lstrip=False, rstrip=False, special=True, normalized=False)
            if isinstance(unk_token, str)
            else unk_token
        )
        pad_token = (
            AddedToken(pad_token, lstrip=False, rstrip=False, special=True, normalized=False)
            if isinstance(pad_token, str)
            else pad_token
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        bpe_merges = []
        with open(merges_file, encoding="utf-8") as merges_handle:
            for i, line in enumerate(merges_handle):
                line = line.strip()
                if (i == 0 and line.startswith("#version:")) or not line:
                    continue
                bpe_merges.append(tuple(line.split()))
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        # NOTE: the cache can grow without bound and will get really large for long running processes
        # (esp. for texts in languages that do not use spaces between words, e.g. Chinese); technically
        # not a memory leak but appears as one.
        # GPT2Tokenizer has the same problem, so let's be consistent.
        self.cache = {}

        self.pat = re.compile(PRETOKENIZE_REGEX)

        if kwargs.get("add_prefix_space", False):
            logger.warning_once(
                f"{self.__class__.__name__} does not support `add_prefix_space`, setting it to True has no effect."
            )

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            split_special_tokens=split_special_tokens,
            **kwargs,
        )

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.get_vocab
    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.bpe
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
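    # --- Editor's sketch (not part of the original file) ----------------------
    # bpe() repeatedly merges the lowest-ranked adjacent pair until none of the
    # remaining pairs appears in bpe_ranks. With a hypothetical two-entry table
    # {("l", "o"): 0, ("lo", "w"): 1}, the word ("l", "o", "w") merges as:
    #     ("l", "o", "w") -> ("lo", "w") -> ("low",)
    # after which bpe() caches and returns "low".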

    # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._tokenize
    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._convert_token_to_id
    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._convert_id_to_token
    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        return self.decoder.get(index)

    # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.convert_tokens_to_string
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def decode(
        self,
        token_ids,
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: Optional[bool] = False,
        spaces_between_special_tokens: bool = False,
        **kwargs,
    ) -> str:
        # `spaces_between_special_tokens` defaults to True for _decode in slow tokenizers
        # and cannot be configured elsewhere, but it should default to False for DreamTokenizer
        return super().decode(
            token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            spaces_between_special_tokens=spaces_between_special_tokens,
            **kwargs,
        )

    # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.save_vocabulary
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def prepare_for_tokenization(self, text, **kwargs):
        text = unicodedata.normalize("NFC", text)
        return (text, kwargs)
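Taken together: `prepare_for_tokenization` NFC-normalizes the input, `_tokenize` byte-encodes and applies BPE, and the `decode` override keeps special tokens flush with surrounding text (its `spaces_between_special_tokens` defaults to False, unlike the slow-tokenizer default). A hedged round-trip sketch (the repo id is illustrative):

```python
# Sketch: lossless round trip and flush special-token decoding (repo id illustrative).
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("org/dream-checkpoint", trust_remote_code=True)

ids = tok("Hello world")["input_ids"]
assert tok.decode(ids) == "Hello world"  # byte-level BPE is lossless on plain text

chat = "<|im_start|>user\nhi<|im_end|>"
assert tok.decode(tok(chat)["input_ids"]) == chat  # no spaces injected around specials
```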
tokenizer_config.json
ADDED
@@ -0,0 +1,222 @@
{
  "add_bos_token": false,
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "151643": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151644": {
      "content": "<|im_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151645": {
      "content": "<|im_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151646": {
      "content": "<|object_ref_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151647": {
      "content": "<|object_ref_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151648": {
      "content": "<|box_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151649": {
      "content": "<|box_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151650": {
      "content": "<|quad_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151651": {
      "content": "<|quad_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151652": {
      "content": "<|vision_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151653": {
      "content": "<|vision_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151654": {
      "content": "<|vision_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151655": {
      "content": "<|image_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151656": {
      "content": "<|video_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151657": {
      "content": "<tool_call>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151658": {
      "content": "</tool_call>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151659": {
      "content": "<|fim_prefix|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151660": {
      "content": "<|fim_middle|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151661": {
      "content": "<|fim_suffix|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151662": {
      "content": "<|fim_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151663": {
      "content": "<|repo_name|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151664": {
      "content": "<|file_sep|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151665": {
      "content": "<|beginoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151666": {
      "content": "<|mask|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "additional_special_tokens": [
    "<|beginoftext|>",
    "<|mask|>",
    "<|im_start|>",
    "<|im_end|>"
  ],
  "auto_map": {
    "AutoTokenizer": [
      "elyza/ELYZA-Diffusion-Instruct-1.0-Dream-7B--tokenization_dream.DreamTokenizer",
      null
    ]
  },
  "bos_token": "<|beginoftext|>",
  "chat_template": "{%- if tools %}\n    {{- '<|im_start|>system\\n' }}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- messages[0]['content'] }}\n    {%- else %}\n        {{- 'You are a helpful assistant.' }}\n    {%- endif %}\n    {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n    {%- for tool in tools %}\n        {{- \"\\n\" }}\n        {{- tool | tojson }}\n    {%- endfor %}\n    {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n    {%- else %}\n        {{- '<|im_start|>system\\nYou are a helpful assistant.<|im_end|>\\n' }}\n    {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n    {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n        {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n    {%- elif message.role == \"assistant\" %}\n        {{- '<|im_start|>' + message.role }}\n        {%- if message.content %}\n            {{- '\\n' + message.content }}\n        {%- endif %}\n        {%- for tool_call in message.tool_calls %}\n            {%- if tool_call.function is defined %}\n                {%- set tool_call = tool_call.function %}\n            {%- endif %}\n            {{- '\\n<tool_call>\\n{\"name\": \"' }}\n            {{- tool_call.name }}\n            {{- '\", \"arguments\": ' }}\n            {{- tool_call.arguments | tojson }}\n            {{- '}\\n</tool_call>' }}\n        {%- endfor %}\n        {{- '<|im_end|>\\n' }}\n    {%- elif message.role == \"tool\" %}\n        {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n            {{- '<|im_start|>user' }}\n        {%- endif %}\n        {{- '\\n<tool_response>\\n' }}\n        {{- message.content }}\n        {{- '\\n</tool_response>' }}\n        {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n            {{- '<|im_end|>\\n' }}\n        {%- endif %}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|endoftext|>",
  "errors": "replace",
  "extra_special_tokens": {},
  "mask_token": "<|mask|>",
  "model_max_length": 131072,
  "pad_token": "<|endoftext|>",
  "split_special_tokens": false,
  "tokenizer_class": "DreamTokenizer",
  "unk_token": null
}
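The `chat_template` above is ChatML with Qwen-style tool calling: a default system prompt is injected when none is given, and `add_generation_prompt` appends the assistant header. A minimal rendering sketch (illustrative repo id; no tools):

```python
# Sketch: rendering the chat template defined above (repo id is illustrative).
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("org/dream-checkpoint", trust_remote_code=True)
prompt = tok.apply_chat_template(
    [{"role": "user", "content": "Hello"}],
    tokenize=False,
    add_generation_prompt=True,
)
print(prompt)
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# Hello<|im_end|>
# <|im_start|>assistant
```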
training_utils.py
ADDED
@@ -0,0 +1,271 @@
"""
Utility functions for training the Dream model.

References: https://github.com/zhijie-group/Discrete-Diffusion-Forcing/blob/main/D2F-train/utils/loss.py
"""

import functools
import math

import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor


@torch.no_grad()
def get_prompt_lengths_from_labels(
    labels: Tensor,
    attention_mask: Tensor | None = None,
    ignore_index: int = -100,
) -> Tensor:
    """
    labels: (B, T) int64, with ignore_index where we ignore loss (prompt/user/system/pad)
    attention_mask: optional (B, T) 1/0; if given, masked-out (0) positions are treated as non-real tokens

    Returns: (B,) int64 prompt lengths = index of first (labels != ignore_index) per sample.
    If a sample has no supervised tokens, length = number of real tokens.
    """
    B, T = labels.shape
    device = labels.device

    supervised = labels.ne(ignore_index)
    if attention_mask is not None:
        supervised = supervised & attention_mask.bool()

    idx_grid = torch.arange(T, device=device).expand(B, T)
    first_idx = torch.where(supervised, idx_grid, torch.full_like(idx_grid, T)).min(dim=1).values

    if attention_mask is None:
        return first_idx

    real_len = attention_mask.sum(dim=1).to(torch.long)
    return torch.where((labels.ne(ignore_index) & attention_mask.bool()).any(dim=1), first_idx, real_len)
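

# --- Editor's sketch (not part of the original file) ---------------------------
# Concretely: the prompt length is the index of the first non-ignored label,
# falling back to the number of real tokens when a row has no supervised labels.
def _demo_get_prompt_lengths() -> None:
    """Toy worked example for get_prompt_lengths_from_labels; safe to delete."""
    labels = torch.tensor([
        [-100, -100, 7, 8],        # first supervised index -> 2
        [-100, -100, -100, -100],  # no supervised tokens -> real length
    ])
    mask = torch.tensor([[1, 1, 1, 1], [1, 1, 1, 0]])
    assert get_prompt_lengths_from_labels(labels, mask).tolist() == [2, 3]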


@torch.no_grad()
def simple_uniform_mask(
    input_ids: Tensor,  # (B, L), int64
    prompt_lengths: Tensor,  # (B,), int64 — number of tokens to keep unmasked at the left
    mask_id: int,  # token id to write where masked
    p: float | None = None,  # fixed mask rate; if None, sample per-sample in [p_min, p_max]
    p_min: float = 0.0,
    p_max: float = 1.0,
    protect_eos_id: int | None = None,  # treated as EOS id for the tail rule
    pad_id: int | None = None,
    ensure_at_least_one: bool = True,
    eps: float = 1e-6,  # tiny floor for probabilities
) -> tuple[Tensor, Tensor, Tensor]:
    """
    Returns:
      noisy: (B, L) int64 — input_ids with some tail tokens replaced by mask_id
      masked: (B, L) bool — True where we replaced a token (incurs loss)
      p_samples: (B,) float32 — per-sample mask probabilities used
    """
    B, L = input_ids.shape
    device = input_ids.device

    noisy = input_ids.clone()
    masked = torch.zeros_like(input_ids, dtype=torch.bool)
    p_mask_tensor = torch.zeros((B, L), device=device, dtype=torch.float32)

    # choose per-sample p
    if p is None:
        p_samples = torch.rand(B, device=device) * (p_max - p_min) + p_min
    else:
        p = float(p)
        p_samples = torch.full((B,), p, device=device)

    for i in range(B):
        pl = int(prompt_lengths[i].item())
        if pl >= L:
            continue  # nothing to mask

        # ---- Eligible region: [pl, L). Exclude PAD only here. Do NOT exclude EOS now. ----
        tail_tokens = input_ids[i, pl:L]
        elig = torch.ones_like(tail_tokens, dtype=torch.bool)
        if pad_id is not None:
            elig &= tail_tokens != pad_id
        if not elig.any():
            continue

        # i.i.d. Bernoulli with per-sample prob
        pi = float(torch.clamp(p_samples[i], eps, 1.0 - eps).item())
        randv = torch.rand(elig.shape, device=device)
        tail_mask = (randv < pi) & elig

        # optionally guarantee at least one masked token per sample
        if ensure_at_least_one and not tail_mask.any():
            # pick a random eligible index to force-mask
            idxs = torch.nonzero(elig, as_tuple=False).squeeze(1)
            force_idx = idxs[torch.randint(0, len(idxs), (1,), device=device)]
            tail_mask[force_idx] = True

        # provisional write-back BEFORE EOS rule
        noisy[i, pl:L] = torch.where(
            tail_mask,
            torch.tensor(mask_id, device=device, dtype=noisy.dtype),
            tail_tokens,
        )
        masked[i, pl:L] = tail_mask
        p_mask_tensor[i, pl:L] = torch.where(elig, torch.tensor(pi, device=device), torch.tensor(0.0, device=device))

        # ---- EOS tail rule (apply only if EOS is distinct from PAD) ----
        if protect_eos_id is not None and (pad_id is None or protect_eos_id != pad_id):
            # Find first EOS at/after prompt
            eos_positions = input_ids[i, :] == protect_eos_id
            # First EOS index in the entire sequence
            if eos_positions.any():
                first_eos_idx = int(torch.argmax(eos_positions.to(torch.uint8)).item())
            else:
                first_eos_idx = L  # no EOS

            # Tail exists only if EOS is not the last token
            if first_eos_idx < L - 1:
                # Check whether that first EOS was masked
                was_first_eos_masked = False
                if first_eos_idx >= pl:
                    was_first_eos_masked = bool(masked[i, first_eos_idx].item())
                else:
                    # EOS lies inside the prompt region; it couldn't be masked by the sampling
                    was_first_eos_masked = False

                # Build tail slice [first_eos_idx, L)
                tail_slice = slice(first_eos_idx, L)

                if was_first_eos_masked:
                    # Case A: mask entire EOS tail; loss applies there
                    noisy[i, tail_slice] = torch.tensor(mask_id, device=device, dtype=noisy.dtype)
                    masked[i, tail_slice] = True
                    # For consistency, set per-token prob on the tail to pi where we forced masking
                    p_mask_tensor[i, tail_slice] = pi
                else:
                    # Case B: force EOS on the tail; no loss there
                    noisy[i, tail_slice] = torch.tensor(protect_eos_id, device=device, dtype=noisy.dtype)
                    masked[i, tail_slice] = False
                    p_mask_tensor[i, tail_slice] = 0.0

    return noisy, masked, p_samples
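

# --- Editor's sketch (not part of the original file) ---------------------------
# Usage sketch: mask the response region at a fixed 50% rate; the prompt is
# never touched, and the region after the first EOS is either fully masked or
# forced back to EOS, depending on whether that first EOS itself got masked.
def _demo_simple_uniform_mask() -> None:
    """Toy call into simple_uniform_mask (ids are made up); safe to delete."""
    input_ids = torch.tensor([[5, 6, 7, 8, 9, 9]])  # 9 plays the role of EOS
    noisy, masked, p = simple_uniform_mask(
        input_ids,
        prompt_lengths=torch.tensor([2]),
        mask_id=0,
        p=0.5,
        protect_eos_id=9,
    )
    assert not masked[0, :2].any()                   # prompt intact
    assert (noisy[0, :2] == input_ids[0, :2]).all()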


def _shift_logits(logits: Tensor) -> Tensor:
    """
    https://github.com/zhijie-group/Discrete-Diffusion-Forcing/blob/eed9750ab081cdc302daa9d8305478988f3f5a17/D2F-train/utils/util.py#L145C1-L150C26
    """
    shifted_logits = torch.zeros_like(logits)
    shifted_logits[:, 1:, :] = logits[:, :-1, :]
    shifted_logits[:, 0, :] = 1.0

    return shifted_logits


def _context_adaptive_reweight(seq_len: int, distribution: str = "symmetric-geometric", **kwargs) -> Tensor:
    """
    Create context-adaptive reweighting matrix W of shape (seq_len, seq_len)
    https://github.com/DreamLM/Dream/blob/fd91b8f1d47c5cbe4a8a1674fd9b98045e79d9db/src/trainer/fsdp_sft_trainer.py#L93
    """
    position_ids_l = np.arange(seq_len).reshape(-1, 1)
    position_ids_r = np.arange(seq_len).reshape(1, -1)
    distance = position_ids_l - position_ids_r
    distance = torch.from_numpy(distance)

    def geometric_distribution(k, cart_p=0.8, **_):
        if not 0 < cart_p < 1:
            raise ValueError("cart_p must be in (0, 1)")
        res = (math.log(cart_p) + (k.abs() - 1) * math.log(1 - cart_p)).exp() * 0.5
        res.masked_fill_(k == 0, 0)
        return res

    if distribution == "symmetric-geometric":
        matrix = geometric_distribution(distance, **kwargs)
    else:
        raise ValueError(f"Unknown distribution {distribution}")
    return matrix
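

# --- Editor's sketch (not part of the original file) ---------------------------
# The symmetric-geometric weights decay with the distance |k| between positions,
# w(k) = 0.5 * cart_p * (1 - cart_p) ** (|k| - 1), and vanish at k == 0.
def _demo_cart_matrix() -> None:
    """One row of the reweighting matrix for cart_p=0.5; safe to delete."""
    W = _context_adaptive_reweight(5, cart_p=0.5)
    expected = [0.125, 0.25, 0.0, 0.25, 0.125]  # distances 2,1,0,1,2 from position 2
    assert torch.allclose(W[2], torch.tensor(expected))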


@functools.lru_cache(maxsize=64)
def _cached_cart_matrix(seq_len: int, cart_p: float, distribution: str) -> Tensor:
    """
    Get cached context-adaptive reweighting matrix W of shape (seq_len, seq_len)
    """
    W = _context_adaptive_reweight(seq_len, distribution=distribution, cart_p=cart_p)
    return W  # CPU float tensor; we'll .to(device, dtype) at use time


def loss_function(
    logits: Tensor,  # (B, L, V)
    labels: Tensor,  # (B, L)
    masked: Tensor,  # (B, L) bool or float; True/1.0 => include in loss
    vocab_size: int,
    *,
    t: Tensor | None = None,  # (B,) in [0,1], per-sample time
    time_weighting: str = "cart",  # "none" | "original" | "linear" | "cart"
    cart_p: float = 0.5,  # for cart time weighting
    cart_distribution: str = "symmetric-geometric",  # for cart time weighting
    token_reweighting: bool = False,  # optional difficulty weighting
    alpha: float = 1.0,
    gamma: float = 0.0,
    ignore_index: int = -100,
    eps: float = 1e-6,
) -> Tensor:
    """
    Cross-entropy on masked positions with optional time- and token-reweighting.
    time_weighting:
      - "none": w_t = 1
      - "original": w_t = 1 / t
      - "linear": w_t = 1 - t
      - "cart": w_t from context-adaptive reweighting matrix
    We normalize by the sum of (masked * w_t) so the scale stays consistent.
    """
    B, L, _ = logits.shape
    shifted_logits = _shift_logits(logits)  # (B, L, V)

    # per-token CE without reduction
    per_tok = F.cross_entropy(
        shifted_logits.view(-1, vocab_size),
        labels.view(-1),
        ignore_index=ignore_index,
        reduction="none",
    ).view_as(labels)  # (B, L)

    # base mask: include only selected tokens and not ignore_index
    base_mask = masked.to(per_tok.dtype)  # (B, L)
    if ignore_index is not None:
        base_mask = base_mask * (labels.ne(ignore_index)).to(per_tok.dtype)

    # time weights (per-sample -> per-token broadcast)
    if t is None or time_weighting == "none":
        w_t = 1.0
        time_w = torch.ones_like(per_tok)
    else:
        t = t.to(per_tok.device, dtype=per_tok.dtype)
        if time_weighting == "original":
            w_t = 1.0 / t.clamp_min(eps)  # upweight small t (early timesteps)
            time_w = w_t.view(-1, 1).expand_as(per_tok)  # (B, L)
        elif time_weighting == "linear":
            w_t = (1.0 - t).clamp_min(0.0)  # downweight large t
            time_w = w_t.view(-1, 1).expand_as(per_tok)  # (B, L)
        elif time_weighting == "cart":
            W = _cached_cart_matrix(L, float(cart_p), str(cart_distribution)).to(
                per_tok.device, dtype=per_tok.dtype
            )  # (L, L)
            w_pos = base_mask @ W.T  # (B, L) @ (L, L) -> (B, L)
            # normalize so mean weight over included tokens is 1 (stable scale)
            mass = base_mask.sum(dim=1, keepdim=True).clamp_min(1.0)  # (B, 1)
            mean_w = (w_pos * base_mask).sum(dim=1, keepdim=True) / mass  # (B, 1)
            time_w = (w_pos / (mean_w + eps)).where(mass > 0, torch.ones_like(w_pos))  # (B, L)
        else:
            raise ValueError(f"Unknown time_weighting: {time_weighting}")

    weighted = per_tok * base_mask * time_w

    # optional difficulty-based token reweighting (like alpha*(1-exp(-loss))**gamma * loss)
    if token_reweighting and gamma != 0.0:
        weighted = alpha * (1.0 - torch.exp(-weighted)).pow(gamma) * weighted
    elif token_reweighting:
        weighted = alpha * weighted

    # normalize by effective weight mass (masked * time_w), not just masked count
    denom = (base_mask * time_w).sum().clamp_min(1.0)
    loss = weighted.sum() / denom
    return loss
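Putting it together, `loss_function` computes shifted per-token cross-entropy on the masked positions and normalizes by the effective weight mass. One caveat worth noting: with the default `t=None`, the `t is None` gate above skips time weighting entirely, so `time_weighting="cart"` only takes effect when a per-sample `t` is passed. A toy invocation (random tensors, shapes only; assumes `training_utils` is importable):

```python
# Sketch: a toy end-to-end call of loss_function (all values are random).
import torch
from training_utils import loss_function

B, L, V = 2, 6, 11
logits = torch.randn(B, L, V, requires_grad=True)
labels = torch.randint(0, V, (B, L))
masked = torch.zeros(B, L, dtype=torch.bool)
masked[:, 3:] = True  # pretend the last 3 positions were noised
t = torch.rand(B)     # per-sample diffusion time; required for "cart" to apply

loss = loss_function(logits, labels, masked, vocab_size=V, t=t, time_weighting="cart")
loss.backward()       # scalar, differentiable w.r.t. logits
```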
vocab.json
ADDED
The diff for this file is too large to render.
See raw diff