minpeter and haijunlv committed
Commit c048a22 · verified · 0 Parent(s)

Duplicate from internlm/internlm3-8b-instruct

Co-authored-by: henry <haijunlv@users.noreply.huggingface.co>

.gitattributes ADDED
@@ -0,0 +1,35 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text

LICENSE.txt ADDED
@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright 2023-2024 Shanghai AI Laboratory

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

README.md ADDED
@@ -0,0 +1,889 @@
---
license: apache-2.0
pipeline_tag: text-generation
---
# InternLM

<div align="center">
<img src="https://github.com/InternLM/InternLM/assets/22529082/b9788105-8892-4398-8b47-b513a292378e" width="200"/>

<div>&nbsp;</div>
<div align="center">
<b><font size="5">InternLM</font></b>
<sup>
<a href="https://internlm.intern-ai.org.cn/">
<i><font size="4">HOT</font></i>
</a>
</sup>
<div>&nbsp;</div>
</div>

[![evaluation](https://github.com/InternLM/InternLM/assets/22529082/f80a2a58-5ddf-471a-8da4-32ab65c8fd3b)](https://github.com/internLM/OpenCompass/)

[💻Github Repo](https://github.com/InternLM/InternLM) • [🤗Demo](https://huggingface.co/spaces/internlm/internlm3-8b-instruct) • [🤔Reporting Issues](https://github.com/InternLM/InternLM/issues/new) • [📜Technical Report](https://arxiv.org/abs/2403.17297)

</div>

<p align="center">
👋 join us on <a href="https://discord.gg/xa29JuW87d" target="_blank">Discord</a> and <a href="https://github.com/InternLM/InternLM/assets/25839884/a6aad896-7232-4220-ac84-9e070c2633ce" target="_blank">WeChat</a>
</p>

## Introduction

InternLM3 has open-sourced an 8-billion-parameter instruction model, InternLM3-8B-Instruct, designed for general-purpose use and advanced reasoning. The model has the following characteristics:

- **Enhanced performance at reduced cost**:
  State-of-the-art performance on reasoning and knowledge-intensive tasks, surpassing models like Llama3.1-8B and Qwen2.5-7B. Remarkably, InternLM3 is trained on only 4 trillion high-quality tokens, saving more than 75% of the training cost compared to other LLMs of similar scale.
- **Deep thinking capability**:
  InternLM3 supports both a deep thinking mode for solving complicated reasoning tasks via long chain-of-thought reasoning and a normal response mode for fluent user interactions.

## InternLM3-8B-Instruct

### Performance Evaluation

We conducted a comprehensive evaluation of InternLM using the open-source evaluation tool [OpenCompass](https://github.com/internLM/OpenCompass/). The evaluation covered five capability dimensions: disciplinary competence, language competence, knowledge competence, inference competence, and comprehension competence. Some of the results are shown below; you can visit the [OpenCompass leaderboard](https://rank.opencompass.org.cn) for more.

|              | Benchmark                       | InternLM3-8B-Instruct | Qwen2.5-7B-Instruct | Llama3.1-8B-Instruct | GPT-4o-mini(closed source) |
| ------------ | ------------------------------- | --------------------- | ------------------- | -------------------- | -------------------------- |
| General      | CMMLU(0-shot)                   | **83.1**              | 75.8                | 53.9                 | 66.0                       |
|              | MMLU(0-shot)                    | 76.6                  | **76.8**            | 71.8                 | 82.7                       |
|              | MMLU-Pro(0-shot)                | **57.6**              | 56.2                | 48.1                 | 64.1                       |
| Reasoning    | GPQA-Diamond(0-shot)            | **37.4**              | 33.3                | 24.2                 | 42.9                       |
|              | DROP(0-shot)                    | **83.1**              | 80.4                | 81.6                 | 85.2                       |
|              | HellaSwag(10-shot)              | **91.2**              | 85.3                | 76.7                 | 89.5                       |
|              | KOR-Bench(0-shot)               | **56.4**              | 44.6                | 47.7                 | 58.2                       |
| MATH         | MATH-500(0-shot)                | **83.0\***            | 72.4                | 48.4                 | 74.0                       |
|              | AIME2024(0-shot)                | **20.0\***            | 16.7                | 6.7                  | 13.3                       |
| Coding       | LiveCodeBench(2407-2409 Pass@1) | **17.8**              | 16.8                | 12.9                 | 21.8                       |
|              | HumanEval(Pass@1)               | 82.3                  | **85.4**            | 72.0                 | 86.6                       |
| Instruction  | IFEval(Prompt-Strict)           | **79.3**              | 71.7                | 75.2                 | 79.7                       |
| Long Context | RULER(4-128K Average)           | 87.9                  | 81.4                | **88.5**             | 90.7                       |
| Chat         | AlpacaEval 2.0(LC WinRate)      | **51.1**              | 30.3                | 25.0                 | 50.7                       |
|              | WildBench(Raw Score)            | **33.1**              | 23.3                | 1.5                  | 40.3                       |
|              | MT-Bench-101(Score 1-10)        | **8.59**              | 8.49                | 8.37                 | 8.87                       |

- Values in bold indicate the **highest** score among the open-source models.
- The evaluation results were obtained with [OpenCompass](https://github.com/internLM/OpenCompass/) (scores marked with \* were evaluated in Thinking Mode); the evaluation configuration can be found in the configuration files provided by OpenCompass.
- The numbers may vary across [OpenCompass](https://github.com/internLM/OpenCompass/) version iterations, so please refer to the latest evaluation results from OpenCompass.

**Limitations:** Although we have made efforts to ensure the safety of the model during the training process and to encourage the model to generate text that complies with ethical and legal requirements, the model may still produce unexpected outputs due to its size and probabilistic generation paradigm. For example, the generated responses may contain biases, discrimination, or other harmful content. Please do not propagate such content. We are not responsible for any consequences resulting from the dissemination of harmful information.

### Requirements

```
transformers >= 4.48
```

### Conversation Mode

#### Transformers inference

To load the InternLM3 8B Instruct model using Transformers, use the following code:

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_dir = "internlm/internlm3-8b-instruct"
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
# Set `torch_dtype=torch.float16` to load the model in float16; otherwise it will be loaded as float32 and might cause an OOM error.
model = AutoModelForCausalLM.from_pretrained(model_dir, trust_remote_code=True, torch_dtype=torch.bfloat16).cuda()
# (Optional) On low-resource devices, you can load the model in 4-bit or 8-bit via bitsandbytes to further save GPU memory.
# InternLM3 8B in 4-bit costs nearly 8GB of GPU memory.
# pip install -U bitsandbytes
# 8-bit: model = AutoModelForCausalLM.from_pretrained(model_dir, device_map="auto", trust_remote_code=True, load_in_8bit=True)
# 4-bit: model = AutoModelForCausalLM.from_pretrained(model_dir, device_map="auto", trust_remote_code=True, load_in_4bit=True)
model = model.eval()

system_prompt = """You are an AI assistant whose name is InternLM (书生·浦语).
- InternLM (书生·浦语) is a conversational language model that is developed by Shanghai AI Laboratory (上海人工智能实验室). It is designed to be helpful, honest, and harmless.
- InternLM (书生·浦语) can understand and communicate fluently in the language chosen by the user such as English and 中文."""
messages = [
    {"role": "system", "content": system_prompt},
    {"role": "user", "content": "Please tell me five scenic spots in Shanghai"},
]
tokenized_chat = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt").to("cuda")

generated_ids = model.generate(tokenized_chat, max_new_tokens=1024, temperature=1, repetition_penalty=1.005, top_k=40, top_p=0.8)

generated_ids = [
    output_ids[len(input_ids):] for input_ids, output_ids in zip(tokenized_chat, generated_ids)
]
prompt = tokenizer.batch_decode(tokenized_chat)[0]
print(prompt)
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
print(response)
```
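
For interactive use you may also want to stream the reply as it is generated. Below is a minimal sketch using the `TextStreamer` utility from `transformers`; it reuses the `model`, `tokenizer`, and `tokenized_chat` objects defined above and is an optional add-on rather than part of the official example:

```python
from transformers import TextStreamer

# Print tokens to stdout as they are generated instead of waiting for the full reply.
# skip_prompt=True suppresses the echoed prompt; skip_special_tokens drops control tokens.
streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
_ = model.generate(
    tokenized_chat,
    max_new_tokens=1024,
    temperature=1,
    repetition_penalty=1.005,
    top_k=40,
    top_p=0.8,
    streamer=streamer,
)
```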

#### LMDeploy inference

LMDeploy is a toolkit for compressing, deploying, and serving LLMs, developed by the MMRazor and MMDeploy teams.

```bash
pip install lmdeploy
```

You can run batch inference locally with the following Python code:

```python
import lmdeploy

model_dir = "internlm/internlm3-8b-instruct"
pipe = lmdeploy.pipeline(model_dir)
response = pipe("Please tell me five scenic spots in Shanghai")
print(response)
```

Or you can launch an OpenAI-compatible server with the following command:

```bash
lmdeploy serve api_server internlm/internlm3-8b-instruct --model-name internlm3-8b-instruct --server-port 23333
```

Then you can send a chat request to the server:

```bash
curl http://localhost:23333/v1/chat/completions \
    -H "Content-Type: application/json" \
    -d '{
    "model": "internlm3-8b-instruct",
    "messages": [
    {"role": "user", "content": "Please tell me five scenic spots in Shanghai"}
    ]
    }'
```
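
Because the server speaks the OpenAI chat-completions protocol, any OpenAI-compatible client works as well. Here is a minimal sketch using the official `openai` Python package (`pip install openai`); the dummy `api_key` is an assumption that holds when the server was started without API-key authentication:

```python
from openai import OpenAI

# Point the client at the local LMDeploy server started above.
client = OpenAI(base_url="http://localhost:23333/v1", api_key="none")

completion = client.chat.completions.create(
    model="internlm3-8b-instruct",
    messages=[{"role": "user", "content": "Please tell me five scenic spots in Shanghai"}],
)
print(completion.choices[0].message.content)
```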

Find more details in the [LMDeploy documentation](https://lmdeploy.readthedocs.io/en/latest/).

#### Ollama inference

First, install Ollama, pull the model, and install the Python client:

```bash
# install ollama
curl -fsSL https://ollama.com/install.sh | sh
# fetch the model
ollama pull internlm/internlm3-8b-instruct
# install the python client
pip install ollama
```

Inference code:

```python
import ollama

system_prompt = """You are an AI assistant whose name is InternLM (书生·浦语).
- InternLM (书生·浦语) is a conversational language model that is developed by Shanghai AI Laboratory (上海人工智能实验室). It is designed to be helpful, honest, and harmless.
- InternLM (书生·浦语) can understand and communicate fluently in the language chosen by the user such as English and 中文."""

messages = [
    {
        "role": "system",
        "content": system_prompt,
    },
    {
        "role": "user",
        "content": "Please tell me five scenic spots in Shanghai"
    },
]

stream = ollama.chat(
    model='internlm/internlm3-8b-instruct',
    messages=messages,
    stream=True,
)

for chunk in stream:
    print(chunk['message']['content'], end='', flush=True)
```
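
If you prefer a single blocking call over streaming, the same client exposes a non-streaming form. A small sketch reusing the `messages` list above; recent releases of the `ollama` package return a typed response object that still supports this dict-style access:

```python
# Non-streaming variant: one call returns the complete reply.
result = ollama.chat(model='internlm/internlm3-8b-instruct', messages=messages)
print(result['message']['content'])
```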

#### vLLM inference

Refer to the [installation documentation](https://docs.vllm.ai/en/latest/getting_started/installation/index.html) to install the latest version of vLLM:

```bash
pip install vllm --pre --extra-index-url https://wheels.vllm.ai/nightly
```

Inference code:

```python
from vllm import LLM, SamplingParams

llm = LLM(model="internlm/internlm3-8b-instruct")
sampling_params = SamplingParams(temperature=1, repetition_penalty=1.005, top_k=40, top_p=0.8)

system_prompt = """You are an AI assistant whose name is InternLM (书生·浦语).
- InternLM (书生·浦语) is a conversational language model that is developed by Shanghai AI Laboratory (上海人工智能实验室). It is designed to be helpful, honest, and harmless.
- InternLM (书生·浦语) can understand and communicate fluently in the language chosen by the user such as English and 中文."""

prompts = [
    {
        "role": "system",
        "content": system_prompt,
    },
    {
        "role": "user",
        "content": "Please tell me five scenic spots in Shanghai"
    },
]
outputs = llm.chat(prompts,
                   sampling_params=sampling_params,
                   use_tqdm=False)
print(outputs)
```
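
Note that `llm.chat` returns `RequestOutput` objects rather than plain strings, so `print(outputs)` dumps the full request metadata. To print only the generated text, index into the result (a small sketch based on vLLM's documented output structure):

```python
# Each RequestOutput carries one or more candidate completions; take the first.
for request_output in outputs:
    print(request_output.outputs[0].text)
```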

### Thinking Mode

#### Thinking Demo

<img src="https://github.com/InternLM/InternLM/blob/017ba7446d20ecc3b9ab8e7b66cc034500868ab4/assets/solve_puzzle.png?raw=true" width="400"/>

#### Thinking system prompt

```python
thinking_system_prompt = """You are an expert mathematician with extensive experience in mathematical competitions. You approach problems through systematic thinking and rigorous reasoning. When solving problems, follow these thought processes:
## Deep Understanding
Take time to fully comprehend the problem before attempting a solution. Consider:
- What is the real question being asked?
- What are the given conditions and what do they tell us?
- Are there any special restrictions or assumptions?
- Which information is crucial and which is supplementary?
## Multi-angle Analysis
Before solving, conduct thorough analysis:
- What mathematical concepts and properties are involved?
- Can you recall similar classic problems or solution methods?
- Would diagrams or tables help visualize the problem?
- Are there special cases that need separate consideration?
## Systematic Thinking
Plan your solution path:
- Propose multiple possible approaches
- Analyze the feasibility and merits of each method
- Choose the most appropriate method and explain why
- Break complex problems into smaller, manageable steps
## Rigorous Proof
During the solution process:
- Provide solid justification for each step
- Include detailed proofs for key conclusions
- Pay attention to logical connections
- Be vigilant about potential oversights
## Repeated Verification
After completing your solution:
- Verify your results satisfy all conditions
- Check for overlooked special cases
- Consider if the solution can be optimized or simplified
- Review your reasoning process
Remember:
1. Take time to think thoroughly rather than rushing to an answer
2. Rigorously prove each key conclusion
3. Keep an open mind and try different approaches
4. Summarize valuable problem-solving methods
5. Maintain healthy skepticism and verify multiple times
Your response should reflect deep mathematical understanding and precise logical thinking, making your solution path and reasoning clear to others.
When you're ready, present your complete solution with:
- Clear problem understanding
- Detailed solution process
- Key insights
- Thorough verification
Focus on clear, logical progression of ideas and thorough explanation of your mathematical reasoning. Provide answers in the same language as the user asking the question, repeat the final answer using a '\\boxed{}' without any units, you have [[8192]] tokens to complete the answer.
"""
```

#### Transformers inference

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_dir = "internlm/internlm3-8b-instruct"
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
# Set `torch_dtype=torch.float16` to load the model in float16; otherwise it will be loaded as float32 and might cause an OOM error.
model = AutoModelForCausalLM.from_pretrained(model_dir, trust_remote_code=True, torch_dtype=torch.bfloat16).cuda()
# (Optional) On low-resource devices, you can load the model in 4-bit or 8-bit via bitsandbytes to further save GPU memory.
# InternLM3 8B in 4-bit costs nearly 8GB of GPU memory.
# pip install -U bitsandbytes
# 8-bit: model = AutoModelForCausalLM.from_pretrained(model_dir, device_map="auto", trust_remote_code=True, load_in_8bit=True)
# 4-bit: model = AutoModelForCausalLM.from_pretrained(model_dir, device_map="auto", trust_remote_code=True, load_in_4bit=True)
model = model.eval()

messages = [
    {"role": "system", "content": thinking_system_prompt},
    {"role": "user", "content": "Given the function \\(f(x)=\\mathrm{e}^{x}-ax - a^{3}\\),\n(1) When \\(a = 1\\), find the equation of the tangent line to the curve \\(y = f(x)\\) at the point \\((1,f(1))\\).\n(2) If \\(f(x)\\) has a local minimum and the minimum value is less than \\(0\\), determine the range of values for \\(a\\)."},
]
tokenized_chat = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt").to("cuda")

generated_ids = model.generate(tokenized_chat, max_new_tokens=8192)

generated_ids = [
    output_ids[len(input_ids):] for input_ids, output_ids in zip(tokenized_chat, generated_ids)
]
prompt = tokenizer.batch_decode(tokenized_chat)[0]
print(prompt)
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
print(response)
```
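
Since the thinking system prompt instructs the model to repeat the final answer inside `\boxed{}`, a simple post-processing step can separate the answer from the long chain-of-thought. The helper below is hypothetical (not part of the official example) and assumes the model followed the prompt's format:

```python
import re

def extract_boxed_answer(text: str):
    # Return the contents of the last \boxed{...} occurrence (no nested braces), or None.
    matches = re.findall(r"\\boxed\{([^{}]*)\}", text)
    return matches[-1] if matches else None

print(extract_boxed_answer(response))
```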

#### LMDeploy inference

LMDeploy is a toolkit for compressing, deploying, and serving LLMs.

```bash
pip install lmdeploy
```

You can run batch inference locally with the following Python code:

```python
from lmdeploy import pipeline, GenerationConfig, ChatTemplateConfig

model_dir = "internlm/internlm3-8b-instruct"
chat_template_config = ChatTemplateConfig(model_name='internlm3')
pipe = pipeline(model_dir, chat_template_config=chat_template_config)

messages = [
    {"role": "system", "content": thinking_system_prompt},
    {"role": "user", "content": "Given the function \\(f(x)=\\mathrm{e}^{x}-ax - a^{3}\\),\n(1) When \\(a = 1\\), find the equation of the tangent line to the curve \\(y = f(x)\\) at the point \\((1,f(1))\\).\n(2) If \\(f(x)\\) has a local minimum and the minimum value is less than \\(0\\), determine the range of values for \\(a\\)."},
]

response = pipe(messages, gen_config=GenerationConfig(max_new_tokens=2048))
print(response)
```

#### Ollama inference

First, install Ollama, pull the model, and install the Python client:

```bash
# install ollama
curl -fsSL https://ollama.com/install.sh | sh
# fetch the model
ollama pull internlm/internlm3-8b-instruct
# install the python client
pip install ollama
```

Inference code:

```python
import ollama

messages = [
    {
        "role": "system",
        "content": thinking_system_prompt,
    },
    {
        "role": "user",
        "content": "Given the function \\(f(x)=\\mathrm{e}^{x}-ax - a^{3}\\),\n(1) When \\(a = 1\\), find the equation of the tangent line to the curve \\(y = f(x)\\) at the point \\((1,f(1))\\).\n(2) If \\(f(x)\\) has a local minimum and the minimum value is less than \\(0\\), determine the range of values for \\(a\\)."
    },
]

stream = ollama.chat(
    model='internlm/internlm3-8b-instruct',
    messages=messages,
    stream=True,
)

for chunk in stream:
    print(chunk['message']['content'], end='', flush=True)
```

#### vLLM inference

Refer to the [installation documentation](https://docs.vllm.ai/en/latest/getting_started/installation/index.html) to install the latest version of vLLM:

```bash
pip install vllm --pre --extra-index-url https://wheels.vllm.ai/nightly
```

Inference code:

```python
from vllm import LLM, SamplingParams

llm = LLM(model="internlm/internlm3-8b-instruct")
sampling_params = SamplingParams(temperature=1, repetition_penalty=1.005, top_k=40, top_p=0.8, max_tokens=8192)

prompts = [
    {
        "role": "system",
        "content": thinking_system_prompt,
    },
    {
        "role": "user",
        "content": "Given the function \\(f(x)=\\mathrm{e}^{x}-ax - a^{3}\\),\n(1) When \\(a = 1\\), find the equation of the tangent line to the curve \\(y = f(x)\\) at the point \\((1,f(1))\\).\n(2) If \\(f(x)\\) has a local minimum and the minimum value is less than \\(0\\), determine the range of values for \\(a\\)."
    },
]
outputs = llm.chat(prompts,
                   sampling_params=sampling_params,
                   use_tqdm=False)
print(outputs)
```

## Open Source License

Code and model weights are licensed under Apache-2.0.

## Citation

```
@misc{cai2024internlm2,
    title={InternLM2 Technical Report},
    author={Zheng Cai and Maosong Cao and Haojiong Chen and Kai Chen and Keyu Chen and Xin Chen and Xun Chen and Zehui Chen and Zhi Chen and Pei Chu and Xiaoyi Dong and Haodong Duan and Qi Fan and Zhaoye Fei and Yang Gao and Jiaye Ge and Chenya Gu and Yuzhe Gu and Tao Gui and Aijia Guo and Qipeng Guo and Conghui He and Yingfan Hu and Ting Huang and Tao Jiang and Penglong Jiao and Zhenjiang Jin and Zhikai Lei and Jiaxing Li and Jingwen Li and Linyang Li and Shuaibin Li and Wei Li and Yining Li and Hongwei Liu and Jiangning Liu and Jiawei Hong and Kaiwen Liu and Kuikun Liu and Xiaoran Liu and Chengqi Lv and Haijun Lv and Kai Lv and Li Ma and Runyuan Ma and Zerun Ma and Wenchang Ning and Linke Ouyang and Jiantao Qiu and Yuan Qu and Fukai Shang and Yunfan Shao and Demin Song and Zifan Song and Zhihao Sui and Peng Sun and Yu Sun and Huanze Tang and Bin Wang and Guoteng Wang and Jiaqi Wang and Jiayu Wang and Rui Wang and Yudong Wang and Ziyi Wang and Xingjian Wei and Qizhen Weng and Fan Wu and Yingtong Xiong and Chao Xu and Ruiliang Xu and Hang Yan and Yirong Yan and Xiaogui Yang and Haochen Ye and Huaiyuan Ying and Jia Yu and Jing Yu and Yuhang Zang and Chuyu Zhang and Li Zhang and Pan Zhang and Peng Zhang and Ruijie Zhang and Shuo Zhang and Songyang Zhang and Wenjian Zhang and Wenwei Zhang and Xingcheng Zhang and Xinyue Zhang and Hui Zhao and Qian Zhao and Xiaomeng Zhao and Fengzhe Zhou and Zaida Zhou and Jingming Zhuo and Yicheng Zou and Xipeng Qiu and Yu Qiao and Dahua Lin},
    year={2024},
    eprint={2403.17297},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
```

config.json ADDED
@@ -0,0 +1,37 @@
{
  "architectures": [
    "InternLM3ForCausalLM"
  ],
  "attention_dropout": 0.0,
  "auto_map": {
    "AutoConfig": "configuration_internlm3.InternLM3Config",
    "AutoModel": "modeling_internlm3.InternLM3Model",
    "AutoModelForCausalLM": "modeling_internlm3.InternLM3ForCausalLM"
  },
  "bias": false,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "head_dim": 128,
  "hidden_act": "silu",
  "hidden_size": 4096,
  "initializer_range": 0.02,
  "intermediate_size": 10240,
  "max_position_embeddings": 32768,
  "model_type": "internlm3",
  "num_attention_heads": 32,
  "num_hidden_layers": 48,
  "num_key_value_heads": 2,
  "pad_token_id": 2,
  "qkv_bias": false,
  "rms_norm_eps": 1e-05,
  "rope_scaling": {
    "factor": 6.0,
    "rope_type": "dynamic"
  },
  "rope_theta": 50000000,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.47.1",
  "use_cache": true,
  "vocab_size": 128512
}
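
Because the repository wires its custom classes in via `auto_map`, the configuration above can be loaded through the standard `AutoConfig` API (a small sketch; `trust_remote_code=True` is required since `configuration_internlm3.py` ships with the repo rather than with `transformers`):

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("internlm/internlm3-8b-instruct", trust_remote_code=True)

# Derived shapes implied by the values above:
# 32 query heads sharing 2 key/value heads -> 16-way grouped-query attention.
print(config.num_attention_heads // config.num_key_value_heads)  # 16
# Per-head width times head count recovers the hidden size: 128 * 32 == 4096.
print(config.head_dim * config.num_attention_heads == config.hidden_size)  # True
```
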
configuration_internlm3.py ADDED
@@ -0,0 +1,197 @@
# coding=utf-8
# Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
#
# This code is based on transformers/src/transformers/models/llama/configuration_llama.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""InternLM3 model configuration"""

from transformers.configuration_utils import PretrainedConfig
from transformers.modeling_rope_utils import rope_config_validation
from transformers.utils import logging


logger = logging.get_logger(__name__)


class InternLM3Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`InternLM3Model`]. It is used to instantiate
    an InternLM3 model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of InternLM3-8B.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 151936):
            Vocabulary size of the InternLM3 model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`InternLM3Model`]
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 22016):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_key_value_heads (`int`, *optional*, defaults to 32):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details checkout [this
            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `32`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 32768):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether the model's input and output word embeddings should be tied.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`Dict`, *optional*):
72
+ Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type
73
+ and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value
74
+ accordingly.
75
+ Expected contents:
76
+ `rope_type` (`str`):
77
+ The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
78
+ 'llama3'], with 'default' being the original RoPE implementation.
79
+ `factor` (`float`, *optional*):
80
+ Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
81
+ most scaling types, a `factor` of x will enable the model to handle sequences of length x *
82
+ original maximum pre-trained length.
83
+ `original_max_position_embeddings` (`int`, *optional*):
84
+ Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
85
+ pretraining.
86
+ `attention_factor` (`float`, *optional*):
87
+ Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
88
+ computation. If unspecified, it defaults to value recommended by the implementation, using the
89
+ `factor` field to infer the suggested value.
90
+ `beta_fast` (`float`, *optional*):
91
+ Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
92
+ ramp function. If unspecified, it defaults to 32.
93
+ `beta_slow` (`float`, *optional*):
94
+ Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
95
+ ramp function. If unspecified, it defaults to 1.
96
+ `short_factor` (`List[float]`, *optional*):
97
+ Only used with 'longrope'. The scaling factor to be applied to short contexts (<
98
+ `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
99
+ size divided by the number of attention heads divided by 2
100
+ `long_factor` (`List[float]`, *optional*):
101
+ Only used with 'longrope'. The scaling factor to be applied to long contexts (<
102
+ `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
103
+ size divided by the number of attention heads divided by 2
104
+ `low_freq_factor` (`float`, *optional*):
105
+ Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
106
+ `high_freq_factor` (`float`, *optional*):
107
+ Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
108
+ qkv_bias (`bool`, *optional*, defaults to `False`):
109
+ Whether to use a bias in the query, key and value projection layers during self-attention.
110
+ attention_dropout (`float`, *optional*, defaults to 0.0):
111
+ The dropout ratio for the attention probabilities.
112
+ bias (`bool`, *optional*, defaults to `False`):
113
+ Whether to use a bias in o_proj, up_proj, down_proj and gate_proj layers.
114
+ head_dim (`int`, *optional*):
115
+ The attention head dimension. If None, it will default to hidden_size // num_heads
116
+
117
+ ```python
118
+ >>> from transformers import InternLM3Model, InternLM3Config
119
+
120
+ >>> # Initializing a InternLM3 style configuration
121
+ >>> configuration = InternLM3Config()
122
+
123
+ >>> # Initializing a model from the InternLM3-8B style configuration
124
+ >>> model = InternLM3Model(configuration)
125
+
126
+ >>> # Accessing the model configuration
127
+ >>> configuration = model.config
128
+ ```"""
129
+
130
+ model_type = "internlm3"
131
+ keys_to_ignore_at_inference = ["past_key_values"]
132
+
133
+ # Default tensor parallel plan for base model `InternLM3`
134
+ base_model_tp_plan = {
135
+ "layers.*.self_attn.q_proj": "colwise",
136
+ "layers.*.self_attn.k_proj": "colwise",
137
+ "layers.*.self_attn.v_proj": "colwise",
138
+ "layers.*.self_attn.o_proj": "rowwise",
139
+ "layers.*.mlp.gate_proj": "colwise",
140
+ "layers.*.mlp.up_proj": "colwise",
141
+ "layers.*.mlp.down_proj": "rowwise",
142
+ }
143
+
144
+ def __init__(
145
+ self,
146
+ vocab_size=128512,
147
+ hidden_size=4096,
148
+ intermediate_size=11008,
149
+ num_hidden_layers=32,
150
+ num_attention_heads=32,
151
+ num_key_value_heads=32,
152
+ hidden_act="silu",
153
+ max_position_embeddings=32768,
154
+ initializer_range=0.02,
155
+ rms_norm_eps=1e-6,
156
+ use_cache=True,
157
+ tie_word_embeddings=False,
158
+ rope_theta=10000.0,
159
+ rope_scaling=None,
160
+ qkv_bias=False,
161
+ attention_dropout=0.0,
162
+ bias=False,
163
+ head_dim=None,
164
+ **kwargs,
165
+ ):
166
+ self.vocab_size = vocab_size
167
+ self.max_position_embeddings = max_position_embeddings
168
+ self.hidden_size = hidden_size
169
+ self.intermediate_size = intermediate_size
170
+ self.num_hidden_layers = num_hidden_layers
171
+ self.num_attention_heads = num_attention_heads
172
+
173
+ # for backward compatibility
174
+ if num_key_value_heads is None:
175
+ num_key_value_heads = num_attention_heads
176
+
177
+ self.num_key_value_heads = num_key_value_heads
178
+ self.hidden_act = hidden_act
179
+ self.initializer_range = initializer_range
180
+ self.rms_norm_eps = rms_norm_eps
181
+ self.use_cache = use_cache
182
+ self.rope_theta = rope_theta
183
+ self.rope_scaling = rope_scaling
184
+ self.qkv_bias = qkv_bias
185
+ self.attention_dropout = attention_dropout
186
+ self.bias = bias
187
+ self.head_dim = head_dim if head_dim is not None else self.hidden_size // self.num_attention_heads
188
+ # Validate the correctness of rotary position embeddings parameters
189
+ # BC: if there is a 'type' field, move it to 'rope_type'.
190
+ if self.rope_scaling is not None and "type" in self.rope_scaling:
191
+ self.rope_scaling["rope_type"] = self.rope_scaling["type"]
192
+ rope_config_validation(self)
193
+
194
+ super().__init__(
195
+ tie_word_embeddings=tie_word_embeddings,
196
+ **kwargs,
197
+ )
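
The `__init__` above silently migrates a legacy `"type"` key in `rope_scaling` to `"rope_type"` before running `rope_config_validation`. A small sketch with the values shipped in `config.json` (assuming `configuration_internlm3.py` is importable from the working directory):

```python
# Sketch: the legacy "type" key is migrated to "rope_type" before validation.
from configuration_internlm3 import InternLM3Config

config = InternLM3Config(
    hidden_size=4096,
    intermediate_size=10240,
    num_hidden_layers=48,
    num_attention_heads=32,
    num_key_value_heads=2,  # GQA: 16 query heads share each KV head
    rope_theta=50000000,
    rope_scaling={"type": "dynamic", "factor": 6.0},  # legacy key on purpose
)

print(config.rope_scaling["rope_type"])  # "dynamic"
print(config.head_dim)                   # 128, i.e. hidden_size // num_attention_heads
```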
generation_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "bos_token_id": 1,
+   "eos_token_id": [
+     2,
+     128131
+   ],
+   "pad_token_id": 2,
+   "transformers_version": "4.47.1"
+ }
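
Generation stops on either id in `eos_token_id`: 2 is the plain EOS id, and 128131 is presumably a chat end-of-turn special token (verify against the tokenizer files before relying on it). A sketch reusing `model_path`, `model`, and `tokenizer` from the loading sketch above:

```python
# Sketch: generate() terminates on whichever eos id appears first.
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained(model_path)
print(gen_config.eos_token_id)  # [2, 128131]

inputs = tokenizer("Hello", return_tensors="pt").to(model.device)
output_ids = model.generate(**inputs, generation_config=gen_config, max_new_tokens=32)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```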
model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9a18eb700cd28f03c0745ac75ab53ead514cfa703fabb82c61a37b85cb593ff4
+ size 9928388896
model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e324110df616ca5190f20469528f13421476f38f0e2597dac4d9ee0bdcede797
+ size 7680144544
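
These two entries are Git LFS pointer files, not the weights themselves: each records the SHA-256 digest and byte size of the shard that LFS fetches in its place. A sketch for verifying a downloaded shard against its pointer:

```python
# Sketch: check a downloaded shard against the sha256 in its LFS pointer.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk_size):
            digest.update(block)
    return digest.hexdigest()

expected = "9a18eb700cd28f03c0745ac75ab53ead514cfa703fabb82c61a37b85cb593ff4"
assert sha256_of("model-00001-of-00002.safetensors") == expected
```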
model.safetensors.index.json ADDED
@@ -0,0 +1,442 @@
+ {
+   "metadata": {
+     "total_size": 17608482816
+   },
+   "weight_map": {
+     "lm_head.weight": "model-00002-of-00002.safetensors",
+     "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.21.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.21.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.21.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.22.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.22.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.22.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.22.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.22.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.22.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.22.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.22.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.22.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.23.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.23.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.23.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.23.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.23.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.23.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.23.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.23.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.23.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.24.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.24.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.24.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.24.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.24.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.24.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.24.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.24.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.24.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.25.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.25.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.25.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.25.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.25.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.25.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.25.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.25.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.25.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.26.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.26.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.26.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.26.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.26.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.26.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.26.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.26.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.26.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.27.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.27.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.27.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.27.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.27.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.28.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.28.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.28.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.28.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.28.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.28.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.28.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.28.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.28.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.29.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.29.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.29.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.29.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.29.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.29.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.29.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.29.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.29.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.30.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.30.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.30.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.30.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.30.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.30.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.30.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.30.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.30.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.31.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.31.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.31.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.31.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.31.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.31.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.31.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.31.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.31.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.32.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.32.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.32.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.32.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.32.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.32.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.32.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.32.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.32.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.33.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.33.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.33.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.33.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.33.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.33.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.33.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.33.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.33.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.34.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.34.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.34.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.34.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.34.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.34.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.34.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.34.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.34.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.35.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.35.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.35.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.35.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.35.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.35.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.35.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.35.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.35.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.36.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.36.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.36.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.36.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.36.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.36.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.36.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.36.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.36.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.37.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.37.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.37.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.37.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.37.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.37.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.37.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.37.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.37.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.38.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.38.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.38.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.38.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.38.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.38.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.38.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.38.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.38.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.39.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.39.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.39.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.39.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.39.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.39.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.39.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.39.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.39.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.40.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.40.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.40.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.40.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.40.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.40.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.40.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.40.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.40.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.41.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.41.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.41.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.41.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.41.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.41.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.41.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.41.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.41.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.42.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.42.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.42.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.42.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.42.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.42.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.42.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.42.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.42.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.43.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.43.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.43.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.43.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.43.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.43.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.43.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.43.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.43.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.44.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.44.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.44.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.44.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.44.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.44.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.44.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.44.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.44.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.45.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.45.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.45.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.45.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.45.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.45.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.45.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.45.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.45.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.46.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.46.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.46.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.46.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.46.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.46.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.46.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.46.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.46.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.47.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.47.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.47.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.47.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.47.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.47.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.47.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.47.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.47.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.norm.weight": "model-00002-of-00002.safetensors"
+   }
+ }
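
The index maps every tensor name to the shard that stores it; `from_pretrained` consumes it automatically, but a single tensor can also be resolved by hand. A sketch assuming the index and both shards sit in `model_dir`:

```python
# Sketch: locate and load one tensor through the weight map.
import json
import os

from safetensors import safe_open

model_dir = "."
with open(os.path.join(model_dir, "model.safetensors.index.json")) as f:
    index = json.load(f)

name = "model.layers.0.self_attn.k_proj.weight"
shard = index["weight_map"][name]  # "model-00001-of-00002.safetensors"
with safe_open(os.path.join(model_dir, shard), framework="pt") as f:
    tensor = f.get_tensor(name)

# With 2 KV heads of head_dim 128: (2 * 128, 4096)
print(tensor.shape)
```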
modeling_internlm3.py ADDED
@@ -0,0 +1,1191 @@
+ # Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
+ #
+ # This code is based on transformers/src/transformers/models/llama/modeling_llama.py
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ import math
+ from typing import List, Optional, Tuple, Union
+ 
+ import torch
+ import torch.utils.checkpoint
+ from torch import nn
+ 
+ from transformers.activations import ACT2FN
+ from transformers.cache_utils import Cache, DynamicCache, StaticCache
+ from transformers.generation import GenerationMixin
+ from transformers.modeling_attn_mask_utils import AttentionMaskConverter
+ from transformers.modeling_flash_attention_utils import FlashAttentionKwargs, _flash_attention_forward
+ from transformers.modeling_outputs import (
+     BaseModelOutputWithPast,
+     CausalLMOutputWithPast,
+     QuestionAnsweringModelOutput,
+     SequenceClassifierOutputWithPast,
+     TokenClassifierOutput,
+ )
+ from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS
+ from transformers.modeling_utils import PreTrainedModel
+ from transformers.processing_utils import Unpack
+ from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS
+ from transformers.utils import (
+     LossKwargs,
+     add_code_sample_docstrings,
+     add_start_docstrings,
+     add_start_docstrings_to_model_forward,
+     is_flash_attn_greater_or_equal_2_10,
+     logging,
+     replace_return_docstrings,
+ )
+ from .configuration_internlm3 import InternLM3Config
+ 
+ 
+ logger = logging.get_logger(__name__)
+ 
+ _CONFIG_FOR_DOC = "InternLM3Config"
+ 
+ 
+ class InternLM3RMSNorm(nn.Module):
+     def __init__(self, hidden_size, eps=1e-6):
+         """
+         InternLM3RMSNorm is equivalent to T5LayerNorm
+         """
+         super().__init__()
+         self.weight = nn.Parameter(torch.ones(hidden_size))
+         self.variance_epsilon = eps
+ 
+     def forward(self, hidden_states):
+         input_dtype = hidden_states.dtype
+         hidden_states = hidden_states.to(torch.float32)
+         variance = hidden_states.pow(2).mean(-1, keepdim=True)
+         hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+         return self.weight * hidden_states.to(input_dtype)
+ 
+     def extra_repr(self):
+         return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
+ 
+ 
+ ALL_LAYERNORM_LAYERS.append(InternLM3RMSNorm)
+ 
+ 
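`InternLM3RMSNorm.forward` normalizes in float32 by the root mean square of the last dimension, then rescales by the learned weight. A quick sketch checking it against the formula y = w * x / sqrt(mean(x^2) + eps):

```python
# Sketch: InternLM3RMSNorm versus the formula it implements.
import torch

norm = InternLM3RMSNorm(hidden_size=8, eps=1e-6)
x = torch.randn(2, 4, 8)

manual = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + 1e-6)
assert torch.allclose(norm(x), norm.weight * manual, atol=1e-6)
```
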
79
+ class InternLM3RotaryEmbedding(nn.Module):
80
+ def __init__(
81
+ self,
82
+ dim=None,
83
+ max_position_embeddings=2048,
84
+ base=10000,
85
+ device=None,
86
+ scaling_factor=1.0,
87
+ rope_type="default",
88
+ config: Optional[InternLM3Config] = None,
89
+ ):
90
+ super().__init__()
91
+ # TODO (joao): remove the `if` below, only used for BC
92
+ self.rope_kwargs = {}
93
+ if config is None:
94
+ logger.warning_once(
95
+ "`InternLM3RotaryEmbedding` can now be fully parameterized by passing the model config through the "
96
+ "`config` argument. All other arguments will be removed in v4.46"
97
+ )
98
+ self.rope_kwargs = {
99
+ "rope_type": rope_type,
100
+ "factor": scaling_factor,
101
+ "dim": dim,
102
+ "base": base,
103
+ "max_position_embeddings": max_position_embeddings,
104
+ }
105
+ self.rope_type = rope_type
106
+ self.max_seq_len_cached = max_position_embeddings
107
+ self.original_max_seq_len = max_position_embeddings
108
+ else:
109
+ # BC: "rope_type" was originally "type"
110
+ if config.rope_scaling is not None:
111
+ self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
112
+ else:
113
+ self.rope_type = "default"
114
+ self.max_seq_len_cached = config.max_position_embeddings
115
+ self.original_max_seq_len = config.max_position_embeddings
116
+
117
+ self.config = config
118
+ self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
119
+
120
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, **self.rope_kwargs)
121
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
122
+ self.original_inv_freq = self.inv_freq
123
+
124
+ def _dynamic_frequency_update(self, position_ids, device):
125
+ """
126
+ dynamic RoPE layers should recompute `inv_freq` in the following situations:
127
+ 1 - growing beyond the cached sequence length (allow scaling)
128
+ 2 - the current sequence length is in the original scale (avoid losing precision with small sequences)
129
+ """
130
+ seq_len = torch.max(position_ids) + 1
131
+ if seq_len > self.max_seq_len_cached: # growth
132
+ inv_freq, self.attention_scaling = self.rope_init_fn(
133
+ self.config, device, seq_len=seq_len, **self.rope_kwargs
134
+ )
135
+ self.register_buffer("inv_freq", inv_freq, persistent=False) # TODO joao: may break with compilation
136
+ self.max_seq_len_cached = seq_len
137
+
138
+ if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len: # reset
139
+ self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
140
+ self.max_seq_len_cached = self.original_max_seq_len
141
+
142
+ @torch.no_grad()
143
+ def forward(self, x, position_ids):
144
+ if "dynamic" in self.rope_type:
145
+ self._dynamic_frequency_update(position_ids, device=x.device)
146
+
147
+ # Core RoPE block
148
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
149
+ position_ids_expanded = position_ids[:, None, :].float()
150
+ # Force float32 (see https://github.com/huggingface/transformers/pull/29285)
151
+ device_type = x.device.type
152
+ device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
153
+ with torch.autocast(device_type=device_type, enabled=False):
154
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
155
+ emb = torch.cat((freqs, freqs), dim=-1)
156
+ cos = emb.cos()
157
+ sin = emb.sin()
158
+
159
+ # Advanced RoPE types (e.g. yarn) apply a post-processing scaling factor, equivalent to scaling attention
160
+ cos = cos * self.attention_scaling
161
+ sin = sin * self.attention_scaling
162
+
163
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
164
+
165
+
166
+ class InternLM3LinearScalingRotaryEmbedding(InternLM3RotaryEmbedding):
167
+ """InternLM3RotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""
168
+
169
+ def __init__(self, *args, **kwargs):
170
+ logger.warning_once(
171
+ "`InternLM3LinearScalingRotaryEmbedding` is deprecated an will be removed in v4.46. Please use "
172
+ "`InternLM3RotaryEmbedding`, which now also does linear scaling (simply pass the model config to __init__)."
173
+ )
174
+ kwargs["rope_type"] = "linear"
175
+ super().__init__(*args, **kwargs)
176
+
177
+
178
+ class InternLM3DynamicNTKScalingRotaryEmbedding(InternLM3RotaryEmbedding):
179
+ """InternLM3RotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla"""
180
+
181
+ def __init__(self, *args, **kwargs):
182
+ logger.warning_once(
183
+ "`InternLM3DynamicNTKScalingRotaryEmbedding` is deprecated an will be removed in v4.46. Please use "
184
+ "`InternLM3RotaryEmbedding`, which now also does dynamic ntk scaling (simply pass the model config to "
185
+ "__init__)."
186
+ )
187
+ kwargs["rope_type"] = "dynamic"
188
+ super().__init__(*args, **kwargs)
189
+
190
+
191
+ def rotate_half(x):
192
+ """Rotates half the hidden dims of the input."""
193
+ x1 = x[..., : x.shape[-1] // 2]
194
+ x2 = x[..., x.shape[-1] // 2 :]
195
+ return torch.cat((-x2, x1), dim=-1)
196
+
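+ # For example (illustrative only): rotate_half(torch.tensor([1., 2., 3., 4.]))
+ # returns tensor([-3., -4., 1., 2.]): the second half is negated and swapped to the front,
+ # which together with the cos/sin terms below implements the per-pair 2D rotation.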
197
+
198
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
199
+ """Applies Rotary Position Embedding to the query and key tensors.
200
+
201
+ Args:
202
+ q (`torch.Tensor`): The query tensor.
203
+ k (`torch.Tensor`): The key tensor.
204
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
205
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
206
+ position_ids (`torch.Tensor`, *optional*):
207
+ Deprecated and unused.
208
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
209
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
210
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
211
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
212
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
213
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
214
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
215
+ Returns:
216
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
217
+ """
218
+ cos = cos.unsqueeze(unsqueeze_dim)
219
+ sin = sin.unsqueeze(unsqueeze_dim)
220
+ q_embed = (q * cos) + (rotate_half(q) * sin)
221
+ k_embed = (k * cos) + (rotate_half(k) * sin)
222
+ return q_embed, k_embed
223
+
224
+
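+ # Usage sketch (shapes only; every size below is made up for illustration):
+ #
+ #     q = torch.randn(2, 8, 16, 64)    # (batch, num_heads, seq_len, head_dim)
+ #     k = torch.randn(2, 2, 16, 64)    # (batch, num_kv_heads, seq_len, head_dim)
+ #     cos = torch.randn(2, 16, 64)     # (batch, seq_len, head_dim)
+ #     sin = torch.randn(2, 16, 64)
+ #     q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin)  # unsqueeze_dim=1 broadcasts over heads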
225
+ class InternLM3MLP(nn.Module):
226
+ def __init__(self, config):
227
+ super().__init__()
228
+ self.config = config
229
+ self.hidden_size = config.hidden_size
230
+ self.intermediate_size = config.intermediate_size
231
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.bias)
232
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.bias)
233
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.bias)
234
+ self.act_fn = ACT2FN[config.hidden_act]
235
+
236
+ def forward(self, x):
237
+ down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
238
+ return down_proj
239
+
240
+
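+ # The MLP above is the gated formulation down(act(gate(x)) * up(x)); with hidden_act="silu"
+ # this is the SwiGLU variant common to Llama-style decoders.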
241
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
242
+ """
243
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
244
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
245
+ """
246
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
247
+ if n_rep == 1:
248
+ return hidden_states
249
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
250
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
251
+
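+ # Shape sketch (hypothetical sizes): for hidden_states of shape (2, 2, 16, 64) and n_rep=4,
+ # repeat_kv returns (2, 8, 16, 64); each of the 2 key/value heads is repeated 4 times so the
+ # grouped-query KV tensors line up with the 8 query heads in the attention matmuls below.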
252
+
253
+ class InternLM3Attention(nn.Module):
254
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
255
+
256
+ def __init__(self, config: InternLM3Config, layer_idx: Optional[int] = None):
257
+ super().__init__()
258
+ self.config = config
259
+ self.layer_idx = layer_idx
260
+ if layer_idx is None:
261
+ logger.warning_once(
262
+ f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
263
+ "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
264
+ "when creating this class."
265
+ )
266
+
267
+ self.attention_dropout = config.attention_dropout
268
+ self.hidden_size = config.hidden_size
269
+ self.num_heads = config.num_attention_heads
270
+ self.head_dim = getattr(config, "head_dim", self.hidden_size // self.num_heads)
271
+ self.num_key_value_heads = config.num_key_value_heads
272
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
273
+ self.max_position_embeddings = config.max_position_embeddings
274
+ self.rope_theta = config.rope_theta
275
+ self.is_causal = True
276
+
277
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.qkv_bias)
278
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.qkv_bias)
279
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.qkv_bias)
280
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.bias)
281
+
282
+ # TODO (joao): remove in v4.46 (RoPE is computed in the model, not in the decoder layers)
283
+ self.rotary_emb = InternLM3RotaryEmbedding(config=self.config)
284
+
285
+ def forward(
286
+ self,
287
+ hidden_states: torch.Tensor,
288
+ attention_mask: Optional[torch.Tensor] = None,
289
+ position_ids: Optional[torch.LongTensor] = None,
290
+ past_key_value: Optional[Cache] = None,
291
+ output_attentions: bool = False,
292
+ use_cache: bool = False,
293
+ cache_position: Optional[torch.LongTensor] = None,
294
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.46
295
+ **kwargs,
296
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
297
+ bsz, q_len, _ = hidden_states.size()
298
+
299
+ query_states = self.q_proj(hidden_states)
300
+ key_states = self.k_proj(hidden_states)
301
+ value_states = self.v_proj(hidden_states)
302
+
303
+ # use -1 to infer num_heads and num_key_value_heads as they may vary if tensor parallel is used
304
+ query_states = query_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
305
+ key_states = key_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
306
+ value_states = value_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
307
+
308
+ if position_embeddings is None:
309
+ logger.warning_once(
310
+ "The attention layers in this model are transitioning from computing the RoPE embeddings internally "
311
+ "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed "
312
+ "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.46 `position_ids` will be "
313
+ "removed and `position_embeddings` will be mandatory."
314
+ )
315
+ cos, sin = self.rotary_emb(value_states, position_ids)
316
+ else:
317
+ cos, sin = position_embeddings
318
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
319
+
320
+ if past_key_value is not None:
321
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
322
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
323
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
324
+
325
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
326
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
327
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
328
+
329
+ if attention_mask is not None: # no matter the length, we just slice it
330
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
331
+ attn_weights = attn_weights + causal_mask
332
+
333
+ # upcast attention to fp32
334
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
335
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
336
+ attn_output = torch.matmul(attn_weights, value_states)
337
+
338
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
339
+ raise ValueError(
340
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
341
+ f" {attn_output.size()}"
342
+ )
343
+
344
+ attn_output = attn_output.transpose(1, 2).contiguous()
345
+
346
+ attn_output = attn_output.reshape(bsz, q_len, -1)
347
+
348
+ attn_output = self.o_proj(attn_output)
349
+
350
+ if not output_attentions:
351
+ attn_weights = None
352
+
353
+ return attn_output, attn_weights, past_key_value
354
+
355
+
356
+ class InternLM3FlashAttention2(InternLM3Attention):
357
+ """
358
+ InternLM3 flash attention module. This module inherits from `InternLM3Attention` as the weights of the module stay
359
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
360
+ flash attention and deal with padding tokens in case the input contains any of them.
361
+ """
362
+
363
+ def __init__(self, *args, **kwargs):
364
+ super().__init__(*args, **kwargs)
365
+
366
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
367
+ # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
368
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
369
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
370
+
371
+ def forward(
372
+ self,
373
+ hidden_states: torch.Tensor,
374
+ attention_mask: Optional[torch.LongTensor] = None,
375
+ position_ids: Optional[torch.LongTensor] = None,
376
+ past_key_value: Optional[Cache] = None,
377
+ output_attentions: bool = False,
378
+ use_cache: bool = False,
379
+ cache_position: Optional[torch.LongTensor] = None,
380
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.46
381
+ **kwargs: Unpack[FlashAttentionKwargs],
382
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
383
+ if isinstance(past_key_value, StaticCache):
384
+ raise ValueError(
385
+ "`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` "
386
+ "make sure to use `sdpa` in the mean time, and open an issue at https://github.com/huggingface/transformers"
387
+ )
388
+
389
+ output_attentions = False
390
+
391
+ bsz, q_len, _ = hidden_states.size()
392
+
393
+ query_states = self.q_proj(hidden_states)
394
+ key_states = self.k_proj(hidden_states)
395
+ value_states = self.v_proj(hidden_states)
396
+
397
+ # Flash attention requires the input to have the shape
398
+ # batch_size x seq_length x num_heads x head_dim
399
+ # therefore we just need to keep the original shape
400
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
401
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
402
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
403
+
404
+ if position_embeddings is None:
405
+ logger.warning_once(
406
+ "The attention layers in this model are transitioning from computing the RoPE embeddings internally "
407
+ "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed "
408
+ "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.46 `position_ids` will be "
409
+ "removed and `position_embeddings` will be mandatory."
410
+ )
411
+ cos, sin = self.rotary_emb(value_states, position_ids)
412
+ else:
413
+ cos, sin = position_embeddings
414
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
415
+
416
+ if past_key_value is not None:
417
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
418
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
419
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
420
+
421
+ # TODO: These transposes are quite inefficient, but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
422
+ # to be able to avoid many of these transpose/reshape/view.
423
+ query_states = query_states.transpose(1, 2)
424
+ key_states = key_states.transpose(1, 2)
425
+ value_states = value_states.transpose(1, 2)
426
+
427
+ dropout_rate = self.attention_dropout if self.training else 0.0
428
+
429
+ # In PEFT, we usually cast the layer norms to float32 for training stability reasons,
430
+ # so the input hidden states get silently cast to float32. Hence, we need to
431
+ # cast them back to the correct dtype just to be sure everything works as expected.
432
+ # This might slow down training & inference, so it is recommended not to cast the LayerNorms
433
+ # in fp32. (InternLM3RMSNorm handles it correctly)
434
+
435
+ input_dtype = query_states.dtype
436
+ if input_dtype == torch.float32:
437
+ if torch.is_autocast_enabled():
438
+ target_dtype = torch.get_autocast_gpu_dtype()
439
+ # Handle the case where the model is quantized
440
+ elif hasattr(self.config, "_pre_quantization_dtype"):
441
+ target_dtype = self.config._pre_quantization_dtype
442
+ else:
443
+ target_dtype = self.q_proj.weight.dtype
444
+
445
+ logger.warning_once(
446
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
447
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
448
+ f" {target_dtype}."
449
+ )
450
+
451
+ query_states = query_states.to(target_dtype)
452
+ key_states = key_states.to(target_dtype)
453
+ value_states = value_states.to(target_dtype)
454
+
455
+ attn_output = _flash_attention_forward(
456
+ query_states,
457
+ key_states,
458
+ value_states,
459
+ attention_mask,
460
+ q_len,
461
+ position_ids=position_ids,
462
+ dropout=dropout_rate,
463
+ sliding_window=getattr(self, "sliding_window", None),
464
+ use_top_left_mask=self._flash_attn_uses_top_left_mask,
465
+ is_causal=self.is_causal,
466
+ **kwargs,
467
+ )
468
+
469
+ attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
470
+ attn_output = self.o_proj(attn_output)
471
+
472
+ if not output_attentions:
473
+ attn_weights = None
474
+
475
+ return attn_output, attn_weights, past_key_value
476
+
477
+
478
+ class InternLM3SdpaAttention(InternLM3Attention):
479
+ """
480
+ InternLM3 attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
481
+ `InternLM3Attention` as the weights of the module stay untouched. The only changes are on the forward pass, to adapt to the
482
+ SDPA API.
483
+ """
484
+
485
+ # Adapted from InternLM3Attention.forward
486
+ def forward(
487
+ self,
488
+ hidden_states: torch.Tensor,
489
+ attention_mask: Optional[torch.Tensor] = None,
490
+ position_ids: Optional[torch.LongTensor] = None,
491
+ past_key_value: Optional[Cache] = None,
492
+ output_attentions: bool = False,
493
+ use_cache: bool = False,
494
+ cache_position: Optional[torch.LongTensor] = None,
495
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.46
496
+ **kwargs,
497
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
498
+ if output_attentions:
499
+ # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
500
+ logger.warning_once(
501
+ "InternLM3Model is using InternLM3SdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
502
+ 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
503
+ )
504
+ return super().forward(
505
+ hidden_states=hidden_states,
506
+ attention_mask=attention_mask,
507
+ position_ids=position_ids,
508
+ past_key_value=past_key_value,
509
+ output_attentions=output_attentions,
510
+ use_cache=use_cache,
511
+ cache_position=cache_position,
512
+ position_embeddings=position_embeddings,
513
+ )
514
+
515
+ bsz, q_len, _ = hidden_states.size()
516
+
517
+ query_states = self.q_proj(hidden_states)
518
+ key_states = self.k_proj(hidden_states)
519
+ value_states = self.v_proj(hidden_states)
520
+
521
+ # use -1 to infer num_heads and num_key_value_heads as they may vary if tensor parallel is used
522
+ query_states = query_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
523
+ key_states = key_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
524
+ value_states = value_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
525
+
526
+ if position_embeddings is None:
527
+ logger.warning_once(
528
+ "The attention layers in this model are transitioning from computing the RoPE embeddings internally "
529
+ "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed "
530
+ "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.46 `position_ids` will be "
531
+ "removed and `position_embeddings` will be mandatory."
532
+ )
533
+ cos, sin = self.rotary_emb(value_states, position_ids)
534
+ else:
535
+ cos, sin = position_embeddings
536
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
537
+
538
+ if past_key_value is not None:
539
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
540
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
541
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
542
+
543
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
544
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
545
+
546
+ causal_mask = attention_mask
547
+ if attention_mask is not None:
548
+ causal_mask = causal_mask[:, :, :, : key_states.shape[-2]]
549
+
550
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
551
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
552
+ if query_states.device.type == "cuda" and causal_mask is not None:
553
+ query_states = query_states.contiguous()
554
+ key_states = key_states.contiguous()
555
+ value_states = value_states.contiguous()
556
+
557
+ # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
558
+ # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
559
+ is_causal = True if causal_mask is None and q_len > 1 else False
560
+
561
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
562
+ query_states,
563
+ key_states,
564
+ value_states,
565
+ attn_mask=causal_mask,
566
+ dropout_p=self.attention_dropout if self.training else 0.0,
567
+ is_causal=is_causal,
568
+ )
569
+
570
+ attn_output = attn_output.transpose(1, 2).contiguous()
571
+ attn_output = attn_output.view(bsz, q_len, -1)
572
+
573
+ attn_output = self.o_proj(attn_output)
574
+
575
+ return attn_output, None, past_key_value
576
+
577
+
578
+ InternLM3_ATTENTION_CLASSES = {
579
+ "eager": InternLM3Attention,
580
+ "flash_attention_2": InternLM3FlashAttention2,
581
+ "sdpa": InternLM3SdpaAttention,
582
+ }
583
+
584
+
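+ # Dispatch sketch: the decoder layer below selects its attention backend from
+ # `config._attn_implementation`, e.g. (with a hypothetical `config`):
+ #
+ #     attn_cls = InternLM3_ATTENTION_CLASSES["sdpa"]   # -> InternLM3SdpaAttention
+ #     self_attn = attn_cls(config=config, layer_idx=0)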
585
+ class InternLM3DecoderLayer(nn.Module):
586
+ def __init__(self, config: InternLM3Config, layer_idx: int):
587
+ super().__init__()
588
+ self.hidden_size = config.hidden_size
589
+
590
+ self.self_attn = InternLM3_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx)
591
+
592
+ self.mlp = InternLM3MLP(config)
593
+ self.input_layernorm = InternLM3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
594
+ self.post_attention_layernorm = InternLM3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
595
+
596
+ def forward(
597
+ self,
598
+ hidden_states: torch.Tensor,
599
+ attention_mask: Optional[torch.Tensor] = None,
600
+ position_ids: Optional[torch.LongTensor] = None,
601
+ past_key_value: Optional[Cache] = None,
602
+ output_attentions: Optional[bool] = False,
603
+ use_cache: Optional[bool] = False,
604
+ cache_position: Optional[torch.LongTensor] = None,
605
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.46
606
+ **kwargs,
607
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
608
+ """
609
+ Args:
610
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
611
+ attention_mask (`torch.FloatTensor`, *optional*):
612
+ attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
613
+ query_sequence_length, key_sequence_length)` if default attention is used.
614
+ output_attentions (`bool`, *optional*):
615
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
616
+ returned tensors for more detail.
617
+ use_cache (`bool`, *optional*):
618
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
619
+ (see `past_key_values`).
620
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
621
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
622
+ Indices depicting the position of the input sequence tokens in the sequence
623
+ position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
624
+ Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
625
+ with `head_dim` being the embedding dimension of each attention head.
626
+ kwargs (`dict`, *optional*):
627
+ Arbitrary kwargs to be ignored, used for FSDP and other methods that inject code
628
+ into the model
629
+ """
630
+ residual = hidden_states
631
+
632
+ hidden_states = self.input_layernorm(hidden_states)
633
+
634
+ # Self Attention
635
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
636
+ hidden_states=hidden_states,
637
+ attention_mask=attention_mask,
638
+ position_ids=position_ids,
639
+ past_key_value=past_key_value,
640
+ output_attentions=output_attentions,
641
+ use_cache=use_cache,
642
+ cache_position=cache_position,
643
+ position_embeddings=position_embeddings,
644
+ **kwargs,
645
+ )
646
+ hidden_states = residual + hidden_states
647
+
648
+ # Fully Connected
649
+ residual = hidden_states
650
+ hidden_states = self.post_attention_layernorm(hidden_states)
651
+ hidden_states = self.mlp(hidden_states)
652
+ hidden_states = residual + hidden_states
653
+
654
+ outputs = (hidden_states,)
655
+
656
+ if output_attentions:
657
+ outputs += (self_attn_weights,)
658
+
659
+ if use_cache:
660
+ outputs += (present_key_value,)
661
+
662
+ return outputs
663
+
664
+
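+ # The layer above is the standard pre-norm residual block; schematically:
+ #
+ #     h = x + self_attn(input_layernorm(x))
+ #     out = h + mlp(post_attention_layernorm(h))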
665
+ InternLM3_START_DOCSTRING = r"""
666
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
667
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
668
+ etc.)
669
+
670
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
671
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
672
+ and behavior.
673
+
674
+ Parameters:
675
+ config ([`InternLM3Config`]):
676
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
677
+ load the weights associated with the model, only the configuration. Check out the
678
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
679
+ """
680
+
681
+
682
+ @add_start_docstrings(
683
+ "The bare InternLM3 Model outputting raw hidden-states without any specific head on top.",
684
+ InternLM3_START_DOCSTRING,
685
+ )
686
+ class InternLM3PreTrainedModel(PreTrainedModel):
687
+ config_class = InternLM3Config
688
+ base_model_prefix = "model"
689
+ supports_gradient_checkpointing = True
690
+ _no_split_modules = ["InternLM3DecoderLayer"]
691
+ _skip_keys_device_placement = ["past_key_values"]
692
+ _supports_flash_attn_2 = True
693
+ _supports_sdpa = True
694
+ _supports_cache_class = True
695
+ _supports_quantized_cache = True
696
+ _supports_static_cache = True
697
+
698
+ def _init_weights(self, module):
699
+ std = self.config.initializer_range
700
+ if isinstance(module, nn.Linear):
701
+ module.weight.data.normal_(mean=0.0, std=std)
702
+ if module.bias is not None:
703
+ module.bias.data.zero_()
704
+ elif isinstance(module, nn.Embedding):
705
+ module.weight.data.normal_(mean=0.0, std=std)
706
+ if module.padding_idx is not None:
707
+ module.weight.data[module.padding_idx].zero_()
708
+
709
+
710
+ INTERNLM3_INPUTS_DOCSTRING = r"""
711
+ Args:
712
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
713
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
714
+ it.
715
+
716
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
717
+ [`PreTrainedTokenizer.__call__`] for details.
718
+
719
+ [What are input IDs?](../glossary#input-ids)
720
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
721
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
722
+
723
+ - 1 for tokens that are **not masked**,
724
+ - 0 for tokens that are **masked**.
725
+
726
+ [What are attention masks?](../glossary#attention-mask)
727
+
728
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
729
+ [`PreTrainedTokenizer.__call__`] for details.
730
+
731
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
732
+ `past_key_values`).
733
+
734
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
735
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
736
+ information on the default strategy.
737
+
738
+ - 1 indicates the head is **not masked**,
739
+ - 0 indicates the head is **masked**.
740
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
741
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
742
+ config.n_positions - 1]`.
743
+
744
+ [What are position IDs?](../glossary#position-ids)
745
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
746
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
747
+ blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
748
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
749
+
750
+ Two formats are allowed:
751
+ - a [`~cache_utils.Cache`] instance, see our
752
+ [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache);
753
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
754
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
755
+ cache format.
756
+
757
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
758
+ legacy cache format will be returned.
759
+
760
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
761
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
762
+ of shape `(batch_size, sequence_length)`.
763
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
764
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
765
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
766
+ model's internal embedding lookup matrix.
767
+ use_cache (`bool`, *optional*):
768
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
769
+ `past_key_values`).
770
+ output_attentions (`bool`, *optional*):
771
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
772
+ tensors for more detail.
773
+ output_hidden_states (`bool`, *optional*):
774
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
775
+ more detail.
776
+ return_dict (`bool`, *optional*):
777
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
778
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
779
+ Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
780
+ this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
781
+ the complete sequence length.
782
+ """
783
+
784
+
785
+ @add_start_docstrings(
786
+ "The bare InternLM3 Model outputting raw hidden-states without any specific head on top.",
787
+ InternLM3_START_DOCSTRING,
788
+ )
789
+ class InternLM3Model(InternLM3PreTrainedModel):
790
+ """
791
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`InternLM3DecoderLayer`]
792
+
793
+ Args:
794
+ config: InternLM3Config
795
+ """
796
+ _auto_class = "AutoModel"
797
+ def __init__(self, config: InternLM3Config):
798
+ super().__init__(config)
799
+ self.padding_idx = config.pad_token_id
800
+ self.vocab_size = config.vocab_size
801
+
802
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
803
+ self.layers = nn.ModuleList(
804
+ [InternLM3DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
805
+ )
806
+ self.norm = InternLM3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
807
+ self.rotary_emb = InternLM3RotaryEmbedding(config=config)
808
+
809
+ self.gradient_checkpointing = False
810
+ if getattr(config, "pretraining_tp", 1) != 1:
811
+ logger.warning_once("`pretraining_tp` is deprecated, please use `model.tensor_parallel` instead.")
812
+
813
+ # Initialize weights and apply final processing
814
+ self.post_init()
815
+
816
+ def get_input_embeddings(self):
817
+ return self.embed_tokens
818
+
819
+ def set_input_embeddings(self, value):
820
+ self.embed_tokens = value
821
+
822
+ @add_start_docstrings_to_model_forward(INTERNLM3_INPUTS_DOCSTRING)
823
+ def forward(
824
+ self,
825
+ input_ids: torch.LongTensor = None,
826
+ attention_mask: Optional[torch.Tensor] = None,
827
+ position_ids: Optional[torch.LongTensor] = None,
828
+ past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
829
+ inputs_embeds: Optional[torch.FloatTensor] = None,
830
+ use_cache: Optional[bool] = None,
831
+ output_attentions: Optional[bool] = None,
832
+ output_hidden_states: Optional[bool] = None,
833
+ return_dict: Optional[bool] = None,
834
+ cache_position: Optional[torch.LongTensor] = None,
835
+ **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
836
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
837
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
838
+ output_hidden_states = (
839
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
840
+ )
841
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
842
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
843
+
844
+ if (input_ids is None) ^ (inputs_embeds is not None):
845
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
846
+
847
+ if self.gradient_checkpointing and self.training and use_cache:
848
+ logger.warning_once(
849
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
850
+ )
851
+ use_cache = False
852
+
853
+ if inputs_embeds is None:
854
+ inputs_embeds = self.embed_tokens(input_ids)
855
+
856
+ # kept for BC (non `Cache` `past_key_values` inputs)
857
+ return_legacy_cache = False
858
+ if use_cache and not isinstance(past_key_values, Cache):
859
+ return_legacy_cache = True
860
+ if past_key_values is None:
861
+ past_key_values = DynamicCache()
862
+ else:
863
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
864
+ logger.warning_once(
865
+ "We detected that you are passing `past_key_values` as a tuple of tuples. This is deprecated and "
866
+ "will be removed in v4.47. Please convert your cache or use an appropriate `Cache` class "
867
+ "(https://huggingface.co/docs/transformers/kv_cache#legacy-cache-format)"
868
+ )
869
+
870
+ if cache_position is None:
871
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
872
+ cache_position = torch.arange(
873
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
874
+ )
875
+ if position_ids is None:
876
+ position_ids = cache_position.unsqueeze(0)
877
+
878
+ causal_mask = self._update_causal_mask(
879
+ attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
880
+ )
881
+ hidden_states = inputs_embeds
882
+
883
+ # create position embeddings to be shared across the decoder layers
884
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
885
+
886
+ # decoder layers
887
+ all_hidden_states = () if output_hidden_states else None
888
+ all_self_attns = () if output_attentions else None
889
+ next_decoder_cache = None
890
+
891
+ for decoder_layer in self.layers[: self.config.num_hidden_layers]:
892
+ if output_hidden_states:
893
+ all_hidden_states += (hidden_states,)
894
+
895
+ if self.gradient_checkpointing and self.training:
896
+ layer_outputs = self._gradient_checkpointing_func(
897
+ decoder_layer.__call__,
898
+ hidden_states,
899
+ causal_mask,
900
+ position_ids,
901
+ past_key_values,
902
+ output_attentions,
903
+ use_cache,
904
+ cache_position,
905
+ position_embeddings,
906
+ )
907
+ else:
908
+ layer_outputs = decoder_layer(
909
+ hidden_states,
910
+ attention_mask=causal_mask,
911
+ position_ids=position_ids,
912
+ past_key_value=past_key_values,
913
+ output_attentions=output_attentions,
914
+ use_cache=use_cache,
915
+ cache_position=cache_position,
916
+ position_embeddings=position_embeddings,
917
+ **flash_attn_kwargs,
918
+ )
919
+
920
+ hidden_states = layer_outputs[0]
921
+
922
+ if use_cache:
923
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
924
+
925
+ if output_attentions:
926
+ all_self_attns += (layer_outputs[1],)
927
+
928
+ hidden_states = self.norm(hidden_states)
929
+
930
+ # add hidden states from the last decoder layer
931
+ if output_hidden_states:
932
+ all_hidden_states += (hidden_states,)
933
+
934
+ next_cache = next_decoder_cache if use_cache else None
935
+ if return_legacy_cache:
936
+ next_cache = next_cache.to_legacy_cache()
937
+
938
+ if not return_dict:
939
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
940
+ return BaseModelOutputWithPast(
941
+ last_hidden_state=hidden_states,
942
+ past_key_values=next_cache,
943
+ hidden_states=all_hidden_states,
944
+ attentions=all_self_attns,
945
+ )
946
+
947
+ def _update_causal_mask(
948
+ self,
949
+ attention_mask: torch.Tensor,
950
+ input_tensor: torch.Tensor,
951
+ cache_position: torch.Tensor,
952
+ past_key_values: Cache,
953
+ output_attentions: bool,
954
+ ):
955
+ if self.config._attn_implementation == "flash_attention_2":
956
+ if attention_mask is not None and 0.0 in attention_mask:
957
+ return attention_mask
958
+ return None
959
+
960
+ # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
961
+ # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
962
+ # to infer the attention mask.
963
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
964
+ using_static_cache = isinstance(past_key_values, StaticCache)
965
+
966
+ # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
967
+ if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
968
+ if AttentionMaskConverter._ignore_causal_mask_sdpa(
969
+ attention_mask,
970
+ inputs_embeds=input_tensor,
971
+ past_key_values_length=past_seen_tokens,
972
+ is_training=self.training,
973
+ ):
974
+ return None
975
+
976
+ dtype, device = input_tensor.dtype, input_tensor.device
977
+ sequence_length = input_tensor.shape[1]
978
+ if using_static_cache:
979
+ target_length = past_key_values.get_max_cache_shape()
980
+ else:
981
+ target_length = (
982
+ attention_mask.shape[-1]
983
+ if isinstance(attention_mask, torch.Tensor)
984
+ else past_seen_tokens + sequence_length + 1
985
+ )
986
+
987
+ # In case the provided `attention_mask` is 2D, we generate a causal mask here (4D).
988
+ causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
989
+ attention_mask,
990
+ sequence_length=sequence_length,
991
+ target_length=target_length,
992
+ dtype=dtype,
993
+ device=device,
994
+ cache_position=cache_position,
995
+ batch_size=input_tensor.shape[0],
996
+ )
997
+
998
+ if (
999
+ self.config._attn_implementation == "sdpa"
1000
+ and attention_mask is not None
1001
+ and attention_mask.device.type == "cuda"
1002
+ and not output_attentions
1003
+ ):
1004
+ # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
1005
+ # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
1006
+ # Details: https://github.com/pytorch/pytorch/issues/110213
1007
+ min_dtype = torch.finfo(dtype).min
1008
+ causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
1009
+
1010
+ return causal_mask
1011
+
1012
+ @staticmethod
1013
+ def _prepare_4d_causal_attention_mask_with_cache_position(
1014
+ attention_mask: torch.Tensor,
1015
+ sequence_length: int,
1016
+ target_length: int,
1017
+ dtype: torch.dtype,
1018
+ device: torch.device,
1019
+ cache_position: torch.Tensor,
1020
+ batch_size: int,
1021
+ **kwargs,
1022
+ ):
1023
+ """
1024
+ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
1025
+ `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
1026
+
1027
+ Args:
1028
+ attention_mask (`torch.Tensor`):
1029
+ A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
1030
+ `(batch_size, 1, query_length, key_value_length)`.
1031
+ sequence_length (`int`):
1032
+ The sequence length being processed.
1033
+ target_length (`int`):
1034
+ The target length: when generating with static cache, the mask should be as long as the static cache,
1035
+ to account for the zero padding (the part of the cache that is not filled yet).
1036
+ dtype (`torch.dtype`):
1037
+ The dtype to use for the 4D attention mask.
1038
+ device (`torch.device`):
1039
+ The device to place the 4D attention mask on.
1040
+ cache_position (`torch.Tensor`):
1041
+ Indices depicting the position of the input sequence tokens in the sequence.
1042
+ batch_size (`int`):
1043
+ Batch size.
1044
+ """
1045
+ if attention_mask is not None and attention_mask.dim() == 4:
1046
+ # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
1047
+ causal_mask = attention_mask
1048
+ else:
1049
+ min_dtype = torch.finfo(dtype).min
1050
+ causal_mask = torch.full(
1051
+ (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
1052
+ )
1053
+ if sequence_length != 1:
1054
+ causal_mask = torch.triu(causal_mask, diagonal=1)
1055
+ causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
1056
+ causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
1057
+ if attention_mask is not None:
1058
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
1059
+ mask_length = attention_mask.shape[-1]
1060
+ padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
1061
+ padding_mask = padding_mask == 0
1062
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
1063
+ padding_mask, min_dtype
1064
+ )
1065
+
1066
+ return causal_mask
1067
+
1068
+
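+ # Worked example (illustrative): for sequence_length=3, target_length=3, no padding and
+ # cache_position=[0, 1, 2], the additive mask built above is (with m = torch.finfo(dtype).min):
+ #
+ #     [[0, m, m],
+ #      [0, 0, m],
+ #      [0, 0, 0]]
+ #
+ # i.e. position i may only attend to positions <= i.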
1069
+ class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...
1070
+
1071
+
1072
+ class InternLM3ForCausalLM(InternLM3PreTrainedModel, GenerationMixin):
1073
+ _auto_class = "AutoModelForCausalLM"
1074
+ _tied_weights_keys = ["lm_head.weight"]
1075
+ _tp_plan = {"lm_head": "colwise_rep"}
1076
+
1077
+ def __init__(self, config):
1078
+ super().__init__(config)
1079
+ self.model = InternLM3Model(config)
1080
+ self.vocab_size = config.vocab_size
1081
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1082
+
1083
+ # Initialize weights and apply final processing
1084
+ self.post_init()
1085
+
1086
+ def get_input_embeddings(self):
1087
+ return self.model.embed_tokens
1088
+
1089
+ def set_input_embeddings(self, value):
1090
+ self.model.embed_tokens = value
1091
+
1092
+ def get_output_embeddings(self):
1093
+ return self.lm_head
1094
+
1095
+ def set_output_embeddings(self, new_embeddings):
1096
+ self.lm_head = new_embeddings
1097
+
1098
+ def set_decoder(self, decoder):
1099
+ self.model = decoder
1100
+
1101
+ def get_decoder(self):
1102
+ return self.model
1103
+
1104
+ @add_start_docstrings_to_model_forward(INTERNLM3_INPUTS_DOCSTRING)
1105
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1106
+ def forward(
1107
+ self,
1108
+ input_ids: torch.LongTensor = None,
1109
+ attention_mask: Optional[torch.Tensor] = None,
1110
+ position_ids: Optional[torch.LongTensor] = None,
1111
+ past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
1112
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1113
+ labels: Optional[torch.LongTensor] = None,
1114
+ use_cache: Optional[bool] = None,
1115
+ output_attentions: Optional[bool] = None,
1116
+ output_hidden_states: Optional[bool] = None,
1117
+ return_dict: Optional[bool] = None,
1118
+ cache_position: Optional[torch.LongTensor] = None,
1119
+ num_logits_to_keep: int = 0,
1120
+ **kwargs: Unpack[KwargsForCausalLM],
1121
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1122
+ r"""
1123
+ Args:
1124
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1125
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1126
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1127
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1128
+
1129
+ num_logits_to_keep (`int`, *optional*):
1130
+ Calculate logits for the last `num_logits_to_keep` tokens. If `0`, calculate logits for all
1131
+ `input_ids` (special case). Only the last token's logits are needed for generation, and calculating them only for that
1132
+ token can save memory, which becomes quite significant for long sequences or large vocabulary sizes.
1133
+
1134
+ Returns:
1135
+
1136
+ Example:
1137
+
1138
+ ```python
1139
+ >>> from transformers import AutoTokenizer, InternLM3ForCausalLM
1140
+
1141
+ >>> model = InternLM3ForCausalLM.from_pretrained("internlm/InternLM3-8b-hf")
1142
+ >>> tokenizer = AutoTokenizer.from_pretrained("internlm/InternLM3-8b-hf")
1143
+
1144
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
1145
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1146
+
1147
+ >>> # Generate
1148
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1149
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1150
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
1151
+ ```"""
1152
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1153
+ output_hidden_states = (
1154
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1155
+ )
1156
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1157
+
1158
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1159
+ outputs = self.model(
1160
+ input_ids=input_ids,
1161
+ attention_mask=attention_mask,
1162
+ position_ids=position_ids,
1163
+ past_key_values=past_key_values,
1164
+ inputs_embeds=inputs_embeds,
1165
+ use_cache=use_cache,
1166
+ output_attentions=output_attentions,
1167
+ output_hidden_states=output_hidden_states,
1168
+ return_dict=return_dict,
1169
+ cache_position=cache_position,
1170
+ **kwargs,
1171
+ )
1172
+
1173
+ hidden_states = outputs[0]
1174
+ # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
1175
+ logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :])
1176
+
1177
+ loss = None
1178
+ if labels is not None:
1179
+ loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
1180
+
1181
+ if not return_dict:
1182
+ output = (logits,) + outputs[1:]
1183
+ return (loss,) + output if loss is not None else output
1184
+
1185
+ return CausalLMOutputWithPast(
1186
+ loss=loss,
1187
+ logits=logits,
1188
+ past_key_values=outputs.past_key_values,
1189
+ hidden_states=outputs.hidden_states,
1190
+ attentions=outputs.attentions,
1191
+ )
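+ # Hedged note on `num_logits_to_keep`: with num_logits_to_keep=1, the slice
+ # hidden_states[:, -1:, :] keeps only the last position before `lm_head`, e.g.
+ #
+ #     out = model(inputs.input_ids, num_logits_to_keep=1)
+ #     next_id = out.logits[:, -1, :].argmax(dim=-1)  # greedy next token (illustrative)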
special_tokens_map.json ADDED
@@ -0,0 +1,54 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|im_start|>",
4
+ "<|im_end|>",
5
+ "<|action_start|>",
6
+ "<|action_end|>",
7
+ "<|interpreter|>",
8
+ "<|plugin|>",
9
+ "<restate>",
10
+ "</restate>",
11
+ "<planning>",
12
+ "</planning>",
13
+ "<recollect>",
14
+ "</recollect>",
15
+ "<execution>",
16
+ "</execution>",
17
+ "<review>",
18
+ "</review>",
19
+ "<summarize>",
20
+ "</summarize>",
21
+ "<retry>",
22
+ "</retry>",
23
+ "<conclude>",
24
+ "</conclude>"
25
+ ],
26
+ "bos_token": {
27
+ "content": "<s>",
28
+ "lstrip": false,
29
+ "normalized": false,
30
+ "rstrip": false,
31
+ "single_word": false
32
+ },
33
+ "eos_token": {
34
+ "content": "</s>",
35
+ "lstrip": false,
36
+ "normalized": false,
37
+ "rstrip": false,
38
+ "single_word": false
39
+ },
40
+ "pad_token": {
41
+ "content": "</s>",
42
+ "lstrip": false,
43
+ "normalized": false,
44
+ "rstrip": false,
45
+ "single_word": false
46
+ },
47
+ "unk_token": {
48
+ "content": "<unk>",
49
+ "lstrip": false,
50
+ "normalized": false,
51
+ "rstrip": false,
52
+ "single_word": false
53
+ }
54
+ }
tokenization_internlm3.py ADDED
@@ -0,0 +1,294 @@
1
+ import os
2
+ from shutil import copyfile
3
+ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
4
+
5
+ import sentencepiece as spm
6
+ from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
7
+ from transformers.utils import logging
8
+
9
+ if TYPE_CHECKING:
10
+ from transformers.tokenization_utils_base import TextInput
11
+
12
+ logger = logging.get_logger(__name__)
13
+
14
+ VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}
15
+
16
+ SPIECE_UNDERLINE = "▁"
17
+
18
+
19
+ class InternLM3Tokenizer(PreTrainedTokenizer):
20
+ """
21
+ Construct an InternLM3 tokenizer. Based on byte-level Byte-Pair-Encoding. The default padding token is unset as there is
22
+ no padding token in the original model.
23
+
24
+ Args:
25
+ vocab_file (`str`):
26
+ Path to the vocabulary file.
27
+ unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<unk>"`):
28
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
29
+ token instead.
30
+ bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<s>"`):
31
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
32
+ eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"</s>"`):
33
+ The end of sequence token.
34
+ pad_token (`str` or `tokenizers.AddedToken`, *optional*):
35
+ A special token used to make arrays of tokens the same size for batching purposes. Will then be ignored by
36
+ attention mechanisms or loss computation.
37
+ sp_model_kwargs (`Dict[str, Any]`, *optional*):
38
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
39
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
40
+ to set:
41
+
42
+ - `enable_sampling`: Enable subword regularization.
43
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
44
+
45
+ - `nbest_size = {0,1}`: No sampling is performed.
46
+ - `nbest_size > 1`: samples from the nbest_size results.
47
+ - `nbest_size < 0`: assuming that nbest_size is infinite and samples from all hypotheses (lattice)
48
+ using forward-filtering-and-backward-sampling algorithm.
49
+
50
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
51
+ BPE-dropout.
52
+
53
+ add_bos_token (`bool`, *optional*, defaults to `True`):
54
+ Whether or not to add a `bos_token` at the start of sequences.
55
+ add_eos_token (`bool`, *optional*, defaults to `False`):
56
+ Whether or not to add an `eos_token` at the end of sequences.
57
+ clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
58
+ Whether or not to clean up spaces after decoding; cleanup consists of removing potential artifacts like
59
+ extra spaces.
60
+ use_default_system_prompt (`bool`, *optional*, defaults to `False`):
61
+ Whether or not the default system prompt for InternLM3 should be used.
62
+ spaces_between_special_tokens (`bool`, *optional*, defaults to `False`):
63
+ Whether or not to add spaces between special tokens.
64
+ spaces_for_interleaved_special_tokens (`bool`, *optional*, defaults to `False`):
65
+ Whether or not to add spaces between special tokens that are interleaved with normal tokens.
66
+ add_prefix_space (`bool`, *optional*, defaults to `True`):
67
+ Whether or not to add an initial space to the input. This allows treating the leading word just like any
68
+ other word. Again, this should be set with `from_slow=True` to make sure it's taken into account.
69
+ """
70
+ _auto_class = "AutoTokenizer"
71
+ vocab_files_names = VOCAB_FILES_NAMES
72
+ model_input_names = ["input_ids", "attention_mask"]
73
+
74
+ def __init__(
75
+ self,
76
+ vocab_file,
77
+ unk_token="<unk>",
78
+ bos_token="<s>",
79
+ eos_token="</s>",
80
+ pad_token=None,
81
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
82
+ add_bos_token=True,
83
+ add_eos_token=False,
84
+ clean_up_tokenization_spaces=False,
85
+ use_default_system_prompt=False,
86
+ spaces_between_special_tokens=False,
87
+ spaces_for_interleaved_special_tokens=False,
88
+ add_prefix_space=True,
89
+ **kwargs,
90
+ ):
91
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
92
+ bos_token = AddedToken(bos_token, normalized=False, special=True) if isinstance(bos_token, str) else bos_token
93
+ eos_token = AddedToken(eos_token, normalized=False, special=True) if isinstance(eos_token, str) else eos_token
94
+ unk_token = AddedToken(unk_token, normalized=False, special=True) if isinstance(unk_token, str) else unk_token
95
+ pad_token = AddedToken(pad_token, normalized=False, special=True) if isinstance(pad_token, str) else pad_token
96
+
97
+ self.vocab_file = vocab_file
98
+ self.add_bos_token = add_bos_token
99
+ self.add_eos_token = add_eos_token
100
+ self.use_default_system_prompt = use_default_system_prompt
101
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
102
+ self.sp_model.Load(vocab_file)
103
+ self.add_prefix_space = add_prefix_space
104
+ self.spaces_for_interleaved_special_tokens = spaces_for_interleaved_special_tokens
105
+
106
+ vocab_size = self.sp_model.get_piece_size()
107
+ self.decoder = {i: self.sp_model.id_to_piece(i) for i in range(vocab_size)}
108
+
109
+ super().__init__(
110
+ bos_token=bos_token,
111
+ eos_token=eos_token,
112
+ unk_token=unk_token,
113
+ pad_token=pad_token,
114
+ add_bos_token=add_bos_token,
115
+ add_eos_token=add_eos_token,
116
+ sp_model_kwargs=sp_model_kwargs,
117
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
118
+ use_default_system_prompt=use_default_system_prompt,
119
+ spaces_between_special_tokens=spaces_between_special_tokens,
120
+ add_prefix_space=add_prefix_space,
121
+ **kwargs,
122
+ )
123
+
124
+ def __getstate__(self):
125
+ state = self.__dict__.copy()
126
+ state["sp_model"] = None
127
+ state["sp_model_proto"] = self.sp_model.serialized_model_proto()
128
+ return state
129
+
130
+ def __setstate__(self, d):
131
+ self.__dict__.update(d)
132
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
133
+ self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
134
+
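+ # The pair above makes the tokenizer picklable: the SentencePiece processor (a C++ object)
+ # is dropped from the state and rebuilt from its serialized proto on load, e.g. (illustrative):
+ #
+ #     import pickle
+ #     tok_copy = pickle.loads(pickle.dumps(tok))  # `tok`: an existing InternLM3Tokenizer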
135
+ @property
136
+ def vocab_size(self):
137
+ """Returns vocab size"""
138
+ return self.sp_model.get_piece_size()
139
+
140
+ def get_vocab(self):
141
+ """Returns vocab as a dict"""
142
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
143
+ vocab.update(self.added_tokens_encoder)
144
+ return vocab
145
+
146
+ def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
147
+ """
148
+ Args:
149
+ text: TextInput
150
+ Simply calls PreTrainedTokenizer's method
151
+ """
152
+ return super().tokenize(text, **kwargs)
153
+
154
+ def _tokenize(self, text, **kwargs):
155
+ """
156
+ Args:
157
+ text: TextInput
158
+ Returns a tokenized string.
159
+ """
160
+ return self.sp_model.encode(text, out_type=str)
161
+
162
+ def _convert_token_to_id(self, token):
163
+ """Converts a token (str) in an id using the vocab."""
164
+ return self.sp_model.piece_to_id(token)
165
+
166
+ def _convert_id_to_token(self, index):
167
+ """Converts an index (integer) in a token (str) using the vocab."""
168
+ return self.decoder.get(index, "")
169
+
+     def convert_tokens_to_string(self, tokens):
+         """Converts a sequence of tokens (string) to a single string."""
+         # since we manually add the prefix space, we have to remove it when decoding
+         if tokens[0].startswith(SPIECE_UNDERLINE) and self.add_prefix_space:
+             tokens[0] = tokens[0][1:]
+
+         current_sub_tokens = []
+         out_string = ""
+         prev_is_special = False
+         for i, token in enumerate(tokens):
+             # make sure that special tokens are not decoded using sentencepiece model
+             if token in self.all_special_tokens:
+                 if not prev_is_special and i != 0 and self.spaces_for_interleaved_special_tokens:
+                     out_string += " "
+                 out_string += self.sp_model.decode(current_sub_tokens) + token
+                 prev_is_special = True
+                 current_sub_tokens = []
+             else:
+                 if (
+                     prev_is_special
+                     and i == 1
+                     and self.add_prefix_space
+                     and not token.startswith(SPIECE_UNDERLINE)
+                     and self.spaces_for_interleaved_special_tokens
+                 ):
+                     out_string += " "
+                 current_sub_tokens.append(token)
+                 prev_is_special = False
+         out_string += self.sp_model.decode(current_sub_tokens)
+         return out_string
+
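Aside: special tokens bypass the SentencePiece model during decoding, and spaces are inserted around them only when `spaces_for_interleaved_special_tokens` is set (it defaults to `False` above). A sketch reusing `tok`; the exact string depends on the vocabulary, but no space should be injected after `<s>` under the defaults:

```
ids = tok("Hi").input_ids    # add_bos_token=True prepends <s>
print(tok.decode(ids))       # expected '<s>Hi' with interleaved spacing disabled
```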
+     def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
+         """
+         Save the vocabulary and special tokens file to a directory.
+
+         Args:
+             save_directory (`str`):
+                 The directory in which to save the vocabulary.
+
+         Returns:
+             `Tuple(str)`: Paths to the files saved.
+         """
+         if not os.path.isdir(save_directory):
+             logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+             return
+         out_vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
+
+         if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+             copyfile(self.vocab_file, out_vocab_file)
+         elif not os.path.isfile(self.vocab_file):
+             with open(out_vocab_file, "wb") as fi:
+                 content_spiece_model = self.sp_model.serialized_model_proto()
+                 fi.write(content_spiece_model)
+
+         return (out_vocab_file,)
+
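Aside: `save_vocabulary` either copies the original model file or re-serializes the in-memory proto. A sketch reusing `tok`; the filename comes from `VOCAB_FILES_NAMES["vocab_file"]`, which for this repo is presumably the shipped `tokenizer.model`:

```
import os
import tempfile

with tempfile.TemporaryDirectory() as d:
    (out_path,) = tok.save_vocabulary(d)
    assert os.path.basename(out_path) == "tokenizer.model"  # assumed mapping
```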
+     def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+         bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+         eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+         output = bos_token_id + token_ids_0 + eos_token_id
+
+         if token_ids_1 is not None:
+             output = output + bos_token_id + token_ids_1 + eos_token_id
+
+         return output
+
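Aside: with the shipped defaults (`add_bos_token=True`, `add_eos_token=False` in tokenizer_config.json below), a single sequence gains only a leading bos, and a pair becomes bos + ids_0 + bos + ids_1:

```
ids = [100, 101]
assert tok.build_inputs_with_special_tokens(ids) == [tok.bos_token_id] + ids
```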
+     def get_special_tokens_mask(
+         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+     ) -> List[int]:
+         """
+         Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+         special tokens using the tokenizer `prepare_for_model` method.
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of IDs.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+             already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+                 Whether or not the token list is already formatted with special tokens for the model.
+
+         Returns:
+             `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+         """
+         if already_has_special_tokens:
+             return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
+
+         bos_token_id = [1] if self.add_bos_token else []
+         eos_token_id = [1] if self.add_eos_token else []
+
+         if token_ids_1 is None:
+             return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
+         return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id + bos_token_id + ([0] * len(token_ids_1)) + eos_token_id
+
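Aside: under the same defaults the mask marks only the prepended bos:

```
assert tok.get_special_tokens_mask([100, 101]) == [1, 0, 0]  # 1 = special (bos), 0 = sequence tokens
```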
+     def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
+         """
+         Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT
+         sequence pair mask has the following format:
+
+         ```
+         0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+         | first sequence | second sequence |
+         ```
+
+         If token_ids_1 is None, only returns the first portion of the mask (0s).
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of ids.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+
+         Returns:
+             `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+         """
+         bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+         eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+         output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)
+
+         if token_ids_1 is not None:
+             output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)
+
+         return output
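Aside: token type ids under the same defaults; segment 0 covers bos plus the first sequence, segment 1 covers the bos prepended to the second:

```
assert tok.create_token_type_ids_from_sequences([100, 101], [102]) == [0, 0, 0, 1, 1]
```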
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bcacff3229854f5103ee7a85473a30ca9a8b3a68f3aae9b7479574b23ac2256b
+ size 2475075
tokenizer_config.json ADDED
@@ -0,0 +1,249 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "add_prefix_space": true,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "128111": {
+       "content": "<restate>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "128112": {
+       "content": "</restate>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "128113": {
+       "content": "<planning>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "128114": {
+       "content": "</planning>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "128115": {
+       "content": "<recollect>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "128116": {
+       "content": "</recollect>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "128117": {
+       "content": "<execution>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "128118": {
+       "content": "</execution>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "128119": {
+       "content": "<review>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "128120": {
+       "content": "</review>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "128121": {
+       "content": "<summarize>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "128122": {
+       "content": "</summarize>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "128123": {
+       "content": "<retry>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "128124": {
+       "content": "</retry>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "128125": {
+       "content": "<conclude>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "128126": {
+       "content": "</conclude>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "128127": {
+       "content": "<|plugin|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "128128": {
+       "content": "<|interpreter|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "128129": {
+       "content": "<|action_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "128130": {
+       "content": "<|action_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "128131": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "128132": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|action_start|>",
+     "<|action_end|>",
+     "<|interpreter|>",
+     "<|plugin|>",
+     "<restate>",
+     "</restate>",
+     "<planning>",
+     "</planning>",
+     "<recollect>",
+     "</recollect>",
+     "<execution>",
+     "</execution>",
+     "<review>",
+     "</review>",
+     "<summarize>",
+     "</summarize>",
+     "<retry>",
+     "</retry>",
+     "<conclude>",
+     "</conclude>"
+   ],
+   "auto_map": {
+     "AutoTokenizer": [
+       "tokenization_internlm3.InternLM3Tokenizer",
+       null
+     ]
+   },
+   "bos_token": "<s>",
+   "chat_template": "{{ bos_token }}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "extra_special_tokens": {},
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "</s>",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "InternLM3Tokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }
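Aside (editor's sketch, not part of the commit): the `chat_template` above renders ChatML-style turns after the bos token. A minimal sketch of how it expands, assuming the duplicated repo:

```
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("internlm/internlm3-8b-instruct", trust_remote_code=True)
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hi"},
]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
# '<s><|im_start|>system\nYou are a helpful assistant.<|im_end|>\n'
# '<|im_start|>user\nHi<|im_end|>\n<|im_start|>assistant\n'
```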