Upload folder using huggingface_hub
- .gitattributes +1 -0
- LICENSE +202 -0
- MTEB_result.json +0 -0
- added_tokens.json +24 -0
- config.json +37 -0
- config_sentence_transformers.json +30 -0
- configuration_cge2.py +87 -0
- merges.txt +0 -0
- model.safetensors +3 -0
- modeling_cge2.py +631 -0
- special_tokens_map.json +31 -0
- tokenizer.json +3 -0
- tokenizer_config.json +208 -0
- vocab.json +0 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
LICENSE
ADDED
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2024 Alibaba Cloud
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
MTEB_result.json
ADDED
The diff for this file is too large to render. See raw diff.
added_tokens.json
ADDED
@@ -0,0 +1,24 @@
+{
+  "</tool_call>": 151658,
+  "<tool_call>": 151657,
+  "<|box_end|>": 151649,
+  "<|box_start|>": 151648,
+  "<|endoftext|>": 151643,
+  "<|file_sep|>": 151664,
+  "<|fim_middle|>": 151660,
+  "<|fim_pad|>": 151662,
+  "<|fim_prefix|>": 151659,
+  "<|fim_suffix|>": 151661,
+  "<|im_end|>": 151645,
+  "<|im_start|>": 151644,
+  "<|image_pad|>": 151655,
+  "<|object_ref_end|>": 151647,
+  "<|object_ref_start|>": 151646,
+  "<|quad_end|>": 151651,
+  "<|quad_start|>": 151650,
+  "<|repo_name|>": 151663,
+  "<|video_pad|>": 151656,
+  "<|vision_end|>": 151653,
+  "<|vision_pad|>": 151654,
+  "<|vision_start|>": 151652
+}
config.json
ADDED
@@ -0,0 +1,37 @@
+{
+  "architectures": [
+    "CgeForEmbedding"
+  ],
+  "attention_dropout": 0.0,
+  "attn_implementation": "flash_attention_2",
+  "auto_map": {
+    "AutoConfig": "configuration_cge2.CGEConfig",
+    "AutoModel": "modeling_cge2.CgeForEmbedding"
+  },
+  "compressed_dim": 896,
+  "embedding_method": "pma",
+  "hidden_act": "silu",
+  "hidden_size": 896,
+  "initializer_range": 0.02,
+  "intermediate_size": 4864,
+  "max_position_embeddings": 32768,
+  "max_window_layers": 24,
+  "model_type": "cge2",
+  "num_attention_heads": 14,
+  "num_hidden_layers": 24,
+  "num_key_value_heads": 2,
+  "padding_side": "left",
+  "pma_ln": true,
+  "pma_norm": false,
+  "pma_norm_mode": "post_normal",
+  "pma_num_heads": 32,
+  "rms_norm_eps": 1e-06,
+  "rope_theta": 1000000.0,
+  "sliding_window": null,
+  "tokenizer_name_or_path": "/ainative/muti-modal/yuhang/431428/download_models/CGE2_0B5_1841",
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.51.3",
+  "use_cache": true,
+  "use_sliding_window": false,
+  "vocab_size": 151936
+}
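Because config.json registers the custom classes under auto_map, this checkpoint only loads through transformers with trust_remote_code=True. A minimal loading sketch; the repo id below is a placeholder, not something this commit confirms:

import torch
from transformers import AutoModel

# "org/cge2-code-embedding" is a hypothetical repo id; substitute the real one.
model = AutoModel.from_pretrained(
    "org/cge2-code-embedding",
    trust_remote_code=True,      # resolves auto_map -> modeling_cge2.CgeForEmbedding
    torch_dtype=torch.bfloat16,  # matches "torch_dtype" in config.json
)
vecs = model.encode(["def add(a, b):\n    return a + b"])  # encode() is defined in modeling_cge2.py below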
config_sentence_transformers.json
ADDED
@@ -0,0 +1,30 @@
+{
+  "prompts": {
+    "CodeEditSearchRetrieval-query": "Retrieve the diff code that relevant the following query:\n",
+    "CodeSearchNetRetrieval-query": "Retrieve the code that solves the following query:\n",
+    "AppsRetrieval-query": "Given a problem description from a programming contest, retrieve code examples that can assist in solving it.\n",
+    "CodeFeedbackMT-query": "Given a multi-turn conversation history that includes both text and code, retrieve relevant multi-modal answers composed of text and code that address the ongoing discussion.\n",
+    "CodeFeedbackST-query": "Given a single-turn question composed of text and code, retrieve suitable answers that also mix text and code to provide helpful feedback.\n",
+    "CodeSearchNetCCRetrieval-query": "Given an initial code segment, retrieve the subsequent segment that continues the code.\n",
+    "CodeTransOceanContest-query": "Given a Python code snippet, retrieve its semantically equivalent version written in C++.\n",
+    "CodeTransOceanDL-query": "Given a code snippet, retrieve a semantically equivalent implementation of the same code.\n",
+    "COIRCodeSearchNetRetrieval-query": "Given a code snippet, retrieve its corresponding document string that summarizes its functionality.\n",
+    "CosQA-query": "Given a query from a web search, retrieve code that is helpful in addressing the query.\n",
+    "StackOverflowQA-query": "Given a question combining text and code, retrieve relevant answers that also contain both text and code snippets and can address the question.\n",
+    "SyntheticText2SQL-query": "Given a natural language question, retrieve SQL queries that serve as appropriate responses.\n",
+    "CodeEditSearchRetrieval-passage": "Retrieved Answer:",
+    "CodeSearchNetRetrieval-passage": "Retrieved Answer:",
+    "AppsRetrieval-passage": "Retrieved Answer:",
+    "CodeFeedbackMT-passage": "Retrieved Answer:",
+    "CodeFeedbackST-passage": "Retrieved Answer:",
+    "CodeSearchNetCCRetrieval-passage": "Retrieved Answer:",
+    "CodeTransOceanContest-passage": "Retrieved Answer:",
+    "CodeTransOceanDL-passage": "Retrieved Answer:",
+    "COIRCodeSearchNetRetrieval-passage": "Retrieved Answer:",
+    "CosQA-passage": "Retrieved Answer:",
+    "StackOverflowQA-passage": "Retrieved Answer:",
+    "SyntheticText2SQL-passage": "Retrieved Answer:"
+  },
+  "default_prompt_name": null,
+  "similarity_fn_name": "cosine"
+}
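These prompts are keyed by MTEB/CoIR task name: one instruction per query side, and a fixed "Retrieved Answer:" prefix for every passage side. If the model is driven through sentence-transformers, the names select the prompt at encode time. A hedged sketch (placeholder repo id, and smooth SentenceTransformer support for this custom architecture is an assumption):

from sentence_transformers import SentenceTransformer

model = SentenceTransformer("org/cge2-code-embedding", trust_remote_code=True)  # placeholder id
# prompt_name values come straight from config_sentence_transformers.json
q = model.encode(["How do I reverse a list in Python?"], prompt_name="CosQA-query")
p = model.encode(["items[::-1] returns a reversed copy."], prompt_name="CosQA-passage")
print(model.similarity(q, p))  # cosine, per "similarity_fn_name"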
configuration_cge2.py
ADDED
@@ -0,0 +1,87 @@
+from transformers import PretrainedConfig
+
+class CGEConfig(PretrainedConfig):
+    model_type = "cge2"
+    keys_to_ignore_at_inference = ["past_key_values"]
+
+    base_model_tp_plan = {
+        "layers.*.self_attn.q_proj": "colwise",
+        "layers.*.self_attn.k_proj": "colwise",
+        "layers.*.self_attn.v_proj": "colwise",
+        "layers.*.self_attn.o_proj": "rowwise",
+        "layers.*.mlp.gate_proj": "colwise",
+        "layers.*.mlp.up_proj": "colwise",
+        "layers.*.mlp.down_proj": "rowwise",
+    }
+    base_model_pp_plan = {
+        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
+        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
+        "norm": (["hidden_states"], ["hidden_states"]),
+    }
+
+    def __init__(
+        self,
+        attention_dropout=0.0,
+        bos_token_id=151643,
+        eos_token_id=151643,
+        hidden_act="silu",
+        hidden_size=896,
+        initializer_range=0.02,
+        intermediate_size=4864,
+        max_position_embeddings=32768,
+        max_window_layers=24,
+        model_type="cge2",
+        num_attention_heads=14,
+        num_hidden_layers=24,
+        num_key_value_heads=2,
+        rms_norm_eps=1e-6,
+        rope_theta=1000000.0,
+        sliding_window=32768,
+        tie_word_embeddings=True,
+        torch_dtype="bfloat16",
+        transformers_version="4.43.1",
+        use_cache=True,
+        use_sliding_window=False,
+        vocab_size=151936,
+        **kwargs,
+    ):
+        self.vocab_size = vocab_size
+        self.max_position_embeddings = max_position_embeddings
+        self.hidden_size = hidden_size
+        self.intermediate_size = intermediate_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+        self.use_sliding_window = use_sliding_window
+        self.sliding_window = sliding_window if use_sliding_window else None
+        self.max_window_layers = max_window_layers
+
+        # for backward compatibility
+        if num_key_value_heads is None:
+            num_key_value_heads = num_attention_heads
+
+        self.num_key_value_heads = num_key_value_heads
+        self.hidden_act = hidden_act
+        self.initializer_range = initializer_range
+        self.rms_norm_eps = rms_norm_eps
+        self.use_cache = use_cache
+        self.rope_theta = rope_theta
+        self.attention_dropout = attention_dropout
+
+        super().__init__(
+            tie_word_embeddings=tie_word_embeddings,
+            **kwargs,
+        )
+
+    def to_dict(self):
+        output = super().to_dict()
+
+        keys_to_remove = [
+            "base_model"
+        ]
+
+        for key in keys_to_remove:
+            output.pop(key, None)
+
+        return output
+
+__all__ = ["CGEConfig"]
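Two details of this config are worth noting: sliding_window is stored as None whenever use_sliding_window is False (which is why config.json above shows "sliding_window": null), and to_dict() strips a "base_model" key before serialization. A quick sanity check, assuming the module is importable:

from configuration_cge2 import CGEConfig  # assumes the file is on the import path

cfg = CGEConfig()                      # defaults mirror the 0.5B Qwen2 geometry above
assert cfg.sliding_window is None      # use_sliding_window defaults to False
assert cfg.num_key_value_heads == 2    # grouped-query attention: 14 query heads, 2 KV heads
assert "base_model" not in cfg.to_dict()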
merges.txt
ADDED
The diff for this file is too large to render. See raw diff.
model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:533dfdc4667f37c8040b9e413588c36836ce3adf760addd0d1823c4f080910d9
+size 994540592
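These three lines are a Git LFS pointer, not the weights themselves; the oid and size let you verify a downloaded copy. A hedged sketch (the local path is an assumption):

import hashlib, os

path = "model.safetensors"  # assumed local download location
expected_oid = "533dfdc4667f37c8040b9e413588c36836ce3adf760addd0d1823c4f080910d9"

h = hashlib.sha256()
with open(path, "rb") as f:
    for block in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(block)
assert os.path.getsize(path) == 994540592  # size from the pointer
assert h.hexdigest() == expected_oid       # oid from the pointer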
modeling_cge2.py
ADDED
@@ -0,0 +1,631 @@
+import os
+import gc
+import inspect
+import math
+import multiprocessing as mp
+import queue
+from multiprocessing import Queue
+import warnings
+from typing import Any, Union, List, Dict, Literal, Optional
+import torch
+import torch.nn.functional as F
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+from transformers import PretrainedConfig
+
+from transformers import Qwen2Config
+from transformers.activations import ACT2FN
+from transformers.cache_utils import Cache, DynamicCache
+from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa, _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa
+from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
+from transformers.modeling_utils import PreTrainedModel
+from transformers.utils import (
+    add_start_docstrings,
+    add_start_docstrings_to_model_forward,
+    is_flash_attn_2_available,
+    is_flash_attn_greater_or_equal_2_10,
+    is_torch_npu_available,  # needed by get_target_devices below; missing from the upload
+    logging,
+    replace_return_docstrings,
+)
+import numpy as np
+from transformers import Qwen2Config
+from transformers import Qwen2ForCausalLM
+import inspect
+import math
+import os
+import warnings
+from typing import List, Optional, Tuple, Union
+from tqdm import tqdm, trange
+import torch
+import torch.nn.functional as F
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from transformers.activations import ACT2FN
+from transformers.cache_utils import Cache, DynamicCache
+from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa, _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa
+from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
+from transformers.modeling_utils import PreTrainedModel
+from transformers.utils import (
+    add_start_docstrings,
+    add_start_docstrings_to_model_forward,
+    is_flash_attn_2_available,
+    is_flash_attn_greater_or_equal_2_10,
+    logging,
+    replace_return_docstrings,
+)
+import numpy as np
+import torch
+import os
+import argparse
+import json
+from tqdm import tqdm
+from typing import cast, List, Union, Tuple
+from transformers import AutoTokenizer, AutoModel  # pylint: disable=C0413
+from peft import LoraConfig, get_peft_model, TaskType
+import time
+import torch.nn.functional as F
+import sys
+import time
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import numpy as np
+from tqdm import tqdm, trange
+from collections import defaultdict
+from transformers import AutoTokenizer, AutoModel, AutoModelForCausalLM, AutoConfig
+import torch.distributed as dist
+from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+import sys
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import math
+import re
+import logging
+logging.getLogger().setLevel(logging.INFO)
+from .configuration_cge2 import CGEConfig
+from transformers.models.qwen2.modeling_qwen2 import Qwen2DecoderLayer, Qwen2Attention
+
+class MAB_POST(nn.Module):
+    def __init__(self, dim_Q, dim_K, dim_V, num_heads, ln=False):
+        super(MAB_POST, self).__init__()
+        self.dim_V = dim_V
+        self.num_heads = num_heads
+        self.fc_q = nn.Linear(dim_Q, dim_V)
+        self.fc_k = nn.Linear(dim_K, dim_V)
+        self.fc_v = nn.Linear(dim_K, dim_V)
+        if ln:
+            self.ln0 = nn.LayerNorm(dim_V)
+            self.ln1 = nn.LayerNorm(dim_V)
+        self.fc_o = nn.Linear(dim_V, dim_V)
+        nn.init.xavier_uniform_(self.fc_q.weight)
+        nn.init.xavier_uniform_(self.fc_k.weight)
+        nn.init.xavier_uniform_(self.fc_v.weight)
+        nn.init.xavier_uniform_(self.fc_o.weight)
+
+    def forward(self, Q, K, pad_mask=None):
+
+        Q_ = self.fc_q(Q)
+        K_, V_ = self.fc_k(K), self.fc_v(K)
+
+        dim_split = self.dim_V // self.num_heads
+        Q_ = torch.cat(Q_.split(dim_split, 2), 0)
+        K_ = torch.cat(K_.split(dim_split, 2), 0)
+        V_ = torch.cat(V_.split(dim_split, 2), 0)
+
+        pad_mask = pad_mask.unsqueeze(1).repeat(self.num_heads, Q.size(1), 1)
+        score = Q_.bmm(K_.transpose(1,2))/math.sqrt(self.dim_V)
+        score = score.masked_fill(pad_mask == 0, -1e12)
+        A = torch.softmax(score, 2)
+        A = A * pad_mask
+        O = torch.cat(A.bmm(V_).split(Q.size(0), 0), 2)
+        O = Q + O
+        O = O if getattr(self, 'ln0', None) is None else self.ln0(O)
+        O = O + F.relu(self.fc_o(O))
+        O = O if getattr(self, 'ln1', None) is None else self.ln1(O)
+        return O
+
+
+class PMA(nn.Module):
+    def __init__(self, dim, compressed_dim, num_heads, num_seeds, ln=False, pma_mode=None):
+        super(PMA, self).__init__()
+        self.S = nn.Parameter(torch.Tensor(1, num_seeds, compressed_dim))
+        nn.init.xavier_uniform_(self.S)
+        if pma_mode == 'post_normal':
+            self.mab = MAB_POST(compressed_dim, dim, compressed_dim, num_heads, ln=ln)
+        else:
+            raise ValueError(f"Error, the pma_mode {pma_mode} is not implemented !")
+
+    def forward(self, X, pad_mask):
+        if self.S.dtype != torch.bfloat16:
+            X = X.float()
+        return self.mab(self.S.repeat(X.size(0), 1, 1), X, pad_mask)
+
+
+class MAB_POST_v2(nn.Module):
+    def __init__(self, dim_Q, dim_K, dim_V, num_heads, ln=False):
+        super(MAB_POST_v2, self).__init__()
+        self.dim_V = dim_V
+        self.num_heads = num_heads
+        self.fc_q = nn.Linear(dim_Q, dim_V)
+        self.fc_k = nn.Linear(dim_K, dim_V)
+        self.fc_v = nn.Linear(dim_K, dim_V)
+
+        if ln:
+            self.ln0 = nn.LayerNorm(dim_V)
+            self.ln1 = nn.LayerNorm(dim_V)
+        self.fc_o = nn.Linear(dim_V, dim_V)
+        nn.init.xavier_uniform_(self.fc_q.weight)
+        nn.init.xavier_uniform_(self.fc_k.weight)
+        nn.init.xavier_uniform_(self.fc_v.weight)
+        nn.init.xavier_uniform_(self.fc_o.weight)
+
+    # Q(B, num_seed, D), pad_mask (bs, seq), Post-LN
+    def forward(self, Q, K, pad_mask=None):
+
+        Q_tmp = self.fc_q(Q)  # B, num_seed, C
+        K_, V_ = self.fc_k(K), self.fc_v(K)  # B, L, C
+
+        dim_split = self.dim_V // self.num_heads
+        Q_ = torch.cat(Q_tmp.split(dim_split, 2), 0)  # (B*num_head, num_seed, C)
+        K_ = torch.cat(K_.split(dim_split, 2), 0)  # (B*num_head, L, C)
+        V_ = torch.cat(V_.split(dim_split, 2), 0)  # (B*num_head, L, C)
+
+        pad_mask = pad_mask.unsqueeze(1).repeat(self.num_heads, Q.size(1), 1)  # (B*num_head, num_seed, L)
+        score = Q_.bmm(K_.transpose(1,2))/math.sqrt(self.dim_V)  # (B*num_head, num_seed, L)
+        score = score.masked_fill(pad_mask == 0, -1e12)  # B, num_seed, L
+        A = torch.softmax(score, 2)  # (B*num_head, num_seed, L)
+        A = A * pad_mask
+        O = torch.cat(A.bmm(V_).split(Q.size(0), 0), 2)  # (B, num_seed, D)
+        O = Q_tmp + O
+        # O = torch.cat((Q_ + A.bmm(V_)).split(Q.size(0), 0), 2)
+        O = O if getattr(self, 'ln0', None) is None else self.ln0(O)
+        O = O + F.relu(self.fc_o(O))
+        O = O if getattr(self, 'ln1', None) is None else self.ln1(O)
+        return O
+
+
+class PMA_v2(nn.Module):
+    def __init__(self, dim, compressed_dim, num_heads, num_seeds, ln=False):
+        super(PMA_v2, self).__init__()
+        self.S = nn.Parameter(torch.Tensor(1, num_seeds, dim))
+        nn.init.xavier_uniform_(self.S)
+        # if pma_mode == 'post_normal':
+        self.mab = MAB_POST_v2(dim, dim, compressed_dim, num_heads, ln=ln)
+        # elif pma_mode == 'pre_normal':
+        #     self.mab = MAB_PRE_NORMAL(dim, dim, compressed_dim, num_heads, ln=ln)
+        # elif pma_mode == 'pre_gptj':
+        #     self.mab = MAB_PRE_GPTJ(dim, dim, compressed_dim, num_heads, ln=ln)
+        # else:
+        #     raise ValueError(f"Error, the pma_mode {pma_mode} is not implemented !")
+
+    # X: (bs, seq, emb), pad_mask: (bs, seq)
+    def forward(self, X, pad_mask):
+        if self.S.dtype != torch.bfloat16:
+            X = X.float()
+        return self.mab(self.S.expand(X.size(0), -1, -1), X, pad_mask)
+
+
+class CGEModel(PreTrainedModel):
+    config_class = CGEConfig
+    config: CGEConfig
+    base_model_prefix = "model"
+    supports_gradient_checkpointing = True
+    _no_split_modules = ["Qwen2DecoderLayer"]
+    _skip_keys_device_placement = ["past_key_values"]
+    _supports_flash_attn = True
+    _supports_sdpa = True
+    _supports_flex_attn = True
+
+    _can_compile_fullgraph = True
+    _supports_attention_backend = True
+    _can_record_outputs = {
+        "hidden_states": Qwen2DecoderLayer,
+        "attentions": Qwen2Attention,
+    }
+
+
+class CgeForEmbedding(CGEModel):
+
+    config_class = CGEConfig
+    model_type = "cge2"
+
+    def __init__(self, config):
+        super().__init__(config)
+        qwen_cfg = Qwen2Config.from_dict(config.to_dict())
+        self.plm_model = AutoModelForCausalLM.from_config(qwen_cfg)
+        self.embedding_method = config.embedding_method
+        self.inf_seq_length = 1024
+        self.padding_side = config.padding_side
+
+        self.emb_dim = self.plm_model.model.embed_tokens.weight.size(1)
+        self.keep_max_layer = self.plm_model.config.num_hidden_layers
+        self.num_heads = config.pma_num_heads
+        self.ln = config.pma_ln
+        self.norm = config.pma_norm
+        self.pma_mode = config.pma_norm_mode
+        self.compressed_dim = config.compressed_dim
+
+        self.mha_pma_disc = PMA_v2(self.emb_dim, self.compressed_dim, self.num_heads, 1, ln=self.ln)
+        self.pool = None
+        self.target_devices = self.get_target_devices(None)
+        self.tokenizer = AutoTokenizer.from_pretrained(config.tokenizer_name_or_path) if config.tokenizer_name_or_path is not None else None
+        self.config_class = CGEConfig
+
+    def pma_embedding(self, mha_pma, A, mask):
+        res = mha_pma(A, mask).squeeze(1)
+        return res
+
+    def get_hidden_states(self, **inputs):
+        outputs = self.plm_model(inputs['input_ids'], inputs['attention_mask'], output_hidden_states=True)
+        return outputs.hidden_states[self.keep_max_layer]
+
+    def get_sentence_embedding(self, embedding_method, hidden_states, emb_type, attention_mask):
+
+        if embedding_method == 'pma':
+
+            if emb_type == 'disc':
+                res_embedding = self.pma_embedding(self.mha_pma_disc, hidden_states, attention_mask)
+                if self.norm:
+                    res_embedding = torch.nn.functional.normalize(res_embedding, p=2.0, dim=-1, eps=1e-12, out=None)
+                return res_embedding
+            else:
+                raise NotImplementedError(f"emb type {emb_type} hasn't been implemented")
+        else:
+            raise NotImplementedError(f"embedding method {embedding_method} hasn't been implemented")
+
+    @staticmethod
+    def get_target_devices(devices: Union[str, int, List[str], List[int]]) -> List[str]:
+        """
+
+        Args:
+            devices (Union[str, int, List[str], List[int]]): specified devices, can be `str`, `int`, list of `str`, or list of `int`.
+
+        Raises:
+            ValueError: Devices should be a string or an integer or a list of strings or a list of integers.
+
+        Returns:
+            List[str]: A list of target devices in format.
+        """
+        if devices is None:
+            if torch.cuda.is_available():
+                return [f"cuda:{i}" for i in range(torch.cuda.device_count())]
+            elif is_torch_npu_available():
+                return [f"npu:{i}" for i in range(torch.npu.device_count())]
+            elif hasattr(torch, "musa") and torch.musa.is_available():
+                return [f"musa:{i}" for i in range(torch.musa.device_count())]
+            elif torch.backends.mps.is_available():
+                try:
+                    return [f"mps:{i}" for i in range(torch.mps.device_count())]
+                except:
+                    return ["mps"]
+            else:
+                return ["cpu"]
+        elif isinstance(devices, str):
+            return [devices]
+        elif isinstance(devices, int):
+            if hasattr(torch, "musa") and torch.musa.is_available():
+                return [f"musa:{devices}"]
+            else:
+                return [f"cuda:{devices}"]
+        elif isinstance(devices, list):
+            if isinstance(devices[0], str):
+                return devices
+            elif isinstance(devices[0], int):
+                if hasattr(torch, "musa") and torch.musa.is_available():
+                    return [f"musa:{device}" for device in devices]
+                else:
+                    return [f"cuda:{device}" for device in devices]
+            else:
+                raise ValueError("devices should be a string or an integer or a list of strings or a list of integers.")
+        else:
+            raise ValueError("devices should be a string or an integer or a list of strings or a list of integers.")
+
+    # adapted from https://github.com/UKPLab/sentence-transformers/blob/1802076d4eae42ff0a5629e1b04e75785d4e193b/sentence_transformers/SentenceTransformer.py#L807
+
+    def start_multi_process_pool(
+        self,
+        process_target_func: Any,
+    ) -> Dict[Literal["input", "output", "processes"], Any]:
+        """
+        Starts a multi-process pool to process the encoding with several independent processes
+        via :meth:`SentenceTransformer.encode_multi_process <sentence_transformers.SentenceTransformer.encode_multi_process>`.
+
+        This method is recommended if you want to encode on multiple GPUs or CPUs. It is advised
+        to start only one process per GPU. This method works together with encode_multi_process
+        and stop_multi_process_pool.
+
+        Returns:
+            Dict[str, Any]: A dictionary with the target processes, an input queue, and an output queue.
+        """
+        if self.plm_model is None or self.mha_pma_disc is None:
+            raise ValueError("Model is not initialized.")
+
+        logging.info("Start multi-process pool on devices: {}".format(", ".join(map(str, self.target_devices))))
+
+        self.to("cpu")
+        self.share_memory()
+        ctx = mp.get_context("spawn")
+        input_queue = ctx.Queue()
+        output_queue = ctx.Queue()
+        processes = []
+
+        for device_id in tqdm(self.target_devices, desc='initial target device'):
+            p = ctx.Process(
+                target=process_target_func,
+                args=(device_id, self, input_queue, output_queue),
+                daemon=True,
+            )
+            p.start()
+            processes.append(p)
+
+        return {"input": input_queue, "output": output_queue, "processes": processes}
+
+    @staticmethod
+    def _encode_multi_process_worker(
+        target_device: str, model: 'CgeForEmbedding', input_queue: Queue, results_queue: Queue
+    ) -> None:
+        model = model.to(target_device)
+        while True:
+            try:
+                chunk_id, sentences, kwargs = (
+                    input_queue.get()
+                )
+                embeddings = model.encode_single_device(
+                    sentences,
+                    device=target_device,
+                    **kwargs
+                )
+
+                results_queue.put([chunk_id, embeddings])
+            except queue.Empty:
+                break
+
+    def encode_multi_process(
+        self,
+        sentences: List[str],
+        pool: Dict[Literal["input", "output", "processes"], Any],
+        **kwargs
+    ):
+
+        chunk_size = math.ceil(len(sentences) / len(pool["processes"]))
+
+        input_queue = pool["input"]
+        last_chunk_id = 0
+        chunk = []
+
+        for sentence in sentences:
+            chunk.append(sentence)
+            if len(chunk) >= chunk_size:
+                input_queue.put(
+                    [last_chunk_id, chunk, kwargs]
+                )
+                last_chunk_id += 1
+                chunk = []
+
+        if len(chunk) > 0:
+            input_queue.put([last_chunk_id, chunk, kwargs])
+            last_chunk_id += 1
+
+        output_queue = pool["output"]
+        results_list = sorted(
+            [output_queue.get() for _ in trange(last_chunk_id, desc="")],
+            key=lambda x: x[0],
+        )
+        embeddings = self._concatenate_results_from_multi_process([result[1] for result in results_list])
+        return embeddings
+
+    def _concatenate_results_from_multi_process(self, results_list: List[Union[torch.Tensor, np.ndarray, Any]]):
+        """concatenate and return the results from all the processes
+
+        Args:
+            results_list (List[Union[torch.Tensor, np.ndarray, Any]]): A list of results from all the processes.
+
+        Raises:
+            NotImplementedError: Unsupported type for results_list
+
+        Returns:
+            Union[torch.Tensor, np.ndarray]: return the embedding vectors in a numpy array or tensor.
+        """
+        if isinstance(results_list[0], torch.Tensor):
+            # move all tensors to the same device
+            results_list = [res.to(self.target_devices[0]) for res in results_list]
+            return torch.cat(results_list, dim=0)
+        elif isinstance(results_list[0], np.ndarray):
+            return np.concatenate(results_list, axis=0)
+        else:
+            raise NotImplementedError("Unsupported type for results_list")
+
+    def encode_single_device(
+        self,
+        sentences: Union[List[str], str],
+        batch_size: int = 16,
+        convert_to_numpy: bool = False,
+        convert_to_tensor: bool = True,
+        show_progress_bar: bool = True,
+        max_seq_length: int = 1024,
+        device: Optional[str] = None,
+        **kwargs: Any
+    ):
+        if max_seq_length is None:
+            max_seq_length = self.inf_seq_length
+
+        input_is_string = False
+        if isinstance(sentences, str) or not hasattr(sentences, "__len__"):
+            sentences = [sentences]
+            input_is_string = True
+        all_embeddings = []
+        length_sorted_idx = np.argsort([-len(s) for s in sentences])
+        sentences_sorted = [sentences[idx] for idx in length_sorted_idx]  # re-sort from longest to shortest
+        with torch.no_grad():
+            for start_index in trange(0, len(sentences), batch_size, desc="Batches", disable=not show_progress_bar):
+                sentences_batch = sentences_sorted[start_index: start_index + batch_size]
+                inputs = self.tokenizer(sentences_batch, padding=True, truncation=True, max_length=max_seq_length, return_tensors='pt').to(self.plm_model.device)
+                hidden_states = self.get_hidden_states(**inputs)
+                embeddings = self.get_sentence_embedding(self.embedding_method, hidden_states, 'disc', inputs['attention_mask'])
+                embeddings = embeddings.detach()
+                if convert_to_numpy:
+                    if embeddings.dtype == torch.bfloat16:
+                        embeddings = embeddings.cpu().to(torch.float32)
+                    else:
+                        embeddings = embeddings.cpu()
+                all_embeddings.extend(embeddings)
+        all_embeddings = [all_embeddings[idx] for idx in np.argsort(length_sorted_idx)]
+        if convert_to_tensor:
+            all_embeddings = torch.stack(all_embeddings)
+        elif convert_to_numpy:
+            all_embeddings = np.asarray([emb.numpy() for emb in all_embeddings])
+
+        if input_is_string:
+            all_embeddings = all_embeddings[0]
+        return all_embeddings
+
+    def encode(self, sentences, batch_size=16, convert_to_numpy=False,
+               convert_to_tensor=True, show_progress_bar=True, max_seq_length=None, **kwargs):
+        if max_seq_length is None:
+            max_seq_length = self.inf_seq_length
+
+        if convert_to_tensor == convert_to_numpy:
+            convert_to_tensor=True
+            convert_to_numpy=False
+
+        if isinstance(sentences, str) or len(self.target_devices) == 1:
+            return self.encode_single_device(
+                sentences,
+                batch_size=batch_size,
+                convert_to_numpy=convert_to_numpy,
+                convert_to_tensor=convert_to_tensor,
+                show_progress_bar=show_progress_bar,
+                max_seq_length=max_seq_length,
+                device=self.target_devices[0],
+                **kwargs
+            )
+        if self.pool is None:
+            self.pool = self.start_multi_process_pool(CgeForEmbedding._encode_multi_process_worker)
+
+        all_embeddings = []
+        length_sorted_idx = np.argsort([-len(s) for s in sentences])
+        sentences_sorted = [sentences[idx] for idx in length_sorted_idx]  # re-sort from longest to shortest
+        with torch.no_grad():
+            for start_index in trange(0, len(sentences), batch_size, desc="Batches", disable=not show_progress_bar):
+                sentences_batch = sentences_sorted[start_index: start_index + batch_size]
+                embeddings_batch = self.encode_multi_process(
+                    sentences_batch,
+                    self.pool,
+                    convert_to_numpy=convert_to_numpy,
+                    convert_to_tensor=convert_to_tensor,
+                    show_progress_bar=show_progress_bar,
+                    max_seq_length=max_seq_length,
+                    **kwargs
+                )
+                embeddings_batch = embeddings_batch.detach()
+                if convert_to_numpy:
+                    if embeddings_batch.dtype == torch.bfloat16:
+                        embeddings_batch = embeddings_batch.cpu().to(torch.float32)
+                    else:
+                        embeddings_batch = embeddings_batch.cpu()
+                all_embeddings.extend(embeddings_batch)
+        all_embeddings = [all_embeddings[idx] for idx in np.argsort(length_sorted_idx)]
+        if convert_to_tensor:
+            all_embeddings = torch.stack(all_embeddings)
+        elif convert_to_numpy:
+            all_embeddings = np.asarray([emb.numpy() for emb in all_embeddings])
+
+        return all_embeddings
+
+    def encode_queries(self, sentences, batch_size=16, convert_to_numpy=False,
+                       convert_to_tensor=True, show_progress_bar=True, max_seq_length=None, **kwargs):
+        if max_seq_length is None:
+            max_seq_length = self.inf_seq_length
+
+        if convert_to_tensor == convert_to_numpy:
+            convert_to_tensor=True
+            convert_to_numpy=False
+
+        return self.encode(
+            sentences=sentences,
+            batch_size=batch_size,
+            convert_to_numpy=convert_to_numpy,
+            convert_to_tensor=convert_to_tensor,
+            show_progress_bar=show_progress_bar,
+            max_seq_length=max_seq_length,
+            **kwargs
+        )
+
+    def encode_corpus(self, sentences, batch_size=16, convert_to_numpy=False,
+                      convert_to_tensor=True, show_progress_bar=True, max_seq_length=None, **kwargs):
+
+        if max_seq_length is None:
+            max_seq_length = self.inf_seq_length
+
+        if convert_to_tensor == convert_to_numpy:
+            convert_to_tensor=True
+            convert_to_numpy=False
+        sentences = [sentence['title']+' '+sentence['text'] for sentence in sentences]
+
+        return self.encode(
+            sentences=sentences,
+            batch_size=batch_size,
+            convert_to_numpy=convert_to_numpy,
+            convert_to_tensor=convert_to_tensor,
+            show_progress_bar=show_progress_bar,
+            max_seq_length=max_seq_length,
+            **kwargs
+        )
+
+    @staticmethod
+    def stop_multi_process_pool(pool: Dict[Literal["input", "output", "processes"], Any]) -> None:
+        """
+        Stops all processes started with start_multi_process_pool.
+
+        Args:
+            pool (Dict[str, object]): A dictionary containing the input queue, output queue, and process list.
+
+        Returns:
+            None
+        """
+        for p in pool["processes"]:
+            p.terminate()
+
+        for p in pool["processes"]:
+            p.join()
+            p.close()
+
+        pool["input"].close()
+        pool["output"].close()
+        pool = None
+
+    def stop_self_pool(self):
+        if self.pool is not None:
+            self.stop_multi_process_pool(self.pool)
+            self.pool = None
+        try:
+            self.plm_model.to('cpu')  # was `self.model`, which does not exist on this class
+            torch.cuda.empty_cache()
+        except:
+            pass
+        if gc is not None and callable(gc.collect):
+            gc.collect()
+
+    def __del__(self):
+        self.stop_self_pool()
+
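The pooling head above (PMA_v2 wrapping MAB_POST_v2) is "pooling by multihead attention" in the Set Transformer style: a single learned seed query attends over all token states, and the attended output becomes the sentence embedding, instead of mean or last-token pooling. A self-contained, single-head restatement of the idea for illustration (not the class itself; dimensions follow config.json):

import math
import torch

batch, seq_len, dim = 4, 128, 896
seed = torch.nn.Parameter(torch.empty(1, 1, dim))   # the single learned seed (num_seeds=1)
torch.nn.init.xavier_uniform_(seed)

hidden = torch.randn(batch, seq_len, dim)           # token states from the LM
mask = torch.ones(batch, seq_len)                   # 1 = real token, 0 = padding

# single-head version of the masked attention in MAB_POST_v2.forward
score = seed.expand(batch, -1, -1).bmm(hidden.transpose(1, 2)) / math.sqrt(dim)
score = score.masked_fill(mask.unsqueeze(1) == 0, -1e12)     # padding never attended
emb = torch.softmax(score, dim=2).bmm(hidden).squeeze(1)     # (batch, dim) sentence embedding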
special_tokens_map.json
ADDED
@@ -0,0 +1,31 @@
+{
+  "additional_special_tokens": [
+    "<|im_start|>",
+    "<|im_end|>",
+    "<|object_ref_start|>",
+    "<|object_ref_end|>",
+    "<|box_start|>",
+    "<|box_end|>",
+    "<|quad_start|>",
+    "<|quad_end|>",
+    "<|vision_start|>",
+    "<|vision_end|>",
+    "<|vision_pad|>",
+    "<|image_pad|>",
+    "<|video_pad|>"
+  ],
+  "eos_token": {
+    "content": "<|im_end|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9c5ae00e602b8860cbd784ba82a8aa14e8feecec692e7076590d014d7b7fdafa
+size 11421896
tokenizer_config.json
ADDED
@@ -0,0 +1,208 @@
+{
+  "add_bos_token": false,
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "151643": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151644": {
+      "content": "<|im_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151645": {
+      "content": "<|im_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151646": {
+      "content": "<|object_ref_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151647": {
+      "content": "<|object_ref_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151648": {
+      "content": "<|box_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151649": {
+      "content": "<|box_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151650": {
+      "content": "<|quad_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151651": {
+      "content": "<|quad_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151652": {
+      "content": "<|vision_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151653": {
+      "content": "<|vision_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151654": {
+      "content": "<|vision_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151655": {
+      "content": "<|image_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151656": {
+      "content": "<|video_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151657": {
+      "content": "<tool_call>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151658": {
+      "content": "</tool_call>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151659": {
+      "content": "<|fim_prefix|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151660": {
+      "content": "<|fim_middle|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151661": {
+      "content": "<|fim_suffix|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151662": {
+      "content": "<|fim_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151663": {
+      "content": "<|repo_name|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151664": {
+      "content": "<|file_sep|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    }
+  },
+  "additional_special_tokens": [
+    "<|im_start|>",
+    "<|im_end|>",
+    "<|object_ref_start|>",
+    "<|object_ref_end|>",
+    "<|box_start|>",
+    "<|box_end|>",
+    "<|quad_start|>",
+    "<|quad_end|>",
+    "<|vision_start|>",
+    "<|vision_end|>",
+    "<|vision_pad|>",
+    "<|image_pad|>",
+    "<|video_pad|>"
+  ],
+  "bos_token": null,
+  "chat_template": "{%- if tools %}\n    {{- '<|im_start|>system\\n' }}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- messages[0]['content'] }}\n    {%- else %}\n        {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n    {%- endif %}\n    {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n    {%- for tool in tools %}\n        {{- \"\\n\" }}\n        {{- tool | tojson }}\n    {%- endfor %}\n    {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n    {%- else %}\n        {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n    {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n    {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n        {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n    {%- elif message.role == \"assistant\" %}\n        {{- '<|im_start|>' + message.role }}\n        {%- if message.content %}\n            {{- '\\n' + message.content }}\n        {%- endif %}\n        {%- for tool_call in message.tool_calls %}\n            {%- if tool_call.function is defined %}\n                {%- set tool_call = tool_call.function %}\n            {%- endif %}\n            {{- '\\n<tool_call>\\n{\"name\": \"' }}\n            {{- tool_call.name }}\n            {{- '\", \"arguments\": ' }}\n            {{- tool_call.arguments | tojson }}\n            {{- '}\\n</tool_call>' }}\n        {%- endfor %}\n        {{- '<|im_end|>\\n' }}\n    {%- elif message.role == \"tool\" %}\n        {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n            {{- '<|im_start|>user' }}\n        {%- endif %}\n        {{- '\\n<tool_response>\\n' }}\n        {{- message.content }}\n        {{- '\\n</tool_response>' }}\n        {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n            {{- '<|im_end|>\\n' }}\n        {%- endif %}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|im_end|>",
+  "errors": "replace",
+  "extra_special_tokens": {},
+  "model_max_length": 32768,
+  "pad_token": "<|endoftext|>",
+  "split_special_tokens": false,
+  "tokenizer_class": "Qwen2Tokenizer",
+  "unk_token": null
+}
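The chat_template above is the standard Qwen2.5 ChatML template, so any prompt formatted through the tokenizer picks up the <|im_start|>role ... <|im_end|> framing and the default Qwen system message. A rendering sketch, again with a placeholder repo id:

from transformers import AutoTokenizer

# Placeholder repo id; apply_chat_template renders the Jinja template
# shipped in this tokenizer_config.json.
tokenizer = AutoTokenizer.from_pretrained("your-org/cge2-model")
messages = [{"role": "user", "content": "Represent this sentence for retrieval."}]
text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(text)
# <|im_start|>system
# You are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>
# <|im_start|>user
# Represent this sentence for retrieval.<|im_end|>
# <|im_start|>assistant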
vocab.json
ADDED
The diff for this file is too large to render. See raw diff.