TheBloke committed
Commit 4dc479e · 0 Parent(s)

Duplicate from TheBloke/llemma_7b-AWQ

Co-authored-by: Tom Jobbins <TheBloke@users.noreply.huggingface.co>

.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
LICENSE.txt ADDED
@@ -0,0 +1,126 @@
+ LLAMA 2 COMMUNITY LICENSE AGREEMENT
+ Llama 2 Version Release Date: July 18, 2023
+
+ "Agreement" means the terms and conditions for use, reproduction, distribution and
+ modification of the Llama Materials set forth herein.
+
+ "Documentation" means the specifications, manuals and documentation
+ accompanying Llama 2 distributed by Meta at ai.meta.com/resources/models-and-
+ libraries/llama-downloads/.
+
+ "Licensee" or "you" means you, or your employer or any other person or entity (if
+ you are entering into this Agreement on such person or entity's behalf), of the age
+ required under applicable laws, rules or regulations to provide legal consent and that
+ has legal authority to bind your employer or such other person or entity if you are
+ entering in this Agreement on their behalf.
+
+ "Llama 2" means the foundational large language models and software and
+ algorithms, including machine-learning model code, trained model weights,
+ inference-enabling code, training-enabling code, fine-tuning enabling code and other
+ elements of the foregoing distributed by Meta at ai.meta.com/resources/models-and-
+ libraries/llama-downloads/.
+
+ "Llama Materials" means, collectively, Meta's proprietary Llama 2 and
+ Documentation (and any portion thereof) made available under this Agreement.
+
+ "Meta" or "we" means Meta Platforms Ireland Limited (if you are located in or, if you
+ are an entity, your principal place of business is in the EEA or Switzerland) and Meta
+ Platforms, Inc. (if you are located outside of the EEA or Switzerland).
+
+ By clicking "I Accept" below or by using or distributing any portion or element of the
+ Llama Materials, you agree to be bound by this Agreement.
+
+ 1. License Rights and Redistribution.
+
+ a. Grant of Rights. You are granted a non-exclusive, worldwide, non-
+ transferable and royalty-free limited license under Meta's intellectual property or
+ other rights owned by Meta embodied in the Llama Materials to use, reproduce,
+ distribute, copy, create derivative works of, and make modifications to the Llama
+ Materials.
+
+ b. Redistribution and Use.
+
+ i. If you distribute or make the Llama Materials, or any derivative works
+ thereof, available to a third party, you shall provide a copy of this Agreement to such
+ third party.
+ ii. If you receive Llama Materials, or any derivative works thereof, from
+ a Licensee as part of an integrated end user product, then Section 2 of this
+ Agreement will not apply to you.
+
+ iii. You must retain in all copies of the Llama Materials that you
+ distribute the following attribution notice within a "Notice" text file distributed as a
+ part of such copies: "Llama 2 is licensed under the LLAMA 2 Community License,
+ Copyright (c) Meta Platforms, Inc. All Rights Reserved."
+
+ iv. Your use of the Llama Materials must comply with applicable laws
+ and regulations (including trade compliance laws and regulations) and adhere to the
+ Acceptable Use Policy for the Llama Materials (available at
+ https://ai.meta.com/llama/use-policy), which is hereby incorporated by reference into
+ this Agreement.
+
+ v. You will not use the Llama Materials or any output or results of the
+ Llama Materials to improve any other large language model (excluding Llama 2 or
+ derivative works thereof).
+
+ 2. Additional Commercial Terms. If, on the Llama 2 version release date, the
+ monthly active users of the products or services made available by or for Licensee,
+ or Licensee's affiliates, is greater than 700 million monthly active users in the
+ preceding calendar month, you must request a license from Meta, which Meta may
+ grant to you in its sole discretion, and you are not authorized to exercise any of the
+ rights under this Agreement unless or until Meta otherwise expressly grants you
+ such rights.
+
+ 3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE
+ LLAMA MATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE
+ PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY
+ WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR
+ FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE
+ FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING
+ THE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR
+ USE OF THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS.
+
+ 4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE
+ LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT,
+ NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS
+ AGREEMENT, FOR ANY LOST PROFITS OR ANY INDIRECT, SPECIAL,
+ CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN
+ IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF
+ ANY OF THE FOREGOING.
+
+ 5. Intellectual Property.
+
+ a. No trademark licenses are granted under this Agreement, and in
+ connection with the Llama Materials, neither Meta nor Licensee may use any name
+ or mark owned by or associated with the other or any of its affiliates, except as
+ required for reasonable and customary use in describing and redistributing the
+ Llama Materials.
+
+ b. Subject to Meta's ownership of Llama Materials and derivatives made by or
+ for Meta, with respect to any derivative works and modifications of the Llama
+ Materials that are made by you, as between you and Meta, you are and will be the
+ owner of such derivative works and modifications.
+
+ c. If you institute litigation or other proceedings against Meta or any entity
+ (including a cross-claim or counterclaim in a lawsuit) alleging that the Llama
+ Materials or Llama 2 outputs or results, or any portion of any of the foregoing,
+ constitutes infringement of intellectual property or other rights owned or licensable
+ by you, then any licenses granted to you under this Agreement shall terminate as of
+ the date such litigation or claim is filed or instituted. You will indemnify and hold
+ harmless Meta from and against any claim by any third party arising out of or related
+ to your use or distribution of the Llama Materials.
+
+ 6. Term and Termination. The term of this Agreement will commence upon your
+ acceptance of this Agreement or access to the Llama Materials and will continue in
+ full force and effect until terminated in accordance with the terms and conditions
+ herein. Meta may terminate this Agreement if you are in breach of any term or
+ condition of this Agreement. Upon termination of this Agreement, you shall delete
+ and cease use of the Llama Materials. Sections 3, 4 and 7 shall survive the
+ termination of this Agreement.
+
+ 7. Governing Law and Jurisdiction. This Agreement will be governed and
+ construed under the laws of the State of California without regard to choice of law
+ principles, and the UN Convention on Contracts for the International Sale of Goods
+ does not apply to this Agreement. The courts of California shall have exclusive
+ jurisdiction of any dispute arising out of this Agreement.
+
Notice ADDED
@@ -0,0 +1 @@
+ Llama 2 is licensed under the LLAMA 2 Community License, Copyright © Meta Platforms, Inc. All Rights Reserved.
README.md ADDED
@@ -0,0 +1,388 @@
+ ---
+ base_model: EleutherAI/llemma_7b
+ datasets:
+ - EleutherAI/proof-pile-2
+ inference: false
+ language:
+ - en
+ license: llama2
+ model_creator: EleutherAI
+ model_name: Llemma 7B
+ model_type: llama
+ prompt_template: '{prompt}
+
+ '
+ quantized_by: TheBloke
+ tags:
+ - math
+ - reasoning
+ ---
+ <!-- markdownlint-disable MD041 -->
+
+ <!-- header start -->
+ <!-- 200823 -->
+ <div style="width: auto; margin-left: auto; margin-right: auto">
+ <img src="https://i.imgur.com/EBdldam.jpg" alt="TheBlokeAI" style="width: 100%; min-width: 400px; display: block; margin: auto;">
+ </div>
+ <div style="display: flex; justify-content: space-between; width: 100%;">
+ <div style="display: flex; flex-direction: column; align-items: flex-start;">
+ <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://discord.gg/theblokeai">Chat & support: TheBloke's Discord server</a></p>
+ </div>
+ <div style="display: flex; flex-direction: column; align-items: flex-end;">
+ <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://www.patreon.com/TheBlokeAI">Want to contribute? TheBloke's Patreon page</a></p>
+ </div>
+ </div>
+ <div style="text-align:center; margin-top: 0em; margin-bottom: 0em"><p style="margin-top: 0.25em; margin-bottom: 0em;">TheBloke's LLM work is generously supported by a grant from <a href="https://a16z.com">andreessen horowitz (a16z)</a></p></div>
+ <hr style="margin-top: 1.0em; margin-bottom: 1.0em;">
+ <!-- header end -->
+
+ # Llemma 7B - AWQ
+ - Model creator: [EleutherAI](https://huggingface.co/EleutherAI)
+ - Original model: [Llemma 7B](https://huggingface.co/EleutherAI/llemma_7b)
+
+ <!-- description start -->
+ ## Description
+
+ This repo contains AWQ model files for [EleutherAI's Llemma 7B](https://huggingface.co/EleutherAI/llemma_7b).
+
+ ### About AWQ
+
+ AWQ is an efficient, accurate and blazing-fast low-bit weight quantization method, currently supporting 4-bit quantization. Compared to GPTQ, it offers faster Transformers-based inference with quality equivalent to or better than the most commonly used GPTQ settings.
+
+ It is supported by:
+
+ - [Text Generation Webui](https://github.com/oobabooga/text-generation-webui) - using Loader: AutoAWQ
+ - [vLLM](https://github.com/vllm-project/vllm) - Llama and Mistral models only
+ - [Hugging Face Text Generation Inference (TGI)](https://github.com/huggingface/text-generation-inference)
+ - [AutoAWQ](https://github.com/casper-hansen/AutoAWQ) - for use from Python code
+
+ <!-- description end -->
+ <!-- repositories-available start -->
+ ## Repositories available
+
+ * [AWQ model(s) for GPU inference.](https://huggingface.co/TheBloke/llemma_7b-AWQ)
+ * [GPTQ models for GPU inference, with multiple quantisation parameter options.](https://huggingface.co/TheBloke/llemma_7b-GPTQ)
+ * [2, 3, 4, 5, 6 and 8-bit GGUF models for CPU+GPU inference](https://huggingface.co/TheBloke/llemma_7b-GGUF)
+ * [EleutherAI's original unquantised fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/EleutherAI/llemma_7b)
+ <!-- repositories-available end -->
+
+ <!-- prompt-template start -->
+ ## Prompt template: Unknown
+
+ ```
+ {prompt}
+
+ ```
+
+ <!-- prompt-template end -->
+
+ <!-- README_AWQ.md-provided-files start -->
+ ## Provided files, and AWQ parameters
+
+ For my first release of AWQ models, I am releasing 128g models only. I will consider adding 32g as well if there is interest, and once I have done perplexity and evaluation comparisons, but at this time 32g models are still not fully tested with AutoAWQ and vLLM.
+
+ Models are released as sharded safetensors files.
+
+ | Branch | Bits | GS | AWQ Dataset | Seq Len | Size |
+ | ------ | ---- | -- | ----------- | ------- | ---- |
+ | [main](https://huggingface.co/TheBloke/llemma_7b-AWQ/tree/main) | 4 | 128 | [CamelAI Math](https://huggingface.co/datasets/andersonbcdefg/math) | 4096 | 3.89 GB |
+
+ <!-- README_AWQ.md-provided-files end -->
+
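+ To check the quantisation parameters without downloading the full model, you can read them straight from this repo's `config.json`. A small sketch using `huggingface_hub` (the printed field names follow the `quantization_config` block shipped in this repo):
+
+ ```python
+ import json
+ from huggingface_hub import hf_hub_download
+
+ # Fetch only config.json and print the AWQ quantisation settings.
+ config_path = hf_hub_download("TheBloke/llemma_7b-AWQ", "config.json")
+ with open(config_path) as f:
+     qc = json.load(f)["quantization_config"]
+ print(qc["quant_method"], qc["bits"], qc["group_size"])  # awq 4 128
+ ```
+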
+ <!-- README_AWQ.md-text-generation-webui start -->
+ ## How to easily download and use this model in [text-generation-webui](https://github.com/oobabooga/text-generation-webui)
+
+ Please make sure you're using the latest version of [text-generation-webui](https://github.com/oobabooga/text-generation-webui).
+
+ It is strongly recommended to use the text-generation-webui one-click installers unless you are sure you know how to perform a manual install.
+
+ 1. Click the **Model tab**.
+ 2. Under **Download custom model or LoRA**, enter `TheBloke/llemma_7b-AWQ`.
+ 3. Click **Download**.
+ 4. The model will start downloading. Once it's finished, it will say "Done".
+ 5. In the top left, click the refresh icon next to **Model**.
+ 6. In the **Model** dropdown, choose the model you just downloaded: `llemma_7b-AWQ`.
+ 7. Select **Loader: AutoAWQ**.
+ 8. Click **Load**; the model will load and is then ready for use.
+ 9. If you want any custom settings, set them and then click **Save settings for this model** followed by **Reload the Model** in the top right.
+ 10. Once you're ready, click the **Text Generation** tab and enter a prompt to get started!
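+
+ Alternatively, to fetch the files outside the webui, something like the following should work (a sketch assuming `huggingface_hub` 0.17.0 or later, which provides the `huggingface-cli download` command; the target directory is an example):
+
+ ```shell
+ pip3 install huggingface-hub
+ huggingface-cli download TheBloke/llemma_7b-AWQ --local-dir llemma_7b-AWQ
+ ```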
+ <!-- README_AWQ.md-text-generation-webui end -->
+
+ <!-- README_AWQ.md-use-from-vllm start -->
+ ## Multi-user inference server: vLLM
+
+ Documentation on installing and using vLLM [can be found here](https://vllm.readthedocs.io/en/latest/).
+
+ - Please ensure you are using vLLM version 0.2 or later.
+ - When using vLLM as a server, pass the `--quantization awq` parameter.
+
+ For example:
+
+ ```shell
+ python3 -m vllm.entrypoints.api_server --model TheBloke/llemma_7b-AWQ --quantization awq
+ ```
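+
+ Once the server is up, you can query it over HTTP. A minimal client sketch (this assumes the default `/generate` endpoint of `vllm.entrypoints.api_server`, listening on port 8000; adjust host and port for your deployment):
+
+ ```python
+ import requests
+
+ # Query the vLLM API server started with the command above.
+ payload = {
+     "prompt": "What is 291 - 150?",
+     "max_tokens": 64,
+     "temperature": 0.8,
+     "top_p": 0.95,
+ }
+ response = requests.post("http://localhost:8000/generate", json=payload)
+ print(response.json()["text"])
+ ```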
+
+ - When using vLLM from Python code, again set `quantization="awq"`.
+
+ For example:
+
+ ```python
+ from vllm import LLM, SamplingParams
+
+ prompts = [
+     "Tell me about AI",
+     "Write a story about llamas",
+     "What is 291 - 150?",
+     "How much wood would a woodchuck chuck if a woodchuck could chuck wood?",
+ ]
+ # A plain template string (not an f-string), filled in per prompt below.
+ prompt_template = '''{prompt}
+ '''
+
+ prompts = [prompt_template.format(prompt=prompt) for prompt in prompts]
+
+ sampling_params = SamplingParams(temperature=0.8, top_p=0.95)
+
+ llm = LLM(model="TheBloke/llemma_7b-AWQ", quantization="awq", dtype="auto")
+
+ outputs = llm.generate(prompts, sampling_params)
+
+ # Print the outputs.
+ for output in outputs:
+     prompt = output.prompt
+     generated_text = output.outputs[0].text
+     print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
+ ```
+ <!-- README_AWQ.md-use-from-vllm end -->
158
+
159
+ <!-- README_AWQ.md-use-from-tgi start -->
160
+ ## Multi-user inference server: Hugging Face Text Generation Inference (TGI)
161
+
162
+ Use TGI version 1.1.0 or later. The official Docker container is: `ghcr.io/huggingface/text-generation-inference:1.1.0`
163
+
164
+ Example Docker parameters:
165
+
166
+ ```shell
167
+ --model-id TheBloke/llemma_7b-AWQ --port 3000 --quantize awq --max-input-length 3696 --max-total-tokens 4096 --max-batch-prefill-tokens 4096
168
+ ```
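+
+ Put together, a full `docker run` invocation might look like the following sketch (the GPU flags, shared-memory size, port mapping and volume path are assumptions to adapt to your environment):
+
+ ```shell
+ docker run --gpus all --shm-size 1g -p 3000:3000 -v $PWD/data:/data \
+     ghcr.io/huggingface/text-generation-inference:1.1.0 \
+     --model-id TheBloke/llemma_7b-AWQ --port 3000 --quantize awq \
+     --max-input-length 3696 --max-total-tokens 4096 --max-batch-prefill-tokens 4096
+ ```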
+
+ Example Python code for interfacing with TGI (requires [huggingface-hub](https://github.com/huggingface/huggingface_hub) 0.17.0 or later):
+
+ ```shell
+ pip3 install huggingface-hub
+ ```
+
+ ```python
+ from huggingface_hub import InferenceClient
+
+ endpoint_url = "https://your-endpoint-url-here"
+
+ prompt = "Tell me about AI"
+ prompt_template = f'''{prompt}
+ '''
+
+ client = InferenceClient(endpoint_url)
+ response = client.text_generation(prompt_template,
+                                   max_new_tokens=128,
+                                   do_sample=True,
+                                   temperature=0.7,
+                                   top_p=0.95,
+                                   top_k=40,
+                                   repetition_penalty=1.1)
+
+ print("Model output: ", response)
+ ```
+ <!-- README_AWQ.md-use-from-tgi end -->
+
+ <!-- README_AWQ.md-use-from-python start -->
+ ## Inference from Python code using AutoAWQ
+
+ ### Install the AutoAWQ package
+
+ Requires: [AutoAWQ](https://github.com/casper-hansen/AutoAWQ) 0.1.1 or later.
+
+ ```shell
+ pip3 install autoawq
+ ```
+
+ If you have problems installing [AutoAWQ](https://github.com/casper-hansen/AutoAWQ) using the pre-built wheels, install it from source instead:
+
+ ```shell
+ pip3 uninstall -y autoawq
+ git clone https://github.com/casper-hansen/AutoAWQ
+ cd AutoAWQ
+ pip3 install .
+ ```
+
+ ### AutoAWQ example code
+
+ ```python
+ from awq import AutoAWQForCausalLM
+ from transformers import AutoTokenizer
+
+ model_name_or_path = "TheBloke/llemma_7b-AWQ"
+
+ # Load tokenizer
+ tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=False)
+ # Load model
+ model = AutoAWQForCausalLM.from_quantized(model_name_or_path, fuse_layers=True,
+                                           trust_remote_code=False, safetensors=True)
+
+ prompt = "Tell me about AI"
+ prompt_template = f'''{prompt}
+ '''
+
+ print("*** Running model.generate:")
+
+ token_input = tokenizer(
+     prompt_template,
+     return_tensors='pt'
+ ).input_ids.cuda()
+
+ # Generate output
+ generation_output = model.generate(
+     token_input,
+     do_sample=True,
+     temperature=0.7,
+     top_p=0.95,
+     top_k=40,
+     max_new_tokens=512
+ )
+
+ # Get the tokens from the output, decode them, print them
+ token_output = generation_output[0]
+ text_output = tokenizer.decode(token_output)
+ print("LLM output: ", text_output)
+
+ """
+ # Inference should also be possible with the transformers pipeline in the future,
+ # but as of September 25th 2023 this is not yet supported by AutoAWQ.
+ from transformers import pipeline
+
+ print("*** Pipeline:")
+ pipe = pipeline(
+     "text-generation",
+     model=model,
+     tokenizer=tokenizer,
+     max_new_tokens=512,
+     do_sample=True,
+     temperature=0.7,
+     top_p=0.95,
+     top_k=40,
+     repetition_penalty=1.1
+ )
+
+ print(pipe(prompt_template)[0]['generated_text'])
+ """
+ ```
+ <!-- README_AWQ.md-use-from-python end -->
+
+ <!-- README_AWQ.md-compatibility start -->
+ ## Compatibility
+
+ The files provided are tested to work with:
+
+ - [text-generation-webui](https://github.com/oobabooga/text-generation-webui) using `Loader: AutoAWQ`.
+ - [vLLM](https://github.com/vllm-project/vllm) version 0.2.0 and later.
+ - [Hugging Face Text Generation Inference (TGI)](https://github.com/huggingface/text-generation-inference) version 1.1.0 and later.
+ - [AutoAWQ](https://github.com/casper-hansen/AutoAWQ) version 0.1.1 and later.
+
+ <!-- README_AWQ.md-compatibility end -->
+
+ <!-- footer start -->
+ <!-- 200823 -->
+ ## Discord
+
+ For further support, and discussions on these models and AI in general, join us at:
+
+ [TheBloke AI's Discord server](https://discord.gg/theblokeai)
+
+ ## Thanks, and how to contribute
+
+ Thanks to the [chirper.ai](https://chirper.ai) team!
+
+ Thanks to Clay from [gpus.llm-utils.org](https://gpus.llm-utils.org)!
+
+ I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine tuning/training.
+
+ If you're able and willing to contribute, it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects.
+
+ Donators will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits.
+
+ * Patreon: https://patreon.com/TheBlokeAI
+ * Ko-Fi: https://ko-fi.com/TheBlokeAI
+
+ **Special thanks to**: Aemon Algiz.
+
+ **Patreon special mentions**: Pierre Kircher, Stanislav Ovsiannikov, Michael Levine, Eugene Pentland, Andrey, 준교 김, Randy H, Fred von Graf, Artur Olbinski, Caitlyn Gatomon, terasurfer, Jeff Scroggin, James Bentley, Vadim, Gabriel Puliatti, Harry Royden McLaughlin, Sean Connelly, Dan Guido, Edmond Seymore, Alicia Loh, subjectnull, AzureBlack, Manuel Alberto Morcote, Thomas Belote, Lone Striker, Chris Smitley, Vitor Caleffi, Johann-Peter Hartmann, Clay Pascal, biorpg, Brandon Frisco, sidney chen, transmissions 11, Pedro Madruga, jinyuan sun, Ajan Kanaga, Emad Mostaque, Trenton Dambrowitz, Jonathan Leane, Iucharbius, usrbinkat, vamX, George Stoitzev, Luke Pendergrass, theTransient, Olakabola, Swaroop Kallakuri, Cap'n Zoog, Brandon Phillips, Michael Dempsey, Nikolai Manek, danny, Matthew Berman, Gabriel Tamborski, alfie_i, Raymond Fosdick, Tom X Nguyen, Raven Klaugh, LangChain4j, Magnesian, Illia Dulskyi, David Ziegler, Mano Prime, Luis Javier Navarrete Lozano, Erik Bjäreholt, 阿明, Nathan Dryer, Alex, Rainer Wilmers, zynix, TL, Joseph William Delisle, John Villwock, Nathan LeClaire, Willem Michiel, Joguhyik, GodLy, OG, Alps Aficionado, Jeffrey Morgan, ReadyPlayerEmma, Tiffany J. Kim, Sebastain Graf, Spencer Kim, Michael Davis, webtim, Talal Aujan, knownsqashed, John Detwiler, Imad Khwaja, Deo Leter, Jerry Meng, Elijah Stavena, Rooh Singh, Pieter, SuperWojo, Alexandros Triantafyllidis, Stephen Murray, Ai Maven, ya boyyy, Enrico Ros, Ken Nordquist, Deep Realms, Nicholas, Spiking Neurons AB, Elle, Will Dee, Jack West, RoA, Luke @flexchar, Viktor Bowallius, Derek Yates, Subspace Studios, jjj, Toran Billups, Asp the Wyvern, Fen Risland, Ilya, NimbleBox.ai, Chadd, Nitin Borwankar, Emre, Mandus, Leonard Tan, Kalila, K, Trailburnt, S_X, Cory Kujawski
+
+ Thank you to all my generous patrons and donators!
+
+ And thank you again to a16z for their generous grant.
+
+ <!-- footer end -->
+
+ # Original model card: EleutherAI's Llemma 7B
+
+ <img src="llemma.png" width="400">
+
+ [ArXiv](http://arxiv.org/abs/2310.10631) | [Models](https://huggingface.co/EleutherAI/llemma_34b) | [Data](https://huggingface.co/datasets/EleutherAI/proof-pile-2) | [Code](https://github.com/EleutherAI/math-lm) | [Blog](https://blog.eleuther.ai/llemma/) | [Sample Explorer](https://llemma-demo.github.io/)
+
+ [Zhangir Azerbayev](https://zhangir-azerbayev.github.io/), [Hailey Schoelkopf](https://github.com/haileyschoelkopf), [Keiran Paster](https://keirp.com), [Marco Dos Santos](https://github.com/dsantosmarco), [Stephen McAleer](https://www.andrew.cmu.edu/user/smcaleer/), [Albert Q. Jiang](https://albertqjiang.github.io/), [Jia Deng](https://www.cs.princeton.edu/~jiadeng/), [Stella Biderman](https://www.stellabiderman.com/), [Sean Welleck](https://wellecks.com/)
+
+ **Llemma 7B** is a language model for mathematics. It was initialized with [Code Llama 7B](https://github.com/facebookresearch/codellama) weights, and trained on the [Proof-Pile-2](https://huggingface.co/datasets/EleutherAI/proof-pile-2) for 200B tokens.
+
+ This model also comes in a 34B parameter version: [Llemma 34B](https://huggingface.co/EleutherAI/llemma_34b).
+
+ ## Evaluations
+
+ Llemma models are particularly strong at chain-of-thought mathematical reasoning and at using computational tools for mathematics, such as Python and formal theorem provers.
+
+ ### Chain-of-thought Math
+ On chain-of-thought mathematics tasks, Llemma models outperform Llama 2 and Code Llama and, when controlled for model size, outperform Minerva.
+
+ | Model | Size | GSM8k | [OCW](https://openreview.net/forum?id=IFXTZERXdM7) | MMLU-STEM | [SAT](https://huggingface.co/datasets/mcaleste/sat_multiple_choice_math_may_23) | MATH |
+ |------------|------|--------|-------|-----------|-------|-------|
+ | Llama 2 | 7B | 11.8% | 3.7% | 29.9% | 25% | 3.2% |
+ | Code Llama | 7B | 10.5% | 4.4% | 25.1% | 9.4% | 4.5% |
+ | LLEMMA | 7B | **36.4%** | **7.7%** | **37.7%** | **53.1%** | **18.0%** |
+ | Minerva | 8B | 16.2% | **7.7%** | 35.6% | - | 14.1% |
+ |------------|------|--------|-------|-----------|-------|-------|
+ | Code Llama | 34B | 29.6% | 7.0% | 40.5% | 40.6% | 12.2% |
+ | LLEMMA | 34B | **51.5%** | **11.8%** | **49.0%** | **71.9%** | **25.0%** |
+ |------------|------|--------|-------|-----------|-------|-------|
+ | Minerva | 62B | 52.4% | 12.0% | 53.9% | - | 27.6% |
+ | Minerva | 540B | 58.8% | 17.6% | 63.9% | - | 33.6% |
+
+ Performance can be improved further by using majority voting (a short sketch of the maj@k selection step follows the table):
+
+ | Model | Size | GSM8k maj@100 | OCW maj@100 | MMLU-STEM maj@16 | SAT maj@16 | MATH maj@256 |
+ |---------|------|-------------|-----------|-----------------|-----------|------------|
+ | LLEMMA | 7B | 54.0% | 14.3% | 49.9% | 78.1% | **33.5%** |
+ | Minerva | 8B | 28.4% | 12.5% | 43.4% | - | 25.4% |
+ |---------|------|-------------|-----------|-----------------|-----------|------------|
+ | LLEMMA | 34B | 69.3% | 18.4% | 59.7% | 81.3% | **43.1%** |
+ |---------|------|-------------|-----------|-----------------|-----------|------------|
+ | Minerva | 62B | 68.5% | 23.5% | 63.5% | - | 43.4% |
+ | Minerva | 540B | 78.5% | 30.8% | 75.0% | - | 50.3% |
+
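+ Here, maj@k means sampling k solutions per problem and keeping the final answer that occurs most often. A minimal, hypothetical illustration of that selection step (the sampled answers below are made up):
+
+ ```python
+ from collections import Counter
+
+ def majority_vote(answers):
+     # maj@k: given k sampled final answers to one problem, return the
+     # most frequent one (ties broken by first occurrence in the list).
+     return Counter(answers).most_common(1)[0][0]
+
+ # Hypothetical k=5 sampled answers for "What is 291 - 150?"
+ samples = ["141", "141", "140", "141", "151"]
+ print(majority_vote(samples))  # -> 141
+ ```
+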
+ ### Tool Use and Theorem Proving
+ In addition to chain-of-thought reasoning, Llemma has strong capabilities in computational mathematics tasks. For tool use and formal theorem proving evaluations, see [our paper](http://arxiv.org/abs/2310.10631).
+
+ ### Citation
+ ```
+ @misc{azerbayev2023llemma,
+       title={Llemma: An Open Language Model For Mathematics},
+       author={Zhangir Azerbayev and Hailey Schoelkopf and Keiran Paster and Marco Dos Santos and Stephen McAleer and Albert Q. Jiang and Jia Deng and Stella Biderman and Sean Welleck},
+       year={2023},
+       eprint={2310.10631},
+       archivePrefix={arXiv},
+       primaryClass={cs.CL}
+ }
+ ```
+
USE_POLICY.md ADDED
@@ -0,0 +1,50 @@
+ # Llama 2 Acceptable Use Policy
+
+ Meta is committed to promoting safe and fair use of its tools and features, including Llama 2. If you access or use Llama 2, you agree to this Acceptable Use Policy (“Policy”). The most recent copy of this policy can be found at [ai.meta.com/llama/use-policy](http://ai.meta.com/llama/use-policy).
+
+ ## Prohibited Uses
+ We want everyone to use Llama 2 safely and responsibly. You agree you will not use, or allow others to use, Llama 2 to:
+
+ 1. Violate the law or others’ rights, including to:
+     1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as:
+         1. Violence or terrorism
+         2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material
+         3. Human trafficking, exploitation, and sexual violence
+         4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials.
+         5. Sexual solicitation
+         6. Any other criminal activity
+     2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals
+     3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services
+     4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices
+     5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws
+     6. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama 2 Materials
+     7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system
+
+ 2. Engage in, promote, incite, facilitate, or assist in the planning or development of activities that present a risk of death or bodily harm to individuals, including use of Llama 2 related to the following:
+     1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State
+     2. Guns and illegal weapons (including weapon development)
+     3. Illegal drugs and regulated/controlled substances
+     4. Operation of critical infrastructure, transportation technologies, or heavy machinery
+     5. Self-harm or harm to others, including suicide, cutting, and eating disorders
+     6. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual
+
+ 3. Intentionally deceive or mislead others, including use of Llama 2 related to the following:
+     1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation
+     2. Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content
+     3. Generating, promoting, or further distributing spam
+     4. Impersonating another individual without consent, authorization, or legal right
+     5. Representing that the use of Llama 2 or outputs are human-generated
+     6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement
+ 4. Fail to appropriately disclose to end users any known dangers of your AI system
+
+ Please report any violation of this Policy, software “bug,” or other problems that could lead to a violation of this Policy through one of the following means:
+
+ * Reporting issues with the model: [github.com/facebookresearch/llama](http://github.com/facebookresearch/llama)
+ * Reporting risky content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback)
+ * Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info)
+ * Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama: [LlamaUseReport@meta.com](mailto:LlamaUseReport@meta.com)
+
added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "</s>": 2,
+   "<s>": 1,
+   "<unk>": 0
+ }
config.json ADDED
@@ -0,0 +1,35 @@
+ {
+   "_name_or_path": "/workspace/process/eleutherai_llemma_7b/source",
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "attention_bias": false,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 11008,
+   "max_position_embeddings": 4096,
+   "model_type": "llama",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 32,
+   "pad_token_id": 0,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "rope_theta": 10000.0,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float16",
+   "transformers_version": "4.34.0",
+   "use_cache": true,
+   "vocab_size": 32016,
+   "quantization_config": {
+     "quant_method": "awq",
+     "zero_point": true,
+     "group_size": 128,
+     "bits": 4,
+     "version": "gemm"
+   }
+ }
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "pad_token_id": 0,
+   "transformers_version": "4.31.0"
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f868df63967e5ddaba365a2e82a3ecc8d1537532da4682637121fa2f048e37c8
+ size 3889653656
quant_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "zero_point": true,
+   "q_group_size": 128,
+   "w_bit": 4,
+   "version": "GEMM"
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "bos_token": "<s>",
+   "eos_token": "</s>",
+   "unk_token": "<unk>"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:45ccb9c8b6b561889acea59191d66986d314e7cbd6a78abc6e49b139ca91c1e6
+ size 500058
tokenizer_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "bos_token": {
+     "__type": "AddedToken",
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "clean_up_tokenization_spaces": false,
+   "eos_token": {
+     "__type": "AddedToken",
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "legacy": null,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": null,
+   "sp_model_kwargs": {},
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": {
+     "__type": "AddedToken",
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }