prince-canuma committed on
Commit
b8a475a
·
verified ·
1 Parent(s): ec9f0fa

Upload folder using huggingface_hub (#1)

Browse files

- 565a36634aa949edfeb0789b0d633da167cfe2feb36333a5f052af64a99cecfd (26166aa0e40258f470a2fb8c84871b8f9599c464)
- 35050515987dd7a869c0f1d461b1bc1df1d4a9411b7dea185d336cc7c4e26ccb (88ec39379a90249f9141f2f0b1bf5d455ec1ad99)
- 89468ea9164f75c7d2b03acf3720c9b33340c773552feb1d3358365423b8a430 (477ffdb2fab8513bebc5d9344ba86f257a085454)
- ae1b03fdf0ab5d8bb6f2cca94a16a73ededcae8310c5d961bebfcf47a5e49ca2 (4160344a3748ffbb875fbc8e2033c0493ac9ae5b)
- 865937f7ae4f431ca856433b3248b834ef87bfb76a0d500d007ba110dfafdbe7 (0d3b9a89b654d1ce7f23cef78c419e69a40c0081)
- d8450cc2f8fae0dfce550dbb939b7f3ca6cf8ac248db1dce73baf0b300a1c2b0 (ccfa19c2b1897c9b7660ad3225307537a10694e7)
- c6225d68c74f99d1f9a01532e8c3bd17185ae899101d7ce4bffa3f7c31d038dd (54c5af2d1e59e666aed41c215c9d91367aeed6da)
- 779d014b40a98282d5e8ad8342a31ec48ce97b6ad87db3904187a12b7bf5802e (013827bc3f8a335f110994eeefcd362c026f4ac6)
- 0496ebe6f4b9aeb324988e8a97871a30904e6cb8b37989743cf9c0d61b5e810d (11a0d3a7e99b10e37d23ffaf7cc00425cd05eae1)

README.md ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ language:
3
+ - en
4
+ license: other
5
+ tags:
6
+ - chat
7
+ - mlx
8
+ license_name: tongyi-qianwen
9
+ license_link: https://huggingface.co/Qwen/Qwen2-Math-72B-Instruct/blob/main/LICENSE
10
+ pipeline_tag: text-generation
11
+ ---
12
+
13
+ # mlx-community/Qwen2-Math-72B-Instruct-4bit
14
+
15
+ The Model [mlx-community/Qwen2-Math-72B-Instruct-4bit](https://huggingface.co/mlx-community/Qwen2-Math-72B-Instruct-4bit) was converted to MLX format from [Qwen/Qwen2-Math-72B-Instruct](https://huggingface.co/Qwen/Qwen2-Math-72B-Instruct) using mlx-lm version **0.16.1**.
16
+
17
+ ## Use with mlx
18
+
19
+ ```bash
20
+ pip install mlx-lm
21
+ ```
22
+
23
+ ```python
24
+ from mlx_lm import load, generate
25
+
26
+ model, tokenizer = load("mlx-community/Qwen2-Math-72B-Instruct-4bit")
27
+ response = generate(model, tokenizer, prompt="hello", verbose=True)
28
+ ```
added_tokens.json ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ {
2
+ "<|endoftext|>": 151643,
3
+ "<|im_end|>": 151645,
4
+ "<|im_start|>": 151644
5
+ }
config.json ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "Qwen2ForCausalLM"
4
+ ],
5
+ "attention_dropout": 0.0,
6
+ "bos_token_id": 151643,
7
+ "eos_token_id": 151645,
8
+ "hidden_act": "silu",
9
+ "hidden_size": 8192,
10
+ "initializer_range": 0.02,
11
+ "intermediate_size": 29568,
12
+ "max_position_embeddings": 4096,
13
+ "max_window_layers": 70,
14
+ "model_type": "qwen2",
15
+ "num_attention_heads": 64,
16
+ "num_hidden_layers": 80,
17
+ "num_key_value_heads": 8,
18
+ "quantization": {
19
+ "group_size": 64,
20
+ "bits": 4
21
+ },
22
+ "rms_norm_eps": 1e-06,
23
+ "rope_theta": 10000.0,
24
+ "sliding_window": 32768,
25
+ "tie_word_embeddings": false,
26
+ "torch_dtype": "bfloat16",
27
+ "transformers_version": "4.43.1",
28
+ "use_cache": true,
29
+ "use_mrope": false,
30
+ "use_sliding_window": false,
31
+ "vocab_size": 152064
32
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f60eae8d0f3acfdba9ff2cdf1c21e9ca3d7f5af01c179d87b6125ebbb4527278
3
+ size 5365567671
model-00002-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0fb0179efda863f7086909e0a5534167a981400d86e539e2e7e66262bce385ad
3
+ size 5294878244
model-00003-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2600539e2407dcf1b640463e235885fcf5af2151ebf55ee64a2255691c5d8d8d
3
+ size 5346171127
model-00004-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b1a20dd64a161020d1fdd6b00ec17c05e85c244316bc795b74be59be417e6778
3
+ size 5294845211
model-00005-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f0914602c0b3d6fb0dad768528226c2c2d51a0ba02ac67f5a5130380320f1ddd
3
+ size 5294878277
model-00006-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:761ddd11f2005b02cbdab282efbef7cf4c32cde45350b5f931be9347b8923085
3
+ size 5294878236
model-00007-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3cd18a5b9186d6fb8add3ea1bdb64566104f4327713caded7ef17856c5f1f657
3
+ size 5346171153
model-00008-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0f9345e1b263d384a0e1804938759300b399f7a8ca59f5f3308c60f99c256758
3
+ size 3663161114
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|im_start|>",
4
+ "<|im_end|>"
5
+ ],
6
+ "eos_token": {
7
+ "content": "<|im_end|>",
8
+ "lstrip": false,
9
+ "normalized": false,
10
+ "rstrip": false,
11
+ "single_word": false
12
+ },
13
+ "pad_token": {
14
+ "content": "<|endoftext|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false
19
+ }
20
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "added_tokens_decoder": {
4
+ "151643": {
5
+ "content": "<|endoftext|>",
6
+ "lstrip": false,
7
+ "normalized": false,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": true
11
+ },
12
+ "151644": {
13
+ "content": "<|im_start|>",
14
+ "lstrip": false,
15
+ "normalized": false,
16
+ "rstrip": false,
17
+ "single_word": false,
18
+ "special": true
19
+ },
20
+ "151645": {
21
+ "content": "<|im_end|>",
22
+ "lstrip": false,
23
+ "normalized": false,
24
+ "rstrip": false,
25
+ "single_word": false,
26
+ "special": true
27
+ }
28
+ },
29
+ "additional_special_tokens": [
30
+ "<|im_start|>",
31
+ "<|im_end|>"
32
+ ],
33
+ "bos_token": null,
34
+ "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
35
+ "clean_up_tokenization_spaces": false,
36
+ "eos_token": "<|im_end|>",
37
+ "errors": "replace",
38
+ "model_max_length": 32768,
39
+ "pad_token": "<|endoftext|>",
40
+ "split_special_tokens": false,
41
+ "tokenizer_class": "Qwen2Tokenizer",
42
+ "unk_token": null
43
+ }
vocab.json ADDED
The diff for this file is too large to render. See raw diff