awni committed on
Commit
629a367
·
verified ·
1 Parent(s): d96d3d0

Add files using upload-large-folder tool

Browse files
Files changed (4) hide show
  1. README.md +1 -1
  2. config.json +2 -2
  3. model.safetensors +1 -1
  4. tokenizer_config.json +2 -1
README.md CHANGED
@@ -15,7 +15,7 @@ pipeline_tag: text-generation
15
 
16
  This model [mlx-community/Falcon-H1-0.5B-Instruct-bf16](https://huggingface.co/mlx-community/Falcon-H1-0.5B-Instruct-bf16) was
17
  converted to MLX format from [tiiuae/Falcon-H1-0.5B-Instruct](https://huggingface.co/tiiuae/Falcon-H1-0.5B-Instruct)
18
- using mlx-lm version **0.25.2**.
19
 
20
  ## Use with mlx
21
 
 
15
 
16
  This model [mlx-community/Falcon-H1-0.5B-Instruct-bf16](https://huggingface.co/mlx-community/Falcon-H1-0.5B-Instruct-bf16) was
17
  converted to MLX format from [tiiuae/Falcon-H1-0.5B-Instruct](https://huggingface.co/tiiuae/Falcon-H1-0.5B-Instruct)
18
+ using mlx-lm version **0.28.0**.
19
 
20
  ## Use with mlx
21
 
config.json CHANGED
@@ -9,7 +9,7 @@
9
  "attn_layer_indices": null,
10
  "bos_token_id": 1,
11
  "embedding_multiplier": 5.656854249492381,
12
- "eos_token_id": 228,
13
  "head_dim": 64,
14
  "hidden_act": "silu",
15
  "hidden_size": 1024,
@@ -30,7 +30,7 @@
30
  "mamba_proj_bias": false,
31
  "mamba_rms_norm": false,
32
  "mamba_use_mlp": true,
33
- "max_position_embeddings": 131072,
34
  "mlp_bias": false,
35
  "mlp_expansion_factor": 8,
36
  "mlp_multipliers": [
 
9
  "attn_layer_indices": null,
10
  "bos_token_id": 1,
11
  "embedding_multiplier": 5.656854249492381,
12
+ "eos_token_id": 11,
13
  "head_dim": 64,
14
  "hidden_act": "silu",
15
  "hidden_size": 1024,
 
30
  "mamba_proj_bias": false,
31
  "mamba_rms_norm": false,
32
  "mamba_use_mlp": true,
33
+ "max_position_embeddings": 16384,
34
  "mlp_bias": false,
35
  "mlp_expansion_factor": 8,
36
  "mlp_multipliers": [
model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:c32132255c1d1ba417065a744afa88ddd6d8242ec1c835fc37a2eabec4a12d01
3
  size 1042886219
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7a74d7678f422b101756ae2a50e580f8ee16234b920f2fd72941648073c781d4
3
  size 1042886219
tokenizer_config.json CHANGED
@@ -4535,6 +4535,7 @@
4535
  ">>UNUSED_511<<"
4536
  ],
4537
  "bos_token": "<|begin_of_text|>",
 
4538
  "clean_up_tokenization_spaces": true,
4539
  "eos_token": "<|end_of_text|>",
4540
  "extra_special_tokens": {},
@@ -4544,5 +4545,5 @@
4544
  ],
4545
  "model_max_length": 1000000000000000019884624838656,
4546
  "pad_token": "<pad>",
4547
- "tokenizer_class": "PreTrainedTokenizerFast"
4548
  }
 
4535
  ">>UNUSED_511<<"
4536
  ],
4537
  "bos_token": "<|begin_of_text|>",
4538
+ "chat_template": "{{bos_token}}\n{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0].role == 'system' %}\n {{- messages[0].content + '\\n\\n' }}\n {%- endif %}\n {{- \"You are a function calling AI model. You are provided with function signature within <tools> </tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions.\\n<tools>\\n\" }}\n {%- for tool in tools %}[{{- tool | tojson }}]{%- endfor %}\n {{- \"\\n</tools>\\nFor each function call, return a json object with function name and arguments within <tool_call> </tool_call> tags with the following schema:\\n<tool_call>\\n{'arguments': <args-dict>, 'name': <function-name>}\\n</tool_call>\\n\" }}\n{%- else %}\n {%- if messages[0].role == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0].content + '<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}{% for message in messages %}{%- if message.role != 'system' %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{%- endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
4539
  "clean_up_tokenization_spaces": true,
4540
  "eos_token": "<|end_of_text|>",
4541
  "extra_special_tokens": {},
 
4545
  ],
4546
  "model_max_length": 1000000000000000019884624838656,
4547
  "pad_token": "<pad>",
4548
+ "tokenizer_class": "PreTrainedTokenizer"
4549
  }