Charlie81 committed
Commit ee589d3 · 1 Parent(s): d5537e3

quantization completed

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. __pycache__/quantization.cpython-312.pyc +0 -0
  2. quantized_models/aggressive/DeepSeek-R1-Distill-Qwen-1.5B__attn4_ffn8_emb8/DeepSeek-R1-Distill-Qwen-1.5B_attn4_ffn8_emb8/chat_template.jinja +1 -0
  3. quantized_models/aggressive/DeepSeek-R1-Distill-Qwen-1.5B__attn4_ffn8_emb8/DeepSeek-R1-Distill-Qwen-1.5B_attn4_ffn8_emb8/config.json +3 -0
  4. quantized_models/aggressive/DeepSeek-R1-Distill-Qwen-1.5B__attn4_ffn8_emb8/DeepSeek-R1-Distill-Qwen-1.5B_attn4_ffn8_emb8/generation_config.json +3 -0
  5. quantized_models/aggressive/DeepSeek-R1-Distill-Qwen-1.5B__attn4_ffn8_emb8/DeepSeek-R1-Distill-Qwen-1.5B_attn4_ffn8_emb8/model-00001-of-00002.safetensors +3 -0
  6. quantized_models/aggressive/DeepSeek-R1-Distill-Qwen-1.5B__attn4_ffn8_emb8/DeepSeek-R1-Distill-Qwen-1.5B_attn4_ffn8_emb8/model-00002-of-00002.safetensors +3 -0
  7. quantized_models/aggressive/DeepSeek-R1-Distill-Qwen-1.5B__attn4_ffn8_emb8/DeepSeek-R1-Distill-Qwen-1.5B_attn4_ffn8_emb8/model.safetensors.index.json +3 -0
  8. quantized_models/aggressive/DeepSeek-R1-Distill-Qwen-1.5B__attn4_ffn8_emb8/DeepSeek-R1-Distill-Qwen-1.5B_attn4_ffn8_emb8/quantization_metadata.json +3 -0
  9. quantized_models/aggressive/DeepSeek-R1-Distill-Qwen-1.5B__attn4_ffn8_emb8/DeepSeek-R1-Distill-Qwen-1.5B_attn4_ffn8_emb8/special_tokens_map.json +3 -0
  10. quantized_models/aggressive/DeepSeek-R1-Distill-Qwen-1.5B__attn4_ffn8_emb8/DeepSeek-R1-Distill-Qwen-1.5B_attn4_ffn8_emb8/tokenizer.json +3 -0
  11. quantized_models/aggressive/DeepSeek-R1-Distill-Qwen-1.5B__attn4_ffn8_emb8/DeepSeek-R1-Distill-Qwen-1.5B_attn4_ffn8_emb8/tokenizer_config.json +3 -0
  12. quantized_models/aggressive/Falcon-E-3B-Base__attn4_ffn8_emb8/Falcon-E-3B-Base_attn4_ffn8_emb8/config.json +3 -0
  13. quantized_models/aggressive/Falcon-E-3B-Base__attn4_ffn8_emb8/Falcon-E-3B-Base_attn4_ffn8_emb8/generation_config.json +3 -0
  14. quantized_models/aggressive/Falcon-E-3B-Base__attn4_ffn8_emb8/Falcon-E-3B-Base_attn4_ffn8_emb8/model.safetensors +3 -0
  15. quantized_models/aggressive/Falcon-E-3B-Base__attn4_ffn8_emb8/Falcon-E-3B-Base_attn4_ffn8_emb8/quantization_metadata.json +3 -0
  16. quantized_models/aggressive/Falcon-E-3B-Base__attn4_ffn8_emb8/Falcon-E-3B-Base_attn4_ffn8_emb8/special_tokens_map.json +3 -0
  17. quantized_models/aggressive/Falcon-E-3B-Base__attn4_ffn8_emb8/Falcon-E-3B-Base_attn4_ffn8_emb8/tokenizer.json +3 -0
  18. quantized_models/aggressive/Falcon-E-3B-Base__attn4_ffn8_emb8/Falcon-E-3B-Base_attn4_ffn8_emb8/tokenizer_config.json +3 -0
  19. quantized_models/aggressive/Phi-3-mini-128k-instruct__attn4_ffn8_emb8/Phi-3-mini-128k-instruct_attn4_ffn8_emb8/added_tokens.json +3 -0
  20. quantized_models/aggressive/Phi-3-mini-128k-instruct__attn4_ffn8_emb8/Phi-3-mini-128k-instruct_attn4_ffn8_emb8/chat_template.jinja +8 -0
  21. quantized_models/aggressive/Phi-3-mini-128k-instruct__attn4_ffn8_emb8/Phi-3-mini-128k-instruct_attn4_ffn8_emb8/config.json +3 -0
  22. quantized_models/aggressive/Phi-3-mini-128k-instruct__attn4_ffn8_emb8/Phi-3-mini-128k-instruct_attn4_ffn8_emb8/configuration_phi3.py +227 -0
  23. quantized_models/aggressive/Phi-3-mini-128k-instruct__attn4_ffn8_emb8/Phi-3-mini-128k-instruct_attn4_ffn8_emb8/generation_config.json +3 -0
  24. quantized_models/aggressive/Phi-3-mini-128k-instruct__attn4_ffn8_emb8/Phi-3-mini-128k-instruct_attn4_ffn8_emb8/model-00001-of-00004.safetensors +3 -0
  25. quantized_models/aggressive/Phi-3-mini-128k-instruct__attn4_ffn8_emb8/Phi-3-mini-128k-instruct_attn4_ffn8_emb8/model-00002-of-00004.safetensors +3 -0
  26. quantized_models/aggressive/Phi-3-mini-128k-instruct__attn4_ffn8_emb8/Phi-3-mini-128k-instruct_attn4_ffn8_emb8/model-00003-of-00004.safetensors +3 -0
  27. quantized_models/aggressive/Phi-3-mini-128k-instruct__attn4_ffn8_emb8/Phi-3-mini-128k-instruct_attn4_ffn8_emb8/model-00004-of-00004.safetensors +3 -0
  28. quantized_models/aggressive/Phi-3-mini-128k-instruct__attn4_ffn8_emb8/Phi-3-mini-128k-instruct_attn4_ffn8_emb8/model.safetensors.index.json +3 -0
  29. quantized_models/aggressive/Phi-3-mini-128k-instruct__attn4_ffn8_emb8/Phi-3-mini-128k-instruct_attn4_ffn8_emb8/quantization_metadata.json +3 -0
  30. quantized_models/aggressive/Phi-3-mini-128k-instruct__attn4_ffn8_emb8/Phi-3-mini-128k-instruct_attn4_ffn8_emb8/special_tokens_map.json +3 -0
  31. quantized_models/aggressive/Phi-3-mini-128k-instruct__attn4_ffn8_emb8/Phi-3-mini-128k-instruct_attn4_ffn8_emb8/tokenizer.json +3 -0
  32. quantized_models/aggressive/Phi-3-mini-128k-instruct__attn4_ffn8_emb8/Phi-3-mini-128k-instruct_attn4_ffn8_emb8/tokenizer.model +3 -0
  33. quantized_models/aggressive/Phi-3-mini-128k-instruct__attn4_ffn8_emb8/Phi-3-mini-128k-instruct_attn4_ffn8_emb8/tokenizer_config.json +3 -0
  34. quantized_models/model_comparison.csv +10 -0
  35. quantized_models/quantization_results.json +2 -2
  36. quantized_models/uniform_8bit/DeepSeek-R1-Distill-Qwen-1.5B__attn8_ffn8_emb8/DeepSeek-R1-Distill-Qwen-1.5B_attn8_ffn8_emb8/chat_template.jinja +1 -0
  37. quantized_models/uniform_8bit/DeepSeek-R1-Distill-Qwen-1.5B__attn8_ffn8_emb8/DeepSeek-R1-Distill-Qwen-1.5B_attn8_ffn8_emb8/config.json +3 -0
  38. quantized_models/uniform_8bit/DeepSeek-R1-Distill-Qwen-1.5B__attn8_ffn8_emb8/DeepSeek-R1-Distill-Qwen-1.5B_attn8_ffn8_emb8/generation_config.json +3 -0
  39. quantized_models/uniform_8bit/DeepSeek-R1-Distill-Qwen-1.5B__attn8_ffn8_emb8/DeepSeek-R1-Distill-Qwen-1.5B_attn8_ffn8_emb8/model-00001-of-00002.safetensors +3 -0
  40. quantized_models/uniform_8bit/DeepSeek-R1-Distill-Qwen-1.5B__attn8_ffn8_emb8/DeepSeek-R1-Distill-Qwen-1.5B_attn8_ffn8_emb8/model-00002-of-00002.safetensors +3 -0
  41. quantized_models/uniform_8bit/DeepSeek-R1-Distill-Qwen-1.5B__attn8_ffn8_emb8/DeepSeek-R1-Distill-Qwen-1.5B_attn8_ffn8_emb8/model.safetensors.index.json +3 -0
  42. quantized_models/uniform_8bit/DeepSeek-R1-Distill-Qwen-1.5B__attn8_ffn8_emb8/DeepSeek-R1-Distill-Qwen-1.5B_attn8_ffn8_emb8/quantization_metadata.json +3 -0
  43. quantized_models/uniform_8bit/DeepSeek-R1-Distill-Qwen-1.5B__attn8_ffn8_emb8/DeepSeek-R1-Distill-Qwen-1.5B_attn8_ffn8_emb8/special_tokens_map.json +3 -0
  44. quantized_models/uniform_8bit/DeepSeek-R1-Distill-Qwen-1.5B__attn8_ffn8_emb8/DeepSeek-R1-Distill-Qwen-1.5B_attn8_ffn8_emb8/tokenizer.json +3 -0
  45. quantized_models/uniform_8bit/DeepSeek-R1-Distill-Qwen-1.5B__attn8_ffn8_emb8/DeepSeek-R1-Distill-Qwen-1.5B_attn8_ffn8_emb8/tokenizer_config.json +3 -0
  46. quantized_models/uniform_8bit/Falcon-E-3B-Base__attn8_ffn8_emb8/Falcon-E-3B-Base_attn8_ffn8_emb8/config.json +3 -0
  47. quantized_models/uniform_8bit/Falcon-E-3B-Base__attn8_ffn8_emb8/Falcon-E-3B-Base_attn8_ffn8_emb8/generation_config.json +3 -0
  48. quantized_models/uniform_8bit/Falcon-E-3B-Base__attn8_ffn8_emb8/Falcon-E-3B-Base_attn8_ffn8_emb8/model.safetensors +3 -0
  49. quantized_models/uniform_8bit/Falcon-E-3B-Base__attn8_ffn8_emb8/Falcon-E-3B-Base_attn8_ffn8_emb8/quantization_metadata.json +3 -0
  50. quantized_models/uniform_8bit/Falcon-E-3B-Base__attn8_ffn8_emb8/Falcon-E-3B-Base_attn8_ffn8_emb8/special_tokens_map.json +3 -0
__pycache__/quantization.cpython-312.pyc CHANGED
Binary files a/__pycache__/quantization.cpython-312.pyc and b/__pycache__/quantization.cpython-312.pyc differ
 
quantized_models/aggressive/DeepSeek-R1-Distill-Qwen-1.5B__attn4_ffn8_emb8/DeepSeek-R1-Distill-Qwen-1.5B_attn4_ffn8_emb8/chat_template.jinja ADDED
@@ -0,0 +1 @@
+ {% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\n' + '```json' + '\n' + tool['function']['arguments'] + '\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\n' + '```json' + '\n' + tool['function']['arguments'] + '\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|><think>\n'}}{% endif %}
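This DeepSeek template folds the last system message into the prologue, strips any `</think>` reasoning prefix from prior assistant turns, and appends `<|Assistant|><think>\n` when a generation prompt is requested. A minimal sketch of exercising it through `transformers` (the local path mirrors the directory layout above and assumes the LFS objects have been pulled):

```python
from transformers import AutoTokenizer

# Hypothetical local checkout; requires `git lfs pull` so the tokenizer files
# are real JSON rather than LFS pointers.
path = (
    "quantized_models/aggressive/DeepSeek-R1-Distill-Qwen-1.5B__attn4_ffn8_emb8/"
    "DeepSeek-R1-Distill-Qwen-1.5B_attn4_ffn8_emb8"
)
tokenizer = AutoTokenizer.from_pretrained(path)

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What is 2 + 2?"},
]
# add_generation_prompt=True makes the template append '<|Assistant|><think>\n'.
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
```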
quantized_models/aggressive/DeepSeek-R1-Distill-Qwen-1.5B__attn4_ffn8_emb8/DeepSeek-R1-Distill-Qwen-1.5B_attn4_ffn8_emb8/config.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c2e1d1cb43c80a432b77714a102c6eceaa43d01de540583c789cf4e85a63b67d
+ size 1335
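Every small text file in this commit is checked in through Git LFS, so the diff shows the three-line pointer (spec version, SHA-256 object id, byte size) rather than the file contents. A minimal sketch of reading such a pointer from a checkout made before `git lfs pull`:

```python
from pathlib import Path

def parse_lfs_pointer(path: str) -> dict:
    """Parse a Git LFS pointer file into its version/oid/size fields."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        if not line:
            continue
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

# Example (path taken from the commit above):
ptr = parse_lfs_pointer(
    "quantized_models/aggressive/DeepSeek-R1-Distill-Qwen-1.5B__attn4_ffn8_emb8/"
    "DeepSeek-R1-Distill-Qwen-1.5B_attn4_ffn8_emb8/config.json"
)
print(ptr["oid"], ptr["size"])  # e.g. 'sha256:c2e1...' and '1335'
```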
quantized_models/aggressive/DeepSeek-R1-Distill-Qwen-1.5B__attn4_ffn8_emb8/DeepSeek-R1-Distill-Qwen-1.5B_attn4_ffn8_emb8/generation_config.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:275e18531c77a259689e0424ba634aa34d289bdaac4a116c4ec9ba8e921cbb67
+ size 181
quantized_models/aggressive/DeepSeek-R1-Distill-Qwen-1.5B__attn4_ffn8_emb8/DeepSeek-R1-Distill-Qwen-1.5B_attn4_ffn8_emb8/model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7363fb6e730e5ff042860d7d2a5165f67a830991fb2b1c030adf8549ab921ca9
+ size 4996670464
quantized_models/aggressive/DeepSeek-R1-Distill-Qwen-1.5B__attn4_ffn8_emb8/DeepSeek-R1-Distill-Qwen-1.5B_attn4_ffn8_emb8/model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:09bba4d6d1831800733b16c3dba46a72add42a7c84e980a0a020b4d4fc757a00
+ size 2111719976
quantized_models/aggressive/DeepSeek-R1-Distill-Qwen-1.5B__attn4_ffn8_emb8/DeepSeek-R1-Distill-Qwen-1.5B_attn4_ffn8_emb8/model.safetensors.index.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c7e56c5725b828b21936d5ec6e2ae9acec9fd04a739d1fab9824d603c23cbf71
+ size 27787
quantized_models/aggressive/DeepSeek-R1-Distill-Qwen-1.5B__attn4_ffn8_emb8/DeepSeek-R1-Distill-Qwen-1.5B_attn4_ffn8_emb8/quantization_metadata.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:79f9c07febf73102db7e3768771017d274c95112946e276924f65a23cad39030
+ size 405
quantized_models/aggressive/DeepSeek-R1-Distill-Qwen-1.5B__attn4_ffn8_emb8/DeepSeek-R1-Distill-Qwen-1.5B_attn4_ffn8_emb8/special_tokens_map.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:59cda48bbe8bab9d61ffb410e6e3c07b6d98bff73cee7c88ff8b51f95f21ab1c
+ size 485
quantized_models/aggressive/DeepSeek-R1-Distill-Qwen-1.5B__attn4_ffn8_emb8/DeepSeek-R1-Distill-Qwen-1.5B_attn4_ffn8_emb8/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+ size 11422778
quantized_models/aggressive/DeepSeek-R1-Distill-Qwen-1.5B__attn4_ffn8_emb8/DeepSeek-R1-Distill-Qwen-1.5B_attn4_ffn8_emb8/tokenizer_config.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:55edc67ee25d8e0d4b7a13036ee61cdb880b49db4398533cc6a29710d1790ade
+ size 4490
quantized_models/aggressive/Falcon-E-3B-Base__attn4_ffn8_emb8/Falcon-E-3B-Base_attn4_ffn8_emb8/config.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2a832eefda7e5937f7738a4d64287b3139f1904f64b5115b7f1b18d8d0a8eea9
+ size 939
quantized_models/aggressive/Falcon-E-3B-Base__attn4_ffn8_emb8/Falcon-E-3B-Base_attn4_ffn8_emb8/generation_config.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e7244a760e89a2d38833bf5d771b175e8c47007a74b64dcfeac7e8ac8ee8e58d
+ size 112
quantized_models/aggressive/Falcon-E-3B-Base__attn4_ffn8_emb8/Falcon-E-3B-Base_attn4_ffn8_emb8/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cef967a47fe688b47373eea27778ec3f367c7d72f83a9a499aaf80df9d21c8b8
+ size 1267270720
quantized_models/aggressive/Falcon-E-3B-Base__attn4_ffn8_emb8/Falcon-E-3B-Base_attn4_ffn8_emb8/quantization_metadata.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7d59c54a098b21b59aa5a2ce76f445bcad092c9727d5f90d5e4c88e48042ffce
+ size 379
quantized_models/aggressive/Falcon-E-3B-Base__attn4_ffn8_emb8/Falcon-E-3B-Base_attn4_ffn8_emb8/special_tokens_map.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f7f8082c06e8059eae989095356ba775ad4dcdc2e23af15eda6c74e3aee0d239
+ size 7415
quantized_models/aggressive/Falcon-E-3B-Base__attn4_ffn8_emb8/Falcon-E-3B-Base_attn4_ffn8_emb8/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:605c664925653e3fbf2f35ea063847db441ba5b7a6af04378880409c3ab311fc
+ size 2350986
quantized_models/aggressive/Falcon-E-3B-Base__attn4_ffn8_emb8/Falcon-E-3B-Base_attn4_ffn8_emb8/tokenizer_config.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9d921ca08d70115e4ad4496f1686319e557e0e9e9f6406f14a2d4c532de6a4de
+ size 99642
quantized_models/aggressive/Phi-3-mini-128k-instruct__attn4_ffn8_emb8/Phi-3-mini-128k-instruct_attn4_ffn8_emb8/added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5f5c94d30901fe9c961d776d51eadbda4454991a77b53d36e0be0571a8e72a7d
+ size 293
quantized_models/aggressive/Phi-3-mini-128k-instruct__attn4_ffn8_emb8/Phi-3-mini-128k-instruct_attn4_ffn8_emb8/chat_template.jinja ADDED
@@ -0,0 +1,8 @@
+ {% for message in messages %}{% if message['role'] == 'system' %}{{'<|system|>
+ ' + message['content'] + '<|end|>
+ '}}{% elif message['role'] == 'user' %}{{'<|user|>
+ ' + message['content'] + '<|end|>
+ '}}{% elif message['role'] == 'assistant' %}{{'<|assistant|>
+ ' + message['content'] + '<|end|>
+ '}}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>
+ ' }}{% else %}{{ eos_token }}{% endif %}
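The Phi-3 template is simpler: each turn is wrapped in `<|system|>` / `<|user|>` / `<|assistant|>` markers, each closed with `<|end|>` and a newline. A minimal sketch rendering it with plain `jinja2` (in practice `transformers` renders it in a sandboxed environment and supplies `eos_token` from the tokenizer; the value below is an assumption):

```python
from jinja2 import Template

# Template source as committed above.
template_src = open(
    "quantized_models/aggressive/Phi-3-mini-128k-instruct__attn4_ffn8_emb8/"
    "Phi-3-mini-128k-instruct_attn4_ffn8_emb8/chat_template.jinja"
).read()

prompt = Template(template_src).render(
    messages=[{"role": "user", "content": "Hello"}],
    add_generation_prompt=True,
    eos_token="<|endoftext|>",  # assumption; normally filled in by the tokenizer
)
print(prompt)  # '<|user|>\nHello<|end|>\n<|assistant|>\n'
```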
quantized_models/aggressive/Phi-3-mini-128k-instruct__attn4_ffn8_emb8/Phi-3-mini-128k-instruct_attn4_ffn8_emb8/config.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4ae80a19fd38aa0a62033a131eae7acd49801bb2c570c4dca779239063a6c6e8
+ size 3290
quantized_models/aggressive/Phi-3-mini-128k-instruct__attn4_ffn8_emb8/Phi-3-mini-128k-instruct_attn4_ffn8_emb8/configuration_phi3.py ADDED
@@ -0,0 +1,227 @@
+ # coding=utf-8
+ # Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """Phi-3 model configuration"""
+
+
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+ PHI3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+     "microsoft/Phi-3-mini-4k-instruct": "https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/resolve/main/config.json",
+     "microsoft/Phi-3-mini-128k-instruct": "https://huggingface.co/microsoft/Phi-3-mini-128k-instruct/resolve/main/config.json",
+ }
+
+
+ class Phi3Config(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`Phi3Model`]. It is used to instantiate a Phi-3
+     model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+     defaults will yield a similar configuration to that of the
+     [microsoft/Phi-3-mini-4k-instruct](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct).
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 32064):
+             Vocabulary size of the Phi-3 model. Defines the number of different tokens that can be represented by the
+             `inputs_ids` passed when calling [`Phi3Model`].
+         hidden_size (`int`, *optional*, defaults to 3072):
+             Dimension of the hidden representations.
+         intermediate_size (`int`, *optional*, defaults to 8192):
+             Dimension of the MLP representations.
+         num_hidden_layers (`int`, *optional*, defaults to 32):
+             Number of hidden layers in the Transformer decoder.
+         num_attention_heads (`int`, *optional*, defaults to 32):
+             Number of attention heads for each attention layer in the Transformer decoder.
+         num_key_value_heads (`int`, *optional*):
+             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
+             `num_key_value_heads=1` the model will use Multi Query Attention (MQA), otherwise GQA is used. When
+             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+             by meanpooling all the original heads within that group. For more details check out [this
+             paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
+             `num_attention_heads`.
+         resid_pdrop (`float`, *optional*, defaults to 0.0):
+             Dropout probability for mlp outputs.
+         embd_pdrop (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the embeddings.
+         attention_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio after computing the attention scores.
+         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+             The non-linear activation function (function or string) in the decoder.
+         max_position_embeddings (`int`, *optional*, defaults to 4096):
+             The maximum sequence length that this model might ever be used with.
+         original_max_position_embeddings (`int`, *optional*, defaults to 4096):
+             The maximum sequence length that this model was trained with. This is used to determine the size of the
+             original RoPE embeddings when using long scaling.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         rms_norm_eps (`float`, *optional*, defaults to 1e-05):
+             The epsilon value used for the RMSNorm.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+             Whether to tie weight embeddings.
+         rope_theta (`float`, *optional*, defaults to 10000.0):
+             The base period of the RoPE embeddings.
+         rope_scaling (`dict`, *optional*):
+             The scaling strategy for the RoPE embeddings. If `None`, no scaling is applied. If a dictionary, it must
+             contain the following keys: `type`, `short_factor` and `long_factor`. The `type` must be `longrope` and
+             the `short_factor` and `long_factor` must be lists of numbers with the same length as the hidden size
+             divided by the number of attention heads divided by 2.
+         bos_token_id (`int`, *optional*, defaults to 1):
+             The id of the "beginning-of-sequence" token.
+         eos_token_id (`int`, *optional*, defaults to 32000):
+             The id of the "end-of-sequence" token.
+         pad_token_id (`int`, *optional*, defaults to 32000):
+             The id of the padding token.
+         sliding_window (`int`, *optional*):
+             Sliding window attention window size. If `None`, no sliding window is applied.
+
+     Example:
+
+     ```python
+     >>> from transformers import Phi3Model, Phi3Config
+
+     >>> # Initializing a Phi-3 style configuration
+     >>> configuration = Phi3Config.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
+
+     >>> # Initializing a model from the configuration
+     >>> model = Phi3Model(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "phi3"
+     keys_to_ignore_at_inference = ["past_key_values"]
+
+     def __init__(
+         self,
+         vocab_size=32064,
+         hidden_size=3072,
+         intermediate_size=8192,
+         num_hidden_layers=32,
+         num_attention_heads=32,
+         num_key_value_heads=None,
+         resid_pdrop=0.0,
+         embd_pdrop=0.0,
+         attention_dropout=0.0,
+         hidden_act="silu",
+         max_position_embeddings=4096,
+         original_max_position_embeddings=4096,
+         initializer_range=0.02,
+         rms_norm_eps=1e-5,
+         use_cache=True,
+         tie_word_embeddings=False,
+         rope_theta=10000.0,
+         rope_scaling=None,
+         bos_token_id=1,
+         eos_token_id=32000,
+         pad_token_id=32000,
+         sliding_window=None,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+
+         if num_key_value_heads is None:
+             num_key_value_heads = num_attention_heads
+
+         self.num_key_value_heads = num_key_value_heads
+         self.resid_pdrop = resid_pdrop
+         self.embd_pdrop = embd_pdrop
+         self.attention_dropout = attention_dropout
+         self.hidden_act = hidden_act
+         self.max_position_embeddings = max_position_embeddings
+         self.original_max_position_embeddings = original_max_position_embeddings
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.use_cache = use_cache
+         self.rope_theta = rope_theta
+         self.rope_scaling = rope_scaling
+         self._rope_scaling_adjustment()
+         self._rope_scaling_validation()
+         self.sliding_window = sliding_window
+
+         super().__init__(
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             pad_token_id=pad_token_id,
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
+
+     def _rope_scaling_adjustment(self):
+         """
+         Adjust the `type` of the `rope_scaling` configuration for backward compatibility.
+         """
+         if self.rope_scaling is None:
+             return
+
+         rope_scaling_type = self.rope_scaling.get("type", None)
+
+         # For backward compatibility if a previous version used "su" or "yarn"
+         if rope_scaling_type is not None and rope_scaling_type in ["su", "yarn"]:
+             self.rope_scaling["type"] = "longrope"
+
+     def _rope_scaling_validation(self):
+         """
+         Validate the `rope_scaling` configuration.
+         """
+         if self.rope_scaling is None:
+             return
+
+         if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 3:
+             raise ValueError(
+                 "`rope_scaling` must be a dictionary with three fields, `type`, `short_factor` and `long_factor`, "
+                 f"got {self.rope_scaling}"
+             )
+         rope_scaling_type = self.rope_scaling.get("type", None)
+         rope_scaling_short_factor = self.rope_scaling.get("short_factor", None)
+         rope_scaling_long_factor = self.rope_scaling.get("long_factor", None)
+         if rope_scaling_type is None or rope_scaling_type not in ["longrope"]:
+             raise ValueError(f"`rope_scaling`'s type field must be one of ['longrope'], got {rope_scaling_type}")
+         if not (
+             isinstance(rope_scaling_short_factor, list)
+             and all(isinstance(x, (int, float)) for x in rope_scaling_short_factor)
+         ):
+             raise ValueError(
+                 f"`rope_scaling`'s short_factor field must be a list of numbers, got {rope_scaling_short_factor}"
+             )
+         if not len(rope_scaling_short_factor) == self.hidden_size // self.num_attention_heads // 2:
+             raise ValueError(
+                 f"`rope_scaling`'s short_factor field must have length {self.hidden_size // self.num_attention_heads // 2}, got {len(rope_scaling_short_factor)}"
+             )
+         if not (
+             isinstance(rope_scaling_long_factor, list)
+             and all(isinstance(x, (int, float)) for x in rope_scaling_long_factor)
+         ):
+             raise ValueError(
+                 f"`rope_scaling`'s long_factor field must be a list of numbers, got {rope_scaling_long_factor}"
+             )
+         if not len(rope_scaling_long_factor) == self.hidden_size // self.num_attention_heads // 2:
+             raise ValueError(
+                 f"`rope_scaling`'s long_factor field must have length {self.hidden_size // self.num_attention_heads // 2}, got {len(rope_scaling_long_factor)}"
+             )
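Note that the bulk of `__init__` is plain attribute assignment; the only non-trivial logic is the `longrope` adjustment and validation at the end. A minimal sketch of a `rope_scaling` dict that passes validation, assuming the module above is importable from the working directory:

```python
from configuration_phi3 import Phi3Config

# With the default hidden_size=3072 and num_attention_heads=32, both factor
# lists must have length 3072 // 32 // 2 == 48, and the dict must carry
# exactly the three keys checked in _rope_scaling_validation.
config = Phi3Config(
    rope_scaling={
        "type": "longrope",          # "su" / "yarn" are rewritten to "longrope"
        "short_factor": [1.0] * 48,
        "long_factor": [1.5] * 48,
    }
)
print(config.rope_scaling["type"])   # -> longrope
```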
quantized_models/aggressive/Phi-3-mini-128k-instruct__attn4_ffn8_emb8/Phi-3-mini-128k-instruct_attn4_ffn8_emb8/generation_config.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:18cb81296b3fd9eb9992a9b95a05842f9a5778d47c62cc67bb949f038b19b3bb
+ size 172
quantized_models/aggressive/Phi-3-mini-128k-instruct__attn4_ffn8_emb8/Phi-3-mini-128k-instruct_attn4_ffn8_emb8/model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5dd9d8b79499e31f537f01d04e44d0b149585263b7754492b8b49cd76b443649
+ size 4961852416
quantized_models/aggressive/Phi-3-mini-128k-instruct__attn4_ffn8_emb8/Phi-3-mini-128k-instruct_attn4_ffn8_emb8/model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d3186df079e7730d75a220876e3d65182d26bc4806edef9b671cc0b2cfcb95c0
+ size 4983111176
quantized_models/aggressive/Phi-3-mini-128k-instruct__attn4_ffn8_emb8/Phi-3-mini-128k-instruct_attn4_ffn8_emb8/model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:45dce7445886e82947618d373d7af8f308a9a9e1795e3498b56937d45cef0ac5
+ size 4945374704
quantized_models/aggressive/Phi-3-mini-128k-instruct__attn4_ffn8_emb8/Phi-3-mini-128k-instruct_attn4_ffn8_emb8/model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d26a29ad8bb5b28067df73b520aecf857b3cb691ff0b2007443b49756cac1c2d
+ size 394002560
quantized_models/aggressive/Phi-3-mini-128k-instruct__attn4_ffn8_emb8/Phi-3-mini-128k-instruct_attn4_ffn8_emb8/model.safetensors.index.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ab2d1bf7a32b16bde0dacd8f2a59e7c40a8e3f0d929badce9eca8d0a49908674
+ size 16368
quantized_models/aggressive/Phi-3-mini-128k-instruct__attn4_ffn8_emb8/Phi-3-mini-128k-instruct_attn4_ffn8_emb8/quantization_metadata.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:23a44614fb4110f7127a8b7bd2bafc2d41807e781dc4db92e16c13c067d92bd6
+ size 397
quantized_models/aggressive/Phi-3-mini-128k-instruct__attn4_ffn8_emb8/Phi-3-mini-128k-instruct_attn4_ffn8_emb8/special_tokens_map.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:474d699677e264edd3851559548cbbb61801ad8da6b249c81455007602313c34
+ size 569
quantized_models/aggressive/Phi-3-mini-128k-instruct__attn4_ffn8_emb8/Phi-3-mini-128k-instruct_attn4_ffn8_emb8/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3cb815b904d82b82b25dcd90edd00e71a5ee5443472ad611bcb84f1339300647
+ size 3620657
quantized_models/aggressive/Phi-3-mini-128k-instruct__attn4_ffn8_emb8/Phi-3-mini-128k-instruct_attn4_ffn8_emb8/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
quantized_models/aggressive/Phi-3-mini-128k-instruct__attn4_ffn8_emb8/Phi-3-mini-128k-instruct_attn4_ffn8_emb8/tokenizer_config.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:78f1699f672ba5c8c958fa9f69040560a0310967db88fcba80746d299738f13c
+ size 2934
quantized_models/model_comparison.csv ADDED
@@ -0,0 +1,10 @@
+ Model,Configuration,Attn (bits),FFN (bits),Emb (bits),Size (MB),Attn Layers,FFN Layers,Total Quantized
+ DeepSeek-R1-Distill-Qwen-1.5B,aggressive,4,8,8,6779.05,112,84,197
+ DeepSeek-R1-Distill-Qwen-1.5B,uniform_8bit,8,8,8,6779.05,112,84,197
+ DeepSeek-R1-Distill-Qwen-1.5B,very_aggressive,4,4,8,6779.05,112,84,197
+ Phi-3-mini-128k-instruct,aggressive,4,8,8,14576.26,64,64,129
+ Phi-3-mini-128k-instruct,uniform_8bit,8,8,8,14576.26,64,64,129
+ Phi-3-mini-128k-instruct,very_aggressive,4,4,8,14576.26,64,64,129
+ Falcon-E-3B-Base,aggressive,4,8,8,512.51,0,0,1
+ Falcon-E-3B-Base,uniform_8bit,8,8,8,512.51,0,0,1
+ Falcon-E-3B-Base,very_aggressive,4,4,8,512.51,0,0,1
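The comparison CSV holds one row per model/configuration pair; note the reported size is identical across the aggressive, uniform_8bit, and very_aggressive schemes for each model. A hedged sketch of inspecting it with pandas (the relative path assumes the repository root as working directory):

```python
import pandas as pd

df = pd.read_csv("quantized_models/model_comparison.csv")
# One column per configuration, one row per model; cell values are Size (MB).
print(df.pivot_table(index="Model", columns="Configuration", values="Size (MB)"))
print(df[["Model", "Configuration", "Total Quantized"]])
```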
quantized_models/quantization_results.json CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a40e9485b629bbbe01a3d4a9ba4b83a5f08448cfec579cbee314bd600ed84ea4
- size 5126
+ oid sha256:e62dfa76148b9d5b1e5b1d1f2bf74b546e035ecc25b73b262784587f118c1e54
+ size 3584
quantized_models/uniform_8bit/DeepSeek-R1-Distill-Qwen-1.5B__attn8_ffn8_emb8/DeepSeek-R1-Distill-Qwen-1.5B_attn8_ffn8_emb8/chat_template.jinja ADDED
@@ -0,0 +1 @@
+ {% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\n' + '```json' + '\n' + tool['function']['arguments'] + '\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\n' + '```json' + '\n' + tool['function']['arguments'] + '\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|><think>\n'}}{% endif %}
quantized_models/uniform_8bit/DeepSeek-R1-Distill-Qwen-1.5B__attn8_ffn8_emb8/DeepSeek-R1-Distill-Qwen-1.5B_attn8_ffn8_emb8/config.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c2e1d1cb43c80a432b77714a102c6eceaa43d01de540583c789cf4e85a63b67d
+ size 1335
quantized_models/uniform_8bit/DeepSeek-R1-Distill-Qwen-1.5B__attn8_ffn8_emb8/DeepSeek-R1-Distill-Qwen-1.5B_attn8_ffn8_emb8/generation_config.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:275e18531c77a259689e0424ba634aa34d289bdaac4a116c4ec9ba8e921cbb67
+ size 181
quantized_models/uniform_8bit/DeepSeek-R1-Distill-Qwen-1.5B__attn8_ffn8_emb8/DeepSeek-R1-Distill-Qwen-1.5B_attn8_ffn8_emb8/model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:07c7610a1bd72331a3e69e824be3bf8449f4213575f8f306aaa7cb57f11cf41b
+ size 4996670464
quantized_models/uniform_8bit/DeepSeek-R1-Distill-Qwen-1.5B__attn8_ffn8_emb8/DeepSeek-R1-Distill-Qwen-1.5B_attn8_ffn8_emb8/model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:33204e360fb66e0a519c0bc98ad5e279ae14c33cc2b1a4cc238b8946d4605bc2
+ size 2111719976
quantized_models/uniform_8bit/DeepSeek-R1-Distill-Qwen-1.5B__attn8_ffn8_emb8/DeepSeek-R1-Distill-Qwen-1.5B_attn8_ffn8_emb8/model.safetensors.index.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c7e56c5725b828b21936d5ec6e2ae9acec9fd04a739d1fab9824d603c23cbf71
+ size 27787
quantized_models/uniform_8bit/DeepSeek-R1-Distill-Qwen-1.5B__attn8_ffn8_emb8/DeepSeek-R1-Distill-Qwen-1.5B_attn8_ffn8_emb8/quantization_metadata.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:83562736cd80be3b8b4a8fb2e94142440cbf495c19e275c9cbce42fe489cf310
+ size 405
quantized_models/uniform_8bit/DeepSeek-R1-Distill-Qwen-1.5B__attn8_ffn8_emb8/DeepSeek-R1-Distill-Qwen-1.5B_attn8_ffn8_emb8/special_tokens_map.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:59cda48bbe8bab9d61ffb410e6e3c07b6d98bff73cee7c88ff8b51f95f21ab1c
+ size 485
quantized_models/uniform_8bit/DeepSeek-R1-Distill-Qwen-1.5B__attn8_ffn8_emb8/DeepSeek-R1-Distill-Qwen-1.5B_attn8_ffn8_emb8/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+ size 11422778
quantized_models/uniform_8bit/DeepSeek-R1-Distill-Qwen-1.5B__attn8_ffn8_emb8/DeepSeek-R1-Distill-Qwen-1.5B_attn8_ffn8_emb8/tokenizer_config.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:55edc67ee25d8e0d4b7a13036ee61cdb880b49db4398533cc6a29710d1790ade
+ size 4490
quantized_models/uniform_8bit/Falcon-E-3B-Base__attn8_ffn8_emb8/Falcon-E-3B-Base_attn8_ffn8_emb8/config.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2a832eefda7e5937f7738a4d64287b3139f1904f64b5115b7f1b18d8d0a8eea9
+ size 939
quantized_models/uniform_8bit/Falcon-E-3B-Base__attn8_ffn8_emb8/Falcon-E-3B-Base_attn8_ffn8_emb8/generation_config.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e7244a760e89a2d38833bf5d771b175e8c47007a74b64dcfeac7e8ac8ee8e58d
+ size 112
quantized_models/uniform_8bit/Falcon-E-3B-Base__attn8_ffn8_emb8/Falcon-E-3B-Base_attn8_ffn8_emb8/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cef967a47fe688b47373eea27778ec3f367c7d72f83a9a499aaf80df9d21c8b8
+ size 1267270720
quantized_models/uniform_8bit/Falcon-E-3B-Base__attn8_ffn8_emb8/Falcon-E-3B-Base_attn8_ffn8_emb8/quantization_metadata.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1262bed941b651ef6b7f0c70fdbf8f2a0feb2354cdbde668a138471dfb3d4b4b
+ size 379
quantized_models/uniform_8bit/Falcon-E-3B-Base__attn8_ffn8_emb8/Falcon-E-3B-Base_attn8_ffn8_emb8/special_tokens_map.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f7f8082c06e8059eae989095356ba775ad4dcdc2e23af15eda6c74e3aee0d239
+ size 7415