morriszms committed
Commit c7c4c6b · verified · 1 Parent(s): 928530e

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -33,3 +33,15 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ MistralTrix8x9B-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+ MistralTrix8x9B-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+ MistralTrix8x9B-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ MistralTrix8x9B-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ MistralTrix8x9B-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
+ MistralTrix8x9B-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ MistralTrix8x9B-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ MistralTrix8x9B-Q5_0.gguf filter=lfs diff=lfs merge=lfs -text
+ MistralTrix8x9B-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ MistralTrix8x9B-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ MistralTrix8x9B-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+ MistralTrix8x9B-Q8_0/MistralTrix8x9B-Q8_0-00001-of-00002.gguf filter=lfs diff=lfs merge=lfs -text
MistralTrix8x9B-Q2_K.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:832ca3e425bc122d57dd4d4226e01f1c766546d993adb49116d990f107064c75
+ size 21601221056
MistralTrix8x9B-Q3_K_L.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e3e04eb17b90555f491171d9e89d794737044d758e6f1a4d127ced2c0d64813
+ size 30170913216
MistralTrix8x9B-Q3_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:733f94792391c3235024c0b0e89e30ab685f1b0bb6d3ac63a87c8c7cbf226f55
+ size 28112558528
MistralTrix8x9B-Q3_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b1ec5b0991b1b449f0ee48890f460f50a1ce9d221af61ddcbd1513b0958bd9e2
+ size 25499507136
MistralTrix8x9B-Q4_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b425e653afc8c0edfab3bb8276b6bb4705fb223190f4a71470929386a21ca1f2
+ size 33008990656
MistralTrix8x9B-Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7499b1792f44b06b207440a921be05accc2f6284b1ab3c63e01fa42c442ecd79
+ size 35515087296
MistralTrix8x9B-Q4_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8274f3864ed0ced58d1799ddfb99b5ad0b28053e146f36c100b1e6459a83e993
+ size 33386478016
MistralTrix8x9B-Q5_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:05016841d3d0faf6ed19e88648575df02b0bdbfacb0fa7f453945f60fd1c7cfb
+ size 40239577536
MistralTrix8x9B-Q5_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:12a711321795e63723b7d28467c1a31fd603e8a9f9e70b41f15457fff5987914
+ size 41487382976
MistralTrix8x9B-Q5_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:20c148544a8fabdb4a349be16496edaa0b7662b923f24b303c1fa4195bab5802
+ size 40239577536
MistralTrix8x9B-Q6_K.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:396bbcd461717f99f53a13b7975ffb0efd627d2a9513ccb3f91cd6deccbcbc32
+ size 47922076096
MistralTrix8x9B-Q8_0/MistralTrix8x9B-Q8_0-00001-of-00002.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5bf335752ca1aac32bdaffe5856d33c8ea57446b1e3bd422d9c37f4ba9f70797
+ size 4227690496
README.md ADDED
@@ -0,0 +1,76 @@
+ ---
+ license: apache-2.0
+ base_model: Kquant03/MistralTrix8x9B
+ tags:
+ - TensorBlock
+ - GGUF
+ ---
+
+ <div style="width: auto; margin-left: auto; margin-right: auto">
+ <img src="https://i.imgur.com/jC7kdl8.jpeg" alt="TensorBlock" style="width: 100%; min-width: 400px; display: block; margin: auto;">
+ </div>
+ <div style="display: flex; justify-content: space-between; width: 100%;">
+ <div style="display: flex; flex-direction: column; align-items: flex-start;">
+ <p style="margin-top: 0.5em; margin-bottom: 0em;">
+ Feedback and support: TensorBlock's <a href="https://x.com/tensorblock_aoi">Twitter/X</a>, <a href="https://t.me/TensorBlock">Telegram Group</a> and <a href="https://x.com/tensorblock_aoi">Discord server</a>
+ </p>
+ </div>
+ </div>
+
+ ## Kquant03/MistralTrix8x9B - GGUF
+
+ This repo contains GGUF format model files for [Kquant03/MistralTrix8x9B](https://huggingface.co/Kquant03/MistralTrix8x9B).
+
+ The files were quantized using machines provided by [TensorBlock](https://tensorblock.co/), and they are compatible with llama.cpp as of [commit b4242](https://github.com/ggerganov/llama.cpp/commit/a6744e43e80f4be6398fc7733a01642c846dce1d).
+
+ <div style="text-align: left; margin: 20px 0;">
+ <a href="https://tensorblock.co/waitlist/client" style="display: inline-block; padding: 10px 20px; background-color: #007bff; color: white; text-decoration: none; border-radius: 5px; font-weight: bold;">
+ Run them on the TensorBlock client using your local machine ↗
+ </a>
+ </div>
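+
+ As a minimal, hedged sketch (the local path and prompt below are placeholders, and `llama-cli` is assumed to be built from llama.cpp at or after the commit above), one of these files can be run locally like this:
+
+ ```shell
+ # Sketch: load the Q4_K_M quant with llama.cpp's llama-cli and generate a short completion
+ ./llama-cli -m MY_LOCAL_DIR/MistralTrix8x9B-Q4_K_M.gguf -p "Hello" -n 128
+ ```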
+
+ ## Prompt template
+
+ ```
+
+ ```
+
+ ## Model file specification
+
+ | Filename | Quant type | File Size | Description |
+ | -------- | ---------- | --------- | ----------- |
+ | [MistralTrix8x9B-Q2_K.gguf](https://huggingface.co/tensorblock/MistralTrix8x9B-GGUF/blob/main/MistralTrix8x9B-Q2_K.gguf) | Q2_K | 21.601 GB | smallest, significant quality loss - not recommended for most purposes |
+ | [MistralTrix8x9B-Q3_K_S.gguf](https://huggingface.co/tensorblock/MistralTrix8x9B-GGUF/blob/main/MistralTrix8x9B-Q3_K_S.gguf) | Q3_K_S | 25.500 GB | very small, high quality loss |
+ | [MistralTrix8x9B-Q3_K_M.gguf](https://huggingface.co/tensorblock/MistralTrix8x9B-GGUF/blob/main/MistralTrix8x9B-Q3_K_M.gguf) | Q3_K_M | 28.113 GB | very small, high quality loss |
+ | [MistralTrix8x9B-Q3_K_L.gguf](https://huggingface.co/tensorblock/MistralTrix8x9B-GGUF/blob/main/MistralTrix8x9B-Q3_K_L.gguf) | Q3_K_L | 30.171 GB | small, substantial quality loss |
+ | [MistralTrix8x9B-Q4_0.gguf](https://huggingface.co/tensorblock/MistralTrix8x9B-GGUF/blob/main/MistralTrix8x9B-Q4_0.gguf) | Q4_0 | 33.009 GB | legacy; small, very high quality loss - prefer using Q3_K_M |
+ | [MistralTrix8x9B-Q4_K_S.gguf](https://huggingface.co/tensorblock/MistralTrix8x9B-GGUF/blob/main/MistralTrix8x9B-Q4_K_S.gguf) | Q4_K_S | 33.386 GB | small, greater quality loss |
+ | [MistralTrix8x9B-Q4_K_M.gguf](https://huggingface.co/tensorblock/MistralTrix8x9B-GGUF/blob/main/MistralTrix8x9B-Q4_K_M.gguf) | Q4_K_M | 35.515 GB | medium, balanced quality - recommended |
+ | [MistralTrix8x9B-Q5_0.gguf](https://huggingface.co/tensorblock/MistralTrix8x9B-GGUF/blob/main/MistralTrix8x9B-Q5_0.gguf) | Q5_0 | 40.240 GB | legacy; medium, balanced quality - prefer using Q4_K_M |
+ | [MistralTrix8x9B-Q5_K_S.gguf](https://huggingface.co/tensorblock/MistralTrix8x9B-GGUF/blob/main/MistralTrix8x9B-Q5_K_S.gguf) | Q5_K_S | 40.240 GB | large, low quality loss - recommended |
+ | [MistralTrix8x9B-Q5_K_M.gguf](https://huggingface.co/tensorblock/MistralTrix8x9B-GGUF/blob/main/MistralTrix8x9B-Q5_K_M.gguf) | Q5_K_M | 41.487 GB | large, very low quality loss - recommended |
+ | [MistralTrix8x9B-Q6_K.gguf](https://huggingface.co/tensorblock/MistralTrix8x9B-GGUF/blob/main/MistralTrix8x9B-Q6_K.gguf) | Q6_K | 47.922 GB | very large, extremely low quality loss |
+ | [MistralTrix8x9B-Q8_0](https://huggingface.co/tensorblock/MistralTrix8x9B-GGUF/blob/main/MistralTrix8x9B-Q8_0) | Q8_0 | 4.228 GB | very large, extremely low quality loss - not recommended |
+
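+ Note that the Q8_0 quant is stored as a split GGUF under `MistralTrix8x9B-Q8_0/` (the repo contains `MistralTrix8x9B-Q8_0-00001-of-00002.gguf`), so the size listed above covers only the first shard. Recent llama.cpp builds can load a split GGUF when pointed at the first shard; alternatively, the shards can be merged with `llama-gguf-split`. A hypothetical sketch, assuming both shards sit in the same directory:
+
+ ```shell
+ # Sketch: merge the Q8_0 shards into a single GGUF file (paths are placeholders)
+ ./llama-gguf-split --merge MistralTrix8x9B-Q8_0/MistralTrix8x9B-Q8_0-00001-of-00002.gguf MistralTrix8x9B-Q8_0.gguf
+ ```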
+
+ ## Downloading instructions
+
+ ### Command line
+
+ First, install the Hugging Face CLI:
+
+ ```shell
+ pip install -U "huggingface_hub[cli]"
+ ```
+
+ Then, download an individual model file to a local directory:
+
+ ```shell
+ huggingface-cli download tensorblock/MistralTrix8x9B-GGUF --include "MistralTrix8x9B-Q2_K.gguf" --local-dir MY_LOCAL_DIR
+ ```
+
+ If you want to download multiple model files matching a pattern (e.g., `*Q4_K*gguf`), you can try:
+
+ ```shell
+ huggingface-cli download tensorblock/MistralTrix8x9B-GGUF --local-dir MY_LOCAL_DIR --local-dir-use-symlinks False --include='*Q4_K*gguf'
+ ```
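+
+ Since the Q8_0 quant is stored as split shards inside the `MistralTrix8x9B-Q8_0/` subdirectory, a folder-level pattern can be used to fetch all of its parts (a hypothetical example, reusing the placeholder directory above):
+
+ ```shell
+ huggingface-cli download tensorblock/MistralTrix8x9B-GGUF --local-dir MY_LOCAL_DIR --include "MistralTrix8x9B-Q8_0/*"
+ ```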