morriszms commited on
Commit
41666ca
·
verified ·
1 Parent(s): 13fef60

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -33,3 +33,15 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ legml-v1.0-8b-instruct-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
37
+ legml-v1.0-8b-instruct-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
38
+ legml-v1.0-8b-instruct-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
39
+ legml-v1.0-8b-instruct-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
40
+ legml-v1.0-8b-instruct-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
41
+ legml-v1.0-8b-instruct-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
42
+ legml-v1.0-8b-instruct-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
43
+ legml-v1.0-8b-instruct-Q5_0.gguf filter=lfs diff=lfs merge=lfs -text
44
+ legml-v1.0-8b-instruct-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
45
+ legml-v1.0-8b-instruct-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
46
+ legml-v1.0-8b-instruct-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
47
+ legml-v1.0-8b-instruct-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,243 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ library_name: transformers
3
+ license: apache-2.0
4
+ base_model: legmlai/legml-v1.0-8b-instruct
5
+ tags:
6
+ - llama-factory
7
+ - full
8
+ - generated_from_trainer
9
+ - TensorBlock
10
+ - GGUF
11
+ datasets:
12
+ - legmlai/finefrench-v1
13
+ - legmlai/openhermes-fr
14
+ language:
15
+ - fr
16
+ model-index:
17
+ - name: legml-v1.0-instruct
18
+ results:
19
+ - task:
20
+ type: text-generation
21
+ name: Text Generation
22
+ dataset:
23
+ name: gpqa-fr
24
+ type: ai2_arc
25
+ config: le-leadboard/gpqa-fr
26
+ split: test
27
+ args:
28
+ num_few_shot: 25
29
+ metrics:
30
+ - type: acc
31
+ value: 14.56
32
+ name: accuracy
33
+ - task:
34
+ type: text-generation
35
+ name: Text Generation
36
+ dataset:
37
+ name: IFEval-fr
38
+ type: le-leadboard/IFEval-fr
39
+ split: validation
40
+ args:
41
+ num_few_shot: 10
42
+ metrics:
43
+ - type: acc
44
+ value: 13.55
45
+ name: accuracy
46
+ - task:
47
+ type: text-generation
48
+ name: Text Generation
49
+ dataset:
50
+ name: MMMLU-fr
51
+ type: le-leadboard/MMMLU-fr
52
+ config: all
53
+ split: test
54
+ args:
55
+ num_few_shot: 5
56
+ metrics:
57
+ - type: acc
58
+ value: 64.57
59
+ name: accuracy
60
+ - task:
61
+ type: text-generation
62
+ name: Text Generation
63
+ dataset:
64
+ name: bbh-fr
65
+ type: le-leadboard/bbh-fr
66
+ config: multiple_choice
67
+ split: validation
68
+ args:
69
+ num_few_shot: 0
70
+ metrics:
71
+ - type: acc
72
+ value: 38.71
73
+ name: accuracy
74
+ - task:
75
+ type: text-generation
76
+ name: Text Generation
77
+ dataset:
78
+ name: musr-fr
79
+ type: le-leadboard/musr-fr
80
+ config: le-leadboard/musr-fr
81
+ split: validation
82
+ args:
83
+ num_few_shot: 5
84
+ metrics:
85
+ - type: acc
86
+ value: 4.41
87
+ name: accuracy
88
+ - task:
89
+ type: text-generation
90
+ name: Text Generation
91
+ dataset:
92
+ name: MATH_LVL5_fr
93
+ type: le-leadboard/MATH_LVL5_fr
94
+ config: le-leadboard/MATH_LVL5_fr
95
+ split: test
96
+ args:
97
+ num_few_shot: 5
98
+ metrics:
99
+ - type: acc
100
+ value: 34.44
101
+ name: accuracy
102
+ ---
103
+
104
+ <div style="width: auto; margin-left: auto; margin-right: auto">
105
+ <img src="https://i.imgur.com/jC7kdl8.jpeg" alt="TensorBlock" style="width: 100%; min-width: 400px; display: block; margin: auto;">
106
+ </div>
107
+
108
+ [![Website](https://img.shields.io/badge/Website-tensorblock.co-blue?logo=google-chrome&logoColor=white)](https://tensorblock.co)
109
+ [![Twitter](https://img.shields.io/twitter/follow/tensorblock_aoi?style=social)](https://twitter.com/tensorblock_aoi)
110
+ [![Discord](https://img.shields.io/badge/Discord-Join%20Us-5865F2?logo=discord&logoColor=white)](https://discord.gg/Ej5NmeHFf2)
111
+ [![GitHub](https://img.shields.io/badge/GitHub-TensorBlock-black?logo=github&logoColor=white)](https://github.com/TensorBlock)
112
+ [![Telegram](https://img.shields.io/badge/Telegram-Group-blue?logo=telegram)](https://t.me/TensorBlock)
113
+
114
+
115
+ ## legmlai/legml-v1.0-8b-instruct - GGUF
116
+
117
+ <div style="text-align: left; margin: 20px 0;">
118
+ <a href="https://discord.com/invite/Ej5NmeHFf2" style="display: inline-block; padding: 10px 20px; background-color: #5865F2; color: white; text-decoration: none; border-radius: 5px; font-weight: bold;">
119
+ Join our Discord to learn more about what we're building ↗
120
+ </a>
121
+ </div>
122
+
123
+ This repo contains GGUF format model files for [legmlai/legml-v1.0-8b-instruct](https://huggingface.co/legmlai/legml-v1.0-8b-instruct).
124
+
125
+ The files were quantized using machines provided by [TensorBlock](https://tensorblock.co/), and they are compatible with llama.cpp as of [commit b5753](https://github.com/ggml-org/llama.cpp/commit/73e53dc834c0a2336cd104473af6897197b96277).
126
+
127
+ ## Our projects
128
+ <table border="1" cellspacing="0" cellpadding="10">
129
+ <tr>
130
+ <th colspan="2" style="font-size: 25px;">Forge</th>
131
+ </tr>
132
+ <tr>
133
+ <th colspan="2">
134
+ <img src="https://imgur.com/faI5UKh.jpeg" alt="Forge Project" width="900"/>
135
+ </th>
136
+ </tr>
137
+ <tr>
138
+ <th colspan="2">An OpenAI-compatible multi-provider routing layer.</th>
139
+ </tr>
140
+ <tr>
141
+ <th colspan="2">
142
+ <a href="https://github.com/TensorBlock/forge" target="_blank" style="
143
+ display: inline-block;
144
+ padding: 8px 16px;
145
+ background-color: #FF7F50;
146
+ color: white;
147
+ text-decoration: none;
148
+ border-radius: 6px;
149
+ font-weight: bold;
150
+ font-family: sans-serif;
151
+ ">🚀 Try it now! 🚀</a>
152
+ </th>
153
+ </tr>
154
+
155
+ <tr>
156
+ <th style="font-size: 25px;">Awesome MCP Servers</th>
157
+ <th style="font-size: 25px;">TensorBlock Studio</th>
158
+ </tr>
159
+ <tr>
160
+ <th><img src="https://imgur.com/2Xov7B7.jpeg" alt="MCP Servers" width="450"/></th>
161
+ <th><img src="https://imgur.com/pJcmF5u.jpeg" alt="Studio" width="450"/></th>
162
+ </tr>
163
+ <tr>
164
+ <th>A comprehensive collection of Model Context Protocol (MCP) servers.</th>
165
+ <th>A lightweight, open, and extensible multi-LLM interaction studio.</th>
166
+ </tr>
167
+ <tr>
168
+ <th>
169
+ <a href="https://github.com/TensorBlock/awesome-mcp-servers" target="_blank" style="
170
+ display: inline-block;
171
+ padding: 8px 16px;
172
+ background-color: #FF7F50;
173
+ color: white;
174
+ text-decoration: none;
175
+ border-radius: 6px;
176
+ font-weight: bold;
177
+ font-family: sans-serif;
178
+ ">👀 See what we built 👀</a>
179
+ </th>
180
+ <th>
181
+ <a href="https://github.com/TensorBlock/TensorBlock-Studio" target="_blank" style="
182
+ display: inline-block;
183
+ padding: 8px 16px;
184
+ background-color: #FF7F50;
185
+ color: white;
186
+ text-decoration: none;
187
+ border-radius: 6px;
188
+ font-weight: bold;
189
+ font-family: sans-serif;
190
+ ">👀 See what we built 👀</a>
191
+ </th>
192
+ </tr>
193
+ </table>
194
+
195
+ ## Prompt template
196
+
197
+ ```
198
+ <|im_start|>system
199
+ {system_prompt}<|im_end|>
200
+ <|im_start|>user
201
+ {prompt}<|im_end|>
202
+ <|im_start|>assistant
203
+ ```
204
+
205
+ ## Model file specification
206
+
207
+ | Filename | Quant type | File Size | Description |
208
+ | -------- | ---------- | --------- | ----------- |
209
+ | [legml-v1.0-8b-instruct-Q2_K.gguf](https://huggingface.co/tensorblock/legmlai_legml-v1.0-8b-instruct-GGUF/blob/main/legml-v1.0-8b-instruct-Q2_K.gguf) | Q2_K | 3.282 GB | smallest, significant quality loss - not recommended for most purposes |
210
+ | [legml-v1.0-8b-instruct-Q3_K_S.gguf](https://huggingface.co/tensorblock/legmlai_legml-v1.0-8b-instruct-GGUF/blob/main/legml-v1.0-8b-instruct-Q3_K_S.gguf) | Q3_K_S | 3.770 GB | very small, high quality loss |
211
+ | [legml-v1.0-8b-instruct-Q3_K_M.gguf](https://huggingface.co/tensorblock/legmlai_legml-v1.0-8b-instruct-GGUF/blob/main/legml-v1.0-8b-instruct-Q3_K_M.gguf) | Q3_K_M | 4.124 GB | very small, high quality loss |
212
+ | [legml-v1.0-8b-instruct-Q3_K_L.gguf](https://huggingface.co/tensorblock/legmlai_legml-v1.0-8b-instruct-GGUF/blob/main/legml-v1.0-8b-instruct-Q3_K_L.gguf) | Q3_K_L | 4.431 GB | small, substantial quality loss |
213
+ | [legml-v1.0-8b-instruct-Q4_0.gguf](https://huggingface.co/tensorblock/legmlai_legml-v1.0-8b-instruct-GGUF/blob/main/legml-v1.0-8b-instruct-Q4_0.gguf) | Q4_0 | 4.775 GB | legacy; small, very high quality loss - prefer using Q3_K_M |
214
+ | [legml-v1.0-8b-instruct-Q4_K_S.gguf](https://huggingface.co/tensorblock/legmlai_legml-v1.0-8b-instruct-GGUF/blob/main/legml-v1.0-8b-instruct-Q4_K_S.gguf) | Q4_K_S | 4.802 GB | small, greater quality loss |
215
+ | [legml-v1.0-8b-instruct-Q4_K_M.gguf](https://huggingface.co/tensorblock/legmlai_legml-v1.0-8b-instruct-GGUF/blob/main/legml-v1.0-8b-instruct-Q4_K_M.gguf) | Q4_K_M | 5.028 GB | medium, balanced quality - recommended |
216
+ | [legml-v1.0-8b-instruct-Q5_0.gguf](https://huggingface.co/tensorblock/legmlai_legml-v1.0-8b-instruct-GGUF/blob/main/legml-v1.0-8b-instruct-Q5_0.gguf) | Q5_0 | 5.721 GB | legacy; medium, balanced quality - prefer using Q4_K_M |
217
+ | [legml-v1.0-8b-instruct-Q5_K_S.gguf](https://huggingface.co/tensorblock/legmlai_legml-v1.0-8b-instruct-GGUF/blob/main/legml-v1.0-8b-instruct-Q5_K_S.gguf) | Q5_K_S | 5.721 GB | large, low quality loss - recommended |
218
+ | [legml-v1.0-8b-instruct-Q5_K_M.gguf](https://huggingface.co/tensorblock/legmlai_legml-v1.0-8b-instruct-GGUF/blob/main/legml-v1.0-8b-instruct-Q5_K_M.gguf) | Q5_K_M | 5.851 GB | large, very low quality loss - recommended |
219
+ | [legml-v1.0-8b-instruct-Q6_K.gguf](https://huggingface.co/tensorblock/legmlai_legml-v1.0-8b-instruct-GGUF/blob/main/legml-v1.0-8b-instruct-Q6_K.gguf) | Q6_K | 6.726 GB | very large, extremely low quality loss |
220
+ | [legml-v1.0-8b-instruct-Q8_0.gguf](https://huggingface.co/tensorblock/legmlai_legml-v1.0-8b-instruct-GGUF/blob/main/legml-v1.0-8b-instruct-Q8_0.gguf) | Q8_0 | 8.710 GB | very large, extremely low quality loss - not recommended |
221
+
222
+
223
+ ## Downloading instruction
224
+
225
+ ### Command line
226
+
227
+ First, install the Hugging Face client:
228
+
229
+ ```shell
230
+ pip install -U "huggingface_hub[cli]"
231
+ ```
232
+
233
+ Then, download the individual model file to a local directory:
234
+
235
+ ```shell
236
+ huggingface-cli download tensorblock/legmlai_legml-v1.0-8b-instruct-GGUF --include "legml-v1.0-8b-instruct-Q2_K.gguf" --local-dir MY_LOCAL_DIR
237
+ ```
238
+
239
+ If you want to download multiple model files matching a pattern (e.g., `*Q4_K*gguf`), you can try:
240
+
241
+ ```shell
242
+ huggingface-cli download tensorblock/legmlai_legml-v1.0-8b-instruct-GGUF --local-dir MY_LOCAL_DIR --local-dir-use-symlinks False --include='*Q4_K*gguf'
243
+ ```
legml-v1.0-8b-instruct-Q2_K.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:132216fc8fdb5c5dcee77da72dc6afaaa88ad5fd8b06c8eed534fdeee2f5b4ad
3
+ size 3281729216
legml-v1.0-8b-instruct-Q3_K_L.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4fa233fa328f905c65533c17699a6e29f0abc0444a2f46db6537658553ea7f54
3
+ size 4431390400
legml-v1.0-8b-instruct-Q3_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:29be44814dc521969470f5df9bee0b393f618c47ddb862642cff71cf3f14f3a8
3
+ size 4124157632
legml-v1.0-8b-instruct-Q3_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5a3f867fa06ff8e33073df53078d7370aac61712dcb56f7c784ac97d163fb706
3
+ size 3769607872
legml-v1.0-8b-instruct-Q4_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9a4d2dc5914363e885565e016398604a3f09f4b3a0ab455845fd498bebb54436
3
+ size 4774745792
legml-v1.0-8b-instruct-Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:33c2da58fa4dacc18616a31d9466c753e0d88f566fb53b9950e4941ca0521dd2
3
+ size 5027780288
legml-v1.0-8b-instruct-Q4_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d7d0b6a199e494fa01d9a06c5ee51c97f31a83c1b2fd69d71a76601aad134c09
3
+ size 4802008768
legml-v1.0-8b-instruct-Q5_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:598928cb8ba4ed98c8d63e971bcc3395d6dc6f72038f1d0f9d912406b06e27c0
3
+ size 5720757952
legml-v1.0-8b-instruct-Q5_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ce315c2e33df32020e38c20e5ac5165568cfc04e4aa6bd858e1f22a308a7bf90
3
+ size 5851109056
legml-v1.0-8b-instruct-Q5_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2df75c46ea0ac8b108ed306c846ec89dd7f099f11208aa94b6446a805a293fdd
3
+ size 5720757952
legml-v1.0-8b-instruct-Q6_K.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5d59158d4f1cc86c18f47c7aaa781cf9c8fa0fcdd32c7129d16b4486ee02a84a
3
+ size 6725895872
legml-v1.0-8b-instruct-Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:46b9c6fcb3e161503e9a518007453c4f77b03e3ca2362141dcde0a0b345e4d13
3
+ size 8709514944