jamesdumay committed on
Commit
3a76435
·
verified ·
1 Parent(s): d36f61a

Migrate recommended models dataset to hashed entry layout

Browse files

Add index.json and hashed metadata entry paths for the recommended model catalog.

Files changed (41) hide show
  1. README.md +3 -2
  2. index.json +200 -0
  3. models/sha256-014f6b5897f35cf403936ded449f84395e5457a86756d3fc256c275cbb10c616/metadata.json +12 -0
  4. models/sha256-0302e8e8b4197e056d6bc9c962194ce93e23bfd5d4e23c0db201b7ca0f24acda/metadata.json +12 -0
  5. models/sha256-0afa2bbd49bcdec5d8de0e4e963e2ff05dcdeb77874aaa17037fc3a46298be97/metadata.json +15 -0
  6. models/sha256-124ed90efc924f62fc551abd896fa67010d7c901bf1db993c0b892f794ad4d1f/metadata.json +15 -0
  7. models/sha256-126ebe8597ded548b81baee6ca32cf1bec40b0bd8f16bfb242a3a445e6c1450c/metadata.json +16 -0
  8. models/sha256-141c18cb77bf3c8e6f0e1ed343fbb2eded986d0f9539f5931cd2cddcfbe0512b/metadata.json +12 -0
  9. models/sha256-142399797e62c91c1438748b84825409bf896bf1e006bd371857f03f1b5a1bb0/metadata.json +12 -0
  10. models/sha256-25c2acc8a85d230146810a47f1197d16adf712bf4da08ff9d652fed044a4f34d/metadata.json +12 -0
  11. models/sha256-2da8f5374c6ac42743cc97eb75c0f94ff38148f422bb5cc913871389b9cf70cd/metadata.json +12 -0
  12. models/sha256-30ce70b96bc1cbc708930855598c54daede525c5a6e548b1726e6f9f1ed40b60/metadata.json +12 -0
  13. models/sha256-317cc74e9dbc7b789d51088ce3430dafe566bd0dc6e72ab35d89f9cc90a5467e/metadata.json +29 -0
  14. models/sha256-3552e3d1b162204c88f4a5e194981156ac65141be87f9894b5b36835101979c1/metadata.json +12 -0
  15. models/sha256-38714a4ddb1c654b07df0e2bdde4acad0e5090cb54e656650f98927e2b4f3dbb/metadata.json +12 -0
  16. models/sha256-3ce9e41bcc7ac4942bd4237961306f9c6f0d2ad25ad1dca671acd65531450f7a/metadata.json +12 -0
  17. models/sha256-43da15b27fbddeb09b735ca73044097e55ea781bf9ced03cd45aedb09b20a428/metadata.json +12 -0
  18. models/sha256-530064106f7af37e49f2746a9ad5a84a59590621d441ad88d87a984a477c5cfe/metadata.json +12 -0
  19. models/sha256-60ae689aa5ccbd1387fd50d945ab13fd7936de5e0a90d5ba4358acd5a94fb44e/metadata.json +12 -0
  20. models/sha256-62449ec67d46ff80d8e87ae2f8e365af363d77ddea29c0ec1d612cdd98532992/metadata.json +12 -0
  21. models/sha256-65429236385af48afc1a32646b4c9f7f2ec596b6038b8c1df465251243454b31/metadata.json +15 -0
  22. models/sha256-654a3b00b6f00d880c15f4d6cd79d49e3c0940c6d9afb5b53feeb640a11b2612/metadata.json +12 -0
  23. models/sha256-6f94eb62fa4e5ea39ac7a9211fb5b0745039d373c715de4d074d59e3dd8601ac/metadata.json +16 -0
  24. models/sha256-74e53e5cd3017a27a921730edd97a09e41d7e32d5a7e151b18dbcd06dd63ee61/metadata.json +16 -0
  25. models/sha256-77f8a16681e07845a08e2adf70003d7743643daa8ec11f185f9b679b64252e4c/metadata.json +12 -0
  26. models/sha256-782855a0d5aef42bea734bc6a8d3e9c8b97c2d0b89ef1d77c337bc2c3b813b63/metadata.json +12 -0
  27. models/sha256-7c188c456ad2a0c9fcb17e661bf1e9c3197b8eb5ceaf08021ee060d9cd64d457/metadata.json +12 -0
  28. models/sha256-8b7b80a6b046a9b54d9c294dc01beb37ddb4e7fe06c66d4cca6d7c5b9659caf6/metadata.json +16 -0
  29. models/sha256-8e2a4b228397412bca5df002bf07f2aa24ba0f4f7c5afa4c99e1062635c975d6/metadata.json +12 -0
  30. models/sha256-9cd1d145bdc387fe992f48e08b4efebe148c943cb3a47ad346278ae5ee2d8681/metadata.json +12 -0
  31. models/sha256-9e44a1d1869f7bb5a5e639ba7afa1b6e4893e16d07176c3f60c59abc11e0bae5/metadata.json +12 -0
  32. models/sha256-a36bfb1bb182c377676973b1558376811757f5da9cd08ed0807ba968fb496b0c/metadata.json +12 -0
  33. models/sha256-adaf5e3d4b1ba78d51cf5d45f980e79ce57d068d1636e58552bbe596cf5acdef/metadata.json +12 -0
  34. models/sha256-bbedc72de1c38566d2f66e565a9e5a12038404da58b1eb952baf126f0d0a39b6/metadata.json +25 -0
  35. models/sha256-c3febba3f550e6c86ef95e079aefd50365718e213343feafe554a59b1cd3c42a/metadata.json +15 -0
  36. models/sha256-ce097731219bfb95091ea1767631e2c718b1585e3392353326fe24c8e7ec1bcb/metadata.json +12 -0
  37. models/sha256-d756c518cc3ec3c992152cd03514e5f98c0fb62880667f27716c877b3bebb44e/metadata.json +12 -0
  38. models/sha256-e49cac04ce4667accc61655e189035615c78711769aa7b8a43fdca887b366436/metadata.json +16 -0
  39. models/sha256-e72a7ff194d35e87832281741d2d4bba639d8d5913c55a3aa9e945a6fcfb7c37/metadata.json +12 -0
  40. models/sha256-f488ea7c68e65fdd7775f04d58766601ee1a7d8f571a154c25f2467bb295f369/metadata.json +16 -0
  41. models/sha256-feb131c258a0c95b5f36109f87b2d0f04462916ce5e2197f129d888c7e0b2a4a/metadata.json +12 -0
README.md CHANGED
@@ -3,6 +3,7 @@
3
  This dataset stores curated recommended model metadata for `mesh-llm`.
4
 
5
  Layout:
6
- - `models/<urlencoded-model-id>/metadata.json`
 
7
 
8
- Each entry stores the curated model `id`, display `name`, resolved download `url`, sizing, description, draft hint, MoE hints, split assets, and optional `mmproj`.
 
3
  This dataset stores curated recommended model metadata for `mesh-llm`.
4
 
5
  Layout:
6
+ - `index.json`
7
+ - `models/sha256-<model-id-hash>/metadata.json`
8
 
9
+ The canonical model `id` lives in each metadata file and in `index.json`. Storage paths are opaque and derived from `sha256(id)`.
index.json ADDED
@@ -0,0 +1,200 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "schema_version": 1,
3
+ "entries": [
4
+ {
5
+ "id": "DeepSeek-R1-Distill-70B-Q4_K_M",
6
+ "name": "DeepSeek-R1-Distill-70B-Q4_K_M",
7
+ "path": "models/sha256-7c188c456ad2a0c9fcb17e661bf1e9c3197b8eb5ceaf08021ee060d9cd64d457/metadata.json"
8
+ },
9
+ {
10
+ "id": "Llama-3.1-405B-Instruct-Q2_K",
11
+ "name": "Llama-3.1-405B-Instruct-Q2_K",
12
+ "path": "models/sha256-e72a7ff194d35e87832281741d2d4bba639d8d5913c55a3aa9e945a6fcfb7c37/metadata.json"
13
+ },
14
+ {
15
+ "id": "Llama-3.3-70B-Instruct-Q4_K_M",
16
+ "name": "Llama-3.3-70B-Instruct-Q4_K_M",
17
+ "path": "models/sha256-654a3b00b6f00d880c15f4d6cd79d49e3c0940c6d9afb5b53feeb640a11b2612/metadata.json"
18
+ },
19
+ {
20
+ "id": "Mixtral-8x22B-Instruct-Q4_K_M",
21
+ "name": "Mixtral-8x22B-Instruct-Q4_K_M",
22
+ "path": "models/sha256-74e53e5cd3017a27a921730edd97a09e41d7e32d5a7e151b18dbcd06dd63ee61/metadata.json"
23
+ },
24
+ {
25
+ "id": "Qwen/Qwen2.5-0.5B-Instruct-GGUF/qwen2.5-0.5b-instruct-q4_k_m.gguf",
26
+ "name": "Qwen2.5-0.5B-Instruct-Q4_K_M",
27
+ "path": "models/sha256-60ae689aa5ccbd1387fd50d945ab13fd7936de5e0a90d5ba4358acd5a94fb44e/metadata.json"
28
+ },
29
+ {
30
+ "id": "Qwen/Qwen2.5-3B-Instruct-GGUF/qwen2.5-3b-instruct-q4_k_m.gguf",
31
+ "name": "Qwen2.5-3B-Instruct-Q4_K_M",
32
+ "path": "models/sha256-a36bfb1bb182c377676973b1558376811757f5da9cd08ed0807ba968fb496b0c/metadata.json"
33
+ },
34
+ {
35
+ "id": "Qwen/Qwen2.5-Coder-32B-Instruct-GGUF/qwen2.5-coder-32b-instruct-q4_k_m.gguf",
36
+ "name": "Qwen2.5-Coder-32B-Instruct-Q4_K_M",
37
+ "path": "models/sha256-d756c518cc3ec3c992152cd03514e5f98c0fb62880667f27716c877b3bebb44e/metadata.json"
38
+ },
39
+ {
40
+ "id": "Qwen/Qwen2.5-Coder-7B-Instruct-GGUF/qwen2.5-coder-7b-instruct-q4_k_m.gguf",
41
+ "name": "Qwen2.5-Coder-7B-Instruct-Q4_K_M",
42
+ "path": "models/sha256-25c2acc8a85d230146810a47f1197d16adf712bf4da08ff9d652fed044a4f34d/metadata.json"
43
+ },
44
+ {
45
+ "id": "Qwen/Qwen3-Coder-Next-GGUF:Qwen3-Coder-Next-Q4_K_M",
46
+ "name": "Qwen3-Coder-Next-Q4_K_M",
47
+ "path": "models/sha256-bbedc72de1c38566d2f66e565a9e5a12038404da58b1eb952baf126f0d0a39b6/metadata.json"
48
+ },
49
+ {
50
+ "id": "Qwen2.5-72B-Instruct-Q4_K_M",
51
+ "name": "Qwen2.5-72B-Instruct-Q4_K_M",
52
+ "path": "models/sha256-2da8f5374c6ac42743cc97eb75c0f94ff38148f422bb5cc913871389b9cf70cd/metadata.json"
53
+ },
54
+ {
55
+ "id": "Qwen3-235B-A22B-Q4_K_M",
56
+ "name": "Qwen3-235B-A22B-Q4_K_M",
57
+ "path": "models/sha256-8b7b80a6b046a9b54d9c294dc01beb37ddb4e7fe06c66d4cca6d7c5b9659caf6/metadata.json"
58
+ },
59
+ {
60
+ "id": "Qwen3.5-27B-Q4_K_M",
61
+ "name": "Qwen3.5-27B-Q4_K_M",
62
+ "path": "models/sha256-0afa2bbd49bcdec5d8de0e4e963e2ff05dcdeb77874aaa17037fc3a46298be97/metadata.json"
63
+ },
64
+ {
65
+ "id": "bartowski/Hermes-2-Pro-Mistral-7B-GGUF:Q4_K_M",
66
+ "name": "Hermes-2-Pro-Mistral-7B-Q4_K_M",
67
+ "path": "models/sha256-62449ec67d46ff80d8e87ae2f8e365af363d77ddea29c0ec1d612cdd98532992/metadata.json"
68
+ },
69
+ {
70
+ "id": "bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_M",
71
+ "name": "Llama-3.2-1B-Instruct-Q4_K_M",
72
+ "path": "models/sha256-0302e8e8b4197e056d6bc9c962194ce93e23bfd5d4e23c0db201b7ca0f24acda/metadata.json"
73
+ },
74
+ {
75
+ "id": "bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M",
76
+ "name": "Llama-3.2-3B-Instruct-Q4_K_M",
77
+ "path": "models/sha256-adaf5e3d4b1ba78d51cf5d45f980e79ce57d068d1636e58552bbe596cf5acdef/metadata.json"
78
+ },
79
+ {
80
+ "id": "bartowski/Qwen2.5-14B-Instruct-GGUF:Q4_K_M",
81
+ "name": "Qwen2.5-14B-Instruct-Q4_K_M",
82
+ "path": "models/sha256-530064106f7af37e49f2746a9ad5a84a59590621d441ad88d87a984a477c5cfe/metadata.json"
83
+ },
84
+ {
85
+ "id": "bartowski/Qwen2.5-32B-Instruct-GGUF:Q4_K_M",
86
+ "name": "Qwen2.5-32B-Instruct-Q4_K_M",
87
+ "path": "models/sha256-142399797e62c91c1438748b84825409bf896bf1e006bd371857f03f1b5a1bb0/metadata.json"
88
+ },
89
+ {
90
+ "id": "bartowski/Qwen2.5-Coder-14B-Instruct-GGUF:Q4_K_M",
91
+ "name": "Qwen2.5-Coder-14B-Instruct-Q4_K_M",
92
+ "path": "models/sha256-3552e3d1b162204c88f4a5e194981156ac65141be87f9894b5b36835101979c1/metadata.json"
93
+ },
94
+ {
95
+ "id": "bartowski/google_gemma-3-1b-it-GGUF:Q4_K_M",
96
+ "name": "Gemma-3-1B-it-Q4_K_M",
97
+ "path": "models/sha256-43da15b27fbddeb09b735ca73044097e55ea781bf9ced03cd45aedb09b20a428/metadata.json"
98
+ },
99
+ {
100
+ "id": "bartowski/google_gemma-3-27b-it-GGUF:Q4_K_M",
101
+ "name": "Gemma-3-27B-it-Q4_K_M",
102
+ "path": "models/sha256-9e44a1d1869f7bb5a5e639ba7afa1b6e4893e16d07176c3f60c59abc11e0bae5/metadata.json"
103
+ },
104
+ {
105
+ "id": "glogwa68/Llama-4-scout-GGUF:Q4_K_M",
106
+ "name": "Llama-4-Scout-Q4_K_M",
107
+ "path": "models/sha256-e49cac04ce4667accc61655e189035615c78711769aa7b8a43fdca887b366436/metadata.json"
108
+ },
109
+ {
110
+ "id": "unsloth/DeepSeek-R1-Distill-Qwen-14B-GGUF:Q4_K_M",
111
+ "name": "DeepSeek-R1-Distill-Qwen-14B-Q4_K_M",
112
+ "path": "models/sha256-141c18cb77bf3c8e6f0e1ed343fbb2eded986d0f9539f5931cd2cddcfbe0512b/metadata.json"
113
+ },
114
+ {
115
+ "id": "unsloth/DeepSeek-R1-Distill-Qwen-32B-GGUF:Q4_K_M",
116
+ "name": "DeepSeek-R1-Distill-Qwen-32B-Q4_K_M",
117
+ "path": "models/sha256-8e2a4b228397412bca5df002bf07f2aa24ba0f4f7c5afa4c99e1062635c975d6/metadata.json"
118
+ },
119
+ {
120
+ "id": "unsloth/Devstral-Small-2505-GGUF:Q4_K_M",
121
+ "name": "Devstral-Small-2505-Q4_K_M",
122
+ "path": "models/sha256-9cd1d145bdc387fe992f48e08b4efebe148c943cb3a47ad346278ae5ee2d8681/metadata.json"
123
+ },
124
+ {
125
+ "id": "unsloth/GLM-4-32B-0414-GGUF:Q4_K_M",
126
+ "name": "GLM-4-32B-0414-Q4_K_M",
127
+ "path": "models/sha256-feb131c258a0c95b5f36109f87b2d0f04462916ce5e2197f129d888c7e0b2a4a/metadata.json"
128
+ },
129
+ {
130
+ "id": "unsloth/GLM-4.7-Flash-GGUF:Q4_K_M",
131
+ "name": "GLM-4.7-Flash-Q4_K_M",
132
+ "path": "models/sha256-126ebe8597ded548b81baee6ca32cf1bec40b0bd8f16bfb242a3a445e6c1450c/metadata.json"
133
+ },
134
+ {
135
+ "id": "unsloth/MiniMax-M2.5-GGUF:Q4_K_M",
136
+ "name": "MiniMax-M2.5-Q4_K_M",
137
+ "path": "models/sha256-317cc74e9dbc7b789d51088ce3430dafe566bd0dc6e72ab35d89f9cc90a5467e/metadata.json"
138
+ },
139
+ {
140
+ "id": "unsloth/Mistral-Small-3.1-24B-Instruct-2503-GGUF:Q4_K_M",
141
+ "name": "Mistral-Small-3.1-24B-Instruct-Q4_K_M",
142
+ "path": "models/sha256-3ce9e41bcc7ac4942bd4237961306f9c6f0d2ad25ad1dca671acd65531450f7a/metadata.json"
143
+ },
144
+ {
145
+ "id": "unsloth/Qwen3-0.6B-GGUF:Q4_K_M",
146
+ "name": "Qwen3-0.6B-Q4_K_M",
147
+ "path": "models/sha256-782855a0d5aef42bea734bc6a8d3e9c8b97c2d0b89ef1d77c337bc2c3b813b63/metadata.json"
148
+ },
149
+ {
150
+ "id": "unsloth/Qwen3-14B-GGUF:Q4_K_M",
151
+ "name": "Qwen3-14B-Q4_K_M",
152
+ "path": "models/sha256-30ce70b96bc1cbc708930855598c54daede525c5a6e548b1726e6f9f1ed40b60/metadata.json"
153
+ },
154
+ {
155
+ "id": "unsloth/Qwen3-30B-A3B-GGUF:Q4_K_M",
156
+ "name": "Qwen3-30B-A3B-Q4_K_M",
157
+ "path": "models/sha256-f488ea7c68e65fdd7775f04d58766601ee1a7d8f571a154c25f2467bb295f369/metadata.json"
158
+ },
159
+ {
160
+ "id": "unsloth/Qwen3-32B-GGUF:Q4_K_M",
161
+ "name": "Qwen3-32B-Q4_K_M",
162
+ "path": "models/sha256-77f8a16681e07845a08e2adf70003d7743643daa8ec11f185f9b679b64252e4c/metadata.json"
163
+ },
164
+ {
165
+ "id": "unsloth/Qwen3-4B-GGUF:Q4_K_M",
166
+ "name": "Qwen3-4B-Q4_K_M",
167
+ "path": "models/sha256-014f6b5897f35cf403936ded449f84395e5457a86756d3fc256c275cbb10c616/metadata.json"
168
+ },
169
+ {
170
+ "id": "unsloth/Qwen3-8B-GGUF:Q4_K_M",
171
+ "name": "Qwen3-8B-Q4_K_M",
172
+ "path": "models/sha256-ce097731219bfb95091ea1767631e2c718b1585e3392353326fe24c8e7ec1bcb/metadata.json"
173
+ },
174
+ {
175
+ "id": "unsloth/Qwen3-Coder-30B-A3B-Instruct-GGUF:Q4_K_M",
176
+ "name": "Qwen3-Coder-30B-A3B-Instruct-Q4_K_M",
177
+ "path": "models/sha256-6f94eb62fa4e5ea39ac7a9211fb5b0745039d373c715de4d074d59e3dd8601ac/metadata.json"
178
+ },
179
+ {
180
+ "id": "unsloth/Qwen3.5-0.8B-GGUF:Q4_K_M",
181
+ "name": "Qwen3.5-0.8B-Vision-Q4_K_M",
182
+ "path": "models/sha256-124ed90efc924f62fc551abd896fa67010d7c901bf1db993c0b892f794ad4d1f/metadata.json"
183
+ },
184
+ {
185
+ "id": "unsloth/Qwen3.5-4B-GGUF:Q4_K_M",
186
+ "name": "Qwen3.5-4B-Vision-Q4_K_M",
187
+ "path": "models/sha256-c3febba3f550e6c86ef95e079aefd50365718e213343feafe554a59b1cd3c42a/metadata.json"
188
+ },
189
+ {
190
+ "id": "unsloth/Qwen3.5-9B-GGUF:Q4_K_M",
191
+ "name": "Qwen3.5-9B-Vision-Q4_K_M",
192
+ "path": "models/sha256-65429236385af48afc1a32646b4c9f7f2ec596b6038b8c1df465251243454b31/metadata.json"
193
+ },
194
+ {
195
+ "id": "unsloth/gemma-3-12b-it-GGUF:Q4_K_M",
196
+ "name": "Gemma-3-12B-it-Q4_K_M",
197
+ "path": "models/sha256-38714a4ddb1c654b07df0e2bdde4acad0e5090cb54e656650f98927e2b4f3dbb/metadata.json"
198
+ }
199
+ ]
200
+ }
models/sha256-014f6b5897f35cf403936ded449f84395e5457a86756d3fc256c275cbb10c616/metadata.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Qwen3-4B-Q4_K_M",
3
+ "file": "Qwen3-4B-Q4_K_M.gguf",
4
+ "url": "https://huggingface.co/unsloth/Qwen3-4B-GGUF/resolve/main/Qwen3-4B-Q4_K_M.gguf",
5
+ "size": "2.5GB",
6
+ "description": "Qwen3 starter, thinking/non-thinking modes",
7
+ "draft": "Qwen3-0.6B-Q4_K_M",
8
+ "moe": null,
9
+ "extra_files": [],
10
+ "mmproj": null,
11
+ "id": "unsloth/Qwen3-4B-GGUF:Q4_K_M"
12
+ }
models/sha256-0302e8e8b4197e056d6bc9c962194ce93e23bfd5d4e23c0db201b7ca0f24acda/metadata.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Llama-3.2-1B-Instruct-Q4_K_M",
3
+ "file": "Llama-3.2-1B-Instruct-Q4_K_M.gguf",
4
+ "url": "https://huggingface.co/bartowski/Llama-3.2-1B-Instruct-GGUF/resolve/main/Llama-3.2-1B-Instruct-Q4_K_M.gguf",
5
+ "size": "760MB",
6
+ "description": "Draft for Llama 3.x and Llama 4 models",
7
+ "draft": null,
8
+ "moe": null,
9
+ "extra_files": [],
10
+ "mmproj": null,
11
+ "id": "bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_M"
12
+ }
models/sha256-0afa2bbd49bcdec5d8de0e4e963e2ff05dcdeb77874aaa17037fc3a46298be97/metadata.json ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Qwen3.5-27B-Q4_K_M",
3
+ "file": "Qwen3.5-27B-Q4_K_M.gguf",
4
+ "url": "https://registry.ollama.ai/v2/library/qwen3.5/blobs/sha256:d4b8b4f4c350f5d322dc8235175eeae02d32c6f3fd70bdb9ea481e3abb7d7fc4",
5
+ "size": "17GB",
6
+ "description": "Qwen3.5 27B, vision + text, strong reasoning and coding",
7
+ "draft": "Qwen3-0.6B-Q4_K_M",
8
+ "moe": null,
9
+ "extra_files": [],
10
+ "mmproj": {
11
+ "file": "Qwen3.5-27B-mmproj-BF16.gguf",
12
+ "url": "https://huggingface.co/unsloth/Qwen3.5-27B-GGUF/resolve/main/mmproj-BF16.gguf"
13
+ },
14
+ "id": "Qwen3.5-27B-Q4_K_M"
15
+ }
models/sha256-124ed90efc924f62fc551abd896fa67010d7c901bf1db993c0b892f794ad4d1f/metadata.json ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Qwen3.5-0.8B-Vision-Q4_K_M",
3
+ "file": "Qwen3.5-0.8B-Q4_K_M.gguf",
4
+ "url": "https://huggingface.co/unsloth/Qwen3.5-0.8B-GGUF/resolve/main/Qwen3.5-0.8B-Q4_K_M.gguf",
5
+ "size": "508MB",
6
+ "description": "Tiny vision model, OCR, screenshots, runs anywhere",
7
+ "draft": null,
8
+ "moe": null,
9
+ "extra_files": [],
10
+ "mmproj": {
11
+ "file": "Qwen3.5-0.8B-mmproj-BF16.gguf",
12
+ "url": "https://huggingface.co/unsloth/Qwen3.5-0.8B-GGUF/resolve/main/mmproj-BF16.gguf"
13
+ },
14
+ "id": "unsloth/Qwen3.5-0.8B-GGUF:Q4_K_M"
15
+ }
models/sha256-126ebe8597ded548b81baee6ca32cf1bec40b0bd8f16bfb242a3a445e6c1450c/metadata.json ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "GLM-4.7-Flash-Q4_K_M",
3
+ "file": "GLM-4.7-Flash-Q4_K_M.gguf",
4
+ "url": "https://huggingface.co/unsloth/GLM-4.7-Flash-GGUF/resolve/main/GLM-4.7-Flash-Q4_K_M.gguf",
5
+ "size": "18GB",
6
+ "description": "MoE 30B/3B active, 64 experts top-4, fast inference, tool calling",
7
+ "draft": null,
8
+ "moe": {
9
+ "n_expert": 64,
10
+ "n_expert_used": 4,
11
+ "min_experts_per_node": 24
12
+ },
13
+ "extra_files": [],
14
+ "mmproj": null,
15
+ "id": "unsloth/GLM-4.7-Flash-GGUF:Q4_K_M"
16
+ }
models/sha256-141c18cb77bf3c8e6f0e1ed343fbb2eded986d0f9539f5931cd2cddcfbe0512b/metadata.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "DeepSeek-R1-Distill-Qwen-14B-Q4_K_M",
3
+ "file": "DeepSeek-R1-Distill-Qwen-14B-Q4_K_M.gguf",
4
+ "url": "https://huggingface.co/unsloth/DeepSeek-R1-Distill-Qwen-14B-GGUF/resolve/main/DeepSeek-R1-Distill-Qwen-14B-Q4_K_M.gguf",
5
+ "size": "9.0GB",
6
+ "description": "DeepSeek R1 reasoning distilled into Qwen 14B",
7
+ "draft": "Qwen2.5-0.5B-Instruct-Q4_K_M",
8
+ "moe": null,
9
+ "extra_files": [],
10
+ "mmproj": null,
11
+ "id": "unsloth/DeepSeek-R1-Distill-Qwen-14B-GGUF:Q4_K_M"
12
+ }
models/sha256-142399797e62c91c1438748b84825409bf896bf1e006bd371857f03f1b5a1bb0/metadata.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Qwen2.5-32B-Instruct-Q4_K_M",
3
+ "file": "Qwen2.5-32B-Instruct-Q4_K_M.gguf",
4
+ "url": "https://huggingface.co/bartowski/Qwen2.5-32B-Instruct-GGUF/resolve/main/Qwen2.5-32B-Instruct-Q4_K_M.gguf",
5
+ "size": "20GB",
6
+ "description": "Proven general chat",
7
+ "draft": "Qwen2.5-0.5B-Instruct-Q4_K_M",
8
+ "moe": null,
9
+ "extra_files": [],
10
+ "mmproj": null,
11
+ "id": "bartowski/Qwen2.5-32B-Instruct-GGUF:Q4_K_M"
12
+ }
models/sha256-25c2acc8a85d230146810a47f1197d16adf712bf4da08ff9d652fed044a4f34d/metadata.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Qwen2.5-Coder-7B-Instruct-Q4_K_M",
3
+ "file": "Qwen2.5-Coder-7B-Instruct-Q4_K_M.gguf",
4
+ "url": "https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct-GGUF/resolve/main/qwen2.5-coder-7b-instruct-q4_k_m.gguf",
5
+ "size": "4.4GB",
6
+ "description": "Code generation & completion",
7
+ "draft": "Qwen2.5-0.5B-Instruct-Q4_K_M",
8
+ "moe": null,
9
+ "extra_files": [],
10
+ "mmproj": null,
11
+ "id": "Qwen/Qwen2.5-Coder-7B-Instruct-GGUF/qwen2.5-coder-7b-instruct-q4_k_m.gguf"
12
+ }
models/sha256-2da8f5374c6ac42743cc97eb75c0f94ff38148f422bb5cc913871389b9cf70cd/metadata.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Qwen2.5-72B-Instruct-Q4_K_M",
3
+ "file": "Qwen2.5-72B-Instruct-Q4_K_M.gguf",
4
+ "url": "https://registry.ollama.ai/v2/library/qwen2.5/blobs/sha256:6e7fdda508e91cb0f63de5c15ff79ac63a1584ccafd751c07ca12b7f442101b8",
5
+ "size": "47GB",
6
+ "description": "Flagship Qwen2.5, great tensor split showcase",
7
+ "draft": "Qwen2.5-0.5B-Instruct-Q4_K_M",
8
+ "moe": null,
9
+ "extra_files": [],
10
+ "mmproj": null,
11
+ "id": "Qwen2.5-72B-Instruct-Q4_K_M"
12
+ }
models/sha256-30ce70b96bc1cbc708930855598c54daede525c5a6e548b1726e6f9f1ed40b60/metadata.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Qwen3-14B-Q4_K_M",
3
+ "file": "Qwen3-14B-Q4_K_M.gguf",
4
+ "url": "https://huggingface.co/unsloth/Qwen3-14B-GGUF/resolve/main/Qwen3-14B-Q4_K_M.gguf",
5
+ "size": "9.0GB",
6
+ "description": "Qwen3 strong chat, thinking modes",
7
+ "draft": "Qwen3-0.6B-Q4_K_M",
8
+ "moe": null,
9
+ "extra_files": [],
10
+ "mmproj": null,
11
+ "id": "unsloth/Qwen3-14B-GGUF:Q4_K_M"
12
+ }
models/sha256-317cc74e9dbc7b789d51088ce3430dafe566bd0dc6e72ab35d89f9cc90a5467e/metadata.json ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "MiniMax-M2.5-Q4_K_M",
3
+ "file": "MiniMax-M2.5-Q4_K_M-00001-of-00004.gguf",
4
+ "url": "https://huggingface.co/unsloth/MiniMax-M2.5-GGUF/resolve/main/Q4_K_M/MiniMax-M2.5-Q4_K_M-00001-of-00004.gguf",
5
+ "size": "138GB",
6
+ "description": "MiniMax-M2.5 MoE 456B/46B active, 256 experts top-8, Q4_K_M",
7
+ "draft": null,
8
+ "moe": {
9
+ "n_expert": 256,
10
+ "n_expert_used": 8,
11
+ "min_experts_per_node": 96
12
+ },
13
+ "extra_files": [
14
+ {
15
+ "file": "MiniMax-M2.5-Q4_K_M-00002-of-00004.gguf",
16
+ "url": "https://huggingface.co/unsloth/MiniMax-M2.5-GGUF/resolve/main/Q4_K_M/MiniMax-M2.5-Q4_K_M-00002-of-00004.gguf"
17
+ },
18
+ {
19
+ "file": "MiniMax-M2.5-Q4_K_M-00003-of-00004.gguf",
20
+ "url": "https://huggingface.co/unsloth/MiniMax-M2.5-GGUF/resolve/main/Q4_K_M/MiniMax-M2.5-Q4_K_M-00003-of-00004.gguf"
21
+ },
22
+ {
23
+ "file": "MiniMax-M2.5-Q4_K_M-00004-of-00004.gguf",
24
+ "url": "https://huggingface.co/unsloth/MiniMax-M2.5-GGUF/resolve/main/Q4_K_M/MiniMax-M2.5-Q4_K_M-00004-of-00004.gguf"
25
+ }
26
+ ],
27
+ "mmproj": null,
28
+ "id": "unsloth/MiniMax-M2.5-GGUF:Q4_K_M"
29
+ }
models/sha256-3552e3d1b162204c88f4a5e194981156ac65141be87f9894b5b36835101979c1/metadata.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Qwen2.5-Coder-14B-Instruct-Q4_K_M",
3
+ "file": "Qwen2.5-Coder-14B-Instruct-Q4_K_M.gguf",
4
+ "url": "https://huggingface.co/bartowski/Qwen2.5-Coder-14B-Instruct-GGUF/resolve/main/Qwen2.5-Coder-14B-Instruct-Q4_K_M.gguf",
5
+ "size": "9.0GB",
6
+ "description": "Strong code gen, fills gap between 7B and 32B",
7
+ "draft": "Qwen2.5-0.5B-Instruct-Q4_K_M",
8
+ "moe": null,
9
+ "extra_files": [],
10
+ "mmproj": null,
11
+ "id": "bartowski/Qwen2.5-Coder-14B-Instruct-GGUF:Q4_K_M"
12
+ }
models/sha256-38714a4ddb1c654b07df0e2bdde4acad0e5090cb54e656650f98927e2b4f3dbb/metadata.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Gemma-3-12B-it-Q4_K_M",
3
+ "file": "Gemma-3-12B-it-Q4_K_M.gguf",
4
+ "url": "https://huggingface.co/unsloth/gemma-3-12b-it-GGUF/resolve/main/gemma-3-12b-it-Q4_K_M.gguf",
5
+ "size": "7.3GB",
6
+ "description": "Google Gemma 3 12B, punches above weight",
7
+ "draft": "Gemma-3-1B-it-Q4_K_M",
8
+ "moe": null,
9
+ "extra_files": [],
10
+ "mmproj": null,
11
+ "id": "unsloth/gemma-3-12b-it-GGUF:Q4_K_M"
12
+ }
models/sha256-3ce9e41bcc7ac4942bd4237961306f9c6f0d2ad25ad1dca671acd65531450f7a/metadata.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Mistral-Small-3.1-24B-Instruct-Q4_K_M",
3
+ "file": "Mistral-Small-3.1-24B-Instruct-2503-Q4_K_M.gguf",
4
+ "url": "https://huggingface.co/unsloth/Mistral-Small-3.1-24B-Instruct-2503-GGUF/resolve/main/Mistral-Small-3.1-24B-Instruct-2503-Q4_K_M.gguf",
5
+ "size": "14.3GB",
6
+ "description": "Mistral general chat, good tool calling",
7
+ "draft": null,
8
+ "moe": null,
9
+ "extra_files": [],
10
+ "mmproj": null,
11
+ "id": "unsloth/Mistral-Small-3.1-24B-Instruct-2503-GGUF:Q4_K_M"
12
+ }
models/sha256-43da15b27fbddeb09b735ca73044097e55ea781bf9ced03cd45aedb09b20a428/metadata.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Gemma-3-1B-it-Q4_K_M",
3
+ "file": "Gemma-3-1B-it-Q4_K_M.gguf",
4
+ "url": "https://huggingface.co/bartowski/google_gemma-3-1b-it-GGUF/resolve/main/google_gemma-3-1b-it-Q4_K_M.gguf",
5
+ "size": "780MB",
6
+ "description": "Draft for Gemma 3 models",
7
+ "draft": null,
8
+ "moe": null,
9
+ "extra_files": [],
10
+ "mmproj": null,
11
+ "id": "bartowski/google_gemma-3-1b-it-GGUF:Q4_K_M"
12
+ }
models/sha256-530064106f7af37e49f2746a9ad5a84a59590621d441ad88d87a984a477c5cfe/metadata.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Qwen2.5-14B-Instruct-Q4_K_M",
3
+ "file": "Qwen2.5-14B-Instruct-Q4_K_M.gguf",
4
+ "url": "https://huggingface.co/bartowski/Qwen2.5-14B-Instruct-GGUF/resolve/main/Qwen2.5-14B-Instruct-Q4_K_M.gguf",
5
+ "size": "9.0GB",
6
+ "description": "Solid general chat",
7
+ "draft": "Qwen2.5-0.5B-Instruct-Q4_K_M",
8
+ "moe": null,
9
+ "extra_files": [],
10
+ "mmproj": null,
11
+ "id": "bartowski/Qwen2.5-14B-Instruct-GGUF:Q4_K_M"
12
+ }
models/sha256-60ae689aa5ccbd1387fd50d945ab13fd7936de5e0a90d5ba4358acd5a94fb44e/metadata.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Qwen2.5-0.5B-Instruct-Q4_K_M",
3
+ "file": "Qwen2.5-0.5B-Instruct-Q4_K_M.gguf",
4
+ "url": "https://huggingface.co/Qwen/Qwen2.5-0.5B-Instruct-GGUF/resolve/main/qwen2.5-0.5b-instruct-q4_k_m.gguf",
5
+ "size": "491MB",
6
+ "description": "Draft for Qwen2.5 and DeepSeek-R1-Distill models",
7
+ "draft": null,
8
+ "moe": null,
9
+ "extra_files": [],
10
+ "mmproj": null,
11
+ "id": "Qwen/Qwen2.5-0.5B-Instruct-GGUF/qwen2.5-0.5b-instruct-q4_k_m.gguf"
12
+ }
models/sha256-62449ec67d46ff80d8e87ae2f8e365af363d77ddea29c0ec1d612cdd98532992/metadata.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Hermes-2-Pro-Mistral-7B-Q4_K_M",
3
+ "file": "Hermes-2-Pro-Mistral-7B-Q4_K_M.gguf",
4
+ "url": "https://huggingface.co/bartowski/Hermes-2-Pro-Mistral-7B-GGUF/resolve/main/Hermes-2-Pro-Mistral-7B-Q4_K_M.gguf",
5
+ "size": "4.4GB",
6
+ "description": "Goose default, strong tool calling for agents",
7
+ "draft": null,
8
+ "moe": null,
9
+ "extra_files": [],
10
+ "mmproj": null,
11
+ "id": "bartowski/Hermes-2-Pro-Mistral-7B-GGUF:Q4_K_M"
12
+ }
models/sha256-65429236385af48afc1a32646b4c9f7f2ec596b6038b8c1df465251243454b31/metadata.json ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Qwen3.5-9B-Vision-Q4_K_M",
3
+ "file": "Qwen3.5-9B-Q4_K_M.gguf",
4
+ "url": "https://huggingface.co/unsloth/Qwen3.5-9B-GGUF/resolve/main/Qwen3.5-9B-Q4_K_M.gguf",
5
+ "size": "5.8GB",
6
+ "description": "Vision + text, replaces Qwen3-8B with image understanding",
7
+ "draft": null,
8
+ "moe": null,
9
+ "extra_files": [],
10
+ "mmproj": {
11
+ "file": "Qwen3.5-9B-mmproj-BF16.gguf",
12
+ "url": "https://huggingface.co/unsloth/Qwen3.5-9B-GGUF/resolve/main/mmproj-BF16.gguf"
13
+ },
14
+ "id": "unsloth/Qwen3.5-9B-GGUF:Q4_K_M"
15
+ }
models/sha256-654a3b00b6f00d880c15f4d6cd79d49e3c0940c6d9afb5b53feeb640a11b2612/metadata.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Llama-3.3-70B-Instruct-Q4_K_M",
3
+ "file": "Llama-3.3-70B-Instruct-Q4_K_M.gguf",
4
+ "url": "https://registry.ollama.ai/v2/library/llama3.3/blobs/sha256:4824460d29f2058aaf6e1118a63a7a197a09bed509f0e7d4e2efb1ee273b447d",
5
+ "size": "43GB",
6
+ "description": "Meta Llama 3.3 70B, strong all-around",
7
+ "draft": "Llama-3.2-1B-Instruct-Q4_K_M",
8
+ "moe": null,
9
+ "extra_files": [],
10
+ "mmproj": null,
11
+ "id": "Llama-3.3-70B-Instruct-Q4_K_M"
12
+ }
models/sha256-6f94eb62fa4e5ea39ac7a9211fb5b0745039d373c715de4d074d59e3dd8601ac/metadata.json ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Qwen3-Coder-30B-A3B-Instruct-Q4_K_M",
3
+ "file": "Qwen3-Coder-30B-A3B-Instruct-Q4_K_M.gguf",
4
+ "url": "https://huggingface.co/unsloth/Qwen3-Coder-30B-A3B-Instruct-GGUF/resolve/main/Qwen3-Coder-30B-A3B-Instruct-Q4_K_M.gguf",
5
+ "size": "18.6GB",
6
+ "description": "MoE agentic coding, tool use, 128 experts top-8",
7
+ "draft": "Qwen3-0.6B-Q4_K_M",
8
+ "moe": {
9
+ "n_expert": 128,
10
+ "n_expert_used": 8,
11
+ "min_experts_per_node": 46
12
+ },
13
+ "extra_files": [],
14
+ "mmproj": null,
15
+ "id": "unsloth/Qwen3-Coder-30B-A3B-Instruct-GGUF:Q4_K_M"
16
+ }
models/sha256-74e53e5cd3017a27a921730edd97a09e41d7e32d5a7e151b18dbcd06dd63ee61/metadata.json ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Mixtral-8x22B-Instruct-Q4_K_M",
3
+ "file": "Mixtral-8x22B-Instruct-Q4_K_M.gguf",
4
+ "url": "https://registry.ollama.ai/v2/library/mixtral/blobs/sha256:f3329ad0c787f4f73cab99e8c877bb76403060561dd0caa318127683c87bbcb4",
5
+ "size": "86GB",
6
+ "description": "Mixtral 8x22B MoE, 8 experts top-2",
7
+ "draft": null,
8
+ "moe": {
9
+ "n_expert": 8,
10
+ "n_expert_used": 2,
11
+ "min_experts_per_node": 4
12
+ },
13
+ "extra_files": [],
14
+ "mmproj": null,
15
+ "id": "Mixtral-8x22B-Instruct-Q4_K_M"
16
+ }
models/sha256-77f8a16681e07845a08e2adf70003d7743643daa8ec11f185f9b679b64252e4c/metadata.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Qwen3-32B-Q4_K_M",
3
+ "file": "Qwen3-32B-Q4_K_M.gguf",
4
+ "url": "https://huggingface.co/unsloth/Qwen3-32B-GGUF/resolve/main/Qwen3-32B-Q4_K_M.gguf",
5
+ "size": "19.8GB",
6
+ "description": "Best Qwen3 dense, thinking/non-thinking modes",
7
+ "draft": "Qwen3-0.6B-Q4_K_M",
8
+ "moe": null,
9
+ "extra_files": [],
10
+ "mmproj": null,
11
+ "id": "unsloth/Qwen3-32B-GGUF:Q4_K_M"
12
+ }
models/sha256-782855a0d5aef42bea734bc6a8d3e9c8b97c2d0b89ef1d77c337bc2c3b813b63/metadata.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Qwen3-0.6B-Q4_K_M",
3
+ "file": "Qwen3-0.6B-Q4_K_M.gguf",
4
+ "url": "https://huggingface.co/unsloth/Qwen3-0.6B-GGUF/resolve/main/Qwen3-0.6B-Q4_K_M.gguf",
5
+ "size": "397MB",
6
+ "description": "Draft for Qwen3 models",
7
+ "draft": null,
8
+ "moe": null,
9
+ "extra_files": [],
10
+ "mmproj": null,
11
+ "id": "unsloth/Qwen3-0.6B-GGUF:Q4_K_M"
12
+ }
models/sha256-7c188c456ad2a0c9fcb17e661bf1e9c3197b8eb5ceaf08021ee060d9cd64d457/metadata.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "DeepSeek-R1-Distill-70B-Q4_K_M",
3
+ "file": "DeepSeek-R1-Distill-70B-Q4_K_M.gguf",
4
+ "url": "https://registry.ollama.ai/v2/library/deepseek-r1/blobs/sha256:4cd576d9aa16961244012223abf01445567b061f1814b57dfef699e4cf8df339",
5
+ "size": "43GB",
6
+ "description": "DeepSeek R1 distilled to 70B (Qwen2.5-based), strong reasoning",
7
+ "draft": null,
8
+ "moe": null,
9
+ "extra_files": [],
10
+ "mmproj": null,
11
+ "id": "DeepSeek-R1-Distill-70B-Q4_K_M"
12
+ }
models/sha256-8b7b80a6b046a9b54d9c294dc01beb37ddb4e7fe06c66d4cca6d7c5b9659caf6/metadata.json ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Qwen3-235B-A22B-Q4_K_M",
3
+ "file": "Qwen3-235B-A22B-Q4_K_M.gguf",
4
+ "url": "https://registry.ollama.ai/v2/library/qwen3/blobs/sha256:aeacdadecbed8a07e42026d1a1d3cd30715bb2994ebe4e4ca4009e1a4abe8d5d",
5
+ "size": "142GB",
6
+ "description": "Qwen3 235B MoE A22B active, 128 experts top-8",
7
+ "draft": null,
8
+ "moe": {
9
+ "n_expert": 128,
10
+ "n_expert_used": 8,
11
+ "min_experts_per_node": 46
12
+ },
13
+ "extra_files": [],
14
+ "mmproj": null,
15
+ "id": "Qwen3-235B-A22B-Q4_K_M"
16
+ }
models/sha256-8e2a4b228397412bca5df002bf07f2aa24ba0f4f7c5afa4c99e1062635c975d6/metadata.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "DeepSeek-R1-Distill-Qwen-32B-Q4_K_M",
3
+ "file": "DeepSeek-R1-Distill-Qwen-32B-Q4_K_M.gguf",
4
+ "url": "https://huggingface.co/unsloth/DeepSeek-R1-Distill-Qwen-32B-GGUF/resolve/main/DeepSeek-R1-Distill-Qwen-32B-Q4_K_M.gguf",
5
+ "size": "19.9GB",
6
+ "description": "DeepSeek R1 reasoning distilled into Qwen 32B",
7
+ "draft": "Qwen2.5-0.5B-Instruct-Q4_K_M",
8
+ "moe": null,
9
+ "extra_files": [],
10
+ "mmproj": null,
11
+ "id": "unsloth/DeepSeek-R1-Distill-Qwen-32B-GGUF:Q4_K_M"
12
+ }
models/sha256-9cd1d145bdc387fe992f48e08b4efebe148c943cb3a47ad346278ae5ee2d8681/metadata.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Devstral-Small-2505-Q4_K_M",
3
+ "file": "Devstral-Small-2505-Q4_K_M.gguf",
4
+ "url": "https://huggingface.co/unsloth/Devstral-Small-2505-GGUF/resolve/main/Devstral-Small-2505-Q4_K_M.gguf",
5
+ "size": "14.3GB",
6
+ "description": "Mistral agentic coding, tool use",
7
+ "draft": null,
8
+ "moe": null,
9
+ "extra_files": [],
10
+ "mmproj": null,
11
+ "id": "unsloth/Devstral-Small-2505-GGUF:Q4_K_M"
12
+ }
models/sha256-9e44a1d1869f7bb5a5e639ba7afa1b6e4893e16d07176c3f60c59abc11e0bae5/metadata.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Gemma-3-27B-it-Q4_K_M",
3
+ "file": "Gemma-3-27B-it-Q4_K_M.gguf",
4
+ "url": "https://huggingface.co/bartowski/google_gemma-3-27b-it-GGUF/resolve/main/google_gemma-3-27b-it-Q4_K_M.gguf",
5
+ "size": "17GB",
6
+ "description": "Google Gemma 3 27B, strong reasoning",
7
+ "draft": "Gemma-3-1B-it-Q4_K_M",
8
+ "moe": null,
9
+ "extra_files": [],
10
+ "mmproj": null,
11
+ "id": "bartowski/google_gemma-3-27b-it-GGUF:Q4_K_M"
12
+ }
models/sha256-a36bfb1bb182c377676973b1558376811757f5da9cd08ed0807ba968fb496b0c/metadata.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Qwen2.5-3B-Instruct-Q4_K_M",
3
+ "file": "Qwen2.5-3B-Instruct-Q4_K_M.gguf",
4
+ "url": "https://huggingface.co/Qwen/Qwen2.5-3B-Instruct-GGUF/resolve/main/qwen2.5-3b-instruct-q4_k_m.gguf",
5
+ "size": "2.1GB",
6
+ "description": "Small & fast general chat",
7
+ "draft": "Qwen2.5-0.5B-Instruct-Q4_K_M",
8
+ "moe": null,
9
+ "extra_files": [],
10
+ "mmproj": null,
11
+ "id": "Qwen/Qwen2.5-3B-Instruct-GGUF/qwen2.5-3b-instruct-q4_k_m.gguf"
12
+ }
models/sha256-adaf5e3d4b1ba78d51cf5d45f980e79ce57d068d1636e58552bbe596cf5acdef/metadata.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Llama-3.2-3B-Instruct-Q4_K_M",
3
+ "file": "Llama-3.2-3B-Instruct-Q4_K_M.gguf",
4
+ "url": "https://huggingface.co/bartowski/Llama-3.2-3B-Instruct-GGUF/resolve/main/Llama-3.2-3B-Instruct-Q4_K_M.gguf",
5
+ "size": "2.0GB",
6
+ "description": "Meta Llama 3.2, goose default, good tool calling",
7
+ "draft": "Llama-3.2-1B-Instruct-Q4_K_M",
8
+ "moe": null,
9
+ "extra_files": [],
10
+ "mmproj": null,
11
+ "id": "bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M"
12
+ }
models/sha256-bbedc72de1c38566d2f66e565a9e5a12038404da58b1eb952baf126f0d0a39b6/metadata.json ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Qwen3-Coder-Next-Q4_K_M",
3
+ "file": "Qwen3-Coder-Next-Q4_K_M-00001-of-00004.gguf",
4
+ "url": "https://huggingface.co/Qwen/Qwen3-Coder-Next-GGUF/resolve/main/Qwen3-Coder-Next-Q4_K_M/Qwen3-Coder-Next-Q4_K_M-00001-of-00004.gguf",
5
+ "size": "48GB",
6
+ "description": "Qwen3 Coder Next ~85B dense, frontier coding model",
7
+ "draft": null,
8
+ "moe": null,
9
+ "extra_files": [
10
+ {
11
+ "file": "Qwen3-Coder-Next-Q4_K_M-00002-of-00004.gguf",
12
+ "url": "https://huggingface.co/Qwen/Qwen3-Coder-Next-GGUF/resolve/main/Qwen3-Coder-Next-Q4_K_M/Qwen3-Coder-Next-Q4_K_M-00002-of-00004.gguf"
13
+ },
14
+ {
15
+ "file": "Qwen3-Coder-Next-Q4_K_M-00003-of-00004.gguf",
16
+ "url": "https://huggingface.co/Qwen/Qwen3-Coder-Next-GGUF/resolve/main/Qwen3-Coder-Next-Q4_K_M/Qwen3-Coder-Next-Q4_K_M-00003-of-00004.gguf"
17
+ },
18
+ {
19
+ "file": "Qwen3-Coder-Next-Q4_K_M-00004-of-00004.gguf",
20
+ "url": "https://huggingface.co/Qwen/Qwen3-Coder-Next-GGUF/resolve/main/Qwen3-Coder-Next-Q4_K_M/Qwen3-Coder-Next-Q4_K_M-00004-of-00004.gguf"
21
+ }
22
+ ],
23
+ "mmproj": null,
24
+ "id": "Qwen/Qwen3-Coder-Next-GGUF:Qwen3-Coder-Next-Q4_K_M"
25
+ }
models/sha256-c3febba3f550e6c86ef95e079aefd50365718e213343feafe554a59b1cd3c42a/metadata.json ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Qwen3.5-4B-Vision-Q4_K_M",
3
+ "file": "Qwen3.5-4B-Q4_K_M.gguf",
4
+ "url": "https://huggingface.co/unsloth/Qwen3.5-4B-GGUF/resolve/main/Qwen3.5-4B-Q4_K_M.gguf",
5
+ "size": "2.7GB",
6
+ "description": "Small vision model, good quality/size balance",
7
+ "draft": null,
8
+ "moe": null,
9
+ "extra_files": [],
10
+ "mmproj": {
11
+ "file": "Qwen3.5-4B-mmproj-BF16.gguf",
12
+ "url": "https://huggingface.co/unsloth/Qwen3.5-4B-GGUF/resolve/main/mmproj-BF16.gguf"
13
+ },
14
+ "id": "unsloth/Qwen3.5-4B-GGUF:Q4_K_M"
15
+ }
models/sha256-ce097731219bfb95091ea1767631e2c718b1585e3392353326fe24c8e7ec1bcb/metadata.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Qwen3-8B-Q4_K_M",
3
+ "file": "Qwen3-8B-Q4_K_M.gguf",
4
+ "url": "https://huggingface.co/unsloth/Qwen3-8B-GGUF/resolve/main/Qwen3-8B-Q4_K_M.gguf",
5
+ "size": "5.0GB",
6
+ "description": "Qwen3 mid-tier, strong for its size",
7
+ "draft": "Qwen3-0.6B-Q4_K_M",
8
+ "moe": null,
9
+ "extra_files": [],
10
+ "mmproj": null,
11
+ "id": "unsloth/Qwen3-8B-GGUF:Q4_K_M"
12
+ }
models/sha256-d756c518cc3ec3c992152cd03514e5f98c0fb62880667f27716c877b3bebb44e/metadata.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Qwen2.5-Coder-32B-Instruct-Q4_K_M",
3
+ "file": "Qwen2.5-Coder-32B-Instruct-Q4_K_M.gguf",
4
+ "url": "https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct-GGUF/resolve/main/qwen2.5-coder-32b-instruct-q4_k_m.gguf",
5
+ "size": "20GB",
6
+ "description": "Top-tier code gen, matches GPT-4o on code",
7
+ "draft": "Qwen2.5-0.5B-Instruct-Q4_K_M",
8
+ "moe": null,
9
+ "extra_files": [],
10
+ "mmproj": null,
11
+ "id": "Qwen/Qwen2.5-Coder-32B-Instruct-GGUF/qwen2.5-coder-32b-instruct-q4_k_m.gguf"
12
+ }
models/sha256-e49cac04ce4667accc61655e189035615c78711769aa7b8a43fdca887b366436/metadata.json ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Llama-4-Scout-Q4_K_M",
3
+ "file": "Llama-4-Scout-4bit-Q4_K_M.gguf",
4
+ "url": "https://huggingface.co/glogwa68/Llama-4-scout-GGUF/resolve/main/Llama-4-Scout-4bit-Q4_K_M.gguf",
5
+ "size": "22.5GB",
6
+ "description": "MoE 109B/17B active, 16 experts top-1, Meta latest, tool calling",
7
+ "draft": null,
8
+ "moe": {
9
+ "n_expert": 16,
10
+ "n_expert_used": 1,
11
+ "min_experts_per_node": 6
12
+ },
13
+ "extra_files": [],
14
+ "mmproj": null,
15
+ "id": "glogwa68/Llama-4-scout-GGUF:Q4_K_M"
16
+ }
models/sha256-e72a7ff194d35e87832281741d2d4bba639d8d5913c55a3aa9e945a6fcfb7c37/metadata.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Llama-3.1-405B-Instruct-Q2_K",
3
+ "file": "Llama-3.1-405B-Instruct-Q2_K.gguf",
4
+ "url": "https://registry.ollama.ai/v2/library/llama3.1/blobs/sha256:e7e1972e5b13caead8a8dd9c94f4a0dec59ac2d9dd52e0cd1c067e6077eb4677",
5
+ "size": "149GB",
6
+ "description": "Llama 3.1 405B Instruct Q2_K, largest dense model",
7
+ "draft": null,
8
+ "moe": null,
9
+ "extra_files": [],
10
+ "mmproj": null,
11
+ "id": "Llama-3.1-405B-Instruct-Q2_K"
12
+ }
models/sha256-f488ea7c68e65fdd7775f04d58766601ee1a7d8f571a154c25f2467bb295f369/metadata.json ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Qwen3-30B-A3B-Q4_K_M",
3
+ "file": "Qwen3-30B-A3B-Q4_K_M.gguf",
4
+ "url": "https://huggingface.co/unsloth/Qwen3-30B-A3B-GGUF/resolve/main/Qwen3-30B-A3B-Q4_K_M.gguf",
5
+ "size": "17.3GB",
6
+ "description": "MoE general chat, 128 experts top-8, thinking/non-thinking",
7
+ "draft": "Qwen3-0.6B-Q4_K_M",
8
+ "moe": {
9
+ "n_expert": 128,
10
+ "n_expert_used": 8,
11
+ "min_experts_per_node": 46
12
+ },
13
+ "extra_files": [],
14
+ "mmproj": null,
15
+ "id": "unsloth/Qwen3-30B-A3B-GGUF:Q4_K_M"
16
+ }
models/sha256-feb131c258a0c95b5f36109f87b2d0f04462916ce5e2197f129d888c7e0b2a4a/metadata.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "GLM-4-32B-0414-Q4_K_M",
3
+ "file": "GLM-4-32B-0414-Q4_K_M.gguf",
4
+ "url": "https://huggingface.co/unsloth/GLM-4-32B-0414-GGUF/resolve/main/GLM-4-32B-0414-Q4_K_M.gguf",
5
+ "size": "19.7GB",
6
+ "description": "Strong 32B, good tool calling",
7
+ "draft": null,
8
+ "moe": null,
9
+ "extra_files": [],
10
+ "mmproj": null,
11
+ "id": "unsloth/GLM-4-32B-0414-GGUF:Q4_K_M"
12
+ }