Mungert committed (verified)
Commit 2d7edda · 0 Parent(s)

Super-squash history to reclaim storage
.gitattributes ADDED
@@ -0,0 +1,81 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ Homunculus-f16.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-f16_q8_0.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-bf16_q8_0.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-f16_q6_k.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-bf16_q6_k.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-f16_q4_k.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-bf16_q4_k.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-q2_k_l.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-q3_k_l.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-q4_k_l.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-q5_k_l.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-q6_k_l.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-q2_k_m.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-q2_k_s.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-q3_k_m.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-q3_k_s.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-q4_k_m.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-q4_k_s.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-q5_k_m.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-q5_k_s.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-q6_k_m.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-q8_0.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-q4_0.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-q4_1.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-q4_0_l.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-q4_1_l.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-q5_0.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-q5_1.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-q5_0_l.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-q5_1_l.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-iq1_s.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-iq1_m.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-iq2_xs.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-iq2_xxs.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-iq2_s.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-iq2_m.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-iq3_xs.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-iq3_xxs.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-iq3_s.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-iq3_m.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-iq4_xs.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-iq4_nl.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-tq1_0.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus-tq2_0.gguf filter=lfs diff=lfs merge=lfs -text
+ Homunculus.imatrix filter=lfs diff=lfs merge=lfs -text
+ Homunculus-bf16.gguf filter=lfs diff=lfs merge=lfs -text
Homunculus-bf16.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5f31f777f7268ee31582c1a797f29ef5f1a20465c0aca8a95bb89bb680de3db0
+ size 24924395744
Homunculus-bf16_q8_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1799d0ca44b90bcd95151ca9cc16f4c0ce3b298e895ab73489b3d55568187435
+ size 18436331744
Homunculus-f16_q8_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6a54074c11d9255dbc5d2b201a06128d20492d6dbd0061e075b18bdbdfed5ce2
+ size 18436331744
Homunculus-iq1_m.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bd94e914195b16a6f541bd176d02dcd21895219ae5c478673579c013a123f2c4
+ size 4145469952
Homunculus-iq1_s.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6569a9227e313c46060491df3da31a45466322e88f35bd84f964d3fb18ac8d41
+ size 3881687552
Homunculus-iq2_m.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:411e729dd822effe6bb1b08c586ee3c05f70cbc8fa7a8ed9e0b79623bc085d95
+ size 4895529472
Homunculus-iq2_s.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b4c09a45de9196e90c96c04d6b549c269ad324c3e8b5fcf72cbb8fa69f41bf8c
+ size 4692367872
Homunculus-iq2_xs.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c06f4e999fda9c78a486ada4d78f1de4b1b60862fa50b1a37da21a96f7a4bdcc
+ size 4558346752
Homunculus-iq2_xxs.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6b016f9c49f513e0f6cea2848cf7b6a3a5fa1ed5796c1acb561aa605ff7a7cab
+ size 4248689152
Homunculus-iq3_m.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e9e5a672faa3b8f4a14af48f3a09e00f91231a7e8265dc08d3f042c5aa5531cb
+ size 5882419712
Homunculus-iq3_s.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ca966fa7b52333095c666d9c20b2ff35b722e50a9d7ce338a8c0b4394a512583
+ size 5783542272
Homunculus-iq3_xs.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b944f15b8677a28f18f38857d27a19b3e2df40d9ce0fc0386f4154765bb4b5af
+ size 5527951872
Homunculus-iq3_xxs.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fe7e922e1dbc790dd9c3645558faa9553c1b4ee2ffdf1284202f95c06c3faadb
+ size 5283502592
Homunculus-iq4_nl.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f219a2f03b9905bf29a640e948c224ddead7f3cde7ba6cb8fd354ef6fa68c2ac
+ size 7241887232
Homunculus-iq4_xs.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:23b77e8f18befdee65327caafa5862cfe6d8c7698a867513d1ce3a4647e5dc07
+ size 6883384832
Homunculus-q2_k_m.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:04e079422c2698ae2a9b389cb6403f4802e1292024cdc50e3c547682c5a97ef5
+ size 5249597952
Homunculus-q2_k_s.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1cd184d78bb339c9a6269476f576715e1138bfb0fc7638d5f3962d4c99695186
+ size 4753807872
Homunculus-q3_k_m.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:685563696c4a5d4aedbafb842630e91854a3d246158a88685497eeb27c581b0a
+ size 6471844352
Homunculus-q3_k_s.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ea4c788d1e300113c1eb85dcabd90cdad63ffb3659119730efa4f865f9876ef9
+ size 5842196992
Homunculus-q4_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:12bc52ee298bf09268f9dd82aef45eaf57f5bff5de1d8ec8c40f872497ef4675
+ size 7015455232
Homunculus-q4_1.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:815a2e02d4eb870423921beaf0caa8afefae5a9a94f7b4107f9a39f171426437
+ size 7794104832
Homunculus-q4_k_m.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:debcccd7e0f6dc84939f91be6770bd547012a622bd923ca47c771bf6e261f63a
+ size 7758807552
Homunculus-q4_k_s.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9f0e83e8963ce673846491b6829352e9d7e22723e1227bc048232a8b67f0291c
+ size 7524680192
Homunculus-q5_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:df45ccba9ac091b80d175bde1ad3c50a792c67a97901eff4bef2aed99b0cdee0
+ size 8572754432
Homunculus-q5_1.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b31b29d968b349ad214e646562995ead19aac65f55d1ea7c4536045342f4bc91
+ size 9351404032
Homunculus-q5_k_m.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1e18f9278c4987cf698741732f4edb42b14e89a15a855c52f8537fd0ca82c8ca
+ size 9010217472
Homunculus-q5_k_s.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:90e6bbe7569270849831dbb56b34bee1d30b30934625440df9ce04ee48997c98
+ size 8884879872
Homunculus-q6_k_m.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:da254eeaeaf3add340bfc9c3c2b43ddf3346147e217cdb3bcab0041d50a2994c
+ size 10227384832
Homunculus-q8_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:254736c3720c107f0dbfab30b055510cb7652e2ce68e5fb04ed93d5baf1cba5c
+ size 13244651744
Homunculus-tq1_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b922c7887b50c9de5e94c1b969591c14a0b85a754954932ef7545963dbd04200
+ size 3381816832
Homunculus-tq2_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b516af2f3c3327d5796e1b808811e7ab9a4e4792756f01f6e099889db80da45a
+ size 3892997632
Homunculus.imatrix ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9ae89cb322f7d0e8c7e79cb46ddf93d837b1a5163d09a5e6df653ee6186dbf7b
+ size 7054418
README.md ADDED
@@ -0,0 +1,351 @@
+ ---
+ language:
+ - en
+ license: apache-2.0
+ library_name: transformers
+ base_model:
+ - mistralai/Mistral-Nemo-Base-2407 # lightweight student
+ - Qwen/Qwen3-235B-A22B # thinking + non-thinking teacher
+ tags:
+ - distillation
+ - /think
+ - /nothink
+ - reasoning-transfer
+ - arcee-ai
+ ---
+
+ # <span style="color: #7FFF7F;">Homunculus GGUF Models</span>
+
+ ## <span style="color: #7F7FFF;">Model Generation Details</span>
+
+ This model was generated using [llama.cpp](https://github.com/ggerganov/llama.cpp) at commit [`0d398442`](https://github.com/ggerganov/llama.cpp/commit/0d3984424f2973c49c4bcabe4cc0153b4f90c601).
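+
+ If you want to reproduce a similar conversion yourself, the sketch below shows the usual llama.cpp flow: convert the original Hugging Face checkpoint to a high-precision GGUF, then quantize it. This is a minimal illustration, not the exact commands used for this repository; the script and binary names (`convert_hf_to_gguf.py`, `llama-quantize`) are those shipped with recent llama.cpp checkouts, and all paths are placeholders.
+
+ ```python
+ import subprocess
+
+ # Assumptions: a local llama.cpp checkout built with its tools, and the source
+ # model downloaded to ./Homunculus. Paths and the target quant type are illustrative.
+ LLAMA_CPP = "./llama.cpp"
+
+ # 1) Convert the Hugging Face checkpoint to a BF16 GGUF.
+ subprocess.run(
+     ["python", f"{LLAMA_CPP}/convert_hf_to_gguf.py", "./Homunculus",
+      "--outfile", "Homunculus-bf16.gguf", "--outtype", "bf16"],
+     check=True,
+ )
+
+ # 2) Quantize the BF16 GGUF down to a smaller format (here Q4_K_M).
+ subprocess.run(
+     [f"{LLAMA_CPP}/build/bin/llama-quantize",
+      "Homunculus-bf16.gguf", "Homunculus-q4_k_m.gguf", "Q4_K_M"],
+     check=True,
+ )
+ ```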
+
+ ## <span style="color: #7FFF7F;">Ultra-Low-Bit Quantization with IQ-DynamicGate (1-2 bit)</span>
+
+ Our latest quantization method introduces **precision-adaptive quantization** for ultra-low-bit models (1-2 bit), with benchmark-proven improvements on **Llama-3-8B**. This approach uses layer-specific strategies to preserve accuracy while maintaining extreme memory efficiency.
+
+ ### **Benchmark Context**
+ All tests conducted on **Llama-3-8B-Instruct** using:
+ - Standard perplexity evaluation pipeline
+ - 2048-token context window
+ - Same prompt set across all quantizations
+
+ ### **Method**
+ - **Dynamic Precision Allocation** (sketched in the example below):
+   - First/last 25% of layers → IQ4_XS (selected layers)
+   - Middle 50% → IQ2_XXS/IQ3_S (to increase efficiency)
+ - **Critical Component Protection**:
+   - Embeddings/output layers use Q5_K
+   - Reduces error propagation by 38% vs standard 1-2 bit quantization
+
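+ The allocation rule can be pictured with a small helper. This is purely an illustration of the bucketing logic described above (the function name and layer counts are invented for the example), not the code used to produce these files:
+
+ ```python
+ def precision_for_layer(layer_idx: int, n_layers: int) -> str:
+     """Illustrative mapping from transformer layer index to quant type.
+
+     The first and last 25% of layers keep the higher-precision IQ4_XS,
+     while the middle 50% drop to IQ2_XXS for memory savings.
+     Embeddings and the output head are handled separately (Q5_K).
+     """
+     quarter = n_layers // 4
+     if layer_idx < quarter or layer_idx >= n_layers - quarter:
+         return "IQ4_XS"   # sensitive early/late layers keep more bits
+     return "IQ2_XXS"      # middle layers tolerate aggressive quantization
+
+ print([precision_for_layer(i, 8) for i in range(8)])
+ # ['IQ4_XS', 'IQ4_XS', 'IQ2_XXS', 'IQ2_XXS', 'IQ2_XXS', 'IQ2_XXS', 'IQ4_XS', 'IQ4_XS']
+ ```
+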
+ ### **Quantization Performance Comparison (Llama-3-8B)**
+
+ | Quantization | Standard PPL | DynamicGate PPL | Δ PPL | Std Size | DG Size | Δ Size | Std Speed | DG Speed |
+ |--------------|--------------|------------------|---------|----------|---------|--------|-----------|----------|
+ | IQ2_XXS | 11.30 | 9.84 | -12.9% | 2.5G | 2.6G | +0.1G | 234s | 246s |
+ | IQ2_XS | 11.72 | 11.63 | -0.8% | 2.7G | 2.8G | +0.1G | 242s | 246s |
+ | IQ2_S | 14.31 | 9.02 | -36.9% | 2.7G | 2.9G | +0.2G | 238s | 244s |
+ | IQ1_M | 27.46 | 15.41 | -43.9% | 2.2G | 2.5G | +0.3G | 206s | 212s |
+ | IQ1_S | 53.07 | 32.00 | -39.7% | 2.1G | 2.4G | +0.3G | 184s | 209s |
+
+ **Key**:
+ - PPL = Perplexity (lower is better)
+ - Δ PPL = Percentage change from standard to DynamicGate
+ - Speed = Inference time (CPU AVX2, 2048-token context)
+ - Size differences reflect mixed quantization overhead
+
+ **Key Improvements:**
+ - 🔥 **IQ1_M** shows a massive 43.9% perplexity reduction (27.46 → 15.41)
+ - 🚀 **IQ2_S** cuts perplexity by 36.9% while adding only 0.2GB
+ - ⚡ **IQ1_S** maintains 39.7% better accuracy despite 1-bit quantization
+
+ **Tradeoffs:**
+ - All variants have modest size increases (0.1-0.3GB)
+ - Inference speeds remain comparable (<5% difference)
+
+ ### **When to Use These Models**
+ 📌 **Fitting models into GPU VRAM**
+
+ ✔ **Memory-constrained deployments**
+
+ ✔ **CPU and edge devices** where 1-2 bit errors can be tolerated
+
+ ✔ **Research** into ultra-low-bit quantization
+
+ ## **Choosing the Right Model Format**
+
+ Selecting the correct model format depends on your **hardware capabilities** and **memory constraints**.
+
+ ### **BF16 (Brain Float 16) – Use if BF16 acceleration is available**
+ - A 16-bit floating-point format designed for **faster computation** while retaining good precision.
+ - Provides a **similar dynamic range** to FP32 but with **lower memory usage**.
+ - Recommended if your hardware supports **BF16 acceleration** (see the quick check below).
+ - Ideal for **high-performance inference** with a **reduced memory footprint** compared to FP32.
+
+ 📌 **Use BF16 if:**
+ ✔ Your hardware has native **BF16 support** (e.g., newer GPUs, TPUs).
+ ✔ You want **higher precision** while saving memory.
+ ✔ You plan to **requantize** the model into another format.
+
+ 📌 **Avoid BF16 if:**
+ ❌ Your hardware does **not** support BF16 (it may fall back to FP32 and run slower).
+ ❌ You need compatibility with older devices that lack BF16 optimization.
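+
+ A quick way to check for BF16 support before choosing a file (a sketch assuming PyTorch with CUDA is installed; other frameworks expose similar capability queries):
+
+ ```python
+ import torch
+
+ # True on Ampere-class (and newer) NVIDIA GPUs and other BF16-capable accelerators.
+ if torch.cuda.is_available() and torch.cuda.is_bf16_supported():
+     print("Native BF16 available: the bf16 GGUF is a good choice")
+ else:
+     print("No native BF16: prefer the f16 or quantized files")
+ ```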
+
+ ---
+
+ ### **F16 (Float 16) – More widely supported than BF16**
+ - A 16-bit floating-point format with **high precision** but a smaller range of values than BF16.
+ - Works on most devices with **FP16 acceleration support** (including many GPUs and some CPUs).
+ - Slightly lower numerical precision than BF16 but generally sufficient for inference.
+
+ 📌 **Use F16 if:**
+ ✔ Your hardware supports **FP16** but **not BF16**.
+ ✔ You need a **balance between speed, memory usage, and accuracy**.
+ ✔ You are running on a **GPU** or another device optimized for FP16 computations.
+
+ 📌 **Avoid F16 if:**
+ ❌ Your device lacks **native FP16 support** (it may run slower than expected).
+ ❌ You have memory limitations.
+
+ ---
+
+ ### **Quantized Models (Q4_K, Q6_K, Q8, etc.) – For CPU & Low-VRAM Inference**
+ Quantization reduces model size and memory usage while maintaining as much accuracy as possible (see the loading example below).
+ - **Lower-bit models (Q4_K)** → **Best for minimal memory usage**, may have lower precision.
+ - **Higher-bit models (Q6_K, Q8_0)** → **Better accuracy**, but require more memory.
+
+ 📌 **Use Quantized Models if:**
+ ✔ You are running inference on a **CPU** and need an optimized model.
+ ✔ Your device has **low VRAM** and cannot load full-precision models.
+ ✔ You want to reduce the **memory footprint** while keeping reasonable accuracy.
+
+ 📌 **Avoid Quantized Models if:**
+ ❌ You need **maximum accuracy** (full-precision models are better for this).
+ ❌ Your hardware has enough VRAM for higher-precision formats (BF16/F16).
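+
+ For example, the snippet below loads one of the quantized files from this repo with the `llama-cpp-python` bindings and runs a short chat turn on CPU. Treat it as a sketch: install `llama-cpp-python` first, point `model_path` at wherever you downloaded the file, and raise `n_gpu_layers` if you want to offload layers to a GPU.
+
+ ```python
+ from llama_cpp import Llama
+
+ llm = Llama(
+     model_path="Homunculus-q4_k_m.gguf",  # any quantized file from this repo
+     n_ctx=4096,                           # context window
+     n_gpu_layers=0,                       # 0 = pure CPU; raise to offload layers
+ )
+
+ out = llm.create_chat_completion(
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant. /nothink"},
+         {"role": "user", "content": "Explain what a GGUF file is in one sentence."},
+     ],
+     max_tokens=128,
+ )
+ print(out["choices"][0]["message"]["content"])
+ ```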
+
+ ---
+
+ ### **Very Low-Bit Quantization (IQ3_XS, IQ3_S, IQ3_M, Q4_K, Q4_0)**
+ These models are optimized for **extreme memory efficiency**, making them ideal for **low-power devices** or **large-scale deployments** where memory is a critical constraint.
+
+ - **IQ3_XS**: Ultra-low-bit quantization (3-bit) with **extreme memory efficiency**.
+   - **Use case**: Best for **ultra-low-memory devices** where even Q4_K is too large.
+   - **Trade-off**: Lower accuracy compared to higher-bit quantizations.
+
+ - **IQ3_S**: Small block size for **maximum memory efficiency**.
+   - **Use case**: Best for **low-memory devices** where **IQ3_XS** is too aggressive.
+
+ - **IQ3_M**: Medium block size for better accuracy than **IQ3_S**.
+   - **Use case**: Suitable for **low-memory devices** where **IQ3_S** is too limiting.
+
+ - **Q4_K**: 4-bit quantization with **block-wise optimization** for better accuracy.
+   - **Use case**: Best for **low-memory devices** where **Q6_K** is too large.
+
+ - **Q4_0**: Pure 4-bit quantization, optimized for **ARM devices**.
+   - **Use case**: Best for **ARM-based devices** or **low-memory environments**.
+
+ ---
+
+ ### **Summary Table: Model Format Selection**
+
+ | Model Format | Precision | Memory Usage | Device Requirements | Best Use Case |
+ |--------------|------------|---------------|----------------------|---------------|
+ | **BF16** | Highest | High | BF16-supported GPU/CPUs | High-speed inference with reduced memory |
+ | **F16** | High | High | FP16-supported devices | GPU inference when BF16 isn't available |
+ | **Q4_K** | Medium-Low | Low | CPU or low-VRAM devices | Best for memory-constrained environments |
+ | **Q6_K** | Medium | Moderate | CPU with more memory | Better accuracy while still being quantized |
+ | **Q8_0** | High | Moderate | CPU or GPU with enough VRAM | Best accuracy among quantized models |
+ | **IQ3_XS** | Very Low | Very Low | Ultra-low-memory devices | Extreme memory efficiency, lower accuracy |
+ | **Q4_0** | Low | Low | ARM or low-memory devices | llama.cpp can optimize for ARM devices |
+
+ ---
+
+ ## **Included Files & Details**
+
+ ### `Homunculus-bf16.gguf`
+ - Model weights preserved in **BF16**.
+ - Use this if you want to **requantize** the model into a different format (see the quantization sketch under Model Generation Details).
+ - Best if your device supports **BF16 acceleration**.
+
+ ### `Homunculus-f16.gguf`
+ - Model weights stored in **F16**.
+ - Use if your device supports **FP16**, especially if BF16 is not available.
+
+ ### `Homunculus-bf16_q8_0.gguf`
+ - **Output & embeddings** remain in **BF16**.
+ - All other layers quantized to **Q8_0**.
+ - Use if your device supports **BF16** and you want a quantized version.
+
+ ### `Homunculus-f16_q8_0.gguf`
+ - **Output & embeddings** remain in **F16**.
+ - All other layers quantized to **Q8_0**.
+
+ ### `Homunculus-q4_k_m.gguf`
+ - **Output & embeddings** quantized to **Q8_0**.
+ - All other layers quantized to **Q4_K**.
+ - Good for **CPU inference** with limited memory.
+
+ ### `Homunculus-q4_k_s.gguf`
+ - Smallest **Q4_K** variant, using less memory at the cost of accuracy.
+ - Best for **very low-memory setups**.
+
+ ### `Homunculus-q6_k_m.gguf`
+ - **Output & embeddings** quantized to **Q8_0**.
+ - All other layers quantized to **Q6_K**.
+
+ ### `Homunculus-q8_0.gguf`
+ - Fully **Q8** quantized model for better accuracy.
+ - Requires **more memory** but offers higher precision.
+
+ ### `Homunculus-iq3_xs.gguf`
+ - **IQ3_XS** quantization, optimized for **extreme memory efficiency**.
+ - Best for **ultra-low-memory devices**.
+
+ ### `Homunculus-iq3_m.gguf`
+ - **IQ3_M** quantization, offering a **medium block size** for better accuracy.
+ - Suitable for **low-memory devices**.
+
+ ### `Homunculus-q4_0.gguf`
+ - Pure **Q4_0** quantization, optimized for **ARM devices**.
+ - Best for **low-memory environments**.
+ - Prefer IQ4_NL if you need better accuracy.
+
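+ To fetch just one of these files rather than cloning the whole repository, something like the following works with `huggingface_hub`. This is a sketch: `REPO_ID` is a placeholder for this repository's id as shown on the model page, and you can substitute any filename from the list above.
+
+ ```python
+ from huggingface_hub import hf_hub_download
+
+ REPO_ID = "<this-repo-id>"  # placeholder: use the repo id shown at the top of this page
+
+ path = hf_hub_download(repo_id=REPO_ID, filename="Homunculus-q4_k_m.gguf")
+ print("Downloaded to", path)
+ ```
+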
+ # <span id="testllm" style="color: #7F7FFF;">🚀 If you find these models useful</span>
+ ❤ **Please click "Like" if you find this useful!**
+ Help me test my **AI-Powered Network Monitor Assistant** with **quantum-ready security checks**:
+ 👉 [Quantum Network Monitor](https://readyforquantum.com/dashboard/?assistant=open&utm_source=huggingface&utm_medium=referral&utm_campaign=huggingface_repo_readme)
+
+ 💬 **How to test**:
+ Choose an **AI assistant type**:
+ - `TurboLLM` (GPT-4o-mini)
+ - `HugLLM` (Hugging Face open-source)
+ - `TestLLM` (Experimental CPU-only)
+
+ ### **What I’m Testing**
+ I’m pushing the limits of **small open-source models for AI network monitoring**, specifically:
+ - **Function calling** against live network services
+ - **How small can a model go** while still handling:
+   - Automated **Nmap scans**
+   - **Quantum-readiness checks**
+   - **Network monitoring tasks**
+
+ 🟡 **TestLLM** – Current experimental model (llama.cpp on 2 CPU threads):
+ - ✅ **Zero-configuration setup**
+ - ⏳ 30s load time (slow inference but **no API costs**)
+ - 🔧 **Help wanted!** If you’re into **edge-device AI**, let’s collaborate!
+
+ ### **Other Assistants**
+ 🟢 **TurboLLM** – Uses **gpt-4o-mini** for:
+ - **Creating custom command processors to run .NET code on Quantum Network Monitor Agents**
+ - **Real-time network diagnostics and monitoring**
+ - **Security audits**
+ - **Penetration testing** (Nmap/Metasploit)
+
+ 🔵 **HugLLM** – Latest open-source models:
+ - 🌐 Runs on the Hugging Face Inference API
+
+ ### 💡 **Example commands you could test**:
+ 1. `"Give me info on my website's SSL certificate"`
+ 2. `"Check if my server is using quantum-safe encryption for communication"`
+ 3. `"Run a comprehensive security audit on my server"`
+ 4. `"Create a cmd processor to .. (whatever you want)"` – note that you need to install a Quantum Network Monitor Agent to run the .NET code. This is a very flexible and powerful feature. Use with caution!
+
+ ### Final Word
+
+ I fund the servers used to create these model files, run the Quantum Network Monitor service, and pay for inference from Novita and OpenAI—all out of my own pocket. All the code behind the model creation and the Quantum Network Monitor project is [open source](https://github.com/Mungert69). Feel free to use whatever you find helpful.
+
+ If you appreciate the work, please consider [buying me a coffee](https://www.buymeacoffee.com/mahadeva) ☕. Your support helps cover service costs and allows me to raise token limits for everyone.
+
+ I'm also open to job opportunities or sponsorship.
+
+ Thank you! 😊
+
+
+ ![Homunculus Logo](https://huggingface.co/arcee-ai/Homunculus/resolve/main/logo.jpg)
+
+ # Arcee **Homunculus-12B**
+
+ **Homunculus** is a 12 billion-parameter instruction model distilled from **Qwen3-235B** onto the **Mistral-Nemo** backbone.
+ It was purpose-built to preserve Qwen’s two-mode interaction style—`/think` (deliberate chain-of-thought) and `/nothink` (concise answers)—while running on a single consumer GPU.
+
+ ---
+
+ ## ✨ What’s special?
+
+ | Feature | Detail |
+ | --------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------- |
+ | **Reasoning-trace transfer** | Instead of copying just final probabilities, we align *full* logit trajectories, yielding more faithful reasoning. |
+ | **Total-Variation-Distance loss** | Better matches the teacher’s confidence distribution and smooths the loss landscape (see the sketch below). |
+ | **Tokenizer replacement** | The original Mistral tokenizer was swapped for Qwen3's tokenizer. |
+ | **Dual interaction modes** | Use `/think` when you want transparent step-by-step reasoning (good for analysis & debugging). Use `/nothink` for terse, production-ready answers. These directives are most reliable when placed in the system role. |
+
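+ For reference, the total variation distance between two next-token distributions is half their L1 distance. The sketch below computes it for a single position; it only illustrates the distance itself, not the exact training objective used during distillation.
+
+ ```python
+ import torch
+
+ def total_variation_distance(p: torch.Tensor, q: torch.Tensor) -> torch.Tensor:
+     """TVD(p, q) = 0.5 * sum_i |p_i - q_i| for probability vectors p and q."""
+     return 0.5 * (p - q).abs().sum(dim=-1)
+
+ teacher = torch.softmax(torch.randn(32000), dim=-1)  # teacher next-token distribution
+ student = torch.softmax(torch.randn(32000), dim=-1)  # student next-token distribution
+ print(total_variation_distance(teacher, student))    # 0 = identical, 1 = disjoint supports
+ ```
+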
+ ---
+
+ ## Benchmark results
+
+ | Benchmark | Score |
+ | --------- | ----- |
+ | GPQA Diamond (average of 3) | 57.1% |
+ | MMLU | 67.5% |
+
+ ## 🔧 Quick Start
+
+ ```python
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ model_id = "arcee-ai/Homunculus"
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+ model = AutoModelForCausalLM.from_pretrained(
+     model_id,
+     torch_dtype="auto",
+     device_map="auto"
+ )
+
+ # /think mode - Chain-of-thought reasoning
+ messages = [
+     {"role": "system", "content": "You are a helpful assistant. /think"},
+     {"role": "user", "content": "Why is the sky blue?"},
+ ]
+ # Build the prompt with the chat template and move it to the model's device
+ inputs = tokenizer.apply_chat_template(
+     messages, tokenize=True, add_generation_prompt=True, return_tensors="pt"
+ ).to(model.device)
+ output = model.generate(inputs, max_new_tokens=512, do_sample=True, temperature=0.7)
+ print(tokenizer.decode(output[0], skip_special_tokens=True))
+
+ # /nothink mode - Direct answers
+ messages = [
+     {"role": "system", "content": "You are a helpful assistant. /nothink"},
+     {"role": "user", "content": "Summarize the plot of Hamlet in two sentences."},
+ ]
+ inputs = tokenizer.apply_chat_template(
+     messages, tokenize=True, add_generation_prompt=True, return_tensors="pt"
+ ).to(model.device)
+ output = model.generate(inputs, max_new_tokens=128, do_sample=True, temperature=0.7)
+ print(tokenizer.decode(output[0], skip_special_tokens=True))
+ ```
+
+ ## 💡 Intended Use & Limitations
+
+ Homunculus is designed for:
+
+ * **Research** on reasoning-trace distillation, logit imitation, and mode-switchable assistants.
+ * **Lightweight production** deployments that need strong reasoning in under 12 GB of VRAM.
+
+ ### Known limitations
+
+ * May inherit biases from the Qwen3 teacher and internet-scale pretraining data.
+ * Long-context (>32k tokens) use is experimental—expect latency & memory overhead.
+
+ ---