Commit 5070bb3 (verified) · 0 parent(s)

Super-squash history to reclaim storage

Files changed:
- .gitattributes +80 -0
- README.md +495 -0
- sychonix-bf16-q4_k.gguf +3 -0
- sychonix-bf16-q6_k.gguf +3 -0
- sychonix-bf16-q8_0.gguf +3 -0
- sychonix-bf16.gguf +3 -0
- sychonix-f16-q4_k.gguf +3 -0
- sychonix-f16-q6_k.gguf +3 -0
- sychonix-f16-q8_0.gguf +3 -0
- sychonix-iq1_m.gguf +3 -0
- sychonix-iq1_s.gguf +3 -0
- sychonix-iq2_m.gguf +3 -0
- sychonix-iq2_s.gguf +3 -0
- sychonix-iq2_xs.gguf +3 -0
- sychonix-iq2_xxs.gguf +3 -0
- sychonix-iq3_m.gguf +3 -0
- sychonix-iq3_s.gguf +3 -0
- sychonix-iq3_xs.gguf +3 -0
- sychonix-iq3_xxs.gguf +3 -0
- sychonix-iq4_nl.gguf +3 -0
- sychonix-iq4_xs.gguf +3 -0
- sychonix-q2_k_s.gguf +3 -0
- sychonix-q3_k_m.gguf +3 -0
- sychonix-q3_k_s.gguf +3 -0
- sychonix-q4_0.gguf +3 -0
- sychonix-q4_1.gguf +3 -0
- sychonix-q4_k_m.gguf +3 -0
- sychonix-q4_k_s.gguf +3 -0
- sychonix-q5_0.gguf +3 -0
- sychonix-q5_1.gguf +3 -0
- sychonix-q5_k_m.gguf +3 -0
- sychonix-q5_k_s.gguf +3 -0
- sychonix-q6_k_m.gguf +3 -0
- sychonix-q8_0.gguf +3 -0
- sychonix-tq1_0.gguf +3 -0
- sychonix-tq2_0.gguf +3 -0
- sychonix.imatrix +3 -0
.gitattributes
ADDED
@@ -0,0 +1,80 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
sychonix-f16.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-f16-q8_0.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-bf16-q8_0.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-f16-q6_k.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-bf16-q6_k.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-f16-q4_k.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-bf16-q4_k.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-q2_k_l.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-q3_k_l.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-q4_k_l.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-q5_k_l.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-q6_k_l.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-q2_k_s.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-q3_k_m.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-q3_k_s.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-q4_k_m.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-q4_k_s.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-q5_k_m.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-q5_k_s.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-q6_k_m.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-q8_0.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-q4_0.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-q4_1.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-q4_0_l.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-q4_1_l.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-q5_0.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-q5_1.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-q5_0_l.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-q5_1_l.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-iq1_s.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-iq1_m.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-iq2_xs.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-iq2_xxs.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-iq2_s.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-iq2_m.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-iq3_xs.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-iq3_xxs.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-iq3_s.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-iq3_m.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-iq4_xs.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-iq4_nl.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-tq1_0.gguf filter=lfs diff=lfs merge=lfs -text
sychonix-tq2_0.gguf filter=lfs diff=lfs merge=lfs -text
sychonix.imatrix filter=lfs diff=lfs merge=lfs -text
sychonix-bf16.gguf filter=lfs diff=lfs merge=lfs -text

README.md
ADDED
@@ -0,0 +1,495 @@
---
language: en
tags:
- exbert
license: apache-2.0
datasets:
- bookcorpus
- wikipedia
---

# <span style="color: #7FFF7F;">sychonix GGUF Models</span>

## <span style="color: #7FFF7F;">Ultra-Low-Bit Quantization with IQ-DynamicGate (1-2 bit)</span>

Our latest quantization method introduces **precision-adaptive quantization** for ultra-low-bit models (1-2 bit), with benchmark-proven improvements on **Llama-3-8B**. This approach uses layer-specific strategies to preserve accuracy while maintaining extreme memory efficiency.

### **Benchmark Context**
All tests were conducted on **Llama-3-8B-Instruct** using:
- Standard perplexity evaluation pipeline
- 2048-token context window
- Same prompt set across all quantizations

### **Method**
- **Dynamic Precision Allocation**:
  - First/last 25% of layers → IQ4_XS (selected layers)
  - Middle 50% → IQ2_XXS/IQ3_S (increases efficiency)
- **Critical Component Protection**:
  - Embeddings/output layers use Q5_K
  - Reduces error propagation by 38% vs. standard 1-2 bit quantization
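
To make the allocation concrete, here is a minimal illustrative sketch of the layer-to-precision mapping described above. This is not the actual IQ-DynamicGate implementation; the function name and the exact type choices are assumptions for illustration only.

```python
# Illustrative sketch only -- not the actual IQ-DynamicGate code.
# Maps each of n_layers transformer blocks to a quantization type,
# following the first/last-25% vs. middle-50% split described above.
def assign_quant_types(n_layers: int) -> dict:
    plan = {}
    for i in range(n_layers):
        frac = i / n_layers
        if frac < 0.25 or frac >= 0.75:
            plan[i] = "IQ4_XS"    # outer layers keep higher precision
        else:
            plan[i] = "IQ2_XXS"   # middle layers take the aggressive types
    # Critical components are protected regardless of depth.
    plan["token_embd"] = "Q5_K"
    plan["output"] = "Q5_K"
    return plan

print(assign_quant_types(32))
```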

### **Quantization Performance Comparison (Llama-3-8B)**

| Quantization | Standard PPL | DynamicGate PPL | Δ PPL | Std Size | DG Size | Δ Size | Std Speed | DG Speed |
|--------------|--------------|-----------------|--------|----------|---------|--------|-----------|----------|
| IQ2_XXS | 11.30 | 9.84 | -12.9% | 2.5G | 2.6G | +0.1G | 234s | 246s |
| IQ2_XS | 11.72 | 11.63 | -0.8% | 2.7G | 2.8G | +0.1G | 242s | 246s |
| IQ2_S | 14.31 | 9.02 | -36.9% | 2.7G | 2.9G | +0.2G | 238s | 244s |
| IQ1_M | 27.46 | 15.41 | -43.9% | 2.2G | 2.5G | +0.3G | 206s | 212s |
| IQ1_S | 53.07 | 32.00 | -39.7% | 2.1G | 2.4G | +0.3G | 184s | 209s |

**Key**:
- PPL = perplexity (lower is better)
- Δ PPL = percentage change from standard to DynamicGate
- Speed = inference time (CPU AVX2, 2048-token context)
- Size differences reflect mixed-quantization overhead
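
For reference, the perplexity numbers above use the standard token-level definition over the \\(N\\)-token evaluation set (assuming the usual llama.cpp-style evaluation):

$$\mathrm{PPL} = \exp\left(-\frac{1}{N}\sum_{i=1}^{N}\log p\left(x_i \mid x_{<i}\right)\right)$$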

**Key Improvements:**
- 🔥 **IQ1_M** shows a massive 43.9% perplexity reduction (27.46 → 15.41)
- 🚀 **IQ2_S** cuts perplexity by 36.9% while adding only 0.2GB
- ⚡ **IQ1_S** maintains 39.7% better accuracy despite 1-bit quantization

**Tradeoffs:**
- All variants have modest size increases (0.1-0.3GB)
- Inference speeds remain comparable (<5% difference)

### **When to Use These Models**
✔ **Fitting models into GPU VRAM**
✔ **Memory-constrained deployments**
✔ **CPU and edge devices** where 1-2 bit errors can be tolerated
✔ **Research** into ultra-low-bit quantization

## **Choosing the Right Model Format**

Selecting the correct model format depends on your **hardware capabilities** and **memory constraints**.

### **BF16 (Brain Float 16) – Use if BF16 acceleration is available**
- A 16-bit floating-point format designed for **faster computation** while retaining good precision.
- Provides a **similar dynamic range** to FP32 but with **lower memory usage**.
- Recommended if your hardware supports **BF16 acceleration** (check your device's specs).
- Ideal for **high-performance inference** with a **reduced memory footprint** compared to FP32.

📌 **Use BF16 if:**
✔ Your hardware has native **BF16 support** (e.g., newer GPUs, TPUs).
✔ You want **higher precision** while saving memory.
✔ You plan to **requantize** the model into another format.

📌 **Avoid BF16 if:**
❌ Your hardware does **not** support BF16 (it may fall back to FP32 and run slower).
❌ You need compatibility with older devices that lack BF16 optimization.
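
As a quick sanity check for native BF16 support on an NVIDIA GPU, here is a minimal sketch (assuming PyTorch is installed; other accelerators need their own checks):

```python
import torch

# Reports whether the active CUDA device can execute BF16 kernels natively.
if torch.cuda.is_available():
    print("BF16 supported:", torch.cuda.is_bf16_supported())
else:
    print("No CUDA device found; BF16 acceleration is unlikely here.")
```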

---

### **F16 (Float 16) – More widely supported than BF16**
- A 16-bit floating-point format with **high precision** but a smaller range of values than BF16.
- Works on most devices with **FP16 acceleration support** (including many GPUs and some CPUs).
- Slightly lower numerical precision than BF16, but generally sufficient for inference.

📌 **Use F16 if:**
✔ Your hardware supports **FP16** but **not BF16**.
✔ You need a **balance between speed, memory usage, and accuracy**.
✔ You are running on a **GPU** or another device optimized for FP16 computation.

📌 **Avoid F16 if:**
❌ Your device lacks **native FP16 support** (it may run slower than expected).
❌ You have memory limitations.

---

### **Quantized Models (Q4_K, Q6_K, Q8, etc.) – For CPU & Low-VRAM Inference**
Quantization reduces model size and memory usage while maintaining as much accuracy as possible.
- **Lower-bit models (Q4_K)** → **Best for minimal memory usage**; may have lower precision.
- **Higher-bit models (Q6_K, Q8_0)** → **Better accuracy**; require more memory.

📌 **Use quantized models if:**
✔ You are running inference on a **CPU** and need an optimized model.
✔ Your device has **low VRAM** and cannot load full-precision models.
✔ You want to reduce the **memory footprint** while keeping reasonable accuracy.

📌 **Avoid quantized models if:**
❌ You need **maximum accuracy** (full-precision models are better for this).
❌ Your hardware has enough VRAM for higher-precision formats (BF16/F16).

---

### **Very Low-Bit Quantization (IQ3_XS, IQ3_S, IQ3_M, Q4_K, Q4_0)**
These models are optimized for **extreme memory efficiency**, making them ideal for **low-power devices** or **large-scale deployments** where memory is a critical constraint.

- **IQ3_XS**: Ultra-low-bit quantization (3-bit) with **extreme memory efficiency**.
  - **Use case**: Best for **ultra-low-memory devices** where even Q4_K is too large.
  - **Trade-off**: Lower accuracy compared to higher-bit quantizations.

- **IQ3_S**: Small block size for **maximum memory efficiency**.
  - **Use case**: Best for **low-memory devices** where **IQ3_XS** is too aggressive.

- **IQ3_M**: Medium block size for better accuracy than **IQ3_S**.
  - **Use case**: Suitable for **low-memory devices** where **IQ3_S** is too limiting.

- **Q4_K**: 4-bit quantization with **block-wise optimization** for better accuracy.
  - **Use case**: Best for **low-memory devices** where **Q6_K** is too large.

- **Q4_0**: Pure 4-bit quantization, optimized for **ARM devices**.
  - **Use case**: Best for **ARM-based devices** or **low-memory environments**.

---

### **Summary Table: Model Format Selection**

| Model Format | Precision | Memory Usage | Device Requirements | Best Use Case |
|--------------|-----------|--------------|---------------------|---------------|
| **BF16** | Highest | High | BF16-supported GPUs/CPUs | High-speed inference with reduced memory |
| **F16** | High | High | FP16-supported devices | GPU inference when BF16 isn't available |
| **Q4_K** | Medium-low | Low | CPU or low-VRAM devices | Best for memory-constrained environments |
| **Q6_K** | Medium | Moderate | CPU with more memory | Better accuracy while still being quantized |
| **Q8_0** | High | Moderate | CPU or GPU with enough VRAM | Best accuracy among quantized models |
| **IQ3_XS** | Very low | Very low | Ultra-low-memory devices | Extreme memory efficiency, lower accuracy |
| **Q4_0** | Low | Low | ARM or low-memory devices | llama.cpp can optimize for ARM devices |

---
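
As a quick way to try any of the quantized files listed below, here is a minimal sketch using llama-cpp-python (an assumption: the package is installed and the file has already been downloaded locally; substitute whichever quantization you chose):

```python
# Minimal sketch, assuming `pip install llama-cpp-python` and that
# sychonix-q4_k_m.gguf is present in the working directory.
from llama_cpp import Llama

llm = Llama(model_path="sychonix-q4_k_m.gguf", n_ctx=2048)
out = llm("Hello, my name is", max_tokens=32)
print(out["choices"][0]["text"])
```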

## **Included Files & Details**

### `sychonix-bf16.gguf`
- Model weights preserved in **BF16**.
- Use this if you want to **requantize** the model into a different format.
- Best if your device supports **BF16 acceleration**.

### `sychonix-f16.gguf`
- Model weights stored in **F16**.
- Use if your device supports **FP16**, especially if BF16 is not available.

### `sychonix-bf16-q8_0.gguf`
- **Output & embeddings** remain in **BF16**.
- All other layers quantized to **Q8_0**.
- Use if your device supports **BF16** and you want a quantized version.

### `sychonix-f16-q8_0.gguf`
- **Output & embeddings** remain in **F16**.
- All other layers quantized to **Q8_0**.

### `sychonix-q4_k.gguf`
- **Output & embeddings** quantized to **Q8_0**.
- All other layers quantized to **Q4_K**.
- Good for **CPU inference** with limited memory.

### `sychonix-q4_k_s.gguf`
- Smallest **Q4_K** variant, using less memory at the cost of accuracy.
- Best for **very low-memory setups**.

### `sychonix-q6_k.gguf`
- **Output & embeddings** quantized to **Q8_0**.
- All other layers quantized to **Q6_K**.

### `sychonix-q8_0.gguf`
- Fully **Q8_0**-quantized model for better accuracy.
- Requires **more memory** but offers higher precision.

### `sychonix-iq3_xs.gguf`
- **IQ3_XS** quantization, optimized for **extreme memory efficiency**.
- Best for **ultra-low-memory devices**.

### `sychonix-iq3_m.gguf`
- **IQ3_M** quantization, offering a **medium block size** for better accuracy.
- Suitable for **low-memory devices**.

### `sychonix-q4_0.gguf`
- Pure **Q4_0** quantization, optimized for **ARM devices**.
- Best for **low-memory environments**.
- Prefer IQ4_NL for better accuracy.
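
To fetch one of these files programmatically, a minimal sketch with `huggingface_hub` (the `repo_id` below is a placeholder, not the real id; substitute the `<user>/<repo>` shown on this model page):

```python
# Sketch assuming `pip install huggingface_hub`; repo_id is a placeholder.
from huggingface_hub import hf_hub_download

path = hf_hub_download(repo_id="<user>/sychonix",
                       filename="sychonix-q4_k_m.gguf")
print("Downloaded to:", path)
```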

# <span id="testllm" style="color: #7F7FFF;">🚀 If you find these models useful</span>
❤ **Please click "Like" if you find this useful!**
Help me test my **AI-Powered Network Monitor Assistant** with **quantum-ready security checks**:
👉 [Quantum Network Monitor](https://readyforquantum.com/dashboard)

💬 **How to test**:
1. Click the **chat icon** (bottom right on any page)
2. Choose an **AI assistant type**:
   - `TurboLLM` (GPT-4-mini)
   - `FreeLLM` (Open-source)
   - `TestLLM` (Experimental CPU-only)

### **What I'm Testing**
I'm pushing the limits of **small open-source models for AI network monitoring**, specifically:
- **Function calling** against live network services
- **How small can a model go** while still handling:
  - Automated **Nmap scans**
  - **Quantum-readiness checks**
  - **Metasploit integration**

🟡 **TestLLM** – Current experimental model (llama.cpp on 6 CPU threads):
- ✅ **Zero-configuration setup**
- ⏳ 30s load time (slow inference but **no API costs**)
- 🔧 **Help wanted!** If you're into **edge-device AI**, let's collaborate!

### **Other Assistants**
🟢 **TurboLLM** – Uses **gpt-4-mini** for:
- **Real-time network diagnostics**
- **Automated penetration testing** (Nmap/Metasploit)
- 🔑 Get more tokens by [downloading our Quantum Network Monitor Agent](https://readyforquantum.com/download/?utm_source=huggingface&utm_medium=referral&utm_campaign=huggingface_repo_readme)

🔵 **HugLLM** – Open-source models (≈8B params):
- **2x more tokens** than TurboLLM
- **AI-powered log analysis**
- 🌐 Runs on the Hugging Face Inference API

### 💡 **Example AI Commands to Test**:
1. `"Give me info on my website's SSL certificate"`
2. `"Check if my server is using quantum-safe encryption for communication"`
3. `"Run a quick Nmap vulnerability test"`
4. `"Create a cmd processor to .. (whatever you want)"` – note that you need to install a Quantum Network Monitor Agent to run the .NET code. This is a very flexible and powerful feature; use with caution!

### Final word
I fund the servers used to create the model files, run the Quantum Network Monitor service, and pay for inference from Novita and OpenAI, all from my own pocket. All of the code for creating the models and the work I have done with Quantum Network Monitor is [open source](https://github.com/Mungert69). Feel free to use what you find useful. Please support my work and consider [buying me a coffee](https://www.buymeacoffee.com/mahadeva).
This will help me pay for the services and increase the token limits for everyone.

Thank you :)


# BERT base model (uncased)

Pretrained model on the English language using a masked language modeling (MLM) objective. It was introduced in [this paper](https://arxiv.org/abs/1810.04805) and first released in [this repository](https://github.com/google-research/bert). This model is uncased: it does not make a difference between english and English.

Disclaimer: The team releasing BERT did not write a model card for this model, so this model card has been written by the Hugging Face team.

## Model description

BERT is a transformers model pretrained on a large corpus of English data in a self-supervised fashion. This means it was pretrained on the raw texts only, with no humans labeling them in any way (which is why it can use lots of publicly available data), with an automatic process to generate inputs and labels from those texts. More precisely, it was pretrained with two objectives:

- Masked language modeling (MLM): taking a sentence, the model randomly masks 15% of the words in the input, then runs the entire masked sentence through the model and has to predict the masked words. This is different from traditional recurrent neural networks (RNNs), which usually see the words one after the other, and from autoregressive models like GPT, which internally mask the future tokens. It allows the model to learn a bidirectional representation of the sentence.
- Next sentence prediction (NSP): the model concatenates two masked sentences as inputs during pretraining. Sometimes they correspond to sentences that were next to each other in the original text, sometimes not. The model then has to predict whether the two sentences were following each other or not.
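
As an illustration of how NSP pairs can be built, here is a sketch (an illustration only; the original data pipeline lives in the google-research/bert repo and differs in detail):

```python
import random

# Illustrative sketch of NSP example construction, not the original code.
# `docs` is a list of documents, each a list of "sentences" (token spans).
def make_nsp_pair(docs):
    doc = random.choice([d for d in docs if len(d) >= 2])
    i = random.randrange(len(doc) - 1)
    if random.random() < 0.5:
        return doc[i], doc[i + 1], True         # 50%: the actual next sentence
    other = random.choice(docs)
    return doc[i], random.choice(other), False  # 50%: a random sentence
```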

This way, the model learns an inner representation of the English language that can then be used to extract features useful for downstream tasks: if you have a dataset of labeled sentences, for instance, you can train a standard classifier using the features produced by the BERT model as inputs.

## Model variations

BERT was originally released in base and large variations, for cased and uncased input text. The uncased models also strip out accent markers. Chinese and multilingual uncased and cased versions followed shortly after. Modified preprocessing with whole-word masking replaced subpiece masking in a follow-up work, with the release of two models. 24 other, smaller models were released afterward.

The detailed release history can be found on the [google-research/bert readme](https://github.com/google-research/bert/blob/master/README.md) on GitHub.

| Model | #params | Language |
|-------|---------|----------|
| [`bert-base-uncased`](https://huggingface.co/bert-base-uncased) | 110M | English |
| [`bert-large-uncased`](https://huggingface.co/bert-large-uncased) | 340M | English |
| [`bert-base-cased`](https://huggingface.co/bert-base-cased) | 110M | English |
| [`bert-large-cased`](https://huggingface.co/bert-large-cased) | 340M | English |
| [`bert-base-chinese`](https://huggingface.co/bert-base-chinese) | 110M | Chinese |
| [`bert-base-multilingual-cased`](https://huggingface.co/bert-base-multilingual-cased) | 110M | Multiple |
| [`bert-large-uncased-whole-word-masking`](https://huggingface.co/bert-large-uncased-whole-word-masking) | 340M | English |
| [`bert-large-cased-whole-word-masking`](https://huggingface.co/bert-large-cased-whole-word-masking) | 340M | English |

## Intended uses & limitations

You can use the raw model for either masked language modeling or next sentence prediction, but it's mostly intended to be fine-tuned on a downstream task. See the [model hub](https://huggingface.co/models?filter=bert) to look for fine-tuned versions of a task that interests you.

Note that this model is primarily aimed at being fine-tuned on tasks that use the whole sentence (potentially masked) to make decisions, such as sequence classification, token classification, or question answering. For tasks such as text generation you should look at models like GPT-2.

### How to use

You can use this model directly with a pipeline for masked language modeling:

```python
>>> from transformers import pipeline
>>> unmasker = pipeline('fill-mask', model='bert-base-uncased')
>>> unmasker("Hello I'm a [MASK] model.")

[{'sequence': "[CLS] hello i'm a fashion model. [SEP]",
  'score': 0.1073106899857521,
  'token': 4827,
  'token_str': 'fashion'},
 {'sequence': "[CLS] hello i'm a role model. [SEP]",
  'score': 0.08774490654468536,
  'token': 2535,
  'token_str': 'role'},
 {'sequence': "[CLS] hello i'm a new model. [SEP]",
  'score': 0.05338378623127937,
  'token': 2047,
  'token_str': 'new'},
 {'sequence': "[CLS] hello i'm a super model. [SEP]",
  'score': 0.04667217284440994,
  'token': 3565,
  'token_str': 'super'},
 {'sequence': "[CLS] hello i'm a fine model. [SEP]",
  'score': 0.027095865458250046,
  'token': 2986,
  'token_str': 'fine'}]
```

Here is how to use this model to get the features of a given text in PyTorch:

```python
from transformers import BertTokenizer, BertModel
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained("bert-base-uncased")
text = "Replace me by any text you'd like."
encoded_input = tokenizer(text, return_tensors='pt')
output = model(**encoded_input)
```

and in TensorFlow:

```python
from transformers import BertTokenizer, TFBertModel
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = TFBertModel.from_pretrained("bert-base-uncased")
text = "Replace me by any text you'd like."
encoded_input = tokenizer(text, return_tensors='tf')
output = model(encoded_input)
```

### Limitations and bias

Even if the training data used for this model could be characterized as fairly neutral, this model can have biased predictions:

```python
>>> from transformers import pipeline
>>> unmasker = pipeline('fill-mask', model='bert-base-uncased')
>>> unmasker("The man worked as a [MASK].")

[{'sequence': '[CLS] the man worked as a carpenter. [SEP]',
  'score': 0.09747550636529922,
  'token': 10533,
  'token_str': 'carpenter'},
 {'sequence': '[CLS] the man worked as a waiter. [SEP]',
  'score': 0.0523831807076931,
  'token': 15610,
  'token_str': 'waiter'},
 {'sequence': '[CLS] the man worked as a barber. [SEP]',
  'score': 0.04962705448269844,
  'token': 13362,
  'token_str': 'barber'},
 {'sequence': '[CLS] the man worked as a mechanic. [SEP]',
  'score': 0.03788609802722931,
  'token': 15893,
  'token_str': 'mechanic'},
 {'sequence': '[CLS] the man worked as a salesman. [SEP]',
  'score': 0.037680890411138535,
  'token': 18968,
  'token_str': 'salesman'}]

>>> unmasker("The woman worked as a [MASK].")

[{'sequence': '[CLS] the woman worked as a nurse. [SEP]',
  'score': 0.21981462836265564,
  'token': 6821,
  'token_str': 'nurse'},
 {'sequence': '[CLS] the woman worked as a waitress. [SEP]',
  'score': 0.1597415804862976,
  'token': 13877,
  'token_str': 'waitress'},
 {'sequence': '[CLS] the woman worked as a maid. [SEP]',
  'score': 0.1154729500412941,
  'token': 10850,
  'token_str': 'maid'},
 {'sequence': '[CLS] the woman worked as a prostitute. [SEP]',
  'score': 0.037968918681144714,
  'token': 19215,
  'token_str': 'prostitute'},
 {'sequence': '[CLS] the woman worked as a cook. [SEP]',
  'score': 0.03042375110089779,
  'token': 5660,
  'token_str': 'cook'}]
```

This bias will also affect all fine-tuned versions of this model.

## Training data

The BERT model was pretrained on [BookCorpus](https://yknzhu.wixsite.com/mbweb), a dataset consisting of 11,038 unpublished books, and [English Wikipedia](https://en.wikipedia.org/wiki/English_Wikipedia) (excluding lists, tables, and headers).

## Training procedure

### Preprocessing

The texts are lowercased and tokenized using WordPiece with a vocabulary size of 30,000. The inputs of the model are then of the form:

```
[CLS] Sentence A [SEP] Sentence B [SEP]
```

With probability 0.5, sentence A and sentence B correspond to two consecutive sentences in the original corpus; in the other cases, sentence B is another random sentence from the corpus. Note that what is considered a sentence here is a consecutive span of text usually longer than a single sentence. The only constraint is that the result with the two "sentences" has a combined length of less than 512 tokens.

The details of the masking procedure for each sentence are the following:
- 15% of the tokens are masked.
- In 80% of the cases, the masked tokens are replaced by `[MASK]`.
- In 10% of the cases, the masked tokens are replaced by a random token (different from the one they replace).
- In the remaining 10% of the cases, the masked tokens are left as is.
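
To make the 80/10/10 rule concrete, here is a minimal sketch (an illustration only, not the original implementation; `mask_id` and `vocab_size` are stand-ins for the real WordPiece tokenizer values):

```python
import random

# Illustrative sketch of BERT's 80/10/10 masking rule.
def mask_tokens(tokens, mask_id=103, vocab_size=30000, mlm_prob=0.15):
    labels = [-100] * len(tokens)              # -100 = ignored by the MLM loss
    for i, tok in enumerate(tokens):
        if random.random() < mlm_prob:         # 15% of tokens are selected
            labels[i] = tok                    # the model must predict this id
            r = random.random()
            if r < 0.8:
                tokens[i] = mask_id            # 80%: replace with [MASK]
            elif r < 0.9:
                tokens[i] = random.randrange(vocab_size)  # 10%: random token
            # remaining 10%: the token is left as is
    return tokens, labels
```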

### Pretraining

The model was trained on 4 cloud TPUs in Pod configuration (16 TPU chips total) for one million steps with a batch size of 256. The sequence length was limited to 128 tokens for 90% of the steps and 512 for the remaining 10%. The optimizer used is Adam with a learning rate of 1e-4, \\(\beta_{1} = 0.9\\) and \\(\beta_{2} = 0.999\\), a weight decay of 0.01, learning rate warmup for 10,000 steps, and linear decay of the learning rate after.
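
The stated schedule corresponds to a simple piecewise-linear function of the step count; a sketch (names are illustrative):

```python
# Linear warmup to the peak LR over 10,000 steps, then linear decay to zero.
def lr_at(step, peak=1e-4, warmup=10_000, total=1_000_000):
    if step < warmup:
        return peak * step / warmup
    return peak * (total - step) / (total - warmup)
```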

## Evaluation results

When fine-tuned on downstream tasks, this model achieves the following results:

GLUE test results:

| Task | MNLI-(m/mm) | QQP | QNLI | SST-2 | CoLA | STS-B | MRPC | RTE | Average |
|:----:|:-----------:|:----:|:----:|:-----:|:----:|:-----:|:----:|:----:|:-------:|
|      | 84.6/83.4   | 71.2 | 90.5 | 93.5  | 52.1 | 85.8  | 88.9 | 66.4 | 79.6    |

### BibTeX entry and citation info

```bibtex
@article{DBLP:journals/corr/abs-1810-04805,
  author    = {Jacob Devlin and
               Ming{-}Wei Chang and
               Kenton Lee and
               Kristina Toutanova},
  title     = {{BERT:} Pre-training of Deep Bidirectional Transformers for Language
               Understanding},
  journal   = {CoRR},
  volume    = {abs/1810.04805},
  year      = {2018},
  url       = {http://arxiv.org/abs/1810.04805},
  archivePrefix = {arXiv},
  eprint    = {1810.04805},
  timestamp = {Tue, 30 Oct 2018 20:39:56 +0100},
  biburl    = {https://dblp.org/rec/journals/corr/abs-1810-04805.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}
```

<a href="https://huggingface.co/exbert/?model=bert-base-uncased">
	<img width="300px" src="https://cdn-media.huggingface.co/exbert/button.png">
</a>
sychonix-bf16-q4_k.gguf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1635cd654614cf9e4192efe57b4cccf72567a089767496cd741fe9403e551578
size 101923264

sychonix-bf16-q6_k.gguf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d5bdb2f3ed99badb36a75042697e6903f91ddd109d36a08c2ed6421018190862
size 119258560

sychonix-bf16-q8_0.gguf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:300e1517bff7f44c4dafba5bc11cefa96291650413b73269fe2474ecce039e20
size 139828384

sychonix-bf16.gguf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:13755426cf88f1bd40c8bc04812ed1ebdcdff313dd62c434166e298e8a28e901
size 219454624

sychonix-f16-q4_k.gguf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a2940f67ffe52697ebcfa562710206cca1cac92a470db84dd1937a583d68c141
size 101923264

sychonix-f16-q6_k.gguf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:42082028a6201b236bcf4134fe75ecda779f901f4bc3ddcdfde5699d2dafaa4e
size 119258560

sychonix-f16-q8_0.gguf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2b3ab67948402281562030883e8849af39ddba58f696369099b052ec5872d368
size 139828384

sychonix-iq1_m.gguf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e17cd3fce8136808f6d0d88223d813f461d6d733ab5580b8d224b31b185826a6
size 41988448

sychonix-iq1_s.gguf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:59f158ef9d90f7f9d9d01302cf81e26d56a711dff34a2621e88e6e82a9fef486
size 40993120

sychonix-iq2_m.gguf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5180eec6c22588c02628381ac34b7d0c2e84a39f763dfa34bd99740326a779a1
size 52107616

sychonix-iq2_s.gguf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:215ff4c9c60b7fe6cd99dd44c2dd5049eb2a37bdb6a45f9aa21489ece2f9f4df
size 50780512

sychonix-iq2_xs.gguf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c12d65bd3f0d50f72c0b041404151296f1f97a0bff30b353d164fcfdfaf8a014
size 45195616

sychonix-iq2_xxs.gguf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d40ea386cc2ea50d2f41aa42cf58c84f37cedbe9023db1cda545585e434c7d5a
size 43647328

sychonix-iq3_m.gguf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9e8bccb4bf370ad88616d40a4064a5fa06280d9eb2c50e37520026956fd531c5
size 57508192

sychonix-iq3_s.gguf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:80c7756020b438dbec87a8fe1d8a4ab57da379b56400b69525704a1b178aa31b
size 55314784

sychonix-iq3_xs.gguf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ff9d5ee4c99bcddb988e1daafdeeb6192f460543e4107073e1db703c409c5c0a
size 53655904

sychonix-iq3_xxs.gguf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:933ec62fc72a3b977890b9e6cfa09732288402b6f8e8c0e033526a31148b1ed3
size 50780512

sychonix-iq4_nl.gguf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ac687c91552818749c8c7918756c8b522e738952c7bf2b46dd3118a97c2a75d9
size 69708416

sychonix-iq4_xs.gguf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:22c8b1dfb891fb2c95c041bc192b44710426865295946bfb29bed0030232a347
size 67054208

sychonix-q2_k_s.gguf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a18c35463c16dde7629296cc049449d1d556d61f30021a43491fb9155ebccc5c
size 47241568

sychonix-q3_k_m.gguf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:64f669e46ab1e8fb2acb55a3ddcd2864bfe7c60eb1fc4c284db0fd0dfc085177
size 61102432

sychonix-q3_k_s.gguf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:20b1830e14905e187a5b6f72ce1c44016854b1ee7fb65d2c02a8d01530f437c6
size 55314784

sychonix-q4_0.gguf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1b4f3fff8fd0e9fc9656e55622bb97a915752f42be2183880389f69fd773b199
size 63665056

sychonix-q4_1.gguf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bf7385b5892c58cbb1bc007b60da8a1b5debd126fcdf0343de3e13a976a52760
size 70438528

sychonix-q4_k_m.gguf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:89068554bc74668bbc3764d894cce2f405fb1f39e398bb9032db1b970011d1c8
size 74270336

sychonix-q4_k_s.gguf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a17265b59f238f2077bcfb6db5502dfbdaa843521950805915db3bc0e78d79fa
size 70298240

sychonix-q5_0.gguf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b3878952fa85f26eb954ea3cb697a7b9a91d65e6f9f8927437f6565dcce9d9d5
size 77212000

sychonix-q5_1.gguf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d96443ea62447ef912415b46bc9c935964899c37c55fa05566cc7181fe534f6a
size 83985472

sychonix-q5_k_m.gguf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:50378bf05f0afd40473d58b80996303bae763251dbd5835f56b6e00f45f65e3a
size 82675328

sychonix-q5_k_s.gguf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9d11950f5f2833c80742f9f462b07d98d01b3363298d2bcca9a7227c70f09690
size 80325248

sychonix-q6_k_m.gguf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c9a51d8c2615f0f31673f18350c9ea00f8f86a582ea0f05bee718aa86c289d13
size 91605632

sychonix-q8_0.gguf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f0a52c3411e37d48789cff9bd3ecc19c31002a4595c999a5224e3d1d16b4ea44
size 117852544

sychonix-tq1_0.gguf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f7a1bc630ba317918f15dd5e1f5ccc94d6a93b4f45ee26f39abe4ee7625c25e3
size 39848576

sychonix-tq2_0.gguf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3ea9b8c50d9bc3cdb17e161d3a9927c0968b5c706094944f9acb3e2415c73d4d
size 43829888

sychonix.imatrix
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:11750b263d80baa1f8541fabf6439891224f2b123964a4653501a85e4c9ca4ae
size 334170