Update README.md

README.md (changed)

```diff
@@ -37,7 +37,7 @@
 llama-quantize --allow-requantize --output-tensor-type bf16 --token-embedding-type bf16 phi-4.bf16.gguf phi-4.bf16.q6.gguf q6_k
 llama-quantize --imatrix imatrix.dat --leave-output-tensor phi-4.bf16.gguf phi-4.bf16.q6.im.gguf q6_k
 llama-quantize --allow-requantize --output-tensor-type bf16 --token-embedding-type bf16 phi-4.bf16.gguf phi-4.bf16.q8.gguf q8_0
-llama-quantize --allow-requantize --pure phi-4.bf16.gguf phi-4.bf16.q8p.gguf q8_0
+llama-quantize --allow-requantize --pure phi-4.bf16.gguf phi-4.bf16.q8p.gguf q8_0
 ```
```

(NOTE: the removed and added versions of line 40 are textually identical in this extraction — the original change was presumably whitespace-only; verify against the actual commit.)

---------------------------------------------