Update README.md
Browse files
README.md
CHANGED

```diff
@@ -46,4 +46,10 @@ llama_cpp_WebUI
 
 1. Llama-3.2-1B-Instruct-Q4_K_M.gguf
 2. Llama-3.2-1B-Instruct-Q8_0.gguf
-3. mmproj-ultravox-v0_5-llama-3_2-1b-f16.gguf
+3. mmproj-ultravox-v0_5-llama-3_2-1b-f16.gguf
+
+
+
+## run.bat
+
+llama-server --n-gpu-layers 15 --ctx-size 8192 -m models/ollma/Llama-3.2-1B-Instruct-Q8_0.gguf --mmproj models/ollma/mmproj-ultravox-v0_5-llama-3_2-1b-f16.gguf --host 127.0.0.1 --port 8083
```