Update README.md
Browse files
README.md
CHANGED
@@ -52,4 +52,14 @@ llama_cpp_WebUI
## run.bat

llama-server --n-gpu-layers 15 --ctx-size 8192 -m models/ollma/Llama-3.2-1B-Instruct-Q8_0.gguf --mmproj models/ollma/mmproj-ultravox-v0_5-llama-3_2-1b-f16.gguf --host 127.0.0.1 --port 8083
## run.bat

Local Server

llama-server.exe --n-gpu-layers 2 --ctx-size 111192 -m ".\models\mistralai\mistralai_Voxtral-Mini-3B-2507-Q8_0.gguf" --mmproj ".\models\mistralai\mmproj-mistralai_Voxtral-Mini-3B-2507-bf16.gguf" --host 0.0.0.0 --port 8005

public URL

llama-server --n-gpu-layers 15 --ctx-size 8192 -m models/ollma/Llama-3.2-1B-Instruct-Q8_0.gguf --mmproj models/ollma/mmproj-ultravox-v0_5-llama-3_2-1b-f16.gguf --host 127.0.0.1 --port 8083