Upload ALL Run Models.bat
llama_cpp_WebUI FILE/ALL Run Models.bat
ADDED
@echo off
Title 🦙 Llama.cpp Local Server - GPU + Model Selector + Mobile Access

REM --- PATH SETTINGS ---
SET BASE_DIR=D:\Flie\llama.cpp
SET MODELS_DIR=%BASE_DIR%\models
SET SERVER_EXE=%BASE_DIR%\llama-server.exe

REM --- SERVER SETTINGS ---
SET HOST_IP=0.0.0.0
SET PORT=8080
SET GPU_LAYERS=3
SET CONTEXT_SIZE=114096
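REM GPU_LAYERS = transformer layers offloaded to the GPU; CONTEXT_SIZE = token window.
REM NOTE: 114096 tokens is unusually large; double-check it is not a typo (e.g. 4096).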

echo ============================================
echo 🦙 Llama.cpp Local Server - Model Selector
echo ============================================
echo.
echo Available Models in: %MODELS_DIR%
echo.

REM --- LIST ALL MODELS ---
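REM Delayed expansion lets !COUNT! and MODEL[!COUNT!] update inside the loop;
REM MODEL[n] acts as a numbered pseudo-array mapping menu entries to file names.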
SETLOCAL ENABLEDELAYEDEXPANSION
SET COUNT=0
for %%f in ("%MODELS_DIR%\*.gguf") do (
    SET /A COUNT+=1
    echo !COUNT!. %%~nxf
    SET "MODEL[!COUNT!]=%%~nxf"
)

echo.
echo --------------------------------------------
echo Type "NO" → Vision Model (Qwen3-VL-2B)
echo Type "Ga" → Gemma Model (gemma-3-12b)
echo Type "La" → Llama Model (Llama-3.2-1B)
echo --------------------------------------------
echo.
set /p choice=Enter model number or type NO/Ga/La:

REM --- IF USER TYPES NO ---
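REM --mmproj loads the multimodal projector alongside the base model so the
REM server can accept image input (required for vision-capable GGUF models).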
IF /I "%choice%"=="NO" (
    echo.
    echo 🧠 Starting Vision Model: Qwen3-VL-2B-Instruct-Q8_0
    echo --------------------------------------------
    start "" "%SERVER_EXE%" --n-gpu-layers %GPU_LAYERS% --ctx-size %CONTEXT_SIZE% -m "%MODELS_DIR%\Qwen3-VL-2B-Instruct-Q8_0.gguf" --mmproj "%MODELS_DIR%\mmproj-Qwen3-VL-2B-Instruct-Q8_0.gguf" --host %HOST_IP% --port %PORT%
    timeout /t 3 >nul
    goto :MOBILE_URL
)

REM --- IF USER TYPES GA ---
IF /I "%choice%"=="GA" (
    echo.
    echo 🧠 Starting Gemma Model: gemma-3-12b-it-Q4_K_S
    echo --------------------------------------------
    start "" "%SERVER_EXE%" --n-gpu-layers %GPU_LAYERS% --ctx-size %CONTEXT_SIZE% -m "%MODELS_DIR%\gamma\gemma-3-12b-it-Q4_K_S.gguf" --mmproj "%MODELS_DIR%\gamma\mmproj-model-f16-12B.gguf" --host %HOST_IP% --port %PORT%
    timeout /t 3 >nul
    goto :MOBILE_URL
)

REM --- IF USER TYPES LA ---
IF /I "%choice%"=="LA" (
    echo.
    echo 🧠 Starting Llama Model: Llama-3.2-1B-Instruct-Q8_0
    echo --------------------------------------------
    start "" "%SERVER_EXE%" --n-gpu-layers %GPU_LAYERS% --ctx-size %CONTEXT_SIZE% -m "%MODELS_DIR%\ollma\Llama-3.2-1B-Instruct-Q8_0.gguf" --mmproj "%MODELS_DIR%\ollma\mmproj-ultravox-v0_5-llama-3_2-1b-f16.gguf" --host %HOST_IP% --port %PORT%
    timeout /t 3 >nul
    goto :MOBILE_URL
)

REM --- NORMAL MODEL SELECTION PATH ---
IF "%choice%"=="" (
    echo No selection made. Exiting...
    pause
    exit /b
)

SET SELECTED_MODEL=!MODEL[%choice%]!

REM Guard against an out-of-range or non-numeric entry, which would leave
REM SELECTED_MODEL empty and point the server at a nonexistent file.
IF "%SELECTED_MODEL%"=="" (
    echo Invalid selection: "%choice%". Exiting...
    pause
    exit /b
)

SET "MODEL_PATH=%MODELS_DIR%\%SELECTED_MODEL%"

echo.
echo ✅ Selected model: %SELECTED_MODEL%
echo ---------------------------------------------

echo.
echo 🚀 Starting llama-server with %SELECTED_MODEL% ...
echo.

REM start "" passes an empty window title so the quoted exe path is not
REM mistaken for the title argument.
start "" "%SERVER_EXE%" --n-gpu-layers %GPU_LAYERS% --ctx-size %CONTEXT_SIZE% -m "%MODEL_PATH%" --host %HOST_IP% --port %PORT%
timeout /t 3 >nul

:MOBILE_URL
REM --- GET LOCAL IP FOR MOBILE ACCESS ---
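REM NOTE: findstr matches the English-locale "IPv4 Address" label from ipconfig;
REM on a non-English Windows install LOCAL_IP will come back empty.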
for /f "tokens=2 delims=:" %%a in ('ipconfig ^| findstr /c:"IPv4 Address"') do set LOCAL_IP=%%a
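REM If several adapters are active, the last IPv4 match wins.
REM Trim the leading space left over from splitting on ":".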
set LOCAL_IP=%LOCAL_IP: =%

echo.
echo 🌐 Open on this PC: http://127.0.0.1:%PORT%
echo 📱 Open on your mobile: http://%LOCAL_IP%:%PORT%
echo.
REM Assumes chrome is resolvable from PATH; use start "" http://127.0.0.1:%PORT%/
REM instead to open the default browser.
start "" chrome http://127.0.0.1:%PORT%/
pause