OrbitMC committed on
Commit
27f2cfd
·
verified ·
1 Parent(s): 7ae216c

Upload start (3).sh

Browse files
Files changed (1) hide show
  1. start (3).sh +17 -0
start (3).sh ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
# Container entrypoint: launch llama-server in the background, wait until its
# HTTP API responds, then run the Gradio front-end in the foreground (keeping
# the container alive).
set -euo pipefail

readonly MODEL_PATH="/models/model.q8_k_xl.gguf"
readonly PORT=8080

# Start llama-server in background
cd /llama.cpp/build
./bin/llama-server \
  --host 0.0.0.0 \
  --port "$PORT" \
  --model "$MODEL_PATH" \
  --ctx-size 32768 \
  --threads 2 &
server_pid=$!

# Best-effort: stop the background server when this script exits, so the
# container doesn't leave an orphaned llama-server if app.py fails.
cleanup() { kill "$server_pid" 2>/dev/null || true; }
trap cleanup EXIT

# Wait for server to initialize. Bail out if the server process died during
# startup — otherwise this loop would poll forever.
echo "Waiting for server to start..."
until curl -s "http://localhost:${PORT}/v1/models" >/dev/null; do
  if ! kill -0 "$server_pid" 2>/dev/null; then
    echo "llama-server exited before becoming ready" >&2
    exit 1
  fi
  sleep 1
done

echo "Server is ready. Starting Gradio app."

# Start Gradio UI
cd /
python3 app.py