Krish-05 committed
Commit b33c5f3 · verified · 1 Parent(s): 36e0da0

Update start.sh

Files changed (1)
  1. start.sh +41 -10
start.sh CHANGED
@@ -5,31 +5,62 @@ export OMP_NUM_THREADS=4
  export MKL_NUM_THREADS=4
  export CUDA_VISIBLE_DEVICES=0

+ echo "--- Starting Ollama, FastAPI, and Streamlit ---"
+
  # Start Ollama in the background
+ echo "Starting Ollama server..."
  ollama serve &

  # Pull the model if not already present
- # --- CHANGE THIS LINE ---
- if ! ollama list | grep -q "krishna_choudhary/AI_Assistant_Chatbot"; then
-     ollama pull krishna_choudhary/AI_Assistant_Chatbot
+ MODEL_TO_PULL="krishna_choudhary/AI_Assistant_Chatbot"
+ if ! ollama list | grep -q "$MODEL_TO_PULL"; then
+     echo "Pulling Ollama model: $MODEL_TO_PULL"
+     ollama pull "$MODEL_TO_PULL"
+ else
+     echo "Ollama model $MODEL_TO_PULL already present."
  fi

  # Wait for Ollama to start up
- max_attempts=30
+ max_attempts=60 # Increased attempts as model pulling can take time
  attempt=0
+ echo "Waiting for Ollama to start (max $max_attempts seconds)..."
  while ! curl -s http://localhost:11434/api/tags >/dev/null; do
      sleep 1
      attempt=$((attempt + 1))
      if [ $attempt -eq $max_attempts ]; then
-         echo "Ollama failed to start within 30 seconds. Exiting."
+         echo "Ollama failed to start within $((max_attempts)) seconds. Exiting."
          exit 1
      fi
  done
-
  echo "Ollama is ready."

- # Print the API URL
- echo "API is running on: http://0.0.0.0:7860"
+ # --- DEBUGGING: List files in current directory ---
+ echo "--- Files in current directory ($PWD): ---"
+ ls -l
+ echo "-------------------------------------------"
+
+ # Start the FastAPI server in the background
+ echo "Starting FastAPI server..."
+ # Ensure 'main:app' correctly points to your main.py file and the 'app' object within it.
+ uvicorn main:app --host 0.0.0.0 --port 7860 --workers 1 --limit-concurrency 20 &
+
+ # Store the PID of the FastAPI server
+ FASTAPI_PID=$!
+ echo "FastAPI server started with PID: $FASTAPI_PID"
+
+ # Wait a moment for FastAPI to start (optional, but good practice)
+ sleep 5
+
+ # Start the Streamlit server in the background
+ echo "Starting Streamlit app..."
+ streamlit run streamlit_app.py --server.port 8501 --server.address 0.0.0.0 &
+
+ # Store the PID of the Streamlit server
+ STREAMLIT_PID=$!
+ echo "Streamlit app started with PID: $STREAMLIT_PID"
+
+ echo "All services initiated. Keeping container alive..."

- # Start the FastAPI server
- uvicorn app:app --host 0.0.0.0 --port 7860 --workers 4 --limit-concurrency 20
+ # Keep the script running indefinitely, so the Docker container doesn't exit.
+ # This is more robust than `wait -n` if one background process exits unexpectedly.
+ tail -f /dev/null
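
Note: the readiness poll added in this commit could be factored into a reusable function. A minimal sketch, assuming the same endpoint the script already polls (http://localhost:11434/api/tags); the helper name wait_for_ollama is hypothetical, not part of the commit:

wait_for_ollama() {
    # Poll the Ollama tags endpoint once per second, up to max_attempts times.
    local max_attempts="${1:-60}"
    local attempt=0
    while ! curl -s http://localhost:11434/api/tags >/dev/null; do
        sleep 1
        attempt=$((attempt + 1))
        if [ "$attempt" -ge "$max_attempts" ]; then
            echo "Ollama failed to start within $max_attempts seconds." >&2
            return 1
        fi
    done
}

wait_for_ollama 60 || exit 1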
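Note: the closing comment prefers `tail -f /dev/null` over `wait -n`. For reference, a `wait -n` variant (bash 4.3+) would instead exit the container as soon as either service dies, letting the runtime restart it. A sketch reusing the FASTAPI_PID and STREAMLIT_PID variables the script already stores; this is an alternative, not what the commit does:

# Block until the FIRST background job exits, then tear everything down.
wait -n
status=$?
echo "A service exited with status $status; stopping the others." >&2
kill "$FASTAPI_PID" "$STREAMLIT_PID" 2>/dev/null
exit "$status"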
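Note: with `tail -f /dev/null` in the foreground, a SIGTERM from the container runtime is not forwarded to the child services. A hedged sketch of trap-based graceful shutdown, again reusing the stored PIDs and assuming the script runs as the container's entrypoint:

cleanup() {
    echo "Shutting down services..." >&2
    kill "$FASTAPI_PID" "$STREAMLIT_PID" 2>/dev/null
    wait
}
trap cleanup TERM INT
# A bare `wait` instead of `tail -f /dev/null` keeps the shell interruptible,
# so the trap fires as soon as the runtime sends SIGTERM.
wait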