Krish-05 committed on
Commit
1cbe61b
·
verified ·
1 Parent(s): b33c5f3

Update start.sh

Browse files
Files changed (1) hide show
  1. start.sh +6 -25
start.sh CHANGED
@@ -5,7 +5,7 @@ export OMP_NUM_THREADS=4
5
  export MKL_NUM_THREADS=4
6
  export CUDA_VISIBLE_DEVICES=0
7
 
8
- echo "--- Starting Ollama, FastAPI, and Streamlit ---"
9
 
10
  # Start Ollama in the background
11
  echo "Starting Ollama server..."
@@ -21,7 +21,7 @@ else
21
  fi
22
 
23
  # Wait for Ollama to start up
24
- max_attempts=60 # Increased attempts as model pulling can take time
25
  attempt=0
26
  echo "Waiting for Ollama to start (max $max_attempts seconds)..."
27
  while ! curl -s http://localhost:11434/api/tags >/dev/null; do
@@ -39,28 +39,9 @@ echo "--- Files in current directory ($PWD): ---"
39
  ls -l
40
  echo "-------------------------------------------"
41
 
42
- # Start the FastAPI server in the background
43
  echo "Starting FastAPI server..."
44
- # Ensure 'main:app' correctly points to your main.py file and the 'app' object within it.
45
- uvicorn main:app --host 0.0.0.0 --port 7860 --workers 1 --limit-concurrency 20 &
46
 
47
- # Store the PID of the FastAPI server
48
- FASTAPI_PID=$!
49
- echo "FastAPI server started with PID: $FASTAPI_PID"
50
-
51
- # Wait a moment for FastAPI to start (optional, but good practice)
52
- sleep 5
53
-
54
- # Start the Streamlit server in the background
55
- echo "Starting Streamlit app..."
56
- streamlit run streamlit_app.py --server.port 8501 --server.address 0.0.0.0 &
57
-
58
- # Store the PID of the Streamlit server
59
- STREAMLIT_PID=$!
60
- echo "Streamlit app started with PID: $STREAMLIT_PID"
61
-
62
- echo "All services initiated. Keeping container alive..."
63
-
64
- # Keep the script running indefinitely, so the Docker container doesn't exit.
65
- # This is more robust than `wait -n` if one background process exits unexpectedly.
66
- tail -f /dev/null
 
5
  export MKL_NUM_THREADS=4
6
  export CUDA_VISIBLE_DEVICES=0
7
 
8
+ echo "--- Starting Ollama and FastAPI ---"
9
 
10
  # Start Ollama in the background
11
  echo "Starting Ollama server..."
 
21
  fi
22
 
23
  # Wait for Ollama to start up
24
+ max_attempts=90 # Increased attempts as model pulling can take time on first run
25
  attempt=0
26
  echo "Waiting for Ollama to start (max $max_attempts seconds)..."
27
  while ! curl -s http://localhost:11434/api/tags >/dev/null; do
 
39
  ls -l
40
  echo "-------------------------------------------"
41
 
42
+ # Start the FastAPI server
43
  echo "Starting FastAPI server..."
44
+ uvicorn main:app --host 0.0.0.0 --port 7860 --workers 1 --limit-concurrency 20
 
45
 
46
+ # The script will stay alive as long as uvicorn is running in the foreground.
47
+ # If uvicorn exits, the container will exit.