File size: 2,123 Bytes
7dfedd8
 
 
 
 
 
 
 
 
 
 
 
 
 
b114302
7dfedd8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
#!/bin/bash
# Container entrypoint: start Ollama, a FastAPI backend, and a Streamlit UI,
# then keep the container alive. Requires: ollama, curl, uvicorn, python/streamlit.
set -euo pipefail

# Environment tuning: cap CPU math-library threads, pin to GPU 0.
export OMP_NUM_THREADS=4
export MKL_NUM_THREADS=4
export CUDA_VISIBLE_DEVICES=0

echo "--- Starting Ollama, FastAPI, and Streamlit ---"

# Start Ollama in the background and remember its PID.
echo "Starting Ollama server..."
ollama serve &
OLLAMA_PID=$!
echo "Ollama server started with PID: $OLLAMA_PID"

# Wait for Ollama's HTTP API to answer BEFORE using the CLI against it.
# (Fix: the original ran `ollama list` / `ollama pull` immediately after
# `ollama serve &`, racing the server's startup.)
max_attempts=90 # generous: first boot may be slow
attempt=0
echo "Waiting for Ollama to start (max $max_attempts seconds)..."
while ! curl -s http://localhost:11434/api/tags >/dev/null; do
    sleep 1
    attempt=$((attempt + 1))
    if [ "$attempt" -ge "$max_attempts" ]; then
        echo "Ollama failed to start within $max_attempts seconds. Exiting." >&2
        exit 1
    fi
done
echo "Ollama is ready."

# Pull the model if not already present (server must be up for list/pull).
# -F: fixed-string match, since the model name contains '/' and '_'.
MODEL_TO_PULL="krishna_choudhary/lightweight_chatbot"
if ! ollama list | grep -qF -- "$MODEL_TO_PULL"; then
    echo "Pulling Ollama model: $MODEL_TO_PULL"
    ollama pull "$MODEL_TO_PULL"
else
    echo "Ollama model $MODEL_TO_PULL already present."
fi

# --- DEBUGGING: List files in current directory ---
echo "--- Files in current directory ($PWD): ---"
ls -l
echo "-------------------------------------------"

# Start the FastAPI server in the background and remember its PID.
echo "Starting FastAPI server..."
uvicorn main:app --host 0.0.0.0 --port 7860 --workers 1 --limit-concurrency 20 &
FASTAPI_PID=$!
echo "FastAPI server started with PID: $FASTAPI_PID"

# Give FastAPI a moment to bind its port before starting the UI.
sleep 5

# Start the Streamlit server in the background and remember its PID.
# 'python -m streamlit' is more robust than a bare 'streamlit' in Docker images.
echo "Starting Streamlit app..."
python -m streamlit run streamlit_app.py --server.port 8501 --server.address 0.0.0.0 &
STREAMLIT_PID=$!
echo "Streamlit app started with PID: $STREAMLIT_PID"

echo "All services initiated. Keeping container alive..."

# Keep the container alive indefinitely even if one background service exits
# unexpectedly (deliberate choice over `wait -n`, which would stop the
# container on the first service failure).
tail -f /dev/null