File size: 2,258 Bytes
0d8409d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
#!/bin/bash
# Entrypoint: start the Ollama server in the background, wait until its HTTP
# API answers, then exec the Streamlit app as the foreground process.
# Intended to run as 'appuser' inside the Docker image (see Dockerfile).

# Fail fast on errors, unset variables, and broken pipelines. Note that the
# curl probe below sits in a 'while !' condition, which 'set -e' ignores by
# design, so the retry loop still works.
set -euo pipefail

# Do NOT export HOME here. Let the system manage HOME for the 'appuser'
# (which will be /home/appuser due to the Dockerfile change).
# Ollama will try to write to /home/appuser/.ollama if it falls back on HOME.

# Define OLLAMA_HOME explicitly for the runtime environment in start.sh.
# This ensures Ollama uses /app/.ollama for its primary data, which is within
# the /app directory owned by 'appuser'.
export OLLAMA_HOME="/app/.ollama"

# Create the Ollama home directory if it doesn't exist.
# The Dockerfile's chown ensures 'appuser' owns /app, so 'appuser' can
# create this directory without permission issues.
mkdir -p "${OLLAMA_HOME}"

echo "Starting Ollama server..."
# Start Ollama server in the background, logging into OLLAMA_HOME.
# Capture its PID so we can detect an early crash instead of waiting out
# the full readiness timeout.
/usr/local/bin/ollama serve > "${OLLAMA_HOME}/ollama.log" 2>&1 &
ollama_pid=$!

# Give Ollama a moment to write logs before tailing, so `tail -f` has
# content to display immediately.
sleep 2

# Tail the Ollama log file in the background so its contents appear in the
# Space logs. This is crucial for debugging Ollama's internal startup process.
tail -f "${OLLAMA_HOME}/ollama.log" &

echo "Waiting for Ollama server to start..."
attempt_num=0
max_attempts=60 # 60 attempts * 1 second sleep = 60 second timeout
while ! curl -s http://localhost:11434/api/tags > /dev/null; do
    # Bail out immediately if the server process already died — no point
    # polling an endpoint that can never come up.
    if ! kill -0 "${ollama_pid}" 2>/dev/null; then
        echo "Error: Ollama server process exited unexpectedly." >&2
        echo "Ollama logs:" >&2
        cat "${OLLAMA_HOME}/ollama.log" >&2
        exit 1
    fi
    if [ "${attempt_num}" -ge "${max_attempts}" ]; then
        echo "Error: Ollama server failed to start after multiple attempts." >&2
        echo "Ollama logs:" >&2
        # Print the full Ollama log if it fails to start for final debugging.
        cat "${OLLAMA_HOME}/ollama.log" >&2
        exit 1 # Exit with an error to signal failure to Hugging Face Spaces.
    fi
    echo "Ollama server not yet ready, waiting... ($((attempt_num + 1))/${max_attempts})"
    sleep 1
    attempt_num=$((attempt_num + 1))
done

echo "Ollama server is ready!"
echo "Starting Streamlit app..."

# Run the Streamlit application.
# --server.port and --server.address are essential for Docker environments
# to ensure Streamlit listens correctly and is accessible via Hugging Face
# Spaces. 'exec' replaces this shell so Streamlit becomes the container's
# foreground process and receives SIGTERM directly on container shutdown;
# the backgrounded ollama/tail children are inherited across the exec.
exec streamlit run src/streamlit_app.py --server.port 8501 --server.address 0.0.0.0