brendon-ai committed on
Commit
5d20705
·
verified ·
1 Parent(s): 4b08d10

Update startup.sh

Browse files
Files changed (1) hide show
  1. startup.sh +29 -50
startup.sh CHANGED
@@ -1,64 +1,43 @@
1
  #!/bin/bash
2
 
3
- # Exit on any error
4
- set -e
5
 
6
- echo "=== Starting Ollama API Server ==="
 
7
 
8
- # Set Ollama environment variables
9
  export OLLAMA_HOST=0.0.0.0:11434
10
- export OLLAMA_MODELS=/app/.ollama/models
11
- export OLLAMA_HOME=/app/.ollama
12
  export OLLAMA_ORIGINS="*"
13
 
14
- # Create models directory with proper permissions
15
- mkdir -p /app/.ollama/models 2>/dev/null || {
16
- echo "Warning: Could not create /app/.ollama/models, trying alternative location..."
17
- export OLLAMA_MODELS=/tmp/.ollama/models
18
- export OLLAMA_HOME=/tmp/.ollama
19
- mkdir -p /tmp/.ollama/models
20
- echo "Using temporary directory: $OLLAMA_MODELS"
21
- }
22
 
23
- # Start Ollama server in the background
24
  echo "Starting Ollama server..."
25
  ollama serve &
26
  OLLAMA_PID=$!
27
- echo "Ollama server started with PID: $OLLAMA_PID"
28
 
29
- # Function to check if Ollama is ready
30
- check_ollama() {
31
- curl -s http://localhost:11434/api/tags > /dev/null 2>&1
32
- return $?
33
- }
34
-
35
- # Wait for Ollama server to be ready
36
- echo "Waiting for Ollama server to be ready..."
37
- MAX_WAIT=60
38
- WAIT_COUNT=0
39
-
40
- while ! check_ollama; do
41
- if [ $WAIT_COUNT -ge $MAX_WAIT ]; then
42
- echo "❌ Ollama server failed to start within $MAX_WAIT seconds"
43
- kill $OLLAMA_PID 2>/dev/null || true
44
- exit 1
45
- fi
46
-
47
- echo "⏳ Waiting for Ollama server... ($((WAIT_COUNT + 1))/$MAX_WAIT)"
48
- sleep 1
49
- WAIT_COUNT=$((WAIT_COUNT + 1))
50
- done
51
-
52
- echo "✅ Ollama server is ready!"
53
-
54
- # Optional: Pull a small model for testing
55
- echo "Pulling a lightweight model (this may take a few minutes)..."
56
  ollama pull tinyllama &
57
 
58
- # Start the main application
59
- echo "Starting main application..."
60
- python3 app.py
61
-
62
- # If we reach here, something went wrong with the main app
63
- echo "❌ Main application exited unexpectedly"
64
- kill $OLLAMA_PID 2>/dev/null || true
 
#!/bin/bash
#
# startup.sh — launch an Ollama server, wait until it is reachable, pull a
# small model in the background, then run the FastAPI application (app.py).
#
# All Ollama state is kept under /tmp because the container user has no
# write access to /.ollama (the default HOME-derived location).

set -euo pipefail

echo "=== Starting Ollama Generate API ==="

# Set HOME to /tmp to avoid permission issues with /.ollama
export HOME=/tmp

# Use /tmp to avoid any permission issues
export OLLAMA_HOST=0.0.0.0:11434
export OLLAMA_MODELS=/tmp/ollama/models
export OLLAMA_HOME=/tmp/ollama
export OLLAMA_ORIGINS="*"

# Create directories
mkdir -p /tmp/ollama/models /tmp/.ollama
echo "✅ Created Ollama directories in /tmp"

# Start Ollama server in the background; the EXIT trap guarantees the
# daemon is reaped on every exit path (error, timeout, or app shutdown).
echo "Starting Ollama server..."
ollama serve &
OLLAMA_PID=$!
trap 'kill "$OLLAMA_PID" 2>/dev/null || true' EXIT

# Wait for the server to actually answer instead of sleeping a fixed 10 s:
# poll the /api/tags endpoint once per second, giving up after MAX_WAIT.
MAX_WAIT=60
echo "Waiting for Ollama server..."
ready=0
for ((i = 1; i <= MAX_WAIT; i++)); do
  if curl -s http://localhost:11434/api/tags > /dev/null; then
    ready=1
    break
  fi
  sleep 1
done

if (( ready )); then
  echo "✅ Ollama server is running!"
else
  echo "❌ Ollama server failed to start"
  exit 1
fi

# Pull tiny model in background so the API can come up immediately.
echo "Pulling TinyLlama model..."
ollama pull tinyllama &

# Start FastAPI app in the foreground; when it exits, the EXIT trap
# shuts down the Ollama server.
echo "Starting FastAPI application..."
python3 app.py