thangquang09 committed on
Commit
78f1859
·
1 Parent(s): 70cd215

update Dockerfile

Browse files
Files changed (3) hide show
  1. pull_models.sh +16 -7
  2. requirements.txt +4 -3
  3. start.sh +7 -3
pull_models.sh CHANGED
@@ -1,9 +1,18 @@
1
  #!/bin/bash
2
 
3
- # Kiểm tra xem model đã được tải chưa
4
- if [ ! -d "/root/.ollama/models/manifests/phi2" ]; then
5
- echo "Pulling phi2 model..."
6
- ollama pull phi2
7
- else
8
- echo "phi2 model already exists"
9
- fi
 
 
 
 
 
 
 
 
 
 
#!/bin/bash
# Pull the phi2 model into the Ollama store, starting the Ollama server
# first if it is not already running. Safe to re-run (idempotent).
set -euo pipefail

echo "Checking for models..."

# Ensure we're using the right home directory so Ollama finds its
# model store under /root/.ollama (container runs as root).
export HOME=/root

# Start Ollama if it is not already running
if ! pgrep -x "ollama" > /dev/null; then
    echo "Ollama is not running. Starting Ollama..."
    ollama serve &
fi

# Poll until the Ollama API responds (up to ~30s) instead of a fixed
# sleep, which races on slow hosts.
echo "Waiting for Ollama to become ready..."
for ((i = 0; i < 30; i++)); do
    if ollama list > /dev/null 2>&1; then
        break
    fi
    sleep 1
done

# Only pull when the model is missing — avoids re-downloading on every
# container start.
if ollama list 2>/dev/null | grep -q '^phi2'; then
    echo "phi2 model already exists"
else
    echo "Pulling phi2 model..."
    ollama pull phi2
fi
requirements.txt CHANGED
@@ -1,6 +1,7 @@
1
  streamlit
2
- fastapi>=0.100.0 # Newer FastAPI version
3
- llama-index
4
- pydantic>=2.0.0 # Newer Pydantic version
 
5
  Pillow
6
  uvicorn
 
1
  streamlit
2
+ fastapi>=0.100.0
3
+ llama-index>=0.9.0 # Make sure to use the latest version that includes Ollama
4
+ llama-index-llms-ollama # This is the missing package
5
+ pydantic>=2.0.0
6
  Pillow
7
  uvicorn
start.sh CHANGED
@@ -1,13 +1,17 @@
1
  #!/bin/bash
2
 
3
- # Start Ollama in the background
 
4
  ollama serve &
5
 
6
  # Wait for Ollama to start
7
- sleep 5
 
8
 
9
  # Pull models if needed
10
- /pull_models.sh
 
11
 
12
  # Start FastAPI app
 
13
  uvicorn app:app --host 0.0.0.0 --port 7860
 
#!/bin/bash
# Container entrypoint: start Ollama, ensure models are present, then
# run the FastAPI app in the foreground.
set -euo pipefail

# Start Ollama in the background with proper home directory so it uses
# /root/.ollama as its model store.
export HOME=/root
ollama serve &

# Poll until the Ollama API responds (up to ~30s) instead of a fixed
# sleep, which either wastes time or races on slow hosts.
echo "Waiting for Ollama to start..."
for ((i = 0; i < 30; i++)); do
    if ollama list > /dev/null 2>&1; then
        break
    fi
    sleep 1
done

# Pull models if needed. A failed pull is logged but does not block the
# web app from starting (preserves original best-effort behavior).
echo "Running pull_models.sh"
/app/pull_models.sh || echo "WARNING: pull_models.sh failed; continuing without model" >&2

# Start FastAPI app. 'exec' replaces the shell so uvicorn becomes PID 1
# and receives SIGTERM directly on container stop.
echo "Starting FastAPI application..."
exec uvicorn app:app --host 0.0.0.0 --port 7860