File size: 899 Bytes
d7c5358
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5871c3c
 
 
7f2698c
 
 
 
d7c5358
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
#!/bin/bash
# Container entrypoint: start the Ollama server, pull the required models,
# build the custom "aws-path-learning" model from ./Modelfile, then launch
# the Streamlit UI in the foreground (which keeps the container alive).
#
# Environment:
#   MODEL - optional comma-separated list of models to pull.
#           Defaults to "llama3" when unset/empty.
set -euo pipefail

# Source the Python virtual environment
source /app/venv/bin/activate

# Start the Ollama server in the background
echo "Starting Ollama server"
ollama serve &

# Wait until the server answers instead of a fixed 1-second sleep.
# NOTE(review): assumes `ollama list` succeeds once the API is up — confirm.
for _ in {1..30}; do
  if ollama list >/dev/null 2>&1; then
    break
  fi
  sleep 1
done

# Build the list of models to pull. ${MODEL:-} avoids a set -u error
# when the variable is not exported by the container runtime.
if [[ -n "${MODEL:-}" ]]; then
  # Split the comma-separated MODEL value into an array
  IFS=',' read -ra MODELS <<< "${MODEL}"
else
  # Default model set (add more here, e.g. gemma:2b phi3 mistral)
  MODELS=(llama3)
fi

# Pull each model; `ollama pull` blocks until complete, so no sleep needed.
for m in "${MODELS[@]}"; do
  echo "Pulling $m"
  ollama pull "$m"
done

# Build the custom model on top of the pulled base model(s)
ollama create aws-path-learning -f ./Modelfile

# Run the Streamlit application in the foreground so the container
# does not exit while the app is serving.
streamlit run ./src/app.py --server.port 7860 --server.address 0.0.0.0