code-slicer committed on
Commit
5d942ff
·
verified ·
1 Parent(s): be05e4f

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +21 -28
Dockerfile CHANGED
@@ -13,23 +13,20 @@ RUN apt-get update && apt-get install -y \
13
  # Ollama 설치
14
  RUN curl -fsSL https://ollama.com/install.sh | sh
15
 
16
- # 환경변수 설정 (명시적으로 모델명 지정)
 
 
17
  ENV OLLAMA_MODEL=gemma2:9b
18
- ENV OLLAMA_HOME=/app/.ollama
19
- ENV OLLAMA_MODELS=/app/.ollama
20
- RUN mkdir -p /app/.ollama && chmod -R 777 /app/.ollama
21
 
22
  # 빌드 중 모델 다운로드
23
  RUN set -eux; \
24
- ollama serve & pid=$!; \
25
- i=0; \
26
- until curl -fsS http://127.0.0.1:11434/api/tags >/dev/null 2>&1; do \
27
- i=$((i+1)); \
28
- [ "$i" -lt 60 ] || { echo "Ollama not ready"; kill "$pid"; exit 1; }; \
29
- sleep 1; \
30
- done; \
31
- ollama pull gemma2:9b; \
32
- kill "$pid" || true
33
 
34
  # 3. 작업 디렉터리 설정
35
  WORKDIR /app
@@ -63,18 +60,14 @@ EXPOSE 11434
63
 
64
  # 수정된 CMD
65
  CMD bash -lc '\
66
- export OLLAMA_MODEL=${OLLAMA_MODEL:-gemma2:9b}; \
67
- echo "Starting Ollama with model: $OLLAMA_MODEL"; \
68
- ollama serve & \
69
- for i in {1..120}; do \
70
- if curl -fsS http://127.0.0.1:11434/api/version >/dev/null 2>&1; then \
71
- echo "Ollama server is ready"; \
72
- break; \
73
- fi; \
74
- echo "Waiting for Ollama... ($i/120)"; \
75
- sleep 1; \
76
- done; \
77
- echo "Ensuring model is available: $OLLAMA_MODEL"; \
78
- ollama pull "$OLLAMA_MODEL" || { echo "Failed to pull model"; exit 1; }; \
79
- echo "Starting Streamlit app"; \
80
- streamlit run app.py --server.address=0.0.0.0 --server.port=${PORT:-8501}'
 
13
  # Ollama 설치
14
  RUN curl -fsSL https://ollama.com/install.sh | sh
15
 
16
+ # Ollama 저장소를 /tmp로 전역 고정 (권한 안전)
17
+ ENV OLLAMA_HOME=/tmp/ollama
18
+ ENV OLLAMA_MODELS=/tmp/ollama
19
  ENV OLLAMA_MODEL=gemma2:9b
20
+ RUN mkdir -p /tmp/ollama && chmod -R 777 /tmp/ollama
21
+
 
22
 
23
  # 빌드 중 모델 다운로드
24
  RUN set -eux; \
25
+ mkdir -p /tmp/ollama && chmod -R 777 /tmp/ollama; \
26
+ env HOME=/tmp OLLAMA_HOME=/tmp/ollama OLLAMA_MODELS=/tmp/ollama ollama serve & pid=$!; \
27
+ for i in {1..120}; do curl -sf http://127.0.0.1:11434/api/version >/dev/null && break || sleep 1; done; \
28
+ env HOME=/tmp OLLAMA_HOME=/tmp/ollama OLLAMA_MODELS=/tmp/ollama ollama pull gemma2:9b; \
29
+ kill "$pid" || true
 
 
 
 
30
 
31
  # 3. 작업 디렉터리 설정
32
  WORKDIR /app
 
60
 
61
  # 수정된 CMD
62
  CMD bash -lc '\
63
+ set -euo pipefail; \
64
+ mkdir -p /tmp/ollama && chmod -R 777 /tmp/ollama; \
65
+ echo "Starting Ollama with model: ${OLLAMA_MODEL:-gemma2:9b}"; \
66
+ # ollama 프로세스에만 HOME/OLLAMA_*를 /tmp로 강제 주입
67
+ env HOME=/tmp OLLAMA_HOME=/tmp/ollama OLLAMA_MODELS=/tmp/ollama ollama serve & \
68
+ # 레디체크
69
+ for i in {1..240}; do curl -sf http://127.0.0.1:11434/api/version >/dev/null && break || sleep 1; done; \
70
+ # 모델 풀 (고정: gemma2:9b)
71
+ env HOME=/tmp OLLAMA_HOME=/tmp/ollama OLLAMA_MODELS=/tmp/ollama ollama pull "${OLLAMA_MODEL:-gemma2:9b}" || { echo "[ERR] ollama pull failed"; exit 1; }; \
72
+ # Streamlit 실행 (여긴 HOME=/app 그대로)
73
+ exec streamlit run "app.py" --server.address=0.0.0.0 --server.port=${PORT:-8501}'