code-slicer committed
Commit 8cfc181 · verified · 1 Parent(s): e237410

Update Dockerfile

Files changed (1): Dockerfile (+20 -22)
Dockerfile CHANGED
@@ -13,28 +13,22 @@ RUN apt-get update && apt-get install -y \
 # Install Ollama
 RUN curl -fsSL https://ollama.com/install.sh | sh
 
-# Set environment variables
+# Key change: use a /tmp directory
 ENV OLLAMA_MODEL=gemma2:9b
-ENV OLLAMA_HOME=/app/.ollama
-ENV OLLAMA_MODELS=/app/.ollama
-
-# Create the full Ollama directory structure and set permissions
-RUN mkdir -p /app/.ollama/models \
-    && mkdir -p /app/.ollama/manifests \
-    && mkdir -p /app/.ollama/manifests/registry.ollama.ai/library \
-    && mkdir -p /app/.ollama/blobs \
-    && chmod -R 777 /app/.ollama
+ENV OLLAMA_HOME=/tmp/ollama-home
+ENV OLLAMA_MODELS=/tmp/ollama-home
 
-# Briefly start the server during build to pre-pull the model, then stop it
+# Download the model during build (using /tmp)
 RUN set -eux; \
-    ollama serve & pid=$!; \
+    mkdir -p /tmp/ollama-home; \
+    OLLAMA_HOME=/tmp/ollama-home OLLAMA_MODELS=/tmp/ollama-home ollama serve & pid=$!; \
     i=0; \
     until curl -fsS http://127.0.0.1:11434/api/tags >/dev/null 2>&1; do \
         i=$((i+1)); \
         [ "$i" -lt 60 ] || { echo "Ollama not ready"; kill "$pid"; exit 1; }; \
         sleep 1; \
     done; \
-    ollama pull gemma2:9b; \
+    OLLAMA_HOME=/tmp/ollama-home OLLAMA_MODELS=/tmp/ollama-home ollama pull gemma2:9b; \
     kill "$pid" || true
 
 # 3. Set the working directory
@@ -55,26 +49,25 @@ ENV HOME=/app
 ENV STREAMLIT_HOME=/app/.streamlit
 RUN mkdir -p /app/.streamlit && chmod -R 777 /app/.streamlit
 
+# Unify cache paths under /tmp as well
 ENV HF_HOME=/tmp/hf-home \
     TRANSFORMERS_CACHE=/tmp/hf-cache \
     HUGGINGFACE_HUB_CACHE=/tmp/hf-cache \
     TORCH_HOME=/tmp/torch-cache \
     XDG_CACHE_HOME=/tmp/xdg-cache
 
-RUN mkdir -p /tmp/hf-home /tmp/hf-cache /tmp/torch-cache /tmp/xdg-cache
-
 ENV OLLAMA_HOST=http://127.0.0.1:11434
 EXPOSE 8501
 EXPOSE 11434
 
-# Run as root (fully resolves the permission problem)
-USER root
-
-# Final run command
+# Simple and safe CMD
 CMD bash -lc '\
     export OLLAMA_MODEL=${OLLAMA_MODEL:-gemma2:9b}; \
+    export OLLAMA_HOME=/tmp/ollama-home; \
+    export OLLAMA_MODELS=/tmp/ollama-home; \
     echo "Starting Ollama with model: $OLLAMA_MODEL"; \
-    chmod -R 777 /app/.ollama; \
+    echo "Using Ollama home: $OLLAMA_HOME"; \
+    mkdir -p /tmp/ollama-home; \
     ollama serve & \
     for i in {1..120}; do \
         if curl -fsS http://127.0.0.1:11434/api/version >/dev/null 2>&1; then \
@@ -84,7 +77,12 @@ CMD bash -lc '\
         echo "Waiting for Ollama... ($i/120)"; \
         sleep 1; \
     done; \
-    echo "Ensuring model is available: $OLLAMA_MODEL"; \
-    ollama pull "$OLLAMA_MODEL" || { echo "Failed to pull model"; exit 1; }; \
+    echo "Checking if model exists..."; \
+    if ! ollama list | grep -q "$OLLAMA_MODEL"; then \
+        echo "Model not found, pulling: $OLLAMA_MODEL"; \
+        ollama pull "$OLLAMA_MODEL" || { echo "Failed to pull model"; exit 1; }; \
+    else \
+        echo "Model already available: $OLLAMA_MODEL"; \
+    fi; \
     echo "Starting Streamlit app"; \
     streamlit run app.py --server.address=0.0.0.0 --server.port=${PORT:-8501}'
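Note on the first hunk: the RUN step is a start → wait → pull → stop sequence. Read as a standalone script (a sketch only; it assumes ollama and curl are installed and on PATH, with the paths and model name taken from the diff), the same logic is:

#!/bin/sh
# Sketch: pre-pull an Ollama model at image-build time, then stop the server.
set -eux
export OLLAMA_HOME=/tmp/ollama-home OLLAMA_MODELS=/tmp/ollama-home
mkdir -p "$OLLAMA_MODELS"
ollama serve & pid=$!                  # background server; remember its PID
i=0
until curl -fsS http://127.0.0.1:11434/api/tags >/dev/null 2>&1; do
    i=$((i+1))
    [ "$i" -lt 60 ] || { echo "Ollama not ready"; kill "$pid"; exit 1; }
    sleep 1
done
ollama pull gemma2:9b                  # blobs land under $OLLAMA_MODELS
kill "$pid" || true                    # server was only needed for the pull

Because the pull happens in a RUN layer, the model weights are baked into the image, and the existence check in the CMD should normally be a fast no-op.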
 
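To smoke-test the change locally, something along these lines should work (the image tag here is hypothetical; the ports match the EXPOSE lines in the diff):

# Hypothetical tag; build and run the image locally.
docker build -t ollama-streamlit-space .
docker run --rm -p 8501:8501 -p 11434:11434 ollama-streamlit-space
# In a second shell, the pre-pulled model should already be listed:
curl -fsS http://127.0.0.1:11434/api/tags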
 
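The two readiness probes target different Ollama endpoints: the build step polls /api/tags, while the CMD polls /api/version. Both are cheap GET requests that can also be exercised by hand against a running server:

# Readiness checks used in this Dockerfile, runnable manually:
curl -fsS http://127.0.0.1:11434/api/version   # e.g. {"version":"..."}
curl -fsS http://127.0.0.1:11434/api/tags      # JSON listing of local models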
 
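One caveat on the new model check: grep -q matches substrings, so "$OLLAMA_MODEL" would also be considered present if some other entry merely contains that string. If exactness ever matters, a small variant works (a sketch, not part of this commit; model_present is a hypothetical helper, and it assumes ollama list prints the model name in the first column under a one-line header):

# Sketch: exact-name match instead of substring grep (hypothetical helper).
model_present() {
    ollama list | awk 'NR>1 {print $1}' | grep -Fxq "$1"
}
if ! model_present "$OLLAMA_MODEL"; then
    echo "Model not found, pulling: $OLLAMA_MODEL"
    ollama pull "$OLLAMA_MODEL" || { echo "Failed to pull model"; exit 1; }
fi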