code-slicer committed · verified
Commit aa0760c · 1 Parent(s): 8661625

Update Dockerfile

Files changed (1): Dockerfile +25 -36

Dockerfile CHANGED
@@ -1,66 +1,55 @@
- # 1. Set the Python version to 3.11
  FROM python:3.11-slim

- # 2. Install the required system packages and git-lfs
- RUN pip install --no-cache-dir hf_transfer>=0.1.6
- RUN apt-get update && apt-get install -y \
-     build-essential \
-     curl \
-     git \
-     git-lfs \
      && rm -rf /var/lib/apt/lists/*

- # Install Ollama
  RUN curl -fsSL https://ollama.com/install.sh | sh

- # ✅ Small model + settings that avoid permission issues
  ENV OLLAMA_HOME=/tmp/ollama
  ENV OLLAMA_MODELS=/tmp/ollama
  ENV OLLAMA_MODEL=gemma2:9b

- # Download the model during the build (a small model, so this is fast)
- RUN set -eux; \
-     mkdir -p /tmp/ollama; \
-     env HOME=/tmp OLLAMA_HOME=/tmp/ollama OLLAMA_MODELS=/tmp/ollama ollama serve & pid=$!; \
-     for i in {1..60}; do curl -sf http://127.0.0.1:11434/api/version >/dev/null && break || sleep 1; done; \
-     env HOME=/tmp OLLAMA_HOME=/tmp/ollama OLLAMA_MODELS=/tmp/ollama ollama pull gemma2:9b; \
-     kill "$pid" || true
-
- # 3. Set the working directory
  WORKDIR /app
-
- # 6. Install the Python libraries
  COPY requirements.txt .
  RUN pip install --no-cache-dir -r requirements.txt
-
- # 4. Copy the source code
  COPY . /app
-
- # 5. Configure Git LFS and pull large files
  RUN git lfs install && git lfs pull || true

- # Environment variables
  ENV HOME=/app
  ENV STREAMLIT_HOME=/app/.streamlit
- RUN mkdir -p /app/.streamlit

  ENV HF_HOME=/tmp/hf-home \
      TRANSFORMERS_CACHE=/tmp/hf-cache \
      HUGGINGFACE_HUB_CACHE=/tmp/hf-cache \
      TORCH_HOME=/tmp/torch-cache \
      XDG_CACHE_HOME=/tmp/xdg-cache

- RUN mkdir -p /tmp/hf-home /tmp/hf-cache /tmp/torch-cache /tmp/xdg-cache
-
  ENV OLLAMA_HOST=http://127.0.0.1:11434

  EXPOSE 8501

- # ✅ Simple and reliable CMD
  CMD bash -lc '\
-     mkdir -p /tmp/ollama; \
-     echo "Starting Ollama with model: gemma2:9b"; \
      env HOME=/tmp OLLAMA_HOME=/tmp/ollama OLLAMA_MODELS=/tmp/ollama ollama serve & \
-     sleep 10; \
-     echo "Ollama server should be ready"; \
-     echo "Starting Streamlit app"; \
-     streamlit run app.py --server.address=0.0.0.0 --server.port=${PORT:-8501}'
 
+ # 1) Python 3.11
  FROM python:3.11-slim

+ # 2) System deps & git-lfs
+ RUN pip install --no-cache-dir "hf_transfer>=0.1.6" && \
+     apt-get update && apt-get install -y \
+     build-essential \
+     curl \
+     git \
+     git-lfs \
      && rm -rf /var/lib/apt/lists/*

+ # 3) Install Ollama
  RUN curl -fsSL https://ollama.com/install.sh | sh

+ # 4) Ollama storage → /tmp (writeable in Spaces)
  ENV OLLAMA_HOME=/tmp/ollama
  ENV OLLAMA_MODELS=/tmp/ollama
  ENV OLLAMA_MODEL=gemma2:9b
+ RUN install -d -m 777 /tmp/ollama

+ # 5) App setup
  WORKDIR /app
  COPY requirements.txt .
  RUN pip install --no-cache-dir -r requirements.txt
  COPY . /app
  RUN git lfs install && git lfs pull || true

+ # 6) Streamlit & caches
  ENV HOME=/app
  ENV STREAMLIT_HOME=/app/.streamlit
+ RUN install -d -m 777 /app/.streamlit

  ENV HF_HOME=/tmp/hf-home \
      TRANSFORMERS_CACHE=/tmp/hf-cache \
      HUGGINGFACE_HUB_CACHE=/tmp/hf-cache \
      TORCH_HOME=/tmp/torch-cache \
      XDG_CACHE_HOME=/tmp/xdg-cache
+ RUN install -d -m 777 /tmp/hf-home /tmp/hf-cache /tmp/torch-cache /tmp/xdg-cache

+ # 7) Internal Ollama host (same container)
  ENV OLLAMA_HOST=http://127.0.0.1:11434
+ ENV OLLAMA_TIMEOUT=300
+
  EXPOSE 8501

+ # 8) Runtime: clean /tmp/ollama → serve → healthcheck → pull gemma2:9b → Streamlit
  CMD bash -lc '\
+     set -euo pipefail; \
+     rm -rf /tmp/ollama && install -d -m 777 /tmp/ollama; \
+     echo "Starting Ollama with model: ${OLLAMA_MODEL}"; \
      env HOME=/tmp OLLAMA_HOME=/tmp/ollama OLLAMA_MODELS=/tmp/ollama ollama serve & \
+     for i in {1..240}; do curl -sf http://127.0.0.1:11434/api/version >/dev/null && break || sleep 1; done; \
+     env HOME=/tmp OLLAMA_HOME=/tmp/ollama OLLAMA_MODELS=/tmp/ollama ollama pull "${OLLAMA_MODEL}"; \
+     exec streamlit run app.py --server.address=0.0.0.0 --server.port=${PORT:-8501}'
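
Since this commit moves the gemma2:9b pull from build time to container start, the first boot does real network work before Streamlit comes up. A minimal smoke test for that flow, as a sketch: the image tag app-image is illustrative, /api/version and /api/generate are Ollama's documented REST endpoints, and because OLLAMA_HOST pins the server to 127.0.0.1 inside the container, it is probed via docker exec rather than a published port.

    docker build -t app-image .
    docker run -d --name app -p 8501:8501 app-image
    # Wait until the in-container Ollama server answers (mirrors the CMD healthcheck).
    docker exec app bash -c 'until curl -sf http://127.0.0.1:11434/api/version >/dev/null; do sleep 1; done'
    # One-off generation against the model pulled at startup.
    docker exec app curl -s http://127.0.0.1:11434/api/generate \
        -d '{"model": "gemma2:9b", "prompt": "Say hello.", "stream": false}'

One related caveat: installing hf_transfer alone does not activate it; huggingface_hub only uses it when HF_HUB_ENABLE_HF_TRANSFER=1 is set, so an extra ENV HF_HUB_ENABLE_HF_TRANSFER=1 line would be needed for that dependency to take effect.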