code-slicer committed on
Commit
be05e4f
Β·
verified Β·
1 Parent(s): a83f395

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +59 -22
Dockerfile CHANGED
@@ -1,43 +1,80 @@
 
1
  FROM python:3.11-slim
2
 
3
- # base deps
4
- RUN pip install --no-cache-dir hf_transfer>=0.1.6 && \
5
- apt-get update && apt-get install -y build-essential curl git git-lfs && \
6
- rm -rf /var/lib/apt/lists/*
 
 
 
 
7
 
8
  # Ollama μ„€μΉ˜
9
  RUN curl -fsSL https://ollama.com/install.sh | sh
10
 
11
- # μ•± μ„€μΉ˜
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
  WORKDIR /app
 
 
13
  COPY requirements.txt .
14
  RUN pip install --no-cache-dir -r requirements.txt
 
 
15
  COPY . /app
 
 
16
  RUN git lfs install && git lfs pull || true
17
 
18
- # Streamlit 폴더 κΆŒν•œ
19
  ENV HOME=/app
 
20
  RUN mkdir -p /app/.streamlit && chmod -R 777 /app/.streamlit
21
 
22
- # βœ… Ollama μ €μž₯μ†Œλ₯Ό /tmp둜 κ³ μ •(Spacesμ—μ„œ μ•ˆμ „)
23
- ENV OLLAMA_HOME=/tmp/ollama
24
- ENV OLLAMA_MODELS=/tmp/ollama
25
- RUN mkdir -p /tmp/ollama && chmod -R 777 /tmp/ollama
 
26
 
27
- # βœ… λŸ°νƒ€μž„ μ„€μ • (λͺ¨λΈ κ³ μ • + νƒ€μž„μ•„μ›ƒ μ—¬μœ )
28
- ENV OLLAMA_HOST=http://127.0.0.1:11434
29
- ENV OLLAMA_MODEL=gemma2:9b
30
- ENV OLLAMA_TIMEOUT=300
31
 
 
32
  EXPOSE 8501
 
33
 
34
- # βœ… μ‹€ν–‰: serve β†’ ν—¬μŠ€μ²΄ν¬ β†’ gemma2:9b pull β†’ Streamlit
35
-
36
  CMD bash -lc '\
37
- set -e; \
38
- export OLLAMA_HOME=/tmp/ollama OLLAMA_MODELS=/tmp/ollama; \
39
- echo "Starting Ollama with model: ${OLLAMA_MODEL}"; \
40
  ollama serve & \
41
- for i in {1..240}; do curl -sf http://127.0.0.1:11434/api/version >/dev/null && break || sleep 1; done; \
42
- ollama pull "${OLLAMA_MODEL}" || { echo "[ERR] ollama pull failed"; exit 1; }; \
43
- exec streamlit run "app (15).py" --server.address=0.0.0.0 --server.port=${PORT:-8501}'
 
 
 
 
 
 
 
 
 
 
1
+ # 1. 파이썬 버전을 3.11으둜 μ„€μ •
2
  FROM python:3.11-slim
3
 
4
+ # 2. ν•„μš”ν•œ μ‹œμŠ€ν…œ νŒ¨ν‚€μ§€ 및 git-lfs μ„€μΉ˜
5
+ RUN pip install --no-cache-dir hf_transfer>=0.1.6
6
+ RUN apt-get update && apt-get install -y \
7
+ build-essential \
8
+ curl \
9
+ git \
10
+ git-lfs \
11
+ && rm -rf /var/lib/apt/lists/*
12
 
13
  # Ollama μ„€μΉ˜
14
  RUN curl -fsSL https://ollama.com/install.sh | sh
15
 
16
+ # ν™˜κ²½λ³€μˆ˜ μ„€μ • (λͺ…μ‹œμ μœΌλ‘œ λͺ¨λΈλͺ… μ§€μ •)
17
+ ENV OLLAMA_MODEL=gemma2:9b
18
+ ENV OLLAMA_HOME=/app/.ollama
19
+ ENV OLLAMA_MODELS=/app/.ollama
20
+ RUN mkdir -p /app/.ollama && chmod -R 777 /app/.ollama
21
+
22
+ # λΉŒλ“œ 쀑 λͺ¨λΈ λ‹€μš΄λ‘œλ“œ
23
+ RUN set -eux; \
24
+ ollama serve & pid=$!; \
25
+ i=0; \
26
+ until curl -fsS http://127.0.0.1:11434/api/tags >/dev/null 2>&1; do \
27
+ i=$((i+1)); \
28
+ [ "$i" -lt 60 ] || { echo "Ollama not ready"; kill "$pid"; exit 1; }; \
29
+ sleep 1; \
30
+ done; \
31
+ ollama pull gemma2:9b; \
32
+ kill "$pid" || true
33
+
34
+ # 3. μž‘μ—… 디렉터리 μ„€μ •
35
  WORKDIR /app
36
+
37
+ # 6. 파이썬 라이브러리 μ„€μΉ˜
38
  COPY requirements.txt .
39
  RUN pip install --no-cache-dir -r requirements.txt
40
+
41
+ # 4. μ†ŒμŠ€ μ½”λ“œ 볡사
42
  COPY . /app
43
+
44
+ # 5. Git LFS μ„€μ • 및 λŒ€μš©λŸ‰ 파일 λ‹€μš΄λ‘œλ“œ
45
  RUN git lfs install && git lfs pull || true
46
 
47
+ # ν™˜κ²½λ³€μˆ˜ μ„€μ •
48
  ENV HOME=/app
49
+ ENV STREAMLIT_HOME=/app/.streamlit
50
  RUN mkdir -p /app/.streamlit && chmod -R 777 /app/.streamlit
51
 
52
+ ENV HF_HOME=/tmp/hf-home \
53
+ TRANSFORMERS_CACHE=/tmp/hf-cache \
54
+ HUGGINGFACE_HUB_CACHE=/tmp/hf-cache \
55
+ TORCH_HOME=/tmp/torch-cache \
56
+ XDG_CACHE_HOME=/tmp/xdg-cache
57
 
58
+ RUN mkdir -p /tmp/hf-home /tmp/hf-cache /tmp/torch-cache /tmp/xdg-cache
 
 
 
59
 
60
+ ENV OLLAMA_HOST=http://127.0.0.1:11434
61
  EXPOSE 8501
62
+ EXPOSE 11434
63
 
64
+ # μˆ˜μ •λœ CMD
 
65
  CMD bash -lc '\
66
+ export OLLAMA_MODEL=${OLLAMA_MODEL:-gemma2:9b}; \
67
+ echo "Starting Ollama with model: $OLLAMA_MODEL"; \
 
68
  ollama serve & \
69
+ for i in {1..120}; do \
70
+ if curl -fsS http://127.0.0.1:11434/api/version >/dev/null 2>&1; then \
71
+ echo "Ollama server is ready"; \
72
+ break; \
73
+ fi; \
74
+ echo "Waiting for Ollama... ($i/120)"; \
75
+ sleep 1; \
76
+ done; \
77
+ echo "Ensuring model is available: $OLLAMA_MODEL"; \
78
+ ollama pull "$OLLAMA_MODEL" || { echo "Failed to pull model"; exit 1; }; \
79
+ echo "Starting Streamlit app"; \
80
+ streamlit run app.py --server.address=0.0.0.0 --server.port=${PORT:-8501}'