code-slicer committed on
Commit
a83f395
·
verified ·
1 Parent(s): fcb0d1d

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +21 -68
Dockerfile CHANGED
@@ -1,90 +1,43 @@
1
- # 1. ํŒŒ์ด์ฌ ๋ฒ„์ „์„ 3.11์œผ๋กœ ์„ค์ •
2
  FROM python:3.11-slim
3
 
4
- # 2. ํ•„์š”ํ•œ ์‹œ์Šคํ…œ ํŒจํ‚ค์ง€ ๋ฐ git-lfs ์„ค์น˜
5
- RUN pip install --no-cache-dir hf_transfer>=0.1.6
6
- RUN apt-get update && apt-get install -y \
7
- build-essential \
8
- curl \
9
- git \
10
- git-lfs \
11
- && rm -rf /var/lib/apt/lists/*
12
 
13
  # Ollama ์„ค์น˜
14
  RUN curl -fsSL https://ollama.com/install.sh | sh
15
 
16
- # ํ™˜๊ฒฝ๋ณ€์ˆ˜ ์„ค์ •
17
- ENV OLLAMA_MODEL=gemma2:9b
18
- ENV OLLAMA_HOME=/app/.ollama
19
- ENV OLLAMA_MODELS=/app/.ollama
20
-
21
- # Ollama ๋””๋ ‰ํ† ๋ฆฌ ๊ตฌ์กฐ๋ฅผ ์™„์ „ํžˆ ์ƒ์„ฑํ•˜๊ณ  ๊ถŒํ•œ ์„ค์ •
22
- RUN mkdir -p /app/.ollama/models \
23
- && mkdir -p /app/.ollama/manifests \
24
- && mkdir -p /app/.ollama/manifests/registry.ollama.ai/library \
25
- && mkdir -p /app/.ollama/blobs \
26
- && chmod -R 777 /app/.ollama
27
-
28
- # ๋นŒ๋“œ ์ค‘ ์ž ๊น ์„œ๋ฒ„๋ฅผ ๋„์›Œ์„œ ๋ชจ๋ธ๋งŒ ๋ฏธ๋ฆฌ ๋ฐ›์•„๋‘๊ณ  ์ข…๋ฃŒ
29
- RUN set -eux; \
30
- ollama serve & pid=$!; \
31
- i=0; \
32
- until curl -fsS http://127.0.0.1:11434/api/tags >/dev/null 2>&1; do \
33
- i=$((i+1)); \
34
- [ "$i" -lt 60 ] || { echo "Ollama not ready"; kill "$pid"; exit 1; }; \
35
- sleep 1; \
36
- done; \
37
- ollama pull gemma2:9b; \
38
- kill "$pid" || true
39
-
40
- # 3. ์ž‘์—… ๋””๋ ‰ํ„ฐ๋ฆฌ ์„ค์ •
41
  WORKDIR /app
42
-
43
- # 6. ํŒŒ์ด์ฌ ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ ์„ค์น˜
44
  COPY requirements.txt .
45
  RUN pip install --no-cache-dir -r requirements.txt
46
-
47
- # 4. ์†Œ์Šค ์ฝ”๋“œ ๋ณต์‚ฌ
48
  COPY . /app
49
-
50
- # 5. Git LFS ์„ค์ • ๋ฐ ๋Œ€์šฉ๋Ÿ‰ ํŒŒ์ผ ๋‹ค์šด๋กœ๋“œ
51
  RUN git lfs install && git lfs pull || true
52
 
53
- # ํ™˜๊ฒฝ๋ณ€์ˆ˜ ์„ค์ •
54
  ENV HOME=/app
55
- ENV STREAMLIT_HOME=/app/.streamlit
56
  RUN mkdir -p /app/.streamlit && chmod -R 777 /app/.streamlit
57
 
58
- ENV HF_HOME=/tmp/hf-home \
59
- TRANSFORMERS_CACHE=/tmp/hf-cache \
60
- HUGGINGFACE_HUB_CACHE=/tmp/hf-cache \
61
- TORCH_HOME=/tmp/torch-cache \
62
- XDG_CACHE_HOME=/tmp/xdg-cache
63
-
64
- RUN mkdir -p /tmp/hf-home /tmp/hf-cache /tmp/torch-cache /tmp/xdg-cache
65
 
 
66
  ENV OLLAMA_HOST=http://127.0.0.1:11434
 
 
 
67
  EXPOSE 8501
68
- EXPOSE 11434
69
 
70
- # root ๊ถŒํ•œ์œผ๋กœ ์‹คํ–‰ (๊ถŒํ•œ ๋ฌธ์ œ ์™„์ „ ํ•ด๊ฒฐ)
71
- USER root
72
 
73
- # ์ตœ์ข… ์‹คํ–‰ ์ปค๋งจ๋“œ
74
  CMD bash -lc '\
75
- export OLLAMA_MODEL=${OLLAMA_MODEL:-gemma2:9b}; \
76
- echo "Starting Ollama with model: $OLLAMA_MODEL"; \
77
- chmod -R 777 /app/.ollama; \
78
  ollama serve & \
79
- for i in {1..120}; do \
80
- if curl -fsS http://127.0.0.1:11434/api/version >/dev/null 2>&1; then \
81
- echo "Ollama server is ready"; \
82
- break; \
83
- fi; \
84
- echo "Waiting for Ollama... ($i/120)"; \
85
- sleep 1; \
86
- done; \
87
- echo "Ensuring model is available: $OLLAMA_MODEL"; \
88
- ollama pull "$OLLAMA_MODEL" || { echo "Failed to pull model"; exit 1; }; \
89
- echo "Starting Streamlit app"; \
90
- streamlit run app.py --server.address=0.0.0.0 --server.port=${PORT:-8501}'
 
 
1
FROM python:3.11-slim

# Base dependencies.
# NOTE: the version specifier MUST be quoted — an unquoted `hf_transfer>=0.1.6`
# is parsed by the shell as a redirection (`>`), which silently installs an
# unpinned hf_transfer and creates a stray file named `=0.1.6`.
# --no-install-recommends keeps the apt layer minimal (hadolint DL3015).
RUN pip install --no-cache-dir "hf_transfer>=0.1.6" && \
    apt-get update && apt-get install -y --no-install-recommends \
        build-essential \
        curl \
        git \
        git-lfs \
    && rm -rf /var/lib/apt/lists/*

# Install Ollama.
# NOTE(review): piping a remote script to `sh` is unpinned and unverified;
# consider pinning an Ollama release and verifying its checksum.
RUN curl -fsSL https://ollama.com/install.sh | sh

# Application install: copy the dependency manifest first so the pip layer
# is cached until requirements.txt changes, then copy the source.
WORKDIR /app
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
COPY . /app
# Best-effort LFS fetch: `|| true` keeps the build going when the build
# context has no .git directory (e.g. Spaces builds from a tarball).
RUN git lfs install && git lfs pull || true

# Streamlit config dir must be writable: Hugging Face Spaces runs the
# container under an arbitrary non-root UID, hence the permissive chmod.
ENV HOME=/app
RUN mkdir -p /app/.streamlit && chmod -R 777 /app/.streamlit

# Pin the Ollama store to /tmp, which is writable for any UID on Spaces.
ENV OLLAMA_HOME=/tmp/ollama
ENV OLLAMA_MODELS=/tmp/ollama
RUN mkdir -p /tmp/ollama && chmod -R 777 /tmp/ollama

# Runtime configuration: local Ollama endpoint, default model, and a
# generous client timeout (seconds) for the first large-model load.
ENV OLLAMA_HOST=http://127.0.0.1:11434
ENV OLLAMA_MODEL=gemma2:9b
ENV OLLAMA_TIMEOUT=300

# Streamlit UI port (documentation only; Spaces maps it via $PORT).
EXPOSE 8501

# Startup sequence: start `ollama serve` in the background, poll the version
# endpoint for up to 240 s, pull the model (fail fast if that fails), then
# exec Streamlit so it becomes PID 1 and receives SIGTERM on stop.
# Shell form is intentional: the script needs bash brace expansion ({1..240})
# and ${VAR} expansion at container start.
CMD bash -lc '\
set -e; \
export OLLAMA_HOME=/tmp/ollama OLLAMA_MODELS=/tmp/ollama; \
echo "Starting Ollama with model: ${OLLAMA_MODEL}"; \
ollama serve & \
for i in {1..240}; do curl -sf http://127.0.0.1:11434/api/version >/dev/null && break || sleep 1; done; \
ollama pull "${OLLAMA_MODEL}" || { echo "[ERR] ollama pull failed"; exit 1; }; \
exec streamlit run "app (15).py" --server.address=0.0.0.0 --server.port=${PORT:-8501}'