code-slicer committed on
Commit
688172d
·
verified ·
1 Parent(s): 69a3ceb

Upload Dockerfile (5)

Browse files
Files changed (1) hide show
  1. Dockerfile (5) +90 -0
Dockerfile (5) ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # 1. ํŒŒ์ด์ฌ ๋ฒ„์ „์„ 3.11์œผ๋กœ ์„ค์ •
2
+ FROM python:3.11-slim
3
+
4
+ # 2. ํ•„์š”ํ•œ ์‹œ์Šคํ…œ ํŒจํ‚ค์ง€ ๋ฐ git-lfs ์„ค์น˜
5
+ RUN pip install --no-cache-dir hf_transfer>=0.1.6
6
+ RUN apt-get update && apt-get install -y \
7
+ build-essential \
8
+ curl \
9
+ git \
10
+ git-lfs \
11
+ && rm -rf /var/lib/apt/lists/*
12
+
13
+ # Ollama ์„ค์น˜
14
+ RUN curl -fsSL https://ollama.com/install.sh | sh
15
+
16
+ # ํ™˜๊ฒฝ๋ณ€์ˆ˜ ์„ค์ •
17
+ ENV OLLAMA_MODEL=gemma2:9b
18
+ ENV OLLAMA_HOME=/app/.ollama
19
+ ENV OLLAMA_MODELS=/app/.ollama
20
+
21
+ # Ollama ๋””๋ ‰ํ† ๋ฆฌ ๊ตฌ์กฐ๋ฅผ ์™„์ „ํžˆ ์ƒ์„ฑํ•˜๊ณ  ๊ถŒํ•œ ์„ค์ •
22
+ RUN mkdir -p /app/.ollama/models \
23
+ && mkdir -p /app/.ollama/manifests \
24
+ && mkdir -p /app/.ollama/manifests/registry.ollama.ai/library \
25
+ && mkdir -p /app/.ollama/blobs \
26
+ && chmod -R 777 /app/.ollama
27
+
28
+ # ๋นŒ๋“œ ์ค‘ ์ž ๊น ์„œ๋ฒ„๋ฅผ ๋„์›Œ์„œ ๋ชจ๋ธ๋งŒ ๋ฏธ๋ฆฌ ๋ฐ›์•„๋‘๊ณ  ์ข…๋ฃŒ
29
+ RUN set -eux; \
30
+ ollama serve & pid=$!; \
31
+ i=0; \
32
+ until curl -fsS http://127.0.0.1:11434/api/tags >/dev/null 2>&1; do \
33
+ i=$((i+1)); \
34
+ [ "$i" -lt 60 ] || { echo "Ollama not ready"; kill "$pid"; exit 1; }; \
35
+ sleep 1; \
36
+ done; \
37
+ ollama pull gemma2:9b; \
38
+ kill "$pid" || true
39
+
40
+ # 3. ์ž‘์—… ๋””๋ ‰ํ„ฐ๋ฆฌ ์„ค์ •
41
+ WORKDIR /app
42
+
43
+ # 6. ํŒŒ์ด์ฌ ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ ์„ค์น˜
44
+ COPY requirements.txt .
45
+ RUN pip install --no-cache-dir -r requirements.txt
46
+
47
+ # 4. ์†Œ์Šค ์ฝ”๋“œ ๋ณต์‚ฌ
48
+ COPY . /app
49
+
50
+ # 5. Git LFS ์„ค์ • ๋ฐ ๋Œ€์šฉ๋Ÿ‰ ํŒŒ์ผ ๋‹ค์šด๋กœ๋“œ
51
+ RUN git lfs install && git lfs pull || true
52
+
53
+ # ํ™˜๊ฒฝ๋ณ€์ˆ˜ ์„ค์ •
54
+ ENV HOME=/app
55
+ ENV STREAMLIT_HOME=/app/.streamlit
56
+ RUN mkdir -p /app/.streamlit && chmod -R 777 /app/.streamlit
57
+
58
+ ENV HF_HOME=/tmp/hf-home \
59
+ TRANSFORMERS_CACHE=/tmp/hf-cache \
60
+ HUGGINGFACE_HUB_CACHE=/tmp/hf-cache \
61
+ TORCH_HOME=/tmp/torch-cache \
62
+ XDG_CACHE_HOME=/tmp/xdg-cache
63
+
64
+ RUN mkdir -p /tmp/hf-home /tmp/hf-cache /tmp/torch-cache /tmp/xdg-cache
65
+
66
+ ENV OLLAMA_HOST=http://127.0.0.1:11434
67
+ EXPOSE 8501
68
+ EXPOSE 11434
69
+
70
+ # root ๊ถŒํ•œ์œผ๋กœ ์‹คํ–‰ (๊ถŒํ•œ ๋ฌธ์ œ ์™„์ „ ํ•ด๊ฒฐ)
71
+ USER root
72
+
73
+ # ์ตœ์ข… ์‹คํ–‰ ์ปค๋งจ๋“œ
74
+ CMD bash -lc '\
75
+ export OLLAMA_MODEL=${OLLAMA_MODEL:-gemma2:9b}; \
76
+ echo "Starting Ollama with model: $OLLAMA_MODEL"; \
77
+ chmod -R 777 /app/.ollama; \
78
+ ollama serve & \
79
+ for i in {1..120}; do \
80
+ if curl -fsS http://127.0.0.1:11434/api/version >/dev/null 2>&1; then \
81
+ echo "Ollama server is ready"; \
82
+ break; \
83
+ fi; \
84
+ echo "Waiting for Ollama... ($i/120)"; \
85
+ sleep 1; \
86
+ done; \
87
+ echo "Ensuring model is available: $OLLAMA_MODEL"; \
88
+ ollama pull "$OLLAMA_MODEL" || { echo "Failed to pull model"; exit 1; }; \
89
+ echo "Starting Streamlit app"; \
90
+ streamlit run app.py --server.address=0.0.0.0 --server.port=${PORT:-8501}'