# chatbotMOAI/Dockerfile
# 1. Pin the Python version to 3.11
FROM python:3.11-slim
# 2. Install hf_transfer (quoted so the shell does not treat ">=" as a redirection),
#    then the required system packages and git-lfs
RUN pip install --no-cache-dir "hf_transfer>=0.1.6"
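# Note: hf_transfer only takes effect when huggingface_hub is told to use it;
# enable it explicitly if accelerated downloads are wanted:
# ENV HF_HUB_ENABLE_HF_TRANSFER=1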
RUN apt-get update && apt-get install -y \
build-essential \
curl \
git \
git-lfs \
&& rm -rf /var/lib/apt/lists/*
# Original command (returned a 404 error)
# RUN curl -L --fail https://ollama.com/download/ollama-linux-amd64 -o /usr/local/bin/ollama \
# && chmod +x /usr/local/bin/ollama
# Fixed command (uses the install.sh script)
RUN curl -fsSL https://ollama.com/install.sh | sh
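# For reproducible builds the installer can be pinned to a release (a sketch;
# the version shown is hypothetical, and OLLAMA_VERSION support in install.sh
# is an assumption):
# RUN curl -fsSL https://ollama.com/install.sh | OLLAMA_VERSION=0.3.12 sh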
# Unify Ollama's home/model paths under /app/.ollama
ENV OLLAMA_HOME=/app/.ollama
ENV OLLAMA_MODELS=/app/.ollama
RUN mkdir -p /app/.ollama && chmod -R 777 /app/.ollama
# Default model; CMD below re-uses this variable at runtime
ENV OLLAMA_MODEL=gemma2:9b
# Briefly start the server during the build to pre-pull the model, then shut it down
RUN set -eux; \
ollama serve & pid=$!; \
i=0; \
until curl -fsS http://127.0.0.1:11434/api/tags >/dev/null 2>&1; do \
i=$((i+1)); \
[ "$i" -lt 60 ] || { echo "Ollama not ready"; kill "$pid"; exit 1; }; \
sleep 1; \
done; \
ollama pull "${OLLAMA_MODEL}"; \
kill "$pid" || true
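# Note: pre-pulling bakes the model weights into an image layer, so the image
# grows by the model size (several GB for gemma2:9b) in exchange for a faster
# container start.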
# 3. Set the working directory
WORKDIR /app
# 4. Install Python libraries (requirements.txt is copied first so this layer is cached when only source changes)
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
# 5. Copy the source code
COPY . /app
# 6. Set up Git LFS and pull large files ("|| true" tolerates a build context without a .git directory)
RUN git lfs install && git lfs pull || true
# 7. Pre-create the config folder Streamlit will use and grant permissions (fixes PermissionError)
# Pin the home and config paths
ENV HOME=/app
ENV STREAMLIT_HOME=/app/.streamlit
RUN mkdir -p /app/.streamlit && chmod -R 777 /app/.streamlit
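# Optional: pre-write a minimal Streamlit config (a sketch; forcing headless
# mode here is an assumption, not something this Space necessarily needs):
# RUN printf '[server]\nheadless = true\n' > /app/.streamlit/config.toml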
# Recommended: pin cache paths via environment variables so downloads land under writable /tmp
ENV HF_HOME=/tmp/hf-home \
TRANSFORMERS_CACHE=/tmp/hf-cache \
HUGGINGFACE_HUB_CACHE=/tmp/hf-cache \
TORCH_HOME=/tmp/torch-cache \
XDG_CACHE_HOME=/tmp/xdg-cache
RUN mkdir -p /tmp/hf-home /tmp/hf-cache /tmp/torch-cache /tmp/xdg-cache
# Internal-connection environment variable (set so the app can read it conveniently)
ENV OLLAMA_HOST=http://127.0.0.1:11434
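# For reference, the endpoint can be exercised with this variable directly,
# e.g.: curl -fsS "$OLLAMA_HOST/api/tags"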
EXPOSE 8501
# (There is no need to expose Ollama externally, but doing so is harmless)
EXPOSE 11434
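# Optional: let Docker probe the Ollama API for liveness (a sketch; the timing
# values are arbitrary, and /api/tags is the endpoint the build-time readiness
# loop above already uses):
# HEALTHCHECK --interval=30s --timeout=5s --start-period=120s \
#   CMD curl -fsS http://127.0.0.1:11434/api/tags || exit 1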
# 🔴 Final run command: ollama serve → readiness check → streamlit
CMD bash -lc '\
ollama serve & \
for i in {1..120}; do curl -fsS http://127.0.0.1:11434/api/version >/dev/null && break || sleep 1; done; \
ollama pull "${OLLAMA_MODEL}" || exit 1; \
streamlit run app.py --server.address=0.0.0.0 --server.port=${PORT:-8501}'