# 1. Set the Python version to 3.11
FROM python:3.11-slim
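# (The slim variant ships without compilers; build-essential below covers any
# Python dependency that has to compile from source.)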
# 2. Install hf_transfer, required system packages, and git-lfs
RUN pip install --no-cache-dir "hf_transfer>=0.1.6"
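# Note: hf_transfer only accelerates Hugging Face downloads when
# HF_HUB_ENABLE_HF_TRANSFER=1 is set; without that flag it is effectively inert.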
RUN apt-get update && apt-get install -y \
build-essential \
curl \
git \
git-lfs \
&& rm -rf /var/lib/apt/lists/*
# Install Ollama
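# The official install script fetches the latest Ollama release at build time,
# so rebuilds are not pinned to a specific Ollama version.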
RUN curl -fsSL https://ollama.com/install.sh | sh
# Environment variables (pin the model name explicitly)
ENV OLLAMA_MODEL=gemma2:9b
ENV OLLAMA_HOME=/app/.ollama
ENV OLLAMA_MODELS=/app/.ollama
RUN mkdir -p /app/.ollama && chmod -R 777 /app/.ollama
# Download the model at build time
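# Baking the weights into the image (several GB for gemma2:9b) trades image
# size for a faster cold start: a temporary "ollama serve" runs in the
# background, the loop polls /api/tags until the API responds (up to 60s),
# the model is pulled into OLLAMA_MODELS, and the server is stopped.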
RUN set -eux; \
ollama serve & pid=$!; \
i=0; \
until curl -fsS http://127.0.0.1:11434/api/tags >/dev/null 2>&1; do \
i=$((i+1)); \
[ "$i" -lt 60 ] || { echo "Ollama not ready"; kill "$pid"; exit 1; }; \
sleep 1; \
done; \
ollama pull "$OLLAMA_MODEL"; \
kill "$pid" || true
# 3. Set the working directory
WORKDIR /app
# 4. Install Python libraries
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
# 5. Copy the source code
COPY . /app
# 6. Set up Git LFS and pull large files
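# "git lfs pull" only works when the build context includes the .git
# directory; the "|| true" lets the build proceed when it does not.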
RUN git lfs install && git lfs pull || true
# Runtime environment variables
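# HOME is redirected to /app because the container may run as an arbitrary
# non-root user (as is common on hosted platforms such as Hugging Face
# Spaces) with no writable home directory; the chmod 777 calls exist for the
# same reason.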
ENV HOME=/app
ENV STREAMLIT_HOME=/app/.streamlit
RUN mkdir -p /app/.streamlit && chmod -R 777 /app/.streamlit
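# Redirect all model/download caches to /tmp, which remains writable
# regardless of the UID the container runs under.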
ENV HF_HOME=/tmp/hf-home \
TRANSFORMERS_CACHE=/tmp/hf-cache \
HUGGINGFACE_HUB_CACHE=/tmp/hf-cache \
TORCH_HOME=/tmp/torch-cache \
XDG_CACHE_HOME=/tmp/xdg-cache
RUN mkdir -p /tmp/hf-home /tmp/hf-cache /tmp/torch-cache /tmp/xdg-cache
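# Point the ollama CLI (and any client library that honors this variable) at
# the server started in CMD below.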
ENV OLLAMA_HOST=http://127.0.0.1:11434
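# EXPOSE is documentation only: 8501 is Streamlit's default port, 11434 is
# Ollama's.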
EXPOSE 8501
EXPOSE 11434
# CMD: start Ollama, ensure the model is present, then launch Streamlit
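# The script starts "ollama serve" in the background, polls /api/version for
# up to 120s, re-pulls the model (a fast verification when the weights were
# already baked in at build time), and finally runs Streamlit in the
# foreground as the container's main process. If Ollama never becomes ready,
# the pull fails and the container exits with an error.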
CMD bash -lc '\
export OLLAMA_MODEL=${OLLAMA_MODEL:-gemma2:9b}; \
echo "Starting Ollama with model: $OLLAMA_MODEL"; \
ollama serve & \
for i in {1..120}; do \
if curl -fsS http://127.0.0.1:11434/api/version >/dev/null 2>&1; then \
echo "Ollama server is ready"; \
break; \
fi; \
echo "Waiting for Ollama... ($i/120)"; \
sleep 1; \
done; \
echo "Ensuring model is available: $OLLAMA_MODEL"; \
ollama pull "$OLLAMA_MODEL" || { echo "Failed to pull model"; exit 1; }; \
echo "Starting Streamlit app"; \
streamlit run app.py --server.address=0.0.0.0 --server.port=${PORT:-8501}'
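# Build/run sketch (the "chatbot-moai" tag is only an example name):
#   docker build -t chatbot-moai .
#   docker run -p 8501:8501 chatbot-moai
# Streamlit binds ${PORT:-8501}, i.e. port 8501 unless PORT is set at runtime.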