# 1. Use Python 3.11 as the base image
FROM python:3.11-slim

# 2. Install required system packages and git-lfs
RUN apt-get update && apt-get install -y \
    build-essential \
    curl \
    git \
    git-lfs \
    && rm -rf /var/lib/apt/lists/*

# hf_transfer speeds up downloads from the Hugging Face Hub
# (quote the requirement so ">=" is not treated as shell redirection)
RUN pip install --no-cache-dir "hf_transfer>=0.1.6"

# Install Ollama
RUN curl -fsSL https://ollama.com/install.sh | sh

# Environment variables (pin the model name explicitly)
ENV OLLAMA_MODEL=gemma2:9b
ENV OLLAMA_HOME=/app/.ollama
ENV OLLAMA_MODELS=/app/.ollama
RUN mkdir -p /app/.ollama && chmod -R 777 /app/.ollama
# Download the model during the build (uses the OLLAMA_MODEL set above)
RUN set -eux; \
    ollama serve & pid=$!; \
    i=0; \
    until curl -fsS http://127.0.0.1:11434/api/tags >/dev/null 2>&1; do \
        i=$((i+1)); \
        [ "$i" -lt 60 ] || { echo "Ollama not ready"; kill "$pid"; exit 1; }; \
        sleep 1; \
    done; \
    ollama pull "$OLLAMA_MODEL"; \
    kill "$pid" || true
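# Pulling at build time bakes the model weights into the image (under OLLAMA_MODELS),
# so the Space does not re-download several GB of weights on every cold start;
# the tradeoff is a much larger image and a slower build.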
# 3. Set the working directory
WORKDIR /app

# 4. Install the Python libraries
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# 5. Copy the source code
COPY . /app

# 6. Set up Git LFS and fetch large files (failures are tolerated, e.g. when the build context has no .git)
RUN git lfs install && git lfs pull || true

# Environment variables for a writable home and Streamlit config directory
ENV HOME=/app
ENV STREAMLIT_HOME=/app/.streamlit
RUN mkdir -p /app/.streamlit && chmod -R 777 /app/.streamlit
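# Redirect framework caches to /tmp so Hugging Face, Torch, and XDG writes land on a
# path the runtime user can always write to.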
ENV HF_HOME=/tmp/hf-home \
    TRANSFORMERS_CACHE=/tmp/hf-cache \
    HUGGINGFACE_HUB_CACHE=/tmp/hf-cache \
    TORCH_HOME=/tmp/torch-cache \
    XDG_CACHE_HOME=/tmp/xdg-cache
RUN mkdir -p /tmp/hf-home /tmp/hf-cache /tmp/torch-cache /tmp/xdg-cache
# The app talks to the Ollama server running inside the same container
ENV OLLAMA_HOST=http://127.0.0.1:11434

EXPOSE 8501
EXPOSE 11434
# Revised CMD: start Ollama, wait until it responds, make sure the model is available, then launch Streamlit
CMD bash -lc '\
    export OLLAMA_MODEL=${OLLAMA_MODEL:-gemma2:9b}; \
    echo "Starting Ollama with model: $OLLAMA_MODEL"; \
    ollama serve & \
    for i in {1..120}; do \
        if curl -fsS http://127.0.0.1:11434/api/version >/dev/null 2>&1; then \
            echo "Ollama server is ready"; \
            break; \
        fi; \
        echo "Waiting for Ollama... ($i/120)"; \
        sleep 1; \
    done; \
    echo "Ensuring model is available: $OLLAMA_MODEL"; \
    ollama pull "$OLLAMA_MODEL" || { echo "Failed to pull model"; exit 1; }; \
    echo "Starting Streamlit app"; \
    streamlit run app.py --server.address=0.0.0.0 --server.port=${PORT:-8501}'
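The CMD above launches streamlit run app.py, but app.py itself is not shown. As a rough sketch (not the actual app), the Python below shows how such an app could call the local Ollama server through its REST API, reusing the OLLAMA_HOST and OLLAMA_MODEL values set in the Dockerfile. The UI labels, timeout, and overall structure are assumptions; the /api/generate endpoint and its model/prompt/stream fields follow Ollama's documented API.

# app.py - minimal sketch of a Streamlit front end for the local Ollama server
# (illustrative only; the real app.py in this Space is not shown above)
import os

import requests
import streamlit as st

# Provided by the Dockerfile's ENV instructions
OLLAMA_HOST = os.environ.get("OLLAMA_HOST", "http://127.0.0.1:11434")
OLLAMA_MODEL = os.environ.get("OLLAMA_MODEL", "gemma2:9b")

st.title("Ollama demo")
prompt = st.text_area("Prompt")

if st.button("Generate") and prompt:
    # Non-streaming request to Ollama's /api/generate endpoint
    resp = requests.post(
        f"{OLLAMA_HOST}/api/generate",
        json={"model": OLLAMA_MODEL, "prompt": prompt, "stream": False},
        timeout=300,
    )
    resp.raise_for_status()
    st.write(resp.json().get("response", ""))

With this layout, a restart only has to verify the already-cached model before Streamlit comes up, and the app reaches Ollama on 127.0.0.1:11434 inside the same container.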