# Dockerfile for a Hugging Face Space (GPU transformers + Streamlit).
# (Removed scraped page-header residue "Spaces: Sleeping" — not Dockerfile syntax.)
# Base-image candidates kept for reference:
# FROM python:3.11-slim
# Use HuggingFace's pre-built GPU image (includes CUDA, PyTorch, Transformers)
# FROM huggingface/transformers-pytorch-amd-gpu:latest
# Pinned by digest for reproducibility — when both a tag and a digest are
# given, the digest selects the image; the ":latest" tag is informational.
FROM huggingface/transformers-pytorch-gpu:latest@sha256:4c7317881a534b22e18add49c925096fa902651fb0571c69f3cad58af3ea2c0f
WORKDIR /app

# Install system dependencies.
# --no-install-recommends keeps the layer minimal (hadolint DL3015); the apt
# list cleanup stays in the same RUN so the removal actually shrinks the layer.
# curl is also required by the HEALTHCHECK further down.
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential \
    curl \
    git \
    && rm -rf /var/lib/apt/lists/*
# (Disabled) Upgrade Python to 3.9 — kept for reference
# RUN apt-get update && \
#     apt-get install -y python3.9 python3.9-distutils python3.9-dev && \
#     update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.9 1 && \
#     curl -sS https://bootstrap.pypa.io/get-pip.py | python3.9 && \
#     rm -rf /var/lib/apt/lists/*

# Sanity-check which interpreter the base image ships
RUN python3 -V

# Copy only the dependency manifest first so the install layer below stays
# cached while application source changes
COPY requirements.txt ./

# Install Python dependencies; --no-cache-dir keeps pip's download cache
# out of the image layer
RUN python3 -m pip install --no-cache-dir -r requirements.txt
# Pre-download Hugging Face models during build.
# This caches models in the Docker image for faster container startup.
# IMPORTANT: the cache env vars must be set BEFORE downloading so models land
# in /app/.cache/huggingface. TRANSFORMERS_CACHE is deprecated in favor of
# HF_HOME/HF_HUB_CACHE but is kept for compatibility with older transformers.
# A single ENV instruction avoids five separate image layers.
ENV HF_HOME=/app/.cache/huggingface \
    HF_HUB_CACHE=/app/.cache/huggingface \
    HF_DATASETS_CACHE=/app/.cache/huggingface \
    TRANSFORMERS_CACHE=/app/.cache/huggingface \
    SENTENCE_TRANSFORMERS_HOME=/app/.cache/huggingface

COPY download_models.py ./
COPY src/config/settings.yaml ./src/config/settings.yaml

# NOTE: the original also ran `chmod -R 755 /app/.cache/huggingface`, which is
# redundant — the recursive chmod on /app/.cache already covers it.
RUN mkdir -p /app/.cache/huggingface && \
    chmod -R 755 /app/.cache && \
    python3 download_models.py
# Copy all application files (excluding .dockerignore patterns)
COPY . .

# Create writable runtime directories and the Streamlit config in one layer.
# A single mkdir -p covers every directory (the huggingface cache dir already
# exists from the model pre-download step, but -p makes this harmless).
# NOTE(review): 777 is kept because HF Spaces may start the container under an
# arbitrary non-root UID; tighten to a dedicated USER + 755 once the runtime
# UID is known — chmod -R 777 is otherwise an anti-pattern.
RUN mkdir -p /app/.streamlit /app/.cache/streamlit /app/.cache/huggingface \
        /app/conversations /app/feedback && \
    printf '[server]\nport = 8501\nheadless = true\nenableCORS = false\nenableXsrfProtection = false\n\n[browser]\ngatherUsageStats = false\n' > /app/.streamlit/config.toml && \
    chmod -R 777 /app/.streamlit /app/.cache/streamlit /app/.cache/huggingface \
        /app/conversations /app/feedback

# Streamlit runtime configuration (one ENV instruction, one layer)
ENV STREAMLIT_CONFIG_HOME=/app/.streamlit \
    STREAMLIT_BROWSER_GATHER_USAGE_STATS=false \
    STREAMLIT_USER_BASE_PATH=/app/.cache/streamlit
# Expose Streamlit port (HF Spaces maps to 7860 automatically).
# EXPOSE is documentation only — it does not publish the port.
# Health check for Streamlit: probes the framework's built-in health endpoint;
# start-period gives the app time to load models before failures count.
HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \
  CMD curl --fail http://localhost:8501/_stcore/health || exit 1
# --- Temporary development commands -------------------------------------
# GPU smoke test at build time; '|| true' keeps the build green on
# CPU-only builders where CUDA is unavailable.
RUN python3 -c "import torch; print(torch.cuda.is_available(), torch.cuda.get_device_name(0))" || true
# RUN mkdir /app/conversations && chmod -R 777 conversations
# RUN mkdir /app/feedback && chmod -R 777 feedback #langchain-huggingface==0.1.0
# RUN pip3 install langchain-huggingface

# TODO: move colpali-engine into requirements.txt with a pinned version —
# installing it here (after COPY . .) re-runs on every source change and is
# unpinned (hadolint DL3013). --no-cache-dir keeps pip's cache out of the
# layer (hadolint DL3042).
RUN pip3 install --no-cache-dir colpali-engine
# Launch the Streamlit app. Exec (JSON-array) form keeps Streamlit as PID 1
# so it receives SIGTERM from `docker stop`.
ENTRYPOINT ["streamlit", "run", "app.py", "--server.port=8501", "--server.address=0.0.0.0", "--server.headless=true"]