# NOTE: removed extraction residue ("Spaces: Build error" status lines copied
# from the Hugging Face Spaces build UI) — they were not part of the Dockerfile.
# syntax=docker/dockerfile:1
# ==============================================================================
# Dockerfile for Perceptra AI (Image Captioning & Segmentation App)
# Optimized for deployment on Hugging Face Spaces.
# ==============================================================================

# -- Stage 1: Base Image and System Dependencies --
# Pinned Python minor version for broad compatibility; the 'slim' variant
# keeps the image size down.
FROM python:3.9-slim

# All subsequent commands run from this path (created automatically).
WORKDIR /app

# Prevent Python from writing .pyc files and disable stdout/stderr buffering
# so logs appear immediately in the Spaces console.
# (key=value form: the legacy space-separated ENV syntax is deprecated.)
ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1

# System packages required by the Python stack:
# - build-essential & cmake: compile native extensions (e.g. dlib for
#   face-recognition).
# - libgl1-mesa-glx, libglib2.0-0, libsm6, libxext6, libxrender-dev: runtime
#   libraries OpenCV needs for image processing in a headless container.
# - git: lets pip install packages directly from git repositories.
# 'update' and 'install' share one layer (avoids a stale apt cache), and the
# apt lists are removed in the same layer so they never bloat the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
        build-essential \
        cmake \
        git \
        libgl1-mesa-glx \
        libglib2.0-0 \
        libsm6 \
        libxext6 \
        libxrender-dev \
    && rm -rf /var/lib/apt/lists/*

# -- Stage 2: Python Dependencies --
# Copy only requirements.txt first so Docker caches the installed packages;
# this layer is rebuilt only when requirements.txt changes.
COPY requirements.txt .

# --no-cache-dir keeps the pip download cache out of the image.
RUN pip install --no-cache-dir -r requirements.txt

# -- Stage 3: Application Code & Models --
# Create a non-root user (UID 1000 is the Hugging Face Spaces convention) so
# the container does not run as root.
RUN useradd --create-home --uid 1000 appuser

# Copy the application source (web_app.py, the 'src' directory, and the
# 'templates' directory), owned by the non-root user so the app — and the
# model download below — can write inside /app.
COPY --chown=appuser:appuser . .
USER appuser

# Optimization: pre-download the large YOLO model during the build so the app
# starts quickly on Spaces instead of fetching the weights on every startup.
RUN python -c "from ultralytics import YOLO; YOLO('yolov8x-seg.pt')"

# -- Stage 4: Runtime Configuration --
# Documentation only (EXPOSE does not publish ports): Hugging Face Spaces
# expects the app to listen on 7860 and maps it to the public URL.
EXPOSE 7860

# Run the Flask app under Gunicorn, a production WSGI server. Exec (JSON) form
# makes Gunicorn PID 1 so it receives SIGTERM cleanly on shutdown.
# - --workers 2: two worker processes; adjust to the hardware.
# - --bind 0.0.0.0:7860: listen on all interfaces on port 7860 (the Spaces
#   port — the EXPOSE line above and this bind must agree).
# - web_app:app: the Flask instance named 'app' inside web_app.py.
CMD ["gunicorn", "--workers", "2", "--bind", "0.0.0.0:7860", "web_app:app"]