# syntax=docker/dockerfile:1
# ==============================================================================
# Dockerfile for Perceptra AI (Image Captioning & Segmentation App)
# Optimized for deployment on Hugging Face Spaces with GPU support.
# ==============================================================================

# -- Stage 1: Base Image and System Dependencies --
# Stable Python 3.9 for broad compatibility; the 'slim' variant keeps the
# image size small.
FROM python:3.9-slim

# Create an unprivileged user up front. Hugging Face Spaces runs containers
# as UID 1000, and running as non-root is a security best practice anyway.
RUN useradd --create-home --uid 1000 appuser

# Set the working directory, then hand it to the app user so the model
# pre-download step below (which runs as non-root) can write into it.
WORKDIR /app
RUN chown appuser:appuser /app

# PYTHONDONTWRITEBYTECODE=1: don't write .pyc files into the image.
# PYTHONUNBUFFERED=1: send output straight to the console without buffering.
# (key=value form — the legacy space-separated ENV form is deprecated.)
ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1

# Install essential system dependencies in a single layer (update + install
# together, apt lists removed in the same layer to keep the image small).
# - build-essential & cmake: compile libraries such as dlib (face-recognition).
# - libgl1, libglib2.0-0, libsm6, libxext6, libxrender1: OpenCV runtime deps
#   for a headless environment. NOTE: libgl1-mesa-glx no longer exists on
#   Debian bookworm (the base of current python:3.9-slim) and breaks the
#   build; libgl1 is its successor. libxrender1 is the runtime package —
#   the -dev headers are not needed at runtime.
RUN apt-get update && DEBIAN_FRONTEND=noninteractive \
    apt-get install -y --no-install-recommends \
        build-essential \
        cmake \
        git \
        libgl1 \
        libglib2.0-0 \
        libsm6 \
        libxext6 \
        libxrender1 \
    && rm -rf /var/lib/apt/lists/*

# -- Stage 2: Python Dependencies --
# Copy only requirements.txt first so Docker caches the installed packages;
# this layer is rebuilt only when requirements.txt changes.
COPY requirements.txt .

# --no-cache-dir keeps the pip download cache out of the image.
RUN pip install --no-cache-dir -r requirements.txt

# -- Stage 3: Application Code & Models --
# Copy the application source (web_app.py, the 'src' directory, and the
# 'templates' directory) into the container, owned by the unprivileged user.
COPY --chown=appuser:appuser . .

# Drop root privileges for the remaining build steps and at runtime.
# HOME points at the user's writable home so tools (e.g. ultralytics
# settings under ~/.config) don't try to write to /root.
USER appuser
ENV HOME=/home/appuser

# Optimization: pre-download the large YOLO model during the build so the
# app starts fast on Hugging Face Spaces instead of downloading the model
# on every startup.
RUN python -c "from ultralytics import YOLO; YOLO('yolov8x-seg.pt')"

# -- Stage 4: Runtime Configuration --
# Documentation only (does not publish the port): Hugging Face Spaces
# expects the app to listen on 7860 and maps it to the public URL.
EXPOSE 7860

# Run the app under Gunicorn, a production-ready WSGI server, in exec form
# so Gunicorn is PID 1 and receives SIGTERM cleanly on shutdown.
# - --workers 2: two worker processes to handle requests; tune as needed.
# - --bind 0.0.0.0:7860: bind all network interfaces on the Spaces port.
# - web_app:app: the Flask instance named 'app' inside web_app.py.
CMD ["gunicorn", "--workers", "2", "--bind", "0.0.0.0:7860", "web_app:app"]