File size: 2,406 Bytes
20a2ec5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ff95d7a
 
20a2ec5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ff95d7a
 
 
20a2ec5
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
# Use a supported Python 3.10 slim base image. This is a CPU-only base image.
# NOTE: Debian buster reached end-of-life and its apt repositories were moved
# to archive.debian.org, so `apt-get update` fails on *-buster images;
# bookworm is the current stable slim variant and builds Pillow without issue.
FROM python:3.10-slim-bookworm

# Set the working directory in the container
WORKDIR /app

# Install system dependencies.
# build-essential/cmake: compiling native extensions (e.g. Pillow built from
# source); ffmpeg/libgl1/libgomp1/libsm6/libxext6: common runtime libraries
# for image/video processing packages; git/git-lfs: fetching code and
# LFS-tracked assets; rsync: general file-sync utility.
# NOTE: libgl1-mesa-glx was dropped in Debian bookworm; libgl1 is its
# replacement. --no-install-recommends keeps the image small; the apt list
# cache is removed in the same layer so it never bloats the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential \
    cmake \
    ffmpeg \
    git \
    git-lfs \
    libgl1 \
    libgomp1 \
    libsm6 \
    libxext6 \
    rsync \
    && rm -rf /var/lib/apt/lists/* \
    && git lfs install

# --- OPTIONAL: CUDA/GPU Installation (uncomment ONLY if you need GPU and select GPU hardware) ---
# If you enable these lines, make sure your Hugging Face Space has GPU hardware selected.
# Otherwise, keep them commented out for CPU-only deployment.
# These steps are for installing the CUDA toolkit and PyTorch with CUDA support
# on a slim Debian image. Replace the PyTorch and CUDA versions with what you need.
# ENV CUDA_VERSION=11.8
# ENV CUDNN_VERSION=8
# ENV PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:${PATH}
# ENV LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64:/usr/local/cuda/lib64:${LD_LIBRARY_PATH}
# RUN apt-get update && apt-get install -y --no-install-recommends \
#     cuda-keyring-11-8 \
#     cuda-toolkit-11-8 \
#     libcudnn8=${CUDNN_VERSION}.*-1+cuda${CUDA_VERSION} \
#     libcudnn8-dev=${CUDNN_VERSION}.*-1+cuda${CUDA_VERSION} \
#     && rm -rf /var/lib/apt/lists/*
# RUN pip install --no-cache-dir torch==2.1.0 torchvision==0.16.0 torchaudio==2.1.0 --index-url https://download.pytorch.org/whl/cu118
#
# If you uncommented the CUDA/PyTorch installation above, set this to 'true'.
# ENV USE_GPU=true
# --- END OPTIONAL CUDA/GPU Installation ---

# Copy only the requirements file first so the pip layer stays cached until
# requirements.txt itself changes (source edits won't reinstall dependencies).
COPY requirements.txt .

# Install Python dependencies from requirements.txt
RUN pip install --no-cache-dir -r requirements.txt

# Copy the application code into the container
COPY app.py .

# Run as an unprivileged user: create a system user/group and hand over /app.
# The service should not run as root inside the container.
RUN groupadd --system app \
    && useradd --system --gid app --home-dir /app app \
    && chown -R app:app /app
USER app

# Expose the port Flask runs on (documentation only; does not publish the port).
# 5000 is unprivileged, so binding works for the non-root user above.
EXPOSE 5000

# Set USE_GPU environment variable for CPU-only deployment.
# If you enable the OPTIONAL CUDA/GPU section above, ensure ENV USE_GPU=true is set there.
ENV USE_GPU=false

# Command to run the Flask application using gunicorn for production serving.
# Exec (JSON-array) form keeps gunicorn as PID 1 so it receives SIGTERM on stop.
CMD ["gunicorn", "--bind", "0.0.0.0:5000", "app:app"]