# retinaface / Dockerfile
# Last updated by benstaf (commit 866f3ed)
# Use a more recent Python base image with Debian Bullseye
FROM python:3.9-slim-bullseye

# Set the working directory in the container
WORKDIR /app

# Point every model/cache directory at /tmp so the (non-root) runtime user
# can always write them, e.g. on restricted hosting platforms.
# NOTE(review): TRANSFORMERS_CACHE is deprecated in newer transformers
# releases in favor of HF_HOME, but is kept for backward compatibility.
ENV FEAT_MODELS_DIR=/tmp/feat_models \
    MPLCONFIGDIR=/tmp/matplotlib \
    TORCH_HOME=/tmp/torch \
    HF_HOME=/tmp/huggingface \
    TRANSFORMERS_CACHE=/tmp/transformers \
    PYTHONUNBUFFERED=1

# Create the cache directories and open them up for the runtime user.
# Only the created directories are chmod'ed: the original `chmod -R 777 /tmp/`
# also rewrote /tmp itself from 1777 to 0777, stripping the sticky bit and
# letting any user delete other users' files in /tmp.
RUN mkdir -p /tmp/feat_models /tmp/matplotlib /tmp/torch /tmp/huggingface /tmp/transformers && \
    chmod -R 777 /tmp/feat_models /tmp/matplotlib /tmp/torch /tmp/huggingface /tmp/transformers
# System libraries needed by py-feat / OpenCV-style imaging stacks at runtime.
# --no-install-recommends keeps the image small (hadolint DL3015); the apt
# lists are removed in the same layer so they are never baked into the image.
# Packages are sorted alphabetically for diffability.
# NOTE(review): libxrender-dev drags build headers into a runtime image;
# libxrender1 alone is likely sufficient — confirm before swapping.
RUN apt-get update && apt-get install -y --no-install-recommends \
    libgfortran5 \
    libgl1-mesa-glx \
    libglib2.0-0 \
    libgomp1 \
    libsm6 \
    libxext6 \
    libxrender-dev \
    wget \
    && rm -rf /var/lib/apt/lists/*
# Install Python dependencies in their own layer, copying only the manifest
# first: this layer stays cached until requirements.txt itself changes,
# so editing application code does not trigger a full reinstall.
COPY requirements.txt /app/
RUN pip install --no-cache-dir -r /app/requirements.txt
# Optional: pre-download models at build time (uncomment to enable).
# The previous multi-line form could not work even if uncommented: a RUN
# instruction needs `\` line continuations, and `python -c` cannot take a
# `try:` block split across semicolon-separated statements like that.
# RUN python -c "import os, feat; os.environ.setdefault('FEAT_MODELS_DIR', '/tmp/feat_models'); os.environ.setdefault('MPLCONFIGDIR', '/tmp/matplotlib'); feat.detector.Detector(); print('Models downloaded successfully')" \
#     || echo "Model pre-download failed, will download at runtime"
# Create the non-root user BEFORE copying the sources so ownership can be
# set at COPY time. The original `COPY . .` + `RUN chown -R ... /app`
# duplicated the whole application tree into an extra image layer;
# `COPY --chown` sets ownership in the copy layer itself.
RUN useradd -m -u 1000 appuser && \
    chown -R appuser:appuser /tmp/feat_models /tmp/matplotlib /tmp/torch /tmp/huggingface /tmp/transformers

# Copy the application code into the container, owned by the runtime user
COPY --chown=appuser:appuser . .

# Drop root privileges for the running container
USER appuser

# Document the port Uvicorn listens on (EXPOSE does not publish it)
EXPOSE 7860

# Exec-form CMD: uvicorn runs as PID 1 and receives SIGTERM on `docker stop`
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]