# syntax=docker/dockerfile:1

# GPU-capable base: CUDA 12.1 runtime on Ubuntu 22.04, required by
# onnxruntime-gpu==1.22.1 installed below.
FROM nvidia/cuda:12.1.0-runtime-ubuntu22.04

# System dependencies:
#   git                            - packages that install from git sources
#   libgl1-mesa-glx / libglib2.0-0 - OpenCV (cv2) runtime shared libraries
#   wget                           - fetch the Miniconda installer
# python3 / python3-pip are intentionally NOT installed: all Python execution
# goes through the conda environment created below, so the system interpreter
# would be dead weight. --no-install-recommends keeps the layer small, and the
# apt list cleanup runs in the same layer so the cache never lands in the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
        git \
        libgl1-mesa-glx \
        libglib2.0-0 \
        wget \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app

# Install Miniconda; the installer is removed in the same layer so it does not
# persist in the image.
# NOTE(review): "latest" is not reproducible — consider pinning a specific
# Miniconda release and verifying its sha256 checksum.
RUN wget -q https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh \
    && bash miniconda.sh -b -p /opt/conda \
    && rm miniconda.sh
ENV PATH="/opt/conda/bin:$PATH"

# Copy only the environment spec before creating the env, so this slow layer
# stays cached until environment.yml itself changes. conda clean removes the
# downloaded package tarballs/caches from the layer.
COPY environment.yml .
RUN conda env create -f environment.yml && conda clean -afy

# Run all subsequent RUN instructions inside the conda environment.
SHELL ["conda", "run", "-n", "yolo-onnx-cpu-env", "/bin/bash", "-c"]

# Swap the CPU onnxruntime (pulled in by environment.yml) for the pinned GPU
# build, and add the serving stack. --no-cache-dir keeps the pip cache out of
# the image layer.
# NOTE(review): gunicorn and flask are unpinned — pin versions for
# reproducible builds.
RUN pip uninstall -y onnxruntime \
    && pip install --no-cache-dir onnxruntime-gpu==1.22.1 gunicorn flask

# Application code and model weights last: changes here do not invalidate the
# expensive dependency layers above.
COPY scoring_Yolo_Model.py .
COPY best.onnx .
COPY scoring_Yolo_Model_Gunicorn.py .

# Drop root for runtime. UID 1000 matches the Hugging Face Spaces convention;
# /app is chowned so the app can write alongside its own files if needed.
RUN useradd -m -u 1000 appuser && chown -R appuser:appuser /app
USER appuser

# Hugging Face Spaces Docker containers serve on 7860 by default. EXPOSE is
# documentation only; the platform handles actual publishing.
EXPOSE 7860

# Exec-form CMD: gunicorn is PID-visible to the runtime and receives SIGTERM
# on stop. `conda run` resolves the environment's interpreter and PATH.
CMD ["conda", "run", "-n", "yolo-onnx-cpu-env", "gunicorn", "--bind", "0.0.0.0:7860", "scoring_Yolo_Model_Gunicorn:app"]