Gurdaan committed on
Commit
fcd65de
·
verified ·
1 Parent(s): 2412b89

Update dockerfile

Browse files
Files changed (1) hide show
  1. dockerfile +30 -23
dockerfile CHANGED
@@ -1,36 +1,43 @@
# Use a base image from Microsoft that includes Conda, Python, and GPU drivers.
# This image is a great starting point for ML workloads on Azure.
FROM mcr.microsoft.com/azureml/openmpi4.1.0-ubuntu20.04:latest

# All subsequent commands are executed from this directory.
WORKDIR /app

# Copy ONLY the Conda environment file first so the slow env-create layer
# below stays cached until environment.yml itself changes.
COPY environment.yml .

# Create the Conda environment (`yolo-onnx-cpu-env`, as named in the YAML).
# Clean conda caches in the SAME layer so the downloaded packages don't
# persist in the image.
RUN conda env create -f environment.yml && conda clean -afy

# Run the remaining build steps inside the new Conda environment.
SHELL ["conda", "run", "-n", "yolo-onnx-cpu-env", "/bin/bash", "-c"]

# Gunicorn + Flask serve the model over HTTP; Azure Container Apps uses
# HTTP traffic to trigger scaling. --no-cache-dir avoids baking pip's
# download cache into the layer.
RUN pip install --no-cache-dir gunicorn flask

# Copy the scoring script and the ONNX model into the working directory.
COPY scoring_Yolo_Model.py .
COPY best.onnx .
# BUG FIX: the CMD below imports `scoring_Yolo_Model_Gunicorn:app`, but that
# module was never copied into the image, so gunicorn would crash with
# ModuleNotFoundError at container start.
COPY scoring_Yolo_Model_Gunicorn.py .
# Assuming `class_names` or other static files are also present, copy them here.
# COPY class_names.txt .

# Documentation only (does not publish the port): Azure Container Apps
# routes traffic to this port.
EXPOSE 8080

# Start gunicorn inside the Conda environment. --no-capture-output makes
# `conda run` stream gunicorn's logs to stdout/stderr instead of buffering
# them until the process exits.
CMD ["conda", "run", "--no-capture-output", "-n", "yolo-onnx-cpu-env", "gunicorn", "--bind", "0.0.0.0:8080", "scoring_Yolo_Model_Gunicorn:app"]
# Use an official NVIDIA CUDA base image for GPU support.
# BUG FIX: the plain `runtime` image ships CUDA libraries but NO cuDNN, and
# onnxruntime-gpu needs cuDNN to load its CUDA execution provider (cuDNN 9.x
# for the CUDA 12 builds of ORT 1.22). The `-cudnn-` runtime variant includes
# cuDNN 9. NOTE(review): confirm this matches the ORT 1.22.1 CUDA/cuDNN matrix.
FROM nvidia/cuda:12.4.1-cudnn-runtime-ubuntu22.04

# System dependencies:
#  - git / wget: fetch Miniconda and any VCS pip requirements
#  - libgl1-mesa-glx / libglib2.0-0: shared libs needed to import OpenCV (cv2)
# --no-install-recommends plus the apt-list cleanup keeps the layer small.
RUN apt-get update && apt-get install -y --no-install-recommends \
        git \
        libgl1-mesa-glx \
        libglib2.0-0 \
        python3 \
        python3-pip \
        wget \
    && rm -rf /var/lib/apt/lists/*

# Set the working directory.
WORKDIR /app

# Install Miniconda; it provides the interpreter for the environment below.
# Installer is removed in the same layer so it never persists in the image.
RUN wget -q https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh && \
    bash miniconda.sh -b -p /opt/conda && \
    rm miniconda.sh
ENV PATH="/opt/conda/bin:$PATH"

# Copy ONLY the env file first so the slow env-create layer stays cached
# until environment.yml changes; clean conda caches in the same layer.
COPY environment.yml .
RUN conda env create -f environment.yml && conda clean -afy

# Run the remaining build steps inside the environment.
# (Env name is still `yolo-onnx-cpu-env` because it comes from environment.yml,
# even though this image is the GPU build.)
SHELL ["conda", "run", "-n", "yolo-onnx-cpu-env", "/bin/bash", "-c"]

# Swap the CPU onnxruntime pinned in environment.yml for the GPU build, and
# install the HTTP serving stack.
RUN pip uninstall -y onnxruntime && \
    pip install --no-cache-dir onnxruntime-gpu==1.22.1 gunicorn flask

# Copy the application files.
COPY scoring_Yolo_Model.py .
COPY best.onnx .
# BUG FIX: this line previously carried a trailing `# ...` remark. In a
# Dockerfile, `#` is only a comment at the START of a line — mid-line it is
# passed to COPY as extra source arguments and the build fails.
COPY scoring_Yolo_Model_Gunicorn.py .

# Documentation only: Hugging Face Spaces routes HTTP traffic to port 7860
# for Docker Spaces.
EXPOSE 7860

# Start gunicorn inside the Conda environment. --no-capture-output makes
# `conda run` stream gunicorn's logs to stdout/stderr instead of buffering
# them until the process exits (required for useful logs on Spaces).
CMD ["conda", "run", "--no-capture-output", "-n", "yolo-onnx-cpu-env", "gunicorn", "--bind", "0.0.0.0:7860", "scoring_Yolo_Model_Gunicorn:app"]