# Rope — Dockerfile
# Author: charbel-malo (upstream commit 69d7e42, verified)
# 1. Base image: NVIDIA CUDA 11.8 with cuDNN 8 on Ubuntu 22.04.
# The 'devel' variant is required so nvcc is available (verified later).
FROM nvidia/cuda:11.8.0-cudnn8-devel-ubuntu22.04

# 2. Environment variables.
# DEBIAN_FRONTEND is a build-time-only concern: declare it as ARG so it
# silences apt prompts during every RUN in this stage without leaking into
# the runtime environment of the final image.
ARG DEBIAN_FRONTEND=noninteractive
ENV PYTHONUNBUFFERED=1
ENV APP_DIR=/app/Rope
# NVIDIA base images usually set the CUDA paths, but ensure them explicitly.
ENV PATH="/usr/local/cuda/bin:${PATH}"
# ${VAR:+...} avoids a trailing ':' when LD_LIBRARY_PATH is unset — an empty
# path element is interpreted as the current directory, which is unsafe.
ENV LD_LIBRARY_PATH="/usr/local/cuda/lib64${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}"
# 3. System dependencies.
# Ubuntu 22.04 ships Python 3.10.x as 'python3'. Update and install run in
# the same layer (stale-cache safety), recommended packages are skipped, and
# the apt lists are removed in the same layer so they never persist in the
# image. Package list is one-per-line and sorted for diffability.
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        ca-certificates \
        curl \
        ffmpeg \
        git \
        git-lfs \
        python3-pip \
        python3-venv \
    && rm -rf /var/lib/apt/lists/*

# Sanity check: fail the build early if the interpreter is missing
# (expected output: Python 3.10.x).
RUN python3 --version
# 4. Application checkout.
# ROPE_REF defaults to HEAD (the clone's default branch tip) so behavior is
# unchanged by default; pass --build-arg ROPE_REF=<tag-or-sha> to pin a
# specific revision for a reproducible build.
ARG ROPE_REF=HEAD
# WORKDIR creates ${APP_DIR} if missing — no mkdir needed.
WORKDIR ${APP_DIR}
# Clone the Rope application repository into ${APP_DIR} and pin the revision.
RUN git clone https://github.com/Hillobar/Rope.git . && \
    git checkout "${ROPE_REF}"

# 5. Python virtual environment and dependencies.
# Upgrade pip inside the venv first so modern wheel/resolver behavior is used
# when installing the (CUDA-variant) requirements.
RUN python3 -m venv venv && \
    ./venv/bin/python -m pip install --no-cache-dir --upgrade pip
# requirements.txt carries --extra-index-url for the PyTorch CUDA 11.8 wheels.
RUN ./venv/bin/python -m pip install --no-cache-dir -r requirements.txt
# 6. Models.
# The app expects model files in ${APP_DIR}/models. The application repo
# already contains a 'models' directory (typically just a .gitkeep), so it is
# removed to make way for the real models cloned from Hugging Face.
# Everything happens in ONE layer: removing the placeholder in an earlier
# layer would not shrink the image, and — more importantly — git-lfs keeps a
# second copy of every downloaded model under models/.git/lfs, which would
# roughly double this multi-GB layer. Deleting .git in the same layer keeps
# only one copy of each model file in the image.
RUN rm -rf ${APP_DIR}/models && \
    git lfs install --system && \
    git clone https://huggingface.co/Hillobar/Rope ${APP_DIR}/models && \
    rm -rf ${APP_DIR}/models/.git
# 7. Verify the CUDA toolchain — fails the build early if nvcc is not on
# PATH (it should be, via the 'devel' base image plus the PATH set above).
RUN nvcc --version
# 8. Working directory (already ${APP_DIR}; restated for clarity).
WORKDIR ${APP_DIR}
# 9. Expose the web UI port. EXPOSE is documentation only — publish with
# `docker run -p 7860:7860`.
# NOTE(review): 7860 is Gradio's default port, but nothing in this file
# proves the app serves a Gradio UI — upstream Hillobar/Rope is commonly a
# desktop (Tkinter) application. TODO: confirm.
EXPOSE 7860
# 10. Run the app with the venv's interpreter. Exec (JSON-array) form makes
# the process PID 1 so it receives SIGTERM from `docker stop` directly.
# NOTE(review): confirm 'app.py' exists in the cloned repo and accepts
# '--listen' (the upstream entry point is often 'Rope.py'); also note the
# container runs as root — consider adding a non-root USER if the app does
# not need root at runtime.
CMD ["./venv/bin/python", "app.py", "--listen"]