Upload apex-master/examples/docker/Dockerfile with huggingface_hub
Browse files
apex-master/examples/docker/Dockerfile
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# syntax=docker/dockerfile:1

# Base image must at least have PyTorch and CUDA installed.
ARG BASE_IMAGE=nvcr.io/nvidia/pytorch:23.03-py3
FROM ${BASE_IMAGE}

# An ARG declared before FROM is only visible in FROM lines;
# redeclare it here so the value is usable inside this stage.
ARG BASE_IMAGE
RUN echo "Installing Apex on top of ${BASE_IMAGE}"

# Make sure we don't overwrite some existing directory called "apex".
WORKDIR /tmp/unique_for_apex

# Uninstall any Apex already shipped in the base image, twice to make
# absolutely sure :) — `|| :` keeps the build going when it is absent.
# Both passes share one RUN so they don't create two identical layers.
RUN pip uninstall -y apex || : \
 && pip uninstall -y apex || :

# SHA is something the user can touch to force recreation of this Docker
# layer, and therefore force cloning of the latest version of Apex.
RUN SHA=ToUcHMe git clone https://github.com/NVIDIA/apex.git
WORKDIR /tmp/unique_for_apex/apex

# Build Apex with its fused C++/CUDA extensions. --no-cache-dir keeps the
# pip download cache out of the image layer.
# NOTE(review): --global-option is deprecated in newer pip releases; the
# pip shipped in the pinned 23.03 base still accepts it. If the base image
# is bumped, switch to the --config-settings form from the Apex README.
RUN pip install -v --no-cache-dir \
      --global-option="--cpp_ext" --global-option="--cuda_ext" .

WORKDIR /workspace