Spaces:
Sleeping
Sleeping
Added Dockerfile
Browse files (cherry picked from commit 8e9f5ea9a739e9b1feb4a94f7888b98b72a3e953)
- Dockerfile +41 -0
Dockerfile
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# syntax=docker/dockerfile:1

# Build-time knobs: bump these to move to a newer CUDA toolkit / Ubuntu base.
ARG UBUNTU_VERSION=22.04
ARG CUDA_VERSION=12.3.1
# Heavy devel image (nvcc, headers) only for compiling; the slimmer runtime
# image is what actually ships.
ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}
ARG BASE_CUDA_RUN_CONTAINER=nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}
FROM ${BASE_CUDA_DEV_CONTAINER} AS build

# "all" compiles CUDA kernels for every supported compute capability;
# override (e.g. --build-arg CUDA_DOCKER_ARCH=sm_86) to speed up the build
# when targeting a single GPU generation.
ARG CUDA_DOCKER_ARCH=all

# Toolchain for the CMake build plus wget for the model download.
# update + install combined in one layer (stale-cache bug otherwise) and the
# apt lists removed in the same layer so they never ship in the image.
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        build-essential \
        ca-certificates \
        cmake \
        git \
        wget \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /build

# NOTE(review): clone is unpinned, so every build tracks llama.cpp master and
# images are not reproducible — pin a tag or commit hash.
RUN git clone https://github.com/ggerganov/llama.cpp.git

WORKDIR /build/llama.cpp

# Exported for llama.cpp's build; LLAMA_CUBLAS=1 enables the CUDA backend
# (the flag name used by llama.cpp at this vintage).
ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
ENV LLAMA_CUBLAS=1

# Out-of-source build via cmake -B instead of mkdir/cd (hadolint DL3003);
# Ubuntu 22.04 ships CMake 3.22, well past the 3.13 minimum for -B.
RUN cmake -B build -DLLAMA_CUBLAS=ON && \
    cmake --build build --config Release

# Fetch the quantized model here so the runtime stage can COPY it without
# carrying wget. NOTE(review): no checksum verification on the download.
WORKDIR /data
RUN wget https://huggingface.co/IlyaGusev/saiga2_7b_gguf/resolve/main/model-q8_0.gguf -O model.gguf
FROM ${BASE_CUDA_RUN_CONTAINER} AS runtime

WORKDIR /app

# Only the compiled server binary and the model reach the runtime image;
# compilers, sources, and wget stay behind in the build stage.
COPY --from=build /build/llama.cpp/build/bin/server /app
COPY --from=build /data/model.gguf /data/model.gguf

# Hugging Face Spaces routes traffic to 7860. EXPOSE is documentation only;
# it does not publish the port.
EXPOSE 7860

# NOTE(review): container runs as root — consider adding a non-root USER;
# nothing here needs root at runtime.
# Exec form: the server is PID 1 and receives SIGTERM from `docker stop`
# (shell form would wrap it in /bin/sh -c and swallow the signal).
CMD ["./server", "-m", "/data/model.gguf", "-c", "2048", "--port", "7860"]