# Multi-stage image: build the llama.cpp server with CUDA (cuBLAS) support in a
# full CUDA devel image, then ship it on the slimmer CUDA runtime image.
ARG UBUNTU_VERSION=22.04 |
ARG CUDA_VERSION=12.3.1 |
ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION} |
ARG BASE_CUDA_RUN_CONTAINER=nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu${UBUNTU_VERSION} |
|
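# Build stage: compile the server against the full CUDA toolkit.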
FROM ${BASE_CUDA_DEV_CONTAINER} AS build
|
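# CUDA architectures to compile for; "all" produces a fat binary that runs on
# any GPU supported by this CUDA version.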
ARG CUDA_DOCKER_ARCH=all |
|
RUN apt-get update --fix-missing && \ |
    apt-get install -y --no-install-recommends git build-essential gcc cmake && \
    rm -rf /var/lib/apt/lists/*
|
WORKDIR /build |
|
RUN git clone https://github.com/ggerganov/llama.cpp.git |
|
WORKDIR /build/llama.cpp |
|
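# Pin the checkout to a fixed commit so image builds are reproducible.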
RUN git checkout 821f0a271e7c9ee737945245dd7abfa22cc9b5b0 |
|
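# Kept for parity with llama.cpp's Makefile-based CUDA builds, which read these
# variables; the CMake invocation below is what actually configures this build.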
ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH} |
ENV LLAMA_CUBLAS=1 |
|
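# Configure with cuBLAS enabled and build the Release binaries.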
RUN mkdir build && \ |
    cd build && \
    cmake .. -DLLAMA_CUBLAS=ON && \
    cmake --build . --config Release
|
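# Runtime stage: only the CUDA runtime libraries plus the compiled server.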
FROM ${BASE_CUDA_RUN_CONTAINER} AS runtime
|
RUN apt-get update --fix-missing && \ |
    apt-get install -y --no-install-recommends wget && \
    rm -rf /var/lib/apt/lists/*
|
WORKDIR /app |
|
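# Copy the server binary and its bundled web UI assets from the build stage.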
COPY --from=build /build/llama.cpp/build/bin/server /app |
COPY --from=build /build/llama.cpp/examples/server/public /app/public |
COPY ./run.sh /app/run.sh |
|
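# Port the container serves on; run.sh is expected to start the server on it.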
EXPOSE 7867 |
|
RUN chmod +x run.sh |
|
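# run.sh is provided alongside this Dockerfile and is expected to launch the
# server. A minimal sketch (assuming a GGUF model mounted at /models/model.gguf):
#   ./server -m /models/model.gguf --host 0.0.0.0 --port 7867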
CMD ./run.sh |