File size: 1,380 Bytes
c84c1b2
608d0ea
c84c1b2
 
 
6b58a8e
 
608d0ea
 
 
 
 
c84c1b2
 
 
 
 
 
 
 
 
 
608d0ea
 
 
 
f2a6212
c84c1b2
608d0ea
8a4ac98
608d0ea
c84c1b2
 
 
 
608d0ea
 
98aa1e2
5e1491f
1ed3257
d3613bc
8a4ac98
1ed3257
98aa1e2
 
608d0ea
98aa1e2
608d0ea
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
# Build-time configuration for the CUDA base images.
ARG UBUNTU_VERSION=22.04
ARG CUDA_VERSION=12.3.1
# devel image carries nvcc + toolchain for compiling; runtime image is the
# slimmer base used by the final stage.
ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}
ARG BASE_CUDA_RUN_CONTAINER=nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}

# Build stage: compiles the llama.cpp server with CUDA (cuBLAS) support.
FROM ${BASE_CUDA_DEV_CONTAINER} AS build

# Target CUDA architectures; "all" builds for every arch the toolkit supports.
ARG CUDA_DOCKER_ARCH=all

# Build toolchain. update+install combined in one layer (avoids stale apt
# cache) and lists removed in the same layer so they never land in the image.
# `gcc` dropped: build-essential already depends on it.
RUN apt-get update --fix-missing && \
    apt-get install -y --no-install-recommends \
        build-essential \
        cmake \
        git && \
    rm -rf /var/lib/apt/lists/*

WORKDIR /build

# NOTE(review): cloning an unpinned default branch is non-reproducible —
# consider `--branch <release-tag>`. --depth 1 keeps the layer small.
RUN git clone --depth 1 https://github.com/ggerganov/llama.cpp.git

WORKDIR /build/llama.cpp

# Propagate the requested CUDA arch list into the build environment.
ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
# Enables the cuBLAS backend in llama.cpp's build system.
ENV LLAMA_CUBLAS=1

# Out-of-tree configure + build; `cmake -B` replaces the mkdir/cd dance.
RUN cmake -B build -DLLAMA_CUBLAS=ON && \
    cmake --build build --config Release

# Runtime stage: minimal CUDA runtime image with only the server binary,
# its web UI assets, the launcher script, and the model file.
FROM ${BASE_CUDA_RUN_CONTAINER} AS runtime

# wget fetches the model below; curl kept for ad-hoc probes/health checks.
RUN apt-get update --fix-missing && \
    apt-get install -y --no-install-recommends \
        curl \
        wget && \
    rm -rf /var/lib/apt/lists/*

WORKDIR /app

# Copy the compiled server and its web UI assets from the build stage.
COPY --from=build /build/llama.cpp/build/bin/server /app
COPY --from=build /build/llama.cpp/examples/server/public /app/public
COPY ./run.sh /app/run.sh

WORKDIR /models
# FIX: the original URL used /blob/main/, which downloads the Hugging Face
# HTML page rather than the model, and pointed at the *base* Q5_0 model while
# naming the output as the *instruct* Q2_K file. /resolve/main/ returns the
# raw file, and repo + filename now match the output name (left unchanged,
# since run.sh presumably references it).
RUN wget -nv \
    https://huggingface.co/TheBloke/Mixtral-8x7B-Instruct-v0.1-GGUF/resolve/main/mixtral-8x7b-instruct-v0.1.Q2_K.gguf \
    -O mixtral-8x7b-instruct-v0.1.Q2_K.gguf

# Documentation only (does not publish the port); 7860 is presumably the
# Hugging Face Spaces convention — confirm against run.sh.
EXPOSE 7860

WORKDIR /app
# Ensure the launcher is executable regardless of host-side file mode.
RUN chmod +x run.sh

# Exec form so run.sh is PID 1 and receives SIGTERM from `docker stop`
# (shell form would interpose /bin/sh -c and swallow the signal).
CMD ["./run.sh"]