File size: 723 Bytes
89b549f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2e44ca0
89b549f
 
 
c200944
6ea5e99
76d5751
89b549f
c200944
 
6ea5e99
89b549f
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
# Build llama.cpp's HTTP server from source and bundle a GGUF model,
# serving on port 7860 (Hugging Face Spaces convention) via /start.sh.
FROM ubuntu:22.04

# Build-time only: suppress debconf prompts. ARG (unlike the original ENV)
# does not leak DEBIAN_FRONTEND into the running container's environment.
ARG DEBIAN_FRONTEND=noninteractive

WORKDIR /app

# Toolchain for compiling llama.cpp. Use apt-get (script-stable CLI, not
# `apt`), skip recommended packages, and drop the apt lists in the same
# layer so they never persist in the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential \
    ca-certificates \
    cmake \
    curl \
    git \
    && rm -rf /var/lib/apt/lists/*

# Shallow clone: full history is not needed to build.
# NOTE(review): consider pinning a tag/commit for reproducible builds.
RUN git clone --depth 1 https://github.com/ggerganov/llama.cpp

# Configure and build only the server target, in parallel.
RUN cmake -S llama.cpp -B llama.cpp/build \
    -DLLAMA_BUILD_SERVER=ON \
    -DCMAKE_BUILD_TYPE=Release \
    && cmake --build llama.cpp/build --target llama-server -j"$(nproc)"

RUN mkdir -p /models

# Ungated model, so no auth token is required for the download.
# --fail makes curl exit non-zero on an HTTP error instead of saving the
# error page as the "model".
RUN curl -L --fail -o /models/model.gguf \
https://huggingface.co/Xlnk/Xlnk-Ai/resolve/main/1.gguf

# Real GGUF sanity check: fail the build unless the file starts with the
# ASCII magic "GGUF". (The previous `head -c 4` only printed the bytes and
# succeeded no matter what had been downloaded.)
RUN [ "$(head -c 4 /models/model.gguf)" = "GGUF" ]

COPY start.sh /start.sh
RUN chmod +x /start.sh

# Run unprivileged (uid 1000, the Hugging Face Spaces convention). The model
# is chowned so the server can read it; port 7860 is unprivileged, so no
# extra capabilities are needed.
# NOTE(review): assumes start.sh needs no root and no writes outside $HOME —
# verify against start.sh.
RUN useradd -m -u 1000 app && chown app:app /models/model.gguf
USER app

EXPOSE 7860
CMD ["/start.sh"]