aliroohan179 committed on
Commit
3d50cb4
·
verified ·
1 Parent(s): 3312e6e

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +17 -11
Dockerfile CHANGED
@@ -1,31 +1,37 @@
1
  FROM ubuntu:22.04
2
 
3
- # Set non-interactive mode for apt
4
  ENV DEBIAN_FRONTEND=noninteractive
5
 
6
  # Install system dependencies
7
  RUN apt update && apt install -y \
 
 
8
  build-essential \
9
  cmake \
10
- git \
11
  curl \
 
12
  python3 \
13
  python3-pip \
14
- git-lfs \
15
  ca-certificates \
16
- wget \
17
- && git lfs install
 
 
 
18
 
19
  # Clone llama.cpp
20
  WORKDIR /app
21
  RUN git clone https://github.com/ggerganov/llama.cpp.git
22
- WORKDIR /app/llama.cpp
23
 
24
- # Build llama-server
25
- RUN make server LLAMA_SERVER_BUILD=1
 
 
 
26
 
27
- # Expose the default llama-server port
28
  EXPOSE 8080
29
 
30
- # Run llama-server and download model from Hugging Face
31
- CMD ["./server", "-m", "ggml-org/SmolVLM-500M-Instruct-GGUF", "-hf"]
 
# syntax=docker/dockerfile:1
FROM ubuntu:22.04

# Install build and runtime dependencies.
# DEBIAN_FRONTEND is set inline (not via ENV) so it does not leak into the
# runtime environment; apt lists are removed in the same layer so the cache
# never bloats the image. libcurl4-openssl-dev is required for llama.cpp's
# Hugging Face download support (-hf flag).
RUN DEBIAN_FRONTEND=noninteractive apt-get update && \
    apt-get install -y --no-install-recommends \
        build-essential \
        ca-certificates \
        cmake \
        curl \
        git \
        git-lfs \
        libcurl4-openssl-dev \
        libssl-dev \
        python3 \
        python3-pip \
        wget \
    && rm -rf /var/lib/apt/lists/*

# Initialize Git LFS (needed if large model files are ever pulled via git).
RUN git lfs install

# Clone llama.cpp
WORKDIR /app
RUN git clone https://github.com/ggerganov/llama.cpp.git

# Build llama-server with CMake.
# NOTE: -DLLAMA_SERVER=ON is not a real llama.cpp option; the server target
# is built by default and is named `llama-server` (the old `server` binary
# was renamed). LLAMA_CURL enables model download from Hugging Face.
WORKDIR /app/llama.cpp
RUN cmake -B build -DLLAMA_CURL=ON && \
    cmake --build build --config Release -j --target llama-server

# Expose default llama-server port (documentation only; publish with -p)
EXPOSE 8080

# Run the server. -hf takes the Hugging Face repo id and downloads the model
# at startup (-m expects a local file path, so it cannot take a repo id).
# Bind to 0.0.0.0 so port 8080 is reachable from outside the container.
CMD ["./build/bin/llama-server", "-hf", "ggml-org/SmolVLM-500M-Instruct-GGUF", "--host", "0.0.0.0", "--port", "8080"]