aliroohan179 committed on
Commit
4b22690
·
verified ·
1 Parent(s): b74f4a9

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +7 -5
Dockerfile CHANGED
@@ -17,20 +17,22 @@ RUN apt update && apt install -y \
17
 
18
  RUN git lfs install
19
 
20
- # Clone a known working version of llama.cpp
21
  WORKDIR /app
22
  RUN git clone https://github.com/ggerganov/llama.cpp.git
23
  WORKDIR /app/llama.cpp
24
- RUN git checkout 5f4d0f7 # ← replace with any working recent commit
25
 
26
- # Build llama-server using CMake
 
 
 
27
  RUN mkdir build && cd build && \
28
  cmake .. -DLLAMA_SERVER=ON && \
29
  cmake --build . --config Release && \
30
  cp ./bin/server /usr/local/bin/llama-server
31
 
32
- # Expose port
33
  EXPOSE 8080
34
 
35
- # ✅ Now you can run like locally
36
  CMD ["llama-server", "-hf", "ggml-org/SmolVLM-500M-Instruct-GGUF"]
 
17
 
18
  RUN git lfs install
19
 
20
+ # Clone llama.cpp
21
  WORKDIR /app
22
  RUN git clone https://github.com/ggerganov/llama.cpp.git
23
  WORKDIR /app/llama.cpp
 
24
 
25
+ # Checkout stable or latest release (optional)
26
+ # RUN git checkout tags/<release-tag>   # llama.cpp release tags look like b4658
27
+
28
+ # Build with server support
29
  RUN mkdir build && cd build && \
30
  cmake .. -DLLAMA_SERVER=ON && \
31
  cmake --build . --config Release && \
32
  cp ./bin/server /usr/local/bin/llama-server
33
 
34
+ # Expose the llama-server default port
35
  EXPOSE 8080
36
 
37
+ # ✅ Now use the same command as your local machine
38
  CMD ["llama-server", "-hf", "ggml-org/SmolVLM-500M-Instruct-GGUF"]