zazaman committed on
Commit
8ae95d2
·
1 Parent(s): b880cfc

Disable CURL in llama.cpp build (not needed for local inference)

Browse files
Files changed (1) hide show
  1. Dockerfile +2 -1
Dockerfile CHANGED
@@ -16,12 +16,13 @@ RUN apt-get update && apt-get install -y \
16
 
17
  # Compile llama.cpp from source (for translation feature)
18
  # This ensures compatibility with the container's architecture
 
19
  RUN cd /tmp && \
20
  git clone --depth 1 --branch master https://github.com/ggerganov/llama.cpp.git && \
21
  cd llama.cpp && \
22
  mkdir build && \
23
  cd build && \
24
- cmake .. -DCMAKE_BUILD_TYPE=Release && \
25
  cmake --build . --config Release -j$(nproc) && \
26
  cp bin/main /usr/local/bin/llama-main && \
27
  chmod 755 /usr/local/bin/llama-main && \
 
16
 
17
  # Compile llama.cpp from source (for translation feature)
18
  # This ensures compatibility with the container's architecture
19
+ # Disable CURL since we don't need it for local GGUF model inference
20
  RUN cd /tmp && \
21
  git clone --depth 1 --branch master https://github.com/ggerganov/llama.cpp.git && \
22
  cd llama.cpp && \
23
  mkdir build && \
24
  cd build && \
25
+ cmake .. -DCMAKE_BUILD_TYPE=Release -DLLAMA_CURL=OFF && \
26
  cmake --build . --config Release -j$(nproc) && \
27
  cp bin/main /usr/local/bin/llama-main && \
28
  chmod 755 /usr/local/bin/llama-main && \