srivatsavdamaraju committed on
Commit
b1189b4
·
verified ·
1 Parent(s): 9a40338

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +11 -24
Dockerfile CHANGED
@@ -1,22 +1,3 @@
1
- # # Use an Ubuntu base image
2
- # FROM ubuntu:22.04
3
-
4
- # # Install necessary packages (update as needed)
5
- # RUN apt-get update && apt-get install -y \
6
- # build-essential \
7
- # cmake \
8
- # git \
9
- # && rm -rf /var/lib/apt/lists/*
10
-
11
- # # Set work directory and copy llama.cpp files
12
- # WORKDIR /app
13
- # COPY . /app
14
-
15
- # # Set environment variable to the ./build directory
16
- # ENV LLAMA_CPP_PATH=/app/build
17
-
18
- # # Simple command to check if env is set and print "OK"
19
- # CMD ["/bin/bash", "-c", "if [ -d \"$LLAMA_CPP_PATH\" ]; then echo OK; else echo 'Directory not found'; exit 1; fi"]
20
  FROM ubuntu:22.04
21
 
22
  # Install dependencies
@@ -30,14 +11,20 @@ RUN apt-get update && apt-get install -y \
30
  # Set working directory
31
  WORKDIR /app
32
 
33
- # Copy llama.cpp project (assumes you have build/bin/llama-server)
34
  COPY . /app
35
 
36
- # Set environment variable to the build directory
37
  ENV LLAMA_CPP_PATH=/app/build
38
 
39
- # Expose the default llama-server port
40
  EXPOSE 8080
41
 
42
- # Run the server from the correct path
43
- CMD ["/bin/bash", "-c", "if [ -x \"$LLAMA_CPP_PATH/bin/llama-server\" ]; then echo OK && $LLAMA_CPP_PATH/bin/llama-server -hf ggml-org/SmolVLM-500M-Instruct-GGUF; else echo 'llama-server not found'; exit 1; fi"]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  FROM ubuntu:22.04
2
 
3
  # Install dependencies
 
11
# Set working directory (created automatically if missing)
WORKDIR /app

# Copy the entire llama.cpp repo, including built binaries.
# NOTE(review): COPY . pulls in the whole build context — keep a .dockerignore
# (.git, logs, editor files) to shrink the context and avoid cache busts.
COPY . /app

# Location of the pre-built llama.cpp binaries inside the image
ENV LLAMA_CPP_PATH=/app/build

# llama-server listens on 8080 by default; EXPOSE is documentation only —
# publish with `docker run -p 8080:8080`.
EXPOSE 8080

# Run llama-server if the binary exists; otherwise fail the container with a
# clear message. `exec` replaces the shell so the server becomes PID 1 and
# receives SIGTERM from `docker stop` (without it, bash stays PID 1 and the
# server is killed ungracefully after the stop timeout). The binary path is
# quoted in case LLAMA_CPP_PATH ever contains spaces.
CMD ["/bin/bash", "-c", "\
    if [ -x \"$LLAMA_CPP_PATH/bin/llama-server\" ]; then \
        echo OK; \
        exec \"$LLAMA_CPP_PATH/bin/llama-server\" -hf ggml-org/SmolVLM-500M-Instruct-GGUF; \
    else \
        echo 'llama-server not found'; exit 1; \
    fi"]