NS-Genai committed on
Commit
f65e995
·
verified ·
1 Parent(s): feb6f10

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +9 -14
Dockerfile CHANGED
@@ -1,22 +1,17 @@
1
- # Use the official image which has the library correctly installed
2
  FROM ghcr.io/abetlen/llama-cpp-python:latest
3
 
4
- # Set the working directory
5
- WORKDIR /app
6
 
7
- # 1. DO NOT use "COPY . ." or "COPY . /app"
8
- # This prevents copying any local broken/empty llama_cpp folders.
 
9
 
10
- # 2. ONLY copy the model file.
11
- # Ensure 'model/gemma-3-finetuned.Q4_K_M.gguf' exists in your HF Space "Files" tab.
12
- # We rename it to 'model.gguf' inside the container for simplicity.
13
- COPY model/gemma-3-finetuned.Q4_K_M.gguf /app/model/model.gguf
14
-
15
- # 3. Set environment variables for the server
16
  ENV HOST=0.0.0.0
17
  ENV PORT=7860
18
- ENV MODEL=/app/model/model.gguf
19
 
20
  # 4. Start the server
21
- # This runs the installed library from the system paths, ignoring /app
22
- CMD ["python3", "-m", "llama_cpp.server", "--model", "/app/model/model.gguf", "--host", "0.0.0.0", "--port", "7860", "--n_ctx", "2048"]
 
1
# syntax=docker/dockerfile:1

# Use the official llama-cpp-python server image (library pre-installed).
# NOTE(review): ":latest" is not reproducible — pin a version tag or digest
# once a known-good one is confirmed (hadolint DL3007).
FROM ghcr.io/abetlen/llama-cpp-python:latest

# 1. Use a neutral working directory so nothing here can shadow the
#    library installed by the base image.
WORKDIR /workspace

# 2. Copy ONLY the model file — never "COPY . .", which could bring in a
#    broken/empty local llama_cpp folder and shadow the system install.
#    The file is renamed to model.gguf inside the container for simplicity.
COPY model/gemma-3-finetuned.Q4_K_M.gguf /workspace/model.gguf

# 3. Runtime configuration, grouped into a single ENV layer (key=value form).
#    HF Spaces expects the app to listen on port 7860.
ENV HOST=0.0.0.0 \
    PORT=7860 \
    MODEL=/workspace/model.gguf

# Document the serving port (EXPOSE is metadata only; it publishes nothing).
EXPOSE 7860

# 4. Start the server. Exec (JSON-array) form: the server runs as PID 1 and
#    receives SIGTERM directly on container stop. Flags are passed explicitly
#    because exec form performs no $VAR expansion.
CMD ["python3", "-m", "llama_cpp.server", "--model", "/workspace/model.gguf", "--host", "0.0.0.0", "--port", "7860", "--n_ctx", "2048"]