PreethiCarmelBosco committed on
Commit
c7007a6
·
verified ·
1 Parent(s): 236a3c7

with venv to resolve errors

Browse files
Files changed (1) hide show
  1. Dockerfile +10 -9
Dockerfile CHANGED
@@ -3,27 +3,28 @@ FROM ghcr.io/abetlen/llama-cpp-python:latest
3
 
4
  WORKDIR /app
5
 
6
- # The base image already has llama-cpp-python,
7
- # so we only need to install huggingface_hub
8
- RUN pip install huggingface_hub
9
-
10
- # --- Model Download ---
11
  # Copy the download script into the container
12
  COPY download_model.py .
13
 
14
  # Make the HF_TOKEN secret available as an argument
15
- # This will be passed in by the HF Spaces platform
16
  ARG HF_TOKEN
17
- # Run the script to download the model
 
 
 
 
18
  RUN --mount=type=secret,id=HF_TOKEN \
 
 
 
19
  python download_model.py
20
 
21
  # --- Server Runtime ---
22
  # Expose port 8000 (which we defined in README.md)
23
  EXPOSE 8000
24
 
25
- # This is the command that will run when the container starts
26
- # It reads the API_KEY secret from the environment
27
  CMD [ \
28
  "python", \
29
  "-m", "llama_cpp.server", \
 
3
 
4
  WORKDIR /app
5
 
 
 
 
 
 
6
  # Copy the download script into the container
7
  COPY download_model.py .
8
 
9
  # Make the HF_TOKEN secret available as an argument
 
10
  ARG HF_TOKEN
11
+
12
+ # --- FIX: Use a temporary virtual env to install dependencies ---
13
+ # This creates a venv, installs huggingface_hub inside it,
14
+ # runs the download script, and then this venv is discarded.
15
+ # This prevents our pip install from breaking the base image.
16
  RUN --mount=type=secret,id=HF_TOKEN \
17
+ python -m venv /tmp/downloader-venv && \
18
+ . /tmp/downloader-venv/bin/activate && \
19
+ pip install huggingface_hub && \
20
  python download_model.py
21
 
22
  # --- Server Runtime ---
23
  # Expose port 8000 (which we defined in README.md)
24
  EXPOSE 8000
25
 
26
+ # This command runs in the base image's original environment
27
+ # which should be stable and correctly linked.
28
  CMD [ \
29
  "python", \
30
  "-m", "llama_cpp.server", \